/*-------------------------------------------------------------------------
 *
 * nodeGatherMerge.c
 *		Scan a plan in multiple workers, and do order-preserving merge.
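 *
 * A Gather Merge node assumes that the tuples arriving from each worker
 * (and from the leader's own scan) are already sorted by the node's sort
 * keys; it merges those streams into a single, order-preserving stream.
 * A typical plan shape (illustrative only) is:
 *
 *		Gather Merge
 *		  ->  Sort
 *				->  Parallel Seq Scan on tab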
 *
 * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/executor/nodeGatherMerge.c
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "access/relscan.h"
#include "access/xact.h"
#include "executor/execdebug.h"
#include "executor/execParallel.h"
#include "executor/nodeGatherMerge.h"
#include "executor/nodeSubplan.h"
#include "executor/tqueue.h"
#include "lib/binaryheap.h"
#include "miscadmin.h"
#include "optimizer/planmain.h"
#include "utils/memutils.h"
#include "utils/rel.h"

/*
 * When we read tuples from workers, it's a good idea to read several at once
 * for efficiency when possible: this minimizes context-switching overhead.
 * But reading too many at a time wastes memory without improving performance.
 * We'll read up to MAX_TUPLE_STORE tuples (in addition to the first one).
 */
#define MAX_TUPLE_STORE 10

/*
 * Pending-tuple array for each worker.  This holds additional tuples that
 * we were able to fetch from the worker, but can't process yet.  In addition,
 * this struct holds the "done" flag indicating the worker is known to have
 * no more tuples.  (We do not use this struct for the leader; we don't keep
 * any pending tuples for the leader, and the need_to_scan_locally flag serves
 * as its "done" indicator.)
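 *
 * (Note: the code maintains 0 <= readCounter <= nTuples <= MAX_TUPLE_STORE,
 * though that invariant is nowhere asserted explicitly.)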
 */
typedef struct GMReaderTupleBuffer
{
	HeapTuple  *tuple;			/* array of length MAX_TUPLE_STORE */
	int			nTuples;		/* number of tuples currently stored */
	int			readCounter;	/* index of next tuple to extract */
	bool		done;			/* true if reader is known exhausted */
} GMReaderTupleBuffer;

static TupleTableSlot *ExecGatherMerge(PlanState *pstate);
static int32 heap_compare_slots(Datum a, Datum b, void *arg);
static TupleTableSlot *gather_merge_getnext(GatherMergeState *gm_state);
static HeapTuple gm_readnext_tuple(GatherMergeState *gm_state, int nreader,
				  bool nowait, bool *done);
static void ExecShutdownGatherMergeWorkers(GatherMergeState *node);
static void gather_merge_setup(GatherMergeState *gm_state);
static void gather_merge_init(GatherMergeState *gm_state);
static void gather_merge_clear_tuples(GatherMergeState *gm_state);
static bool gather_merge_readnext(GatherMergeState *gm_state, int reader,
					  bool nowait);
static void load_tuple_array(GatherMergeState *gm_state, int reader);

/* ----------------------------------------------------------------
 *		ExecInitGatherMerge
 * ----------------------------------------------------------------
 */
GatherMergeState *
ExecInitGatherMerge(GatherMerge *node, EState *estate, int eflags)
{
	GatherMergeState *gm_state;
	Plan	   *outerNode;
	TupleDesc	tupDesc;

	/* Gather Merge node doesn't have innerPlan node. */
	Assert(innerPlan(node) == NULL);

	/*
	 * create state structure
	 */
	gm_state = makeNode(GatherMergeState);
	gm_state->ps.plan = (Plan *) node;
	gm_state->ps.state = estate;
	gm_state->ps.ExecProcNode = ExecGatherMerge;

	gm_state->initialized = false;
	gm_state->gm_initialized = false;
	gm_state->tuples_needed = -1;

	/*
	 * Miscellaneous initialization
	 *
	 * create expression context for node
	 */
	ExecAssignExprContext(estate, &gm_state->ps);

	/*
	 * GatherMerge doesn't support checking a qual (it's always more efficient
	 * to do it in the child node).
	 */
	Assert(!node->plan.qual);

	/*
	 * now initialize outer plan
	 */
	outerNode = outerPlan(node);
	outerPlanState(gm_state) = ExecInitNode(outerNode, estate, eflags);

	/*
	 * Store the tuple descriptor into gather merge state, so we can use it
	 * while initializing the gather merge slots.
	 */
	tupDesc = ExecGetResultType(outerPlanState(gm_state));
	gm_state->tupDesc = tupDesc;

	/*
	 * Initialize result slot, type and projection.
	 */
	ExecInitResultTupleSlotTL(estate, &gm_state->ps);
	ExecConditionalAssignProjectionInfo(&gm_state->ps, tupDesc, OUTER_VAR);

	/*
	 * initialize sort-key information
	 */
	if (node->numCols)
	{
		int			i;

		gm_state->gm_nkeys = node->numCols;
		gm_state->gm_sortkeys =
			palloc0(sizeof(SortSupportData) * node->numCols);

		for (i = 0; i < node->numCols; i++)
		{
			SortSupport sortKey = gm_state->gm_sortkeys + i;

			sortKey->ssup_cxt = CurrentMemoryContext;
			sortKey->ssup_collation = node->collations[i];
			sortKey->ssup_nulls_first = node->nullsFirst[i];
			sortKey->ssup_attno = node->sortColIdx[i];

			/*
			 * We don't perform abbreviated key conversion here, for the same
			 * reasons that it isn't used in MergeAppend.
			 */
			sortKey->abbreviate = false;

			PrepareSortSupportFromOrderingOp(node->sortOperators[i], sortKey);
		}
	}

	/* Now allocate the workspace for gather merge */
	gather_merge_setup(gm_state);

	return gm_state;
}

/* ----------------------------------------------------------------
 *		ExecGatherMerge(node)
 *
 *		Scans the relation via multiple workers and returns
 *		the next qualifying tuple.
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecGatherMerge(PlanState *pstate)
{
	GatherMergeState *node = castNode(GatherMergeState, pstate);
	TupleTableSlot *slot;
	ExprContext *econtext;

	CHECK_FOR_INTERRUPTS();

	/*
	 * As with Gather, we don't launch workers until this node is actually
	 * executed.
	 */
	if (!node->initialized)
	{
		EState	   *estate = node->ps.state;
		GatherMerge *gm = castNode(GatherMerge, node->ps.plan);

		/*
		 * Sometimes we might have to run without parallelism; but if parallel
		 * mode is active then we can try to fire up some workers.
		 */
		if (gm->num_workers > 0 && estate->es_use_parallel_mode)
		{
			ParallelContext *pcxt;

			/* Initialize, or re-initialize, shared state needed by workers. */
			if (!node->pei)
				node->pei = ExecInitParallelPlan(node->ps.lefttree,
												 estate,
												 gm->initParam,
												 gm->num_workers,
												 node->tuples_needed);
			else
				ExecParallelReinitialize(node->ps.lefttree,
										 node->pei,
										 gm->initParam);

			/* Try to launch workers. */
			pcxt = node->pei->pcxt;
			LaunchParallelWorkers(pcxt);
			/* We save # workers launched for the benefit of EXPLAIN */
			node->nworkers_launched = pcxt->nworkers_launched;

			/* Set up tuple queue readers to read the results. */
			if (pcxt->nworkers_launched > 0)
			{
				ExecParallelCreateReaders(node->pei);
				/* Make a working array showing the active readers */
				node->nreaders = pcxt->nworkers_launched;
				node->reader = (TupleQueueReader **)
					palloc(node->nreaders * sizeof(TupleQueueReader *));
				memcpy(node->reader, node->pei->reader,
					   node->nreaders * sizeof(TupleQueueReader *));
			}
			else
			{
				/* No workers? Then never mind. */
				node->nreaders = 0;
				node->reader = NULL;
			}
		}

		/* allow leader to participate if enabled or no choice */
		if (parallel_leader_participation || node->nreaders == 0)
			node->need_to_scan_locally = true;
		node->initialized = true;
	}

	/*
	 * Reset per-tuple memory context to free any expression evaluation
	 * storage allocated in the previous tuple cycle.
	 */
	econtext = node->ps.ps_ExprContext;
	ResetExprContext(econtext);

	/*
	 * Get next tuple, either from one of our workers, or by running the plan
	 * ourselves.
	 */
	slot = gather_merge_getnext(node);
	if (TupIsNull(slot))
		return NULL;

	/* If no projection is required, we're done. */
	if (node->ps.ps_ProjInfo == NULL)
		return slot;

	/*
	 * Form the result tuple using ExecProject(), and return it.
	 */
	econtext->ecxt_outertuple = slot;
	return ExecProject(node->ps.ps_ProjInfo);
}

/* ----------------------------------------------------------------
 *		ExecEndGatherMerge
 *
 *		frees any storage allocated through C routines.
 * ----------------------------------------------------------------
 */
void
ExecEndGatherMerge(GatherMergeState *node)
{
	ExecEndNode(outerPlanState(node));	/* let children clean up first */
	ExecShutdownGatherMerge(node);
	ExecFreeExprContext(&node->ps);
	ExecClearTuple(node->ps.ps_ResultTupleSlot);
}

/* ----------------------------------------------------------------
 *		ExecShutdownGatherMerge
 *
 *		Destroy the setup for parallel workers including parallel context.
 * ----------------------------------------------------------------
 */
void
ExecShutdownGatherMerge(GatherMergeState *node)
{
	ExecShutdownGatherMergeWorkers(node);

	/* Now destroy the parallel context. */
	if (node->pei != NULL)
	{
		ExecParallelCleanup(node->pei);
		node->pei = NULL;
	}
}

/* ----------------------------------------------------------------
 *		ExecShutdownGatherMergeWorkers
 *
 *		Stop all the parallel workers.
 * ----------------------------------------------------------------
 */
static void
ExecShutdownGatherMergeWorkers(GatherMergeState *node)
{
	if (node->pei != NULL)
		ExecParallelFinish(node->pei);

	/* Flush local copy of reader array */
	if (node->reader)
		pfree(node->reader);
	node->reader = NULL;
}

/* ----------------------------------------------------------------
 *		ExecReScanGatherMerge
 *
 *		Prepare to re-scan the result of a GatherMerge.
 * ----------------------------------------------------------------
 */
void
ExecReScanGatherMerge(GatherMergeState *node)
{
	GatherMerge *gm = (GatherMerge *) node->ps.plan;
	PlanState  *outerPlan = outerPlanState(node);

	/* Make sure any existing workers are gracefully shut down */
	ExecShutdownGatherMergeWorkers(node);

	/* Free any unused tuples, so we don't leak memory across rescans */
	gather_merge_clear_tuples(node);

	/* Mark node so that shared state will be rebuilt at next call */
	node->initialized = false;
	node->gm_initialized = false;

	/*
	 * Set child node's chgParam to tell it that the next scan might deliver a
	 * different set of rows within the leader process.  (The overall rowset
	 * shouldn't change, but the leader process's subset might; hence nodes
	 * between here and the parallel table scan node mustn't optimize on the
	 * assumption of an unchanging rowset.)
	 */
	if (gm->rescan_param >= 0)
		outerPlan->chgParam = bms_add_member(outerPlan->chgParam,
											 gm->rescan_param);

	/*
	 * If chgParam of the subnode is not null, then the plan will be
	 * re-scanned by the first ExecProcNode.  Note: because this does nothing
	 * if we have a rescan_param, it's currently guaranteed that
	 * parallel-aware child nodes will not see a ReScan call until after they
	 * get a ReInitializeDSM call.  That ordering might not be something to
	 * rely on, though.  A good rule of thumb is that ReInitializeDSM should
	 * reset only shared state, ReScan should reset only local state, and
	 * anything that depends on both of those steps being finished must wait
	 * until the first ExecProcNode call.
	 */
	if (outerPlan->chgParam == NULL)
		ExecReScan(outerPlan);
}

/*
 * Set up the data structures that we'll need for Gather Merge.
 *
 * We allocate these once on the basis of gm->num_workers, which is an
 * upper bound for the number of workers we'll actually have.  During
 * a rescan, we reset the structures to empty.  This approach makes it
 * simple to avoid leaking memory across rescans.
 *
 * In the gm_slots[] array, index 0 is for the leader, and indexes 1 to n
 * are for workers.  The values placed into gm_heap correspond to indexes
 * in gm_slots[].  The gm_tuple_buffers[] array, however, is indexed from
 * 0 to n-1; it has no entry for the leader.
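 *
 * For example (illustrative only), with gm->num_workers = 2:
 *
 *		gm_slots[0]				the leader's own output slot
 *		gm_slots[1..2]			slots holding the workers' current tuples
 *		gm_tuple_buffers[0..1]	pending-tuple arrays for workers 1 and 2
 *		gm_heap					holds Int32 indexes 0..2 into gm_slots[]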
 */
static void
gather_merge_setup(GatherMergeState *gm_state)
{
	GatherMerge *gm = castNode(GatherMerge, gm_state->ps.plan);
	int			nreaders = gm->num_workers;
	int			i;

	/*
	 * Allocate gm_slots for the number of workers + one more slot for leader.
	 * Slot 0 is always for the leader.  Leader always calls ExecProcNode() to
	 * read the tuple, and then stores it directly into its gm_slots entry.
	 * For other slots, code below will call ExecInitExtraTupleSlot() to
	 * create a slot for the worker's results.  Note that during any single
	 * scan, we might have fewer than num_workers available workers, in which
	 * case the extra array entries go unused.
	 */
	gm_state->gm_slots = (TupleTableSlot **)
		palloc0((nreaders + 1) * sizeof(TupleTableSlot *));

	/* Allocate the tuple slot and tuple array for each worker */
	gm_state->gm_tuple_buffers = (GMReaderTupleBuffer *)
		palloc0(nreaders * sizeof(GMReaderTupleBuffer));

	for (i = 0; i < nreaders; i++)
	{
		/* Allocate the tuple array with length MAX_TUPLE_STORE */
		gm_state->gm_tuple_buffers[i].tuple =
			(HeapTuple *) palloc0(sizeof(HeapTuple) * MAX_TUPLE_STORE);

		/* Initialize tuple slot for worker */
		gm_state->gm_slots[i + 1] =
			ExecInitExtraTupleSlot(gm_state->ps.state, gm_state->tupDesc);
	}

	/* Allocate the resources for the merge */
	gm_state->gm_heap = binaryheap_allocate(nreaders + 1,
											heap_compare_slots,
											gm_state);
}

/*
 * Initialize the Gather Merge.
 *
 * Reset data structures to ensure they're empty.  Then pull at least one
 * tuple from leader + each worker (or set its "done" indicator), and set up
 * the heap.
 */
static void
gather_merge_init(GatherMergeState *gm_state)
{
	int			nreaders = gm_state->nreaders;
	bool		nowait = true;
	int			i;

	/* Assert that gather_merge_setup made enough space */
	Assert(nreaders <= castNode(GatherMerge, gm_state->ps.plan)->num_workers);

	/* Reset leader's tuple slot to empty */
	gm_state->gm_slots[0] = NULL;

	/* Reset the tuple slot and tuple array for each worker */
	for (i = 0; i < nreaders; i++)
	{
		/* Reset tuple array to empty */
		gm_state->gm_tuple_buffers[i].nTuples = 0;
		gm_state->gm_tuple_buffers[i].readCounter = 0;
		/* Reset done flag to not-done */
		gm_state->gm_tuple_buffers[i].done = false;
		/* Ensure output slot is empty */
		ExecClearTuple(gm_state->gm_slots[i + 1]);
	}

	/* Reset binary heap to empty */
	binaryheap_reset(gm_state->gm_heap);

	/*
	 * First, try to read a tuple from each worker (including leader) in
	 * nowait mode.  After this, if not all workers were able to produce a
	 * tuple (or a "done" indication), then re-read from remaining workers,
	 * this time using wait mode.  Add all live readers (those producing at
	 * least one tuple) to the heap.
	 */
reread:
	for (i = 0; i <= nreaders; i++)
	{
		CHECK_FOR_INTERRUPTS();

		/* skip this source if already known done */
		if ((i == 0) ? gm_state->need_to_scan_locally :
			!gm_state->gm_tuple_buffers[i - 1].done)
		{
			if (TupIsNull(gm_state->gm_slots[i]))
			{
				/* Don't have a tuple yet, try to get one */
				if (gather_merge_readnext(gm_state, i, nowait))
					binaryheap_add_unordered(gm_state->gm_heap,
											 Int32GetDatum(i));
			}
			else
			{
				/*
				 * We already got at least one tuple from this worker, but
				 * might as well see if it has any more ready by now.
				 */
				load_tuple_array(gm_state, i);
			}
		}
	}

	/* need not recheck leader, since nowait doesn't matter for it */
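	/*
	 * (The leader's "read" just runs the plan synchronously via
	 * ExecProcNode, which can never report "would block", so nowait is
	 * irrelevant for reader 0.)
	 */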
	for (i = 1; i <= nreaders; i++)
	{
		if (!gm_state->gm_tuple_buffers[i - 1].done &&
			TupIsNull(gm_state->gm_slots[i]))
		{
			nowait = false;
			goto reread;
		}
	}

	/* Now heapify the heap. */
	binaryheap_build(gm_state->gm_heap);

	gm_state->gm_initialized = true;
}

/*
 * Clear out the tuple table slot, and any unused pending tuples,
 * for each gather merge input.
 */
static void
gather_merge_clear_tuples(GatherMergeState *gm_state)
{
	int			i;

	for (i = 0; i < gm_state->nreaders; i++)
	{
		GMReaderTupleBuffer *tuple_buffer = &gm_state->gm_tuple_buffers[i];

		while (tuple_buffer->readCounter < tuple_buffer->nTuples)
			heap_freetuple(tuple_buffer->tuple[tuple_buffer->readCounter++]);

		ExecClearTuple(gm_state->gm_slots[i + 1]);
	}
}

/*
 * Read the next tuple for gather merge.
 *
 * Fetch the sorted tuple out of the heap.
 */
static TupleTableSlot *
gather_merge_getnext(GatherMergeState *gm_state)
{
	int			i;

	if (!gm_state->gm_initialized)
	{
		/*
		 * First time through: pull the first tuple from each participant, and
		 * set up the heap.
		 */
		gather_merge_init(gm_state);
	}
	else
	{
		/*
		 * Otherwise, pull the next tuple from whichever participant we
		 * returned from last time, and reinsert that participant's index into
		 * the heap, because it might now compare differently against the
		 * other elements of the heap.
		 */
		i = DatumGetInt32(binaryheap_first(gm_state->gm_heap));

		if (gather_merge_readnext(gm_state, i, false))
			binaryheap_replace_first(gm_state->gm_heap, Int32GetDatum(i));
		else
		{
			/* reader exhausted, remove it from heap */
			(void) binaryheap_remove_first(gm_state->gm_heap);
		}
	}

	if (binaryheap_empty(gm_state->gm_heap))
	{
		/* All the queues are exhausted, and so is the heap */
		gather_merge_clear_tuples(gm_state);
		return NULL;
	}
	else
	{
		/* Return next tuple from whichever participant has the leading one */
		i = DatumGetInt32(binaryheap_first(gm_state->gm_heap));
		return gm_state->gm_slots[i];
	}
}

/*
 * Read tuple(s) for given reader in nowait mode, and load into its tuple
 * array, until we have MAX_TUPLE_STORE of them or would have to block.
 */
static void
load_tuple_array(GatherMergeState *gm_state, int reader)
{
	GMReaderTupleBuffer *tuple_buffer;
	int			i;

	/* Don't do anything if this is the leader. */
	if (reader == 0)
		return;

	tuple_buffer = &gm_state->gm_tuple_buffers[reader - 1];

	/* If there's nothing in the array, reset the counters to zero. */
	if (tuple_buffer->nTuples == tuple_buffer->readCounter)
		tuple_buffer->nTuples = tuple_buffer->readCounter = 0;

	/* Try to fill additional slots in the array. */
	for (i = tuple_buffer->nTuples; i < MAX_TUPLE_STORE; i++)
	{
		HeapTuple	tuple;

		tuple = gm_readnext_tuple(gm_state,
								  reader,
								  true,
								  &tuple_buffer->done);
		if (!HeapTupleIsValid(tuple))
			break;
		tuple_buffer->tuple[i] = tuple;
		tuple_buffer->nTuples++;
	}
}

/*
 * Store the next tuple for a given reader into the appropriate slot.
 *
 * Returns true if successful, false if not (either reader is exhausted,
 * or we didn't want to wait for a tuple).  Sets done flag if reader
 * is found to be exhausted.
 */
static bool
gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait)
{
	GMReaderTupleBuffer *tuple_buffer;
	HeapTuple	tup;

	/*
	 * If we're being asked to generate a tuple from the leader, then we just
	 * call ExecProcNode as normal to produce one.
	 */
	if (reader == 0)
	{
		if (gm_state->need_to_scan_locally)
		{
			PlanState  *outerPlan = outerPlanState(gm_state);
			TupleTableSlot *outerTupleSlot;
			EState	   *estate = gm_state->ps.state;

			/* Install our DSA area while executing the plan. */
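			/*
			 * (Parallel-aware nodes running in the leader need es_query_dsa
			 * to reach the per-query dynamic shared memory area.)
			 */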
			estate->es_query_dsa = gm_state->pei ? gm_state->pei->area : NULL;
			outerTupleSlot = ExecProcNode(outerPlan);
			estate->es_query_dsa = NULL;

			if (!TupIsNull(outerTupleSlot))
			{
				gm_state->gm_slots[0] = outerTupleSlot;
				return true;
			}
			/* need_to_scan_locally serves as "done" flag for leader */
			gm_state->need_to_scan_locally = false;
		}
		return false;
	}

	/* Otherwise, check the state of the relevant tuple buffer. */
	tuple_buffer = &gm_state->gm_tuple_buffers[reader - 1];

	if (tuple_buffer->nTuples > tuple_buffer->readCounter)
	{
		/* Return any tuple previously read that is still buffered. */
		tup = tuple_buffer->tuple[tuple_buffer->readCounter++];
	}
	else if (tuple_buffer->done)
	{
		/* Reader is known to be exhausted. */
		return false;
	}
	else
	{
		/* Read and buffer next tuple. */
		tup = gm_readnext_tuple(gm_state,
								reader,
								nowait,
								&tuple_buffer->done);
		if (!HeapTupleIsValid(tup))
			return false;

		/*
		 * Attempt to read more tuples in nowait mode and store them in the
		 * pending-tuple array for the reader.
		 */
		load_tuple_array(gm_state, reader);
	}

	Assert(HeapTupleIsValid(tup));

	/* Build the TupleTableSlot for the given tuple */
	ExecStoreTuple(tup,			/* tuple to store */
				   gm_state->gm_slots[reader],	/* slot in which to store the
												 * tuple */
				   InvalidBuffer,	/* no buffer associated with tuple */
				   true);		/* pfree tuple when done with it */

	return true;
}

/*
 * Attempt to read a tuple from given worker.
 */
static HeapTuple
gm_readnext_tuple(GatherMergeState *gm_state, int nreader, bool nowait,
				  bool *done)
{
	TupleQueueReader *reader;
	HeapTuple	tup;

	/* Check for async events, particularly messages from workers. */
	CHECK_FOR_INTERRUPTS();

	/*
	 * Attempt to read a tuple.
	 *
	 * Note that TupleQueueReaderNext will just return NULL for a worker which
	 * fails to initialize.  We'll treat that worker as having produced no
	 * tuples; WaitForParallelWorkersToFinish will error out when we get
	 * there.
	 */
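	/* nreader is a 1-based gm_slots[] index; the reader[] array is 0-based */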
	reader = gm_state->reader[nreader - 1];
	tup = TupleQueueReaderNext(reader, nowait, done);

	return tup;
}

/*
 * We have one slot for each item in the heap array.  We use SlotNumber
 * to store slot indexes.  This doesn't actually provide any formal
 * type-safety, but it makes the code more self-documenting.
 */
typedef int32 SlotNumber;

/*
 * Compare the tuples in the two given slots.
 */
static int32
heap_compare_slots(Datum a, Datum b, void *arg)
{
	GatherMergeState *node = (GatherMergeState *) arg;
	SlotNumber	slot1 = DatumGetInt32(a);
	SlotNumber	slot2 = DatumGetInt32(b);

	TupleTableSlot *s1 = node->gm_slots[slot1];
	TupleTableSlot *s2 = node->gm_slots[slot2];
	int			nkey;

	Assert(!TupIsNull(s1));
	Assert(!TupIsNull(s2));

	for (nkey = 0; nkey < node->gm_nkeys; nkey++)
	{
		SortSupport sortKey = node->gm_sortkeys + nkey;
		AttrNumber	attno = sortKey->ssup_attno;
		Datum		datum1,
					datum2;
		bool		isNull1,
					isNull2;
		int			compare;

		datum1 = slot_getattr(s1, attno, &isNull1);
		datum2 = slot_getattr(s2, attno, &isNull2);

		compare = ApplySortComparator(datum1, isNull1,
									  datum2, isNull2,
									  sortKey);
		if (compare != 0)
		{
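			/*
			 * binaryheap.c keeps the largest element, per this comparator,
			 * at the heap's top; invert the result so the lowest-sorting
			 * tuple surfaces first.
			 */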
			INVERT_COMPARE_RESULT(compare);
			return compare;
		}
	}
	return 0;
}