/*-------------------------------------------------------------------------
 *
 * nodeCtescan.c
 *	  routines to handle CteScan nodes.
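 *	  All CteScan nodes that reference the same CTE share one tuplestore;
 *	  the first node to initialize becomes the "leader" and owns it.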
 *
 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/executor/nodeCtescan.c
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "executor/execdebug.h"
#include "executor/nodeCtescan.h"
#include "miscadmin.h"

static TupleTableSlot *CteScanNext(CteScanState *node);

/* ----------------------------------------------------------------
 *		CteScanNext
 *
 *		This is a workhorse for ExecCteScan
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
CteScanNext(CteScanState *node)
{
	EState	   *estate;
	ScanDirection dir;
	bool		forward;
	Tuplestorestate *tuplestorestate;
	bool		eof_tuplestore;
	TupleTableSlot *slot;

	/*
	 * get state info from node
	 */
	estate = node->ss.ps.state;
	dir = estate->es_direction;
	forward = ScanDirectionIsForward(dir);
	tuplestorestate = node->leader->cte_table;
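	/*
	 * Each CteScan node sharing this tuplestore has its own read pointer,
	 * but the tuplestore tracks a single active pointer, so reselect ours
	 * before fetching anything.
	 */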
	tuplestore_select_read_pointer(tuplestorestate, node->readptr);
	slot = node->ss.ss_ScanTupleSlot;

	/*
	 * If we are not at the end of the tuplestore, or are going backwards, try
	 * to fetch a tuple from tuplestore.
	 */
	eof_tuplestore = tuplestore_ateof(tuplestorestate);

	if (!forward && eof_tuplestore)
	{
		if (!node->leader->eof_cte)
		{
			/*
			 * When reversing direction at tuplestore EOF, the first
			 * gettupleslot call will fetch the last-added tuple; but we want
			 * to return the one before that, if possible. So do an extra
			 * fetch.
			 */
			if (!tuplestore_advance(tuplestorestate, forward))
				return NULL;	/* the tuplestore must be empty */
		}
		eof_tuplestore = false;
	}

	/*
	 * If we can fetch another tuple from the tuplestore, return it.
	 *
	 * Note: we have to use copy=true in the tuplestore_gettupleslot call,
	 * because we are sharing the tuplestore with other nodes that might write
	 * into the tuplestore before we get called again.
	 */
	if (!eof_tuplestore)
	{
		if (tuplestore_gettupleslot(tuplestorestate, forward, true, slot))
			return slot;
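		/*
		 * A failed backward fetch just means we've backed up past the start
		 * of the tuplestore; only a failed forward fetch means we've read
		 * everything stored so far and the CTE query may have more to give.
		 */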
		if (forward)
			eof_tuplestore = true;
	}

	/*
	 * If necessary, try to fetch another row from the CTE query.
	 *
	 * Note: the eof_cte state variable exists to short-circuit further calls
	 * of the CTE plan.  It's not optional, unfortunately, because some plan
	 * node types are not robust about being called again when they've already
	 * returned NULL.
	 */
	if (eof_tuplestore && !node->leader->eof_cte)
	{
		TupleTableSlot *cteslot;

		/*
		 * We can only get here with forward==true, so no need to worry about
		 * which direction the subplan will go.
		 */
		cteslot = ExecProcNode(node->cteplanstate);
		if (TupIsNull(cteslot))
		{
			node->leader->eof_cte = true;
			return NULL;
		}

		/*
		 * There are corner cases where the subplan could change which
		 * tuplestore read pointer is active, so be sure to reselect ours
		 * before storing the tuple we got.
		 */
		tuplestore_select_read_pointer(tuplestorestate, node->readptr);

		/*
		 * Append a copy of the returned tuple to tuplestore.  NOTE: because
		 * our read pointer is certainly in EOF state, its read position will
		 * move forward over the added tuple.  This is what we want.  Also,
		 * any other readers will *not* move past the new tuple, which is what
		 * they want.
		 */
		tuplestore_puttupleslot(tuplestorestate, cteslot);

		/*
		 * We MUST copy the CTE query's output tuple into our own slot. This
		 * is because other CteScan nodes might advance the CTE query before
		 * we are called again, and our output tuple must stay stable over
		 * that.
		 */
		return ExecCopySlot(slot, cteslot);
	}

	/*
	 * Nothing left ...
	 */
	return ExecClearTuple(slot);
}

/*
 * CteScanRecheck -- access method routine to recheck a tuple in EvalPlanQual
 */
static bool
CteScanRecheck(CteScanState *node, TupleTableSlot *slot)
{
	/* nothing to check */
	return true;
}

/* ----------------------------------------------------------------
 *		ExecCteScan(node)
 *
 *		Scans the CTE sequentially and returns the next qualifying tuple.
 *		We call the ExecScan() routine and pass it the appropriate
 *		access method functions.
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecCteScan(PlanState *pstate)
{
	CteScanState *node = castNode(CteScanState, pstate);

	return ExecScan(&node->ss,
					(ExecScanAccessMtd) CteScanNext,
					(ExecScanRecheckMtd) CteScanRecheck);
}


/* ----------------------------------------------------------------
 *		ExecInitCteScan
 * ----------------------------------------------------------------
 */
CteScanState *
ExecInitCteScan(CteScan *node, EState *estate, int eflags)
{
	CteScanState *scanstate;
	ParamExecData *prmdata;

	/* check for unsupported flags */
	Assert(!(eflags & EXEC_FLAG_MARK));

	/*
	 * For the moment we have to force the tuplestore to allow REWIND, because
	 * we might be asked to rescan the CTE even though upper levels didn't
	 * tell us to be prepared to do it efficiently.  Annoying, since this
	 * prevents truncation of the tuplestore.  XXX FIXME
	 *
	 * Note: if we are in an EPQ recheck plan tree, it's likely that no access
	 * to the tuplestore is needed at all, making this even more annoying.
	 * It's not worth improving that as long as all the read pointers would
	 * have REWIND anyway, but if we ever improve this logic then that aspect
	 * should be considered too.
	 */
	eflags |= EXEC_FLAG_REWIND;

	/*
	 * CteScan should not have any children.
	 */
	Assert(outerPlan(node) == NULL);
	Assert(innerPlan(node) == NULL);

	/*
	 * create new CteScanState for node
	 */
	scanstate = makeNode(CteScanState);
	scanstate->ss.ps.plan = (Plan *) node;
	scanstate->ss.ps.state = estate;
	scanstate->ss.ps.ExecProcNode = ExecCteScan;
	scanstate->eflags = eflags;
	scanstate->cte_table = NULL;
	scanstate->eof_cte = false;

	/*
	 * Find the already-initialized plan for the CTE query.
	 */
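	/* ctePlanId is 1-based; list_nth() counts from zero, hence the "- 1". */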
	scanstate->cteplanstate = (PlanState *) list_nth(estate->es_subplanstates,
													 node->ctePlanId - 1);

	/*
	 * The Param slot associated with the CTE query is used to hold a pointer
	 * to the CteScanState of the first CteScan node that initializes for
	 * this CTE.  This node will be the one that holds the shared state for
	 * all the CteScan nodes scanning this CTE, particularly the shared
	 * tuplestore.
	 */
	prmdata = &(estate->es_param_exec_vals[node->cteParam]);
	Assert(prmdata->execPlan == NULL);
	Assert(!prmdata->isnull);
	scanstate->leader = castNode(CteScanState, DatumGetPointer(prmdata->value));
	if (scanstate->leader == NULL)
	{
		/* I am the leader */
		prmdata->value = PointerGetDatum(scanstate);
		scanstate->leader = scanstate;
		scanstate->cte_table = tuplestore_begin_heap(true, false, work_mem);
		tuplestore_set_eflags(scanstate->cte_table, scanstate->eflags);
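		/*
		 * The leader just uses read pointer 0, which tuplestore_begin_heap
		 * created and tuplestore_set_eflags configured above.
		 */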
		scanstate->readptr = 0;
	}
	else
	{
		/* Not the leader */
		/* Create my own read pointer, and ensure it is at start */
		scanstate->readptr =
			tuplestore_alloc_read_pointer(scanstate->leader->cte_table,
										  scanstate->eflags);
		tuplestore_select_read_pointer(scanstate->leader->cte_table,
									   scanstate->readptr);
		tuplestore_rescan(scanstate->leader->cte_table);
	}

	/*
	 * Miscellaneous initialization
	 *
	 * create expression context for node
	 */
	ExecAssignExprContext(estate, &scanstate->ss.ps);

	/*
	 * initialize child expressions
	 */
	scanstate->ss.ps.qual =
		ExecInitQual(node->scan.plan.qual, (PlanState *) scanstate);

	/*
	 * tuple table initialization
	 */
	ExecInitResultTupleSlot(estate, &scanstate->ss.ps);
	ExecInitScanTupleSlot(estate, &scanstate->ss);

	/*
	 * The scan tuple type (ie, the rowtype we expect to find in the work
	 * table) is the same as the result rowtype of the CTE query.
	 */
	ExecAssignScanType(&scanstate->ss,
					   ExecGetResultType(scanstate->cteplanstate));

	/*
	 * Initialize result tuple type and projection info.
	 */
	ExecAssignResultTypeFromTL(&scanstate->ss.ps);
	ExecAssignScanProjectionInfo(&scanstate->ss);

	return scanstate;
}

/* ----------------------------------------------------------------
 *		ExecEndCteScan
 *
 *		frees any storage allocated through C routines.
 * ----------------------------------------------------------------
 */
void
ExecEndCteScan(CteScanState *node)
{
	/*
	 * Free exprcontext
	 */
	ExecFreeExprContext(&node->ss.ps);

	/*
	 * clean out the tuple table
	 */
	ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
	ExecClearTuple(node->ss.ss_ScanTupleSlot);

	/*
	 * If I am the leader, free the tuplestore.
	 */
	if (node->leader == node)
	{
		tuplestore_end(node->cte_table);
		node->cte_table = NULL;
	}
}

/* ----------------------------------------------------------------
 *		ExecReScanCteScan
 *
 *		Rescans the relation.
 * ----------------------------------------------------------------
 */
void
ExecReScanCteScan(CteScanState *node)
{
	Tuplestorestate *tuplestorestate = node->leader->cte_table;

	ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);

	ExecScanReScan(&node->ss);

	/*
	 * Clear the tuplestore if a new scan of the underlying CTE is required.
	 * This implicitly resets all the tuplestore's read pointers.  Note that
	 * multiple CTE nodes might redundantly clear the tuplestore; that's OK,
	 * and not unduly expensive.  We'll stop taking this path as soon as
	 * somebody has attempted to read something from the underlying CTE
	 * (thereby causing its chgParam to be cleared).
	 */
	if (node->leader->cteplanstate->chgParam != NULL)
	{
		tuplestore_clear(tuplestorestate);
		node->leader->eof_cte = false;
	}
	else
	{
		/*
		 * Else, just rewind my own pointer.  Either the underlying CTE
		 * doesn't need a rescan (and we can re-read what's in the tuplestore
		 * now), or somebody else already took care of it.
		 */
		tuplestore_select_read_pointer(tuplestorestate, node->readptr);
		tuplestore_rescan(tuplestorestate);
	}
}