1 /*-------------------------------------------------------------------------
2 *
3 * pquery.c
4 * POSTGRES process query command code
5 *
6 * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 *
10 * IDENTIFICATION
11 * src/backend/tcop/pquery.c
12 *
13 *-------------------------------------------------------------------------
14 */
15
16 #include "postgres.h"
17
18 #include <limits.h>
19
20 #include "access/xact.h"
21 #include "commands/prepare.h"
22 #include "executor/tstoreReceiver.h"
23 #include "miscadmin.h"
24 #include "pg_trace.h"
25 #include "tcop/pquery.h"
26 #include "tcop/utility.h"
27 #include "utils/memutils.h"
28 #include "utils/snapmgr.h"
29
30
31 /*
32 * ActivePortal is the currently executing Portal (the most closely nested,
33 * if there are several).
34 */
35 Portal ActivePortal = NULL;
36
37
38 static void ProcessQuery(PlannedStmt *plan,
39 const char *sourceText,
40 ParamListInfo params,
41 QueryEnvironment *queryEnv,
42 DestReceiver *dest,
43 QueryCompletion *qc);
44 static void FillPortalStore(Portal portal, bool isTopLevel);
45 static uint64 RunFromStore(Portal portal, ScanDirection direction, uint64 count,
46 DestReceiver *dest);
47 static uint64 PortalRunSelect(Portal portal, bool forward, long count,
48 DestReceiver *dest);
49 static void PortalRunUtility(Portal portal, PlannedStmt *pstmt,
50 bool isTopLevel, bool setHoldSnapshot,
51 DestReceiver *dest, QueryCompletion *qc);
52 static void PortalRunMulti(Portal portal,
53 bool isTopLevel, bool setHoldSnapshot,
54 DestReceiver *dest, DestReceiver *altdest,
55 QueryCompletion *qc);
56 static uint64 DoPortalRunFetch(Portal portal,
57 FetchDirection fdirection,
58 long count,
59 DestReceiver *dest);
60 static void DoPortalRewind(Portal portal);
61
62
63 /*
64 * CreateQueryDesc
65 */
66 QueryDesc *
67 CreateQueryDesc(PlannedStmt *plannedstmt,
68 const char *sourceText,
69 Snapshot snapshot,
70 Snapshot crosscheck_snapshot,
71 DestReceiver *dest,
72 ParamListInfo params,
73 QueryEnvironment *queryEnv,
74 int instrument_options)
75 {
76 QueryDesc *qd = (QueryDesc *) palloc(sizeof(QueryDesc));
77
78 qd->operation = plannedstmt->commandType; /* operation */
79 qd->plannedstmt = plannedstmt; /* plan */
80 qd->sourceText = sourceText; /* query text */
81 qd->snapshot = RegisterSnapshot(snapshot); /* snapshot */
82 /* RI check snapshot */
83 qd->crosscheck_snapshot = RegisterSnapshot(crosscheck_snapshot);
84 qd->dest = dest; /* output dest */
85 qd->params = params; /* parameter values passed into query */
86 qd->queryEnv = queryEnv;
87 qd->instrument_options = instrument_options; /* instrumentation wanted? */
88
89 /* null these fields until set by ExecutorStart */
90 qd->tupDesc = NULL;
91 qd->estate = NULL;
92 qd->planstate = NULL;
93 qd->totaltime = NULL;
94
95 /* not yet executed */
96 qd->already_executed = false;
97
98 return qd;
99 }
100
101 /*
102 * FreeQueryDesc
103 */
104 void
105 FreeQueryDesc(QueryDesc *qdesc)
106 {
107 /* Can't be a live query */
108 Assert(qdesc->estate == NULL);
109
110 /* forget our snapshots */
111 UnregisterSnapshot(qdesc->snapshot);
112 UnregisterSnapshot(qdesc->crosscheck_snapshot);
113
114 /* Only the QueryDesc itself need be freed */
115 pfree(qdesc);
116 }
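
/*
 * A minimal sketch of the QueryDesc lifecycle as used in this file (this is
 * essentially what ProcessQuery below does); "stmt", "query_string", and
 * "dest" stand for caller-supplied values:
 *
 *		QueryDesc  *qd;
 *
 *		qd = CreateQueryDesc(stmt, query_string,
 *							 GetActiveSnapshot(), InvalidSnapshot,
 *							 dest, NULL, NULL, 0);
 *		ExecutorStart(qd, 0);
 *		ExecutorRun(qd, ForwardScanDirection, 0L, true);
 *		ExecutorFinish(qd);
 *		ExecutorEnd(qd);
 *		FreeQueryDesc(qd);
 */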
117
118
119 /*
120 * ProcessQuery
121 * Execute a single plannable query within a PORTAL_MULTI_QUERY,
122 * PORTAL_ONE_RETURNING, or PORTAL_ONE_MOD_WITH portal
123 *
124 * plan: the plan tree for the query
125 * sourceText: the source text of the query
126 * params: any parameters needed
127 * dest: where to send results
128 * qc: where to store the command completion status data.
129 *
130 * qc may be NULL if caller doesn't want a status string.
131 *
132 * Must be called in a memory context that will be reset or deleted on
133 * error; otherwise the executor's memory usage will be leaked.
134 */
135 static void
136 ProcessQuery(PlannedStmt *plan,
137 const char *sourceText,
138 ParamListInfo params,
139 QueryEnvironment *queryEnv,
140 DestReceiver *dest,
141 QueryCompletion *qc)
142 {
143 QueryDesc *queryDesc;
144
145 /*
146 * Create the QueryDesc object
147 */
148 queryDesc = CreateQueryDesc(plan, sourceText,
149 GetActiveSnapshot(), InvalidSnapshot,
150 dest, params, queryEnv, 0);
151
152 /*
153 * Call ExecutorStart to prepare the plan for execution
154 */
155 ExecutorStart(queryDesc, 0);
156
157 /*
158 * Run the plan to completion.
159 */
160 ExecutorRun(queryDesc, ForwardScanDirection, 0L, true);
161
162 /*
163 * Build command completion status data, if caller wants one.
164 */
165 if (qc)
166 {
167 switch (queryDesc->operation)
168 {
169 case CMD_SELECT:
170 SetQueryCompletion(qc, CMDTAG_SELECT, queryDesc->estate->es_processed);
171 break;
172 case CMD_INSERT:
173 SetQueryCompletion(qc, CMDTAG_INSERT, queryDesc->estate->es_processed);
174 break;
175 case CMD_UPDATE:
176 SetQueryCompletion(qc, CMDTAG_UPDATE, queryDesc->estate->es_processed);
177 break;
178 case CMD_DELETE:
179 SetQueryCompletion(qc, CMDTAG_DELETE, queryDesc->estate->es_processed);
180 break;
181 default:
182 SetQueryCompletion(qc, CMDTAG_UNKNOWN, queryDesc->estate->es_processed);
183 break;
184 }
185 }
186
187 /*
188 * Now, we close down all the scans and free allocated resources.
189 */
190 ExecutorFinish(queryDesc);
191 ExecutorEnd(queryDesc);
192
193 FreeQueryDesc(queryDesc);
194 }
195
196 /*
197 * ChoosePortalStrategy
198 * Select portal execution strategy given the intended statement list.
199 *
200 * The list elements can be Querys or PlannedStmts.
201 * That's more general than portals need, but plancache.c uses this too.
202 *
203 * See the comments in portal.h.
204 */
205 PortalStrategy
206 ChoosePortalStrategy(List *stmts)
207 {
208 int nSetTag;
209 ListCell *lc;
210
211 /*
212 * PORTAL_ONE_SELECT and PORTAL_UTIL_SELECT need only consider the
213 * single-statement case, since there are no rewrite rules that can add
214 * auxiliary queries to a SELECT or a utility command. PORTAL_ONE_MOD_WITH
215 * likewise allows only one top-level statement.
216 */
217 if (list_length(stmts) == 1)
218 {
219 Node *stmt = (Node *) linitial(stmts);
220
221 if (IsA(stmt, Query))
222 {
223 Query *query = (Query *) stmt;
224
225 if (query->canSetTag)
226 {
227 if (query->commandType == CMD_SELECT)
228 {
229 if (query->hasModifyingCTE)
230 return PORTAL_ONE_MOD_WITH;
231 else
232 return PORTAL_ONE_SELECT;
233 }
234 if (query->commandType == CMD_UTILITY)
235 {
236 if (UtilityReturnsTuples(query->utilityStmt))
237 return PORTAL_UTIL_SELECT;
238 /* it can't be ONE_RETURNING, so give up */
239 return PORTAL_MULTI_QUERY;
240 }
241 }
242 }
243 else if (IsA(stmt, PlannedStmt))
244 {
245 PlannedStmt *pstmt = (PlannedStmt *) stmt;
246
247 if (pstmt->canSetTag)
248 {
249 if (pstmt->commandType == CMD_SELECT)
250 {
251 if (pstmt->hasModifyingCTE)
252 return PORTAL_ONE_MOD_WITH;
253 else
254 return PORTAL_ONE_SELECT;
255 }
256 if (pstmt->commandType == CMD_UTILITY)
257 {
258 if (UtilityReturnsTuples(pstmt->utilityStmt))
259 return PORTAL_UTIL_SELECT;
260 /* it can't be ONE_RETURNING, so give up */
261 return PORTAL_MULTI_QUERY;
262 }
263 }
264 }
265 else
266 elog(ERROR, "unrecognized node type: %d", (int) nodeTag(stmt));
267 }
268
269 /*
270 * PORTAL_ONE_RETURNING has to allow auxiliary queries added by rewrite.
271 * Choose PORTAL_ONE_RETURNING if there is exactly one canSetTag query and
272 * it has a RETURNING list.
273 */
274 nSetTag = 0;
275 foreach(lc, stmts)
276 {
277 Node *stmt = (Node *) lfirst(lc);
278
279 if (IsA(stmt, Query))
280 {
281 Query *query = (Query *) stmt;
282
283 if (query->canSetTag)
284 {
285 if (++nSetTag > 1)
286 return PORTAL_MULTI_QUERY; /* no need to look further */
287 if (query->commandType == CMD_UTILITY ||
288 query->returningList == NIL)
289 return PORTAL_MULTI_QUERY; /* no need to look further */
290 }
291 }
292 else if (IsA(stmt, PlannedStmt))
293 {
294 PlannedStmt *pstmt = (PlannedStmt *) stmt;
295
296 if (pstmt->canSetTag)
297 {
298 if (++nSetTag > 1)
299 return PORTAL_MULTI_QUERY; /* no need to look further */
300 if (pstmt->commandType == CMD_UTILITY ||
301 !pstmt->hasReturning)
302 return PORTAL_MULTI_QUERY; /* no need to look further */
303 }
304 }
305 else
306 elog(ERROR, "unrecognized node type: %d", (int) nodeTag(stmt));
307 }
308 if (nSetTag == 1)
309 return PORTAL_ONE_RETURNING;
310
311 /* Else, it's the general case... */
312 return PORTAL_MULTI_QUERY;
313 }
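
/*
 * As a rough illustration of the rules above (a sketch; the real decision
 * also depends on canSetTag and on what the rewriter produced):
 *
 *		plain SELECT							-> PORTAL_ONE_SELECT
 *		SELECT with a modifying CTE				-> PORTAL_ONE_MOD_WITH
 *		single INSERT/UPDATE/DELETE RETURNING	-> PORTAL_ONE_RETURNING
 *		utility returning tuples (e.g. EXPLAIN)	-> PORTAL_UTIL_SELECT
 *		anything else							-> PORTAL_MULTI_QUERY
 */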
314
315 /*
316 * FetchPortalTargetList
317 * Given a portal that returns tuples, extract the query targetlist.
318 * Returns NIL if the portal doesn't have a determinable targetlist.
319 *
320 * Note: do not modify the result.
321 */
322 List *
323 FetchPortalTargetList(Portal portal)
324 {
325 /* no point in looking if we determined it doesn't return tuples */
326 if (portal->strategy == PORTAL_MULTI_QUERY)
327 return NIL;
328 /* get the primary statement and find out what it returns */
329 return FetchStatementTargetList((Node *) PortalGetPrimaryStmt(portal));
330 }
331
332 /*
333 * FetchStatementTargetList
334 * Given a statement that returns tuples, extract the query targetlist.
335 * Returns NIL if the statement doesn't have a determinable targetlist.
336 *
337 * This can be applied to a Query or a PlannedStmt.
338 * That's more general than portals need, but plancache.c uses this too.
339 *
340 * Note: do not modify the result.
341 *
342 * XXX be careful to keep this in sync with UtilityReturnsTuples.
343 */
344 List *
345 FetchStatementTargetList(Node *stmt)
346 {
347 if (stmt == NULL)
348 return NIL;
349 if (IsA(stmt, Query))
350 {
351 Query *query = (Query *) stmt;
352
353 if (query->commandType == CMD_UTILITY)
354 {
355 /* transfer attention to utility statement */
356 stmt = query->utilityStmt;
357 }
358 else
359 {
360 if (query->commandType == CMD_SELECT)
361 return query->targetList;
362 if (query->returningList)
363 return query->returningList;
364 return NIL;
365 }
366 }
367 if (IsA(stmt, PlannedStmt))
368 {
369 PlannedStmt *pstmt = (PlannedStmt *) stmt;
370
371 if (pstmt->commandType == CMD_UTILITY)
372 {
373 /* transfer attention to utility statement */
374 stmt = pstmt->utilityStmt;
375 }
376 else
377 {
378 if (pstmt->commandType == CMD_SELECT)
379 return pstmt->planTree->targetlist;
380 if (pstmt->hasReturning)
381 return pstmt->planTree->targetlist;
382 return NIL;
383 }
384 }
385 if (IsA(stmt, FetchStmt))
386 {
387 FetchStmt *fstmt = (FetchStmt *) stmt;
388 Portal subportal;
389
390 Assert(!fstmt->ismove);
391 subportal = GetPortalByName(fstmt->portalname);
392 Assert(PortalIsValid(subportal));
393 return FetchPortalTargetList(subportal);
394 }
395 if (IsA(stmt, ExecuteStmt))
396 {
397 ExecuteStmt *estmt = (ExecuteStmt *) stmt;
398 PreparedStatement *entry;
399
400 entry = FetchPreparedStatement(estmt->name, true);
401 return FetchPreparedStatementTargetList(entry);
402 }
403 return NIL;
404 }
405
406 /*
407 * PortalStart
408 * Prepare a portal for execution.
409 *
410 * Caller must already have created the portal, done PortalDefineQuery(),
411 * and adjusted portal options if needed.
412 *
413 * If parameters are needed by the query, they must be passed in "params"
414 * (caller is responsible for giving them appropriate lifetime).
415 *
416 * The caller can also provide an initial set of "eflags" to be passed to
417 * ExecutorStart (but note these can be modified internally, and they are
418 * currently only honored for PORTAL_ONE_SELECT portals). Most callers
419 * should simply pass zero.
420 *
421 * The caller can optionally pass a snapshot to be used; pass InvalidSnapshot
422 * for the normal behavior of setting a new snapshot. This parameter is
423 * presently ignored for non-PORTAL_ONE_SELECT portals (it's only intended
424 * to be used for cursors).
425 *
426 * On return, portal is ready to accept PortalRun() calls, and the result
427 * tupdesc (if any) is known.
428 */
429 void
430 PortalStart(Portal portal, ParamListInfo params,
431 int eflags, Snapshot snapshot)
432 {
433 Portal saveActivePortal;
434 ResourceOwner saveResourceOwner;
435 MemoryContext savePortalContext;
436 MemoryContext oldContext;
437 QueryDesc *queryDesc;
438 int myeflags;
439
440 AssertArg(PortalIsValid(portal));
441 AssertState(portal->status == PORTAL_DEFINED);
442
443 /*
444 * Set up global portal context pointers.
445 */
446 saveActivePortal = ActivePortal;
447 saveResourceOwner = CurrentResourceOwner;
448 savePortalContext = PortalContext;
449 PG_TRY();
450 {
451 ActivePortal = portal;
452 if (portal->resowner)
453 CurrentResourceOwner = portal->resowner;
454 PortalContext = portal->portalContext;
455
456 oldContext = MemoryContextSwitchTo(PortalContext);
457
458 /* Must remember portal param list, if any */
459 portal->portalParams = params;
460
461 /*
462 * Determine the portal execution strategy
463 */
464 portal->strategy = ChoosePortalStrategy(portal->stmts);
465
466 /*
467 * Fire her up according to the strategy
468 */
469 switch (portal->strategy)
470 {
471 case PORTAL_ONE_SELECT:
472
473 /* Must set snapshot before starting executor. */
474 if (snapshot)
475 PushActiveSnapshot(snapshot);
476 else
477 PushActiveSnapshot(GetTransactionSnapshot());
478
479 /*
480 * We could remember the snapshot in portal->portalSnapshot,
481 * but presently there seems no need to, as this code path
482 * cannot be used for non-atomic execution. Hence there can't
483 * be any commit/abort that might destroy the snapshot. Since
484 * we don't do that, there's also no need to force a
485 * non-default nesting level for the snapshot.
486 */
487
488 /*
489 * Create QueryDesc in portal's context; for the moment, set
490 * the destination to DestNone.
491 */
492 queryDesc = CreateQueryDesc(linitial_node(PlannedStmt, portal->stmts),
493 portal->sourceText,
494 GetActiveSnapshot(),
495 InvalidSnapshot,
496 None_Receiver,
497 params,
498 portal->queryEnv,
499 0);
500
501 /*
502 * If it's a scrollable cursor, executor needs to support
503 * REWIND and backwards scan, as well as whatever the caller
504 * might've asked for.
505 */
506 if (portal->cursorOptions & CURSOR_OPT_SCROLL)
507 myeflags = eflags | EXEC_FLAG_REWIND | EXEC_FLAG_BACKWARD;
508 else
509 myeflags = eflags;
510
511 /*
512 * Call ExecutorStart to prepare the plan for execution
513 */
514 ExecutorStart(queryDesc, myeflags);
515
516 /*
517 * This tells PortalCleanup to shut down the executor
518 */
519 portal->queryDesc = queryDesc;
520
521 /*
522 * Remember tuple descriptor (computed by ExecutorStart)
523 */
524 portal->tupDesc = queryDesc->tupDesc;
525
526 /*
527 * Reset cursor position data to "start of query"
528 */
529 portal->atStart = true;
530 portal->atEnd = false; /* allow fetches */
531 portal->portalPos = 0;
532
533 PopActiveSnapshot();
534 break;
535
536 case PORTAL_ONE_RETURNING:
537 case PORTAL_ONE_MOD_WITH:
538
539 /*
540 * We don't start the executor until we are told to run the
541 * portal. We do need to set up the result tupdesc.
542 */
543 {
544 PlannedStmt *pstmt;
545
546 pstmt = PortalGetPrimaryStmt(portal);
547 portal->tupDesc =
548 ExecCleanTypeFromTL(pstmt->planTree->targetlist);
549 }
550
551 /*
552 * Reset cursor position data to "start of query"
553 */
554 portal->atStart = true;
555 portal->atEnd = false; /* allow fetches */
556 portal->portalPos = 0;
557 break;
558
559 case PORTAL_UTIL_SELECT:
560
561 /*
562 * We don't set snapshot here, because PortalRunUtility will
563 * take care of it if needed.
564 */
565 {
566 PlannedStmt *pstmt = PortalGetPrimaryStmt(portal);
567
568 Assert(pstmt->commandType == CMD_UTILITY);
569 portal->tupDesc = UtilityTupleDescriptor(pstmt->utilityStmt);
570 }
571
572 /*
573 * Reset cursor position data to "start of query"
574 */
575 portal->atStart = true;
576 portal->atEnd = false; /* allow fetches */
577 portal->portalPos = 0;
578 break;
579
580 case PORTAL_MULTI_QUERY:
581 /* Need do nothing now */
582 portal->tupDesc = NULL;
583 break;
584 }
585 }
586 PG_CATCH();
587 {
588 /* Uncaught error while executing portal: mark it dead */
589 MarkPortalFailed(portal);
590
591 /* Restore global vars and propagate error */
592 ActivePortal = saveActivePortal;
593 CurrentResourceOwner = saveResourceOwner;
594 PortalContext = savePortalContext;
595
596 PG_RE_THROW();
597 }
598 PG_END_TRY();
599
600 MemoryContextSwitchTo(oldContext);
601
602 ActivePortal = saveActivePortal;
603 CurrentResourceOwner = saveResourceOwner;
604 PortalContext = savePortalContext;
605
606 portal->status = PORTAL_READY;
607 }
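
/*
 * A minimal sketch of driving a portal with the entry points in this file,
 * assuming the caller has already created the portal and called
 * PortalDefineQuery() as noted above; "params", "nformats", "formats", and
 * "dest" are hypothetical caller-supplied values:
 *
 *		QueryCompletion qc;
 *
 *		PortalStart(portal, params, 0, InvalidSnapshot);
 *		PortalSetResultFormat(portal, nformats, formats);	(remote dests only)
 *		(void) PortalRun(portal, FETCH_ALL, true, true,
 *						 dest, dest, &qc);
 *
 * Portal creation and destruction themselves live in portalmem.c.
 */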
608
609 /*
610 * PortalSetResultFormat
611 * Select the format codes for a portal's output.
612 *
613 * This must be run after PortalStart for a portal that will be read by
614 * a DestRemote or DestRemoteExecute destination. It is not presently needed
615 * for other destination types.
616 *
617 * formats[] is the client format request, as per Bind message conventions.
618 */
619 void
620 PortalSetResultFormat(Portal portal, int nFormats, int16 *formats)
621 {
622 int natts;
623 int i;
624
625 /* Do nothing if portal won't return tuples */
626 if (portal->tupDesc == NULL)
627 return;
628 natts = portal->tupDesc->natts;
629 portal->formats = (int16 *)
630 MemoryContextAlloc(portal->portalContext,
631 natts * sizeof(int16));
632 if (nFormats > 1)
633 {
634 /* format specified for each column */
635 if (nFormats != natts)
636 ereport(ERROR,
637 (errcode(ERRCODE_PROTOCOL_VIOLATION),
638 errmsg("bind message has %d result formats but query has %d columns",
639 nFormats, natts)));
640 memcpy(portal->formats, formats, natts * sizeof(int16));
641 }
642 else if (nFormats > 0)
643 {
644 /* single format specified, use for all columns */
645 int16 format1 = formats[0];
646
647 for (i = 0; i < natts; i++)
648 portal->formats[i] = format1;
649 }
650 else
651 {
652 /* use default format for all columns */
653 for (i = 0; i < natts; i++)
654 portal->formats[i] = 0;
655 }
656 }
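
/*
 * For example (a sketch): a Bind message that requests binary output for
 * every column reduces to a single format code,
 *
 *		int16		one_format = 1;		(1 = binary, 0 = text)
 *
 *		PortalSetResultFormat(portal, 1, &one_format);
 *
 * while nFormats == 0 leaves all columns in the default text format.
 */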
657
658 /*
659 * PortalRun
660 * Run a portal's query or queries.
661 *
662 * count <= 0 is interpreted as a no-op: the destination gets started up
663 * and shut down, but nothing else happens. Also, count == FETCH_ALL is
664 * interpreted as "all rows". Note that count is ignored in multi-query
665 * situations, where we always run the portal to completion.
666 *
667 * isTopLevel: true if query is being executed at backend "top level"
668 * (that is, directly from a client command message)
669 *
670 * dest: where to send output of primary (canSetTag) query
671 *
672 * altdest: where to send output of non-primary queries
673 *
674 * qc: where to store command completion status data.
675 * May be NULL if caller doesn't want status data.
676 *
677 * Returns true if the portal's execution is complete, false if it was
678 * suspended due to exhaustion of the count parameter.
679 */
680 bool
681 PortalRun(Portal portal, long count, bool isTopLevel, bool run_once,
682 DestReceiver *dest, DestReceiver *altdest,
683 QueryCompletion *qc)
684 {
685 bool result;
686 uint64 nprocessed;
687 ResourceOwner saveTopTransactionResourceOwner;
688 MemoryContext saveTopTransactionContext;
689 Portal saveActivePortal;
690 ResourceOwner saveResourceOwner;
691 MemoryContext savePortalContext;
692 MemoryContext saveMemoryContext;
693
694 AssertArg(PortalIsValid(portal));
695
696 TRACE_POSTGRESQL_QUERY_EXECUTE_START();
697
698 /* Initialize empty completion data */
699 if (qc)
700 InitializeQueryCompletion(qc);
701
702 if (log_executor_stats && portal->strategy != PORTAL_MULTI_QUERY)
703 {
704 elog(DEBUG3, "PortalRun");
705 /* PORTAL_MULTI_QUERY logs its own stats per query */
706 ResetUsage();
707 }
708
709 /*
710 * Check for improper portal use, and mark portal active.
711 */
712 MarkPortalActive(portal);
713
714 /* Set run_once flag. Shouldn't be clear if previously set. */
715 Assert(!portal->run_once || run_once);
716 portal->run_once = run_once;
717
718 /*
719 * Set up global portal context pointers.
720 *
721 * We have to play a special game here to support utility commands like
722 * VACUUM and CLUSTER, which internally start and commit transactions.
723 * When we are called to execute such a command, CurrentResourceOwner will
724 * be pointing to the TopTransactionResourceOwner --- which will be
725 * destroyed and replaced in the course of the internal commit and
726 * restart. So we need to be prepared to restore it as pointing to the
727 * exit-time TopTransactionResourceOwner. (Ain't that ugly? This idea of
728 * internally starting whole new transactions is not good.)
729 * CurrentMemoryContext has a similar problem, but the other pointers we
730 * save here will be NULL or pointing to longer-lived objects.
731 */
732 saveTopTransactionResourceOwner = TopTransactionResourceOwner;
733 saveTopTransactionContext = TopTransactionContext;
734 saveActivePortal = ActivePortal;
735 saveResourceOwner = CurrentResourceOwner;
736 savePortalContext = PortalContext;
737 saveMemoryContext = CurrentMemoryContext;
738 PG_TRY();
739 {
740 ActivePortal = portal;
741 if (portal->resowner)
742 CurrentResourceOwner = portal->resowner;
743 PortalContext = portal->portalContext;
744
745 MemoryContextSwitchTo(PortalContext);
746
747 switch (portal->strategy)
748 {
749 case PORTAL_ONE_SELECT:
750 case PORTAL_ONE_RETURNING:
751 case PORTAL_ONE_MOD_WITH:
752 case PORTAL_UTIL_SELECT:
753
754 /*
755 * If we have not yet run the command, do so, storing its
756 * results in the portal's tuplestore. But we don't do that
757 * for the PORTAL_ONE_SELECT case.
758 */
759 if (portal->strategy != PORTAL_ONE_SELECT && !portal->holdStore)
760 FillPortalStore(portal, isTopLevel);
761
762 /*
763 * Now fetch desired portion of results.
764 */
765 nprocessed = PortalRunSelect(portal, true, count, dest);
766
767 /*
768 * If the portal result contains a command tag and the caller
769 * gave us a pointer to store it, copy it and update the
770 * rowcount.
771 */
772 if (qc && portal->qc.commandTag != CMDTAG_UNKNOWN)
773 {
774 CopyQueryCompletion(qc, &portal->qc);
775 qc->nprocessed = nprocessed;
776 }
777
778 /* Mark portal not active */
779 portal->status = PORTAL_READY;
780
781 /*
782 * Since it's a forward fetch, say DONE iff atEnd is now true.
783 */
784 result = portal->atEnd;
785 break;
786
787 case PORTAL_MULTI_QUERY:
788 PortalRunMulti(portal, isTopLevel, false,
789 dest, altdest, qc);
790
791 /* Prevent portal's commands from being re-executed */
792 MarkPortalDone(portal);
793
794 /* Always complete at end of RunMulti */
795 result = true;
796 break;
797
798 default:
799 elog(ERROR, "unrecognized portal strategy: %d",
800 (int) portal->strategy);
801 result = false; /* keep compiler quiet */
802 break;
803 }
804 }
805 PG_CATCH();
806 {
807 /* Uncaught error while executing portal: mark it dead */
808 MarkPortalFailed(portal);
809
810 /* Restore global vars and propagate error */
811 if (saveMemoryContext == saveTopTransactionContext)
812 MemoryContextSwitchTo(TopTransactionContext);
813 else
814 MemoryContextSwitchTo(saveMemoryContext);
815 ActivePortal = saveActivePortal;
816 if (saveResourceOwner == saveTopTransactionResourceOwner)
817 CurrentResourceOwner = TopTransactionResourceOwner;
818 else
819 CurrentResourceOwner = saveResourceOwner;
820 PortalContext = savePortalContext;
821
822 PG_RE_THROW();
823 }
824 PG_END_TRY();
825
826 if (saveMemoryContext == saveTopTransactionContext)
827 MemoryContextSwitchTo(TopTransactionContext);
828 else
829 MemoryContextSwitchTo(saveMemoryContext);
830 ActivePortal = saveActivePortal;
831 if (saveResourceOwner == saveTopTransactionResourceOwner)
832 CurrentResourceOwner = TopTransactionResourceOwner;
833 else
834 CurrentResourceOwner = saveResourceOwner;
835 PortalContext = savePortalContext;
836
837 if (log_executor_stats && portal->strategy != PORTAL_MULTI_QUERY)
838 ShowUsage("EXECUTOR STATISTICS");
839
840 TRACE_POSTGRESQL_QUERY_EXECUTE_DONE();
841
842 return result;
843 }
844
845 /*
846 * PortalRunSelect
847 * Execute a portal's query in PORTAL_ONE_SELECT mode, and also
848 * when fetching from a completed holdStore in PORTAL_ONE_RETURNING,
849 * PORTAL_ONE_MOD_WITH, and PORTAL_UTIL_SELECT cases.
850 *
851 * This handles simple N-rows-forward-or-backward cases. For more complex
852 * nonsequential access to a portal, see PortalRunFetch.
853 *
854 * count <= 0 is interpreted as a no-op: the destination gets started up
855 * and shut down, but nothing else happens. Also, count == FETCH_ALL is
856 * interpreted as "all rows". (cf FetchStmt.howMany)
857 *
858 * Caller must already have validated the Portal and done appropriate
859 * setup (cf. PortalRun).
860 *
861 * Returns number of rows processed (suitable for use in result tag)
862 */
863 static uint64
864 PortalRunSelect(Portal portal,
865 bool forward,
866 long count,
867 DestReceiver *dest)
868 {
869 QueryDesc *queryDesc;
870 ScanDirection direction;
871 uint64 nprocessed;
872
873 /*
874 * NB: queryDesc will be NULL if we are fetching from a held cursor or a
875 * completed utility query; can't use it in that path.
876 */
877 queryDesc = portal->queryDesc;
878
879 /* Caller messed up if we have neither a ready query nor held data. */
880 Assert(queryDesc || portal->holdStore);
881
882 /*
883 * Force the queryDesc destination to the right thing. This supports
884 * MOVE, for example, which will pass in dest = DestNone. This is okay to
885 * change as long as we do it on every fetch. (The Executor must not
886 * assume that dest never changes.)
887 */
888 if (queryDesc)
889 queryDesc->dest = dest;
890
891 /*
892 * Determine which direction to go in, and check to see if we're already
893 * at the end of the available tuples in that direction. If so, set the
894 * direction to NoMovement to avoid trying to fetch any tuples. (This
895 * check exists because not all plan node types are robust about being
896 * called again if they've already returned NULL once.) Then call the
897 * executor (we must not skip this, because the destination needs to see a
898 * setup and shutdown even if no tuples are available). Finally, update
899 * the portal position state depending on the number of tuples that were
900 * retrieved.
901 */
902 if (forward)
903 {
904 if (portal->atEnd || count <= 0)
905 {
906 direction = NoMovementScanDirection;
907 count = 0; /* don't pass negative count to executor */
908 }
909 else
910 direction = ForwardScanDirection;
911
912 /* In the executor, zero count processes all rows */
913 if (count == FETCH_ALL)
914 count = 0;
915
916 if (portal->holdStore)
917 nprocessed = RunFromStore(portal, direction, (uint64) count, dest);
918 else
919 {
920 PushActiveSnapshot(queryDesc->snapshot);
921 ExecutorRun(queryDesc, direction, (uint64) count,
922 portal->run_once);
923 nprocessed = queryDesc->estate->es_processed;
924 PopActiveSnapshot();
925 }
926
927 if (!ScanDirectionIsNoMovement(direction))
928 {
929 if (nprocessed > 0)
930 portal->atStart = false; /* OK to go backward now */
931 if (count == 0 || nprocessed < (uint64) count)
932 portal->atEnd = true; /* we retrieved 'em all */
933 portal->portalPos += nprocessed;
934 }
935 }
936 else
937 {
938 if (portal->cursorOptions & CURSOR_OPT_NO_SCROLL)
939 ereport(ERROR,
940 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
941 errmsg("cursor can only scan forward"),
942 errhint("Declare it with SCROLL option to enable backward scan.")));
943
944 if (portal->atStart || count <= 0)
945 {
946 direction = NoMovementScanDirection;
947 count = 0; /* don't pass negative count to executor */
948 }
949 else
950 direction = BackwardScanDirection;
951
952 /* In the executor, zero count processes all rows */
953 if (count == FETCH_ALL)
954 count = 0;
955
956 if (portal->holdStore)
957 nprocessed = RunFromStore(portal, direction, (uint64) count, dest);
958 else
959 {
960 PushActiveSnapshot(queryDesc->snapshot);
961 ExecutorRun(queryDesc, direction, (uint64) count,
962 portal->run_once);
963 nprocessed = queryDesc->estate->es_processed;
964 PopActiveSnapshot();
965 }
966
967 if (!ScanDirectionIsNoMovement(direction))
968 {
969 if (nprocessed > 0 && portal->atEnd)
970 {
971 portal->atEnd = false; /* OK to go forward now */
972 portal->portalPos++; /* adjust for endpoint case */
973 }
974 if (count == 0 || nprocessed < (uint64) count)
975 {
976 portal->atStart = true; /* we retrieved 'em all */
977 portal->portalPos = 0;
978 }
979 else
980 {
981 portal->portalPos -= nprocessed;
982 }
983 }
984 }
985
986 return nprocessed;
987 }
988
989 /*
990 * FillPortalStore
991 * Run the query and load result tuples into the portal's tuple store.
992 *
993 * This is used for PORTAL_ONE_RETURNING, PORTAL_ONE_MOD_WITH, and
994 * PORTAL_UTIL_SELECT cases only.
995 */
996 static void
997 FillPortalStore(Portal portal, bool isTopLevel)
998 {
999 DestReceiver *treceiver;
1000 QueryCompletion qc;
1001
1002 InitializeQueryCompletion(&qc);
1003 PortalCreateHoldStore(portal);
1004 treceiver = CreateDestReceiver(DestTuplestore);
1005 SetTuplestoreDestReceiverParams(treceiver,
1006 portal->holdStore,
1007 portal->holdContext,
1008 false,
1009 NULL,
1010 NULL);
1011
1012 switch (portal->strategy)
1013 {
1014 case PORTAL_ONE_RETURNING:
1015 case PORTAL_ONE_MOD_WITH:
1016
1017 /*
1018 * Run the portal to completion just as for the default
1019 * PORTAL_MULTI_QUERY case, but send the primary query's output to
1020 * the tuplestore. Auxiliary query outputs are discarded. Set the
1021 * portal's holdSnapshot to the snapshot used (or a copy of it).
1022 */
1023 PortalRunMulti(portal, isTopLevel, true,
1024 treceiver, None_Receiver, &qc);
1025 break;
1026
1027 case PORTAL_UTIL_SELECT:
1028 PortalRunUtility(portal, linitial_node(PlannedStmt, portal->stmts),
1029 isTopLevel, true, treceiver, &qc);
1030 break;
1031
1032 default:
1033 elog(ERROR, "unsupported portal strategy: %d",
1034 (int) portal->strategy);
1035 break;
1036 }
1037
1038 /* Override portal completion data with actual command results */
1039 if (qc.commandTag != CMDTAG_UNKNOWN)
1040 CopyQueryCompletion(&portal->qc, &qc);
1041
1042 treceiver->rDestroy(treceiver);
1043 }
1044
1045 /*
1046 * RunFromStore
1047 * Fetch tuples from the portal's tuple store.
1048 *
1049 * Calling conventions are similar to ExecutorRun, except that we
1050 * do not depend on having a queryDesc or estate. Therefore we return the
1051 * number of tuples processed as the result, not in estate->es_processed.
1052 *
1053 * One difference from ExecutorRun is that the destination receiver functions
1054 * are run in the caller's memory context (since we have no estate). Watch
1055 * out for memory leaks.
1056 */
1057 static uint64
1058 RunFromStore(Portal portal, ScanDirection direction, uint64 count,
1059 DestReceiver *dest)
1060 {
1061 uint64 current_tuple_count = 0;
1062 TupleTableSlot *slot;
1063
1064 slot = MakeSingleTupleTableSlot(portal->tupDesc, &TTSOpsMinimalTuple);
1065
1066 dest->rStartup(dest, CMD_SELECT, portal->tupDesc);
1067
1068 if (ScanDirectionIsNoMovement(direction))
1069 {
1070 /* do nothing except start/stop the destination */
1071 }
1072 else
1073 {
1074 bool forward = ScanDirectionIsForward(direction);
1075
1076 for (;;)
1077 {
1078 MemoryContext oldcontext;
1079 bool ok;
1080
1081 oldcontext = MemoryContextSwitchTo(portal->holdContext);
1082
1083 ok = tuplestore_gettupleslot(portal->holdStore, forward, false,
1084 slot);
1085
1086 MemoryContextSwitchTo(oldcontext);
1087
1088 if (!ok)
1089 break;
1090
1091 /*
1092 * If we are not able to send the tuple, we assume the destination
1093 * has closed and no more tuples can be sent. If that's the case,
1094 * end the loop.
1095 */
1096 if (!dest->receiveSlot(slot, dest))
1097 break;
1098
1099 ExecClearTuple(slot);
1100
1101 /*
1102 * Check our tuple count. If we've processed the proper number
1103 * then quit, else loop again and process more tuples. Zero count
1104 * means no limit.
1105 */
1106 current_tuple_count++;
1107 if (count && count == current_tuple_count)
1108 break;
1109 }
1110 }
1111
1112 dest->rShutdown(dest);
1113
1114 ExecDropSingleTupleTableSlot(slot);
1115
1116 return current_tuple_count;
1117 }
1118
1119 /*
1120 * PortalRunUtility
1121 * Execute a utility statement inside a portal.
1122 */
1123 static void
1124 PortalRunUtility(Portal portal, PlannedStmt *pstmt,
1125 bool isTopLevel, bool setHoldSnapshot,
1126 DestReceiver *dest, QueryCompletion *qc)
1127 {
1128 /*
1129 * Set snapshot if utility stmt needs one.
1130 */
1131 if (PlannedStmtRequiresSnapshot(pstmt))
1132 {
1133 Snapshot snapshot = GetTransactionSnapshot();
1134
1135 /* If told to, register the snapshot we're using and save in portal */
1136 if (setHoldSnapshot)
1137 {
1138 snapshot = RegisterSnapshot(snapshot);
1139 portal->holdSnapshot = snapshot;
1140 }
1141
1142 /*
1143 * In any case, make the snapshot active and remember it in portal.
1144 * Because the portal now references the snapshot, we must tell
1145 * snapmgr.c that the snapshot belongs to the portal's transaction
1146 * level, else we risk portalSnapshot becoming a dangling pointer.
1147 */
1148 PushActiveSnapshotWithLevel(snapshot, portal->createLevel);
1149 /* PushActiveSnapshotWithLevel might have copied the snapshot */
1150 portal->portalSnapshot = GetActiveSnapshot();
1151 }
1152 else
1153 portal->portalSnapshot = NULL;
1154
1155 ProcessUtility(pstmt,
1156 portal->sourceText,
1157 (portal->cplan != NULL), /* protect tree if in plancache */
1158 isTopLevel ? PROCESS_UTILITY_TOPLEVEL : PROCESS_UTILITY_QUERY,
1159 portal->portalParams,
1160 portal->queryEnv,
1161 dest,
1162 qc);
1163
1164 /* Some utility statements may change context on us */
1165 MemoryContextSwitchTo(portal->portalContext);
1166
1167 /*
1168 * Some utility commands (e.g., VACUUM) pop the ActiveSnapshot stack from
1169 * under us, so don't complain if it's now empty. Otherwise, our snapshot
1170 * should be the top one; pop it. Note that this could be a different
1171 * snapshot from the one we made above; see EnsurePortalSnapshotExists.
1172 */
1173 if (portal->portalSnapshot != NULL && ActiveSnapshotSet())
1174 {
1175 Assert(portal->portalSnapshot == GetActiveSnapshot());
1176 PopActiveSnapshot();
1177 }
1178 portal->portalSnapshot = NULL;
1179 }
1180
1181 /*
1182 * PortalRunMulti
1183 * Execute a portal's queries in the general case (multi queries
1184 * or non-SELECT-like queries)
1185 */
1186 static void
1187 PortalRunMulti(Portal portal,
1188 bool isTopLevel, bool setHoldSnapshot,
1189 DestReceiver *dest, DestReceiver *altdest,
1190 QueryCompletion *qc)
1191 {
1192 bool active_snapshot_set = false;
1193 ListCell *stmtlist_item;
1194
1195 /*
1196 * If the destination is DestRemoteExecute, change to DestNone. The
1197 * reason is that the client won't be expecting any tuples, and indeed has
1198 * no way to know what they are, since there is no provision for Describe
1199 * to send a RowDescription message when this portal execution strategy is
1200 * in effect. This presently will only affect SELECT commands added to
1201 * non-SELECT queries by rewrite rules: such commands will be executed,
1202 * but the results will be discarded unless you use "simple Query"
1203 * protocol.
1204 */
1205 if (dest->mydest == DestRemoteExecute)
1206 dest = None_Receiver;
1207 if (altdest->mydest == DestRemoteExecute)
1208 altdest = None_Receiver;
1209
1210 /*
1211 * Loop to handle the individual queries generated from a single parsetree
1212 * by analysis and rewrite.
1213 */
1214 foreach(stmtlist_item, portal->stmts)
1215 {
1216 PlannedStmt *pstmt = lfirst_node(PlannedStmt, stmtlist_item);
1217
1218 /*
1219 * If we got a cancel signal in prior command, quit
1220 */
1221 CHECK_FOR_INTERRUPTS();
1222
1223 if (pstmt->utilityStmt == NULL)
1224 {
1225 /*
1226 * process a plannable query.
1227 */
1228 TRACE_POSTGRESQL_QUERY_EXECUTE_START();
1229
1230 if (log_executor_stats)
1231 ResetUsage();
1232
1233 /*
1234 * Must always have a snapshot for plannable queries. First time
1235 * through, take a new snapshot; for subsequent queries in the
1236 * same portal, just update the snapshot's copy of the command
1237 * counter.
1238 */
1239 if (!active_snapshot_set)
1240 {
1241 Snapshot snapshot = GetTransactionSnapshot();
1242
1243 /* If told to, register the snapshot and save in portal */
1244 if (setHoldSnapshot)
1245 {
1246 snapshot = RegisterSnapshot(snapshot);
1247 portal->holdSnapshot = snapshot;
1248 }
1249
1250 /*
1251 * We can't have the holdSnapshot also be the active one,
1252 * because UpdateActiveSnapshotCommandId would complain. So
1253 * force an extra snapshot copy. Plain PushActiveSnapshot
1254 * would have copied the transaction snapshot anyway, so this
1255 * only adds a copy step when setHoldSnapshot is true. (It's
1256 * okay for the command ID of the active snapshot to diverge
1257 * from what holdSnapshot has.)
1258 */
1259 PushCopiedSnapshot(snapshot);
1260
1261 /*
1262 * As for PORTAL_ONE_SELECT portals, it does not seem
1263 * necessary to maintain portal->portalSnapshot here.
1264 */
1265
1266 active_snapshot_set = true;
1267 }
1268 else
1269 UpdateActiveSnapshotCommandId();
1270
1271 if (pstmt->canSetTag)
1272 {
1273 /* statement can set tag string */
1274 ProcessQuery(pstmt,
1275 portal->sourceText,
1276 portal->portalParams,
1277 portal->queryEnv,
1278 dest, qc);
1279 }
1280 else
1281 {
1282 /* stmt added by rewrite cannot set tag */
1283 ProcessQuery(pstmt,
1284 portal->sourceText,
1285 portal->portalParams,
1286 portal->queryEnv,
1287 altdest, NULL);
1288 }
1289
1290 if (log_executor_stats)
1291 ShowUsage("EXECUTOR STATISTICS");
1292
1293 TRACE_POSTGRESQL_QUERY_EXECUTE_DONE();
1294 }
1295 else
1296 {
1297 /*
1298 * process utility functions (create, destroy, etc..)
1299 *
1300 * We must not set a snapshot here for utility commands (if one is
1301 * needed, PortalRunUtility will do it). If a utility command is
1302 * alone in a portal then everything's fine. The only case where
1303 * a utility command can be part of a longer list is that rules
1304 * are allowed to include NotifyStmt. NotifyStmt doesn't care
1305 * whether it has a snapshot or not, so we just leave the current
1306 * snapshot alone if we have one.
1307 */
1308 if (pstmt->canSetTag)
1309 {
1310 Assert(!active_snapshot_set);
1311 /* statement can set tag string */
1312 PortalRunUtility(portal, pstmt, isTopLevel, false,
1313 dest, qc);
1314 }
1315 else
1316 {
1317 Assert(IsA(pstmt->utilityStmt, NotifyStmt));
1318 /* stmt added by rewrite cannot set tag */
1319 PortalRunUtility(portal, pstmt, isTopLevel, false,
1320 altdest, NULL);
1321 }
1322 }
1323
1324 /*
1325 * Clear subsidiary contexts to recover temporary memory.
1326 */
1327 Assert(portal->portalContext == CurrentMemoryContext);
1328
1329 MemoryContextDeleteChildren(portal->portalContext);
1330
1331 /*
1332 * Avoid crashing if portal->stmts has been reset. This can only
1333 * occur if a CALL or DO utility statement executed an internal
1334 * COMMIT/ROLLBACK (cf PortalReleaseCachedPlan). The CALL or DO must
1335 * have been the only statement in the portal, so there's nothing left
1336 * for us to do; but we don't want to dereference a now-dangling list
1337 * pointer.
1338 */
1339 if (portal->stmts == NIL)
1340 break;
1341
1342 /*
1343 * Increment command counter between queries, but not after the last
1344 * one.
1345 */
1346 if (lnext(portal->stmts, stmtlist_item) != NULL)
1347 CommandCounterIncrement();
1348 }
1349
1350 /* Pop the snapshot if we pushed one. */
1351 if (active_snapshot_set)
1352 PopActiveSnapshot();
1353
1354 /*
1355 * If query completion data was supplied, use it. Otherwise use the
1356 * portal's query completion data.
1357 *
1358 * Exception: Clients expect INSERT/UPDATE/DELETE tags to have counts, so
1359 * fake them with zeros. This can happen with DO INSTEAD rules if there
1360 * is no replacement query of the same type as the original. We print "0
1361 * 0" here because technically there is no query of the matching tag type,
1362 * and printing a non-zero count for a different query type seems wrong,
1363 * e.g. an INSERT that does an UPDATE instead should not print "0 1" if
1364 * one row was updated. See QueryRewrite(), step 3, for details.
1365 */
1366 if (qc && qc->commandTag == CMDTAG_UNKNOWN)
1367 {
1368 if (portal->qc.commandTag != CMDTAG_UNKNOWN)
1369 CopyQueryCompletion(qc, &portal->qc);
1370 /* If the caller supplied a qc, we should have set it by now. */
1371 Assert(qc->commandTag != CMDTAG_UNKNOWN);
1372 }
1373 }
1374
1375 /*
1376 * PortalRunFetch
1377 * Variant form of PortalRun that supports SQL FETCH directions.
1378 *
1379 * Note: we presently assume that no callers of this want isTopLevel = true.
1380 *
1381 * count <= 0 is interpreted as a no-op: the destination gets started up
1382 * and shut down, but nothing else happens. Also, count == FETCH_ALL is
1383 * interpreted as "all rows". (cf FetchStmt.howMany)
1384 *
1385 * Returns number of rows processed (suitable for use in result tag)
1386 */
1387 uint64
1388 PortalRunFetch(Portal portal,
1389 FetchDirection fdirection,
1390 long count,
1391 DestReceiver *dest)
1392 {
1393 uint64 result;
1394 Portal saveActivePortal;
1395 ResourceOwner saveResourceOwner;
1396 MemoryContext savePortalContext;
1397 MemoryContext oldContext;
1398
1399 AssertArg(PortalIsValid(portal));
1400
1401 /*
1402 * Check for improper portal use, and mark portal active.
1403 */
1404 MarkPortalActive(portal);
1405
1406 /* If supporting FETCH, portal can't be run-once. */
1407 Assert(!portal->run_once);
1408
1409 /*
1410 * Set up global portal context pointers.
1411 */
1412 saveActivePortal = ActivePortal;
1413 saveResourceOwner = CurrentResourceOwner;
1414 savePortalContext = PortalContext;
1415 PG_TRY();
1416 {
1417 ActivePortal = portal;
1418 if (portal->resowner)
1419 CurrentResourceOwner = portal->resowner;
1420 PortalContext = portal->portalContext;
1421
1422 oldContext = MemoryContextSwitchTo(PortalContext);
1423
1424 switch (portal->strategy)
1425 {
1426 case PORTAL_ONE_SELECT:
1427 result = DoPortalRunFetch(portal, fdirection, count, dest);
1428 break;
1429
1430 case PORTAL_ONE_RETURNING:
1431 case PORTAL_ONE_MOD_WITH:
1432 case PORTAL_UTIL_SELECT:
1433
1434 /*
1435 * If we have not yet run the command, do so, storing its
1436 * results in the portal's tuplestore.
1437 */
1438 if (!portal->holdStore)
1439 FillPortalStore(portal, false /* isTopLevel */ );
1440
1441 /*
1442 * Now fetch desired portion of results.
1443 */
1444 result = DoPortalRunFetch(portal, fdirection, count, dest);
1445 break;
1446
1447 default:
1448 elog(ERROR, "unsupported portal strategy");
1449 result = 0; /* keep compiler quiet */
1450 break;
1451 }
1452 }
1453 PG_CATCH();
1454 {
1455 /* Uncaught error while executing portal: mark it dead */
1456 MarkPortalFailed(portal);
1457
1458 /* Restore global vars and propagate error */
1459 ActivePortal = saveActivePortal;
1460 CurrentResourceOwner = saveResourceOwner;
1461 PortalContext = savePortalContext;
1462
1463 PG_RE_THROW();
1464 }
1465 PG_END_TRY();
1466
1467 MemoryContextSwitchTo(oldContext);
1468
1469 /* Mark portal not active */
1470 portal->status = PORTAL_READY;
1471
1472 ActivePortal = saveActivePortal;
1473 CurrentResourceOwner = saveResourceOwner;
1474 PortalContext = savePortalContext;
1475
1476 return result;
1477 }
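
/*
 * For illustration (a sketch of how SQL-level FETCH/MOVE arrives here):
 * "FETCH ABSOLUTE 5 FROM cur" becomes roughly
 *
 *		PortalRunFetch(portal, FETCH_ABSOLUTE, 5, dest);
 *
 * while "MOVE BACKWARD ALL IN cur" becomes roughly
 *
 *		PortalRunFetch(portal, FETCH_BACKWARD, FETCH_ALL, None_Receiver);
 *
 * with count == FETCH_ALL meaning "all rows", per the header comment above.
 */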
1478
1479 /*
1480 * DoPortalRunFetch
1481 * Guts of PortalRunFetch --- the portal context is already set up
1482 *
1483 * Here, count < 0 typically reverses the direction. Also, count == FETCH_ALL
1484 * is interpreted as "all rows". (cf FetchStmt.howMany)
1485 *
1486 * Returns number of rows processed (suitable for use in result tag)
1487 */
1488 static uint64
1489 DoPortalRunFetch(Portal portal,
1490 FetchDirection fdirection,
1491 long count,
1492 DestReceiver *dest)
1493 {
1494 bool forward;
1495
1496 Assert(portal->strategy == PORTAL_ONE_SELECT ||
1497 portal->strategy == PORTAL_ONE_RETURNING ||
1498 portal->strategy == PORTAL_ONE_MOD_WITH ||
1499 portal->strategy == PORTAL_UTIL_SELECT);
1500
1501 /*
1502 * Note: we disallow backwards fetch (including re-fetch of current row)
1503 * for NO SCROLL cursors, but we interpret that very loosely: you can use
1504 * any of the FetchDirection options, so long as the end result is to move
1505 * forwards by at least one row. Currently it's sufficient to check for
1506 * NO SCROLL in DoPortalRewind() and in the forward == false path in
1507 * PortalRunSelect(); but someday we might prefer to account for that
1508 * restriction explicitly here.
1509 */
1510 switch (fdirection)
1511 {
1512 case FETCH_FORWARD:
1513 if (count < 0)
1514 {
1515 fdirection = FETCH_BACKWARD;
1516 count = -count;
1517 }
1518 /* fall out of switch to share code with FETCH_BACKWARD */
1519 break;
1520 case FETCH_BACKWARD:
1521 if (count < 0)
1522 {
1523 fdirection = FETCH_FORWARD;
1524 count = -count;
1525 }
1526 /* fall out of switch to share code with FETCH_FORWARD */
1527 break;
1528 case FETCH_ABSOLUTE:
1529 if (count > 0)
1530 {
1531 /*
1532 * Definition: Rewind to start, advance count-1 rows, return
1533 * next row (if any).
1534 *
1535 * In practice, if the goal is less than halfway back to the
1536 * start, it's better to scan from where we are.
1537 *
1538 * Also, if current portalPos is outside the range of "long",
1539 * do it the hard way to avoid possible overflow of the count
1540 * argument to PortalRunSelect. We must exclude exactly
1541 * LONG_MAX, as well, lest the count look like FETCH_ALL.
1542 *
1543 * In any case, we arrange to fetch the target row going
1544 * forwards.
1545 */
1546 if ((uint64) (count - 1) <= portal->portalPos / 2 ||
1547 portal->portalPos >= (uint64) LONG_MAX)
1548 {
1549 DoPortalRewind(portal);
1550 if (count > 1)
1551 PortalRunSelect(portal, true, count - 1,
1552 None_Receiver);
1553 }
1554 else
1555 {
1556 long pos = (long) portal->portalPos;
1557
1558 if (portal->atEnd)
1559 pos++; /* need one extra fetch if off end */
1560 if (count <= pos)
1561 PortalRunSelect(portal, false, pos - count + 1,
1562 None_Receiver);
1563 else if (count > pos + 1)
1564 PortalRunSelect(portal, true, count - pos - 1,
1565 None_Receiver);
1566 }
1567 return PortalRunSelect(portal, true, 1L, dest);
1568 }
1569 else if (count < 0)
1570 {
1571 /*
1572 * Definition: Advance to end, back up abs(count)-1 rows,
1573 * return prior row (if any). We could optimize this if we
1574 * knew in advance where the end was, but typically we won't.
1575 * (Is it worth considering case where count > half of size of
1576 * query? We could rewind once we know the size ...)
1577 */
1578 PortalRunSelect(portal, true, FETCH_ALL, None_Receiver);
1579 if (count < -1)
1580 PortalRunSelect(portal, false, -count - 1, None_Receiver);
1581 return PortalRunSelect(portal, false, 1L, dest);
1582 }
1583 else
1584 {
1585 /* count == 0 */
1586 /* Rewind to start, return zero rows */
1587 DoPortalRewind(portal);
1588 return PortalRunSelect(portal, true, 0L, dest);
1589 }
1590 break;
1591 case FETCH_RELATIVE:
1592 if (count > 0)
1593 {
1594 /*
1595 * Definition: advance count-1 rows, return next row (if any).
1596 */
1597 if (count > 1)
1598 PortalRunSelect(portal, true, count - 1, None_Receiver);
1599 return PortalRunSelect(portal, true, 1L, dest);
1600 }
1601 else if (count < 0)
1602 {
1603 /*
1604 * Definition: back up abs(count)-1 rows, return prior row (if
1605 * any).
1606 */
1607 if (count < -1)
1608 PortalRunSelect(portal, false, -count - 1, None_Receiver);
1609 return PortalRunSelect(portal, false, 1L, dest);
1610 }
1611 else
1612 {
1613 /* count == 0 */
1614 /* Same as FETCH FORWARD 0, so fall out of switch */
1615 fdirection = FETCH_FORWARD;
1616 }
1617 break;
1618 default:
1619 elog(ERROR, "bogus direction");
1620 break;
1621 }
1622
1623 /*
1624 * Get here with fdirection == FETCH_FORWARD or FETCH_BACKWARD, and count
1625 * >= 0.
1626 */
1627 forward = (fdirection == FETCH_FORWARD);
1628
1629 /*
1630 * Zero count means to re-fetch the current row, if any (per SQL)
1631 */
1632 if (count == 0)
1633 {
1634 bool on_row;
1635
1636 /* Are we sitting on a row? */
1637 on_row = (!portal->atStart && !portal->atEnd);
1638
1639 if (dest->mydest == DestNone)
1640 {
1641 /* MOVE 0 returns 0/1 based on if FETCH 0 would return a row */
1642 return on_row ? 1 : 0;
1643 }
1644 else
1645 {
1646 /*
1647 * If we are sitting on a row, back up one so we can re-fetch it.
1648 * If we are not sitting on a row, we still have to start up and
1649 * shut down the executor so that the destination is initialized
1650 * and shut down correctly; so keep going. To PortalRunSelect,
1651 * count == 0 means we will retrieve no row.
1652 */
1653 if (on_row)
1654 {
1655 PortalRunSelect(portal, false, 1L, None_Receiver);
1656 /* Set up to fetch one row forward */
1657 count = 1;
1658 forward = true;
1659 }
1660 }
1661 }
1662
1663 /*
1664 * Optimize MOVE BACKWARD ALL into a Rewind.
1665 */
1666 if (!forward && count == FETCH_ALL && dest->mydest == DestNone)
1667 {
1668 uint64 result = portal->portalPos;
1669
1670 if (result > 0 && !portal->atEnd)
1671 result--;
1672 DoPortalRewind(portal);
1673 return result;
1674 }
1675
1676 return PortalRunSelect(portal, forward, count, dest);
1677 }
1678
1679 /*
1680 * DoPortalRewind - rewind a Portal to starting point
1681 */
1682 static void
1683 DoPortalRewind(Portal portal)
1684 {
1685 QueryDesc *queryDesc;
1686
1687 /*
1688 * No work is needed if we've not advanced nor attempted to advance the
1689 * cursor (and we don't want to throw a NO SCROLL error in this case).
1690 */
1691 if (portal->atStart && !portal->atEnd)
1692 return;
1693
1694 /*
1695 * Otherwise, cursor should allow scrolling. However, we're only going to
1696 * enforce that policy fully beginning in v15. In older branches, insist
1697 * on this only if the portal has a holdStore. That prevents users from
1698 * seeing that the holdStore may not have all the rows of the query.
1699 */
1700 if ((portal->cursorOptions & CURSOR_OPT_NO_SCROLL) && portal->holdStore)
1701 ereport(ERROR,
1702 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1703 errmsg("cursor can only scan forward"),
1704 errhint("Declare it with SCROLL option to enable backward scan.")));
1705
1706 /* Rewind holdStore, if we have one */
1707 if (portal->holdStore)
1708 {
1709 MemoryContext oldcontext;
1710
1711 oldcontext = MemoryContextSwitchTo(portal->holdContext);
1712 tuplestore_rescan(portal->holdStore);
1713 MemoryContextSwitchTo(oldcontext);
1714 }
1715
1716 /* Rewind executor, if active */
1717 queryDesc = portal->queryDesc;
1718 if (queryDesc)
1719 {
1720 PushActiveSnapshot(queryDesc->snapshot);
1721 ExecutorRewind(queryDesc);
1722 PopActiveSnapshot();
1723 }
1724
1725 portal->atStart = true;
1726 portal->atEnd = false;
1727 portal->portalPos = 0;
1728 }
1729
1730 /*
1731 * PlannedStmtRequiresSnapshot - what it says on the tin
1732 */
1733 bool
1734 PlannedStmtRequiresSnapshot(PlannedStmt *pstmt)
1735 {
1736 Node *utilityStmt = pstmt->utilityStmt;
1737
1738 /* If it's not a utility statement, it definitely needs a snapshot */
1739 if (utilityStmt == NULL)
1740 return true;
1741
1742 /*
1743 * Most utility statements need a snapshot, and the default presumption
1744 * about new ones should be that they do too. Hence, enumerate those that
1745 * do not need one.
1746 *
1747 * Transaction control, LOCK, and SET must *not* set a snapshot, since
1748 * they need to be executable at the start of a transaction-snapshot-mode
1749 * transaction without freezing a snapshot. By extension we allow SHOW
1750 * not to set a snapshot. The other stmts listed are just efficiency
1751 * hacks. Beware of listing anything that can modify the database --- if,
1752 * say, it has to update an index with expressions that invoke
1753 * user-defined functions, then it had better have a snapshot.
1754 */
1755 if (IsA(utilityStmt, TransactionStmt) ||
1756 IsA(utilityStmt, LockStmt) ||
1757 IsA(utilityStmt, VariableSetStmt) ||
1758 IsA(utilityStmt, VariableShowStmt) ||
1759 IsA(utilityStmt, ConstraintsSetStmt) ||
1760 /* efficiency hacks from here down */
1761 IsA(utilityStmt, FetchStmt) ||
1762 IsA(utilityStmt, ListenStmt) ||
1763 IsA(utilityStmt, NotifyStmt) ||
1764 IsA(utilityStmt, UnlistenStmt) ||
1765 IsA(utilityStmt, CheckPointStmt))
1766 return false;
1767
1768 return true;
1769 }
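
/*
 * For example (a sketch): this returns false for statements such as
 * LOCK TABLE, SET, or LISTEN, but true for any plannable DML and for
 * utility statements like CREATE INDEX that may evaluate user-defined
 * expressions.
 */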
1770
1771 /*
1772 * EnsurePortalSnapshotExists - recreate Portal-level snapshot, if needed
1773 *
1774 * Generally, we will have an active snapshot whenever we are executing
1775 * inside a Portal, unless the Portal's query is one of the utility
1776 * statements exempted from that rule (see PlannedStmtRequiresSnapshot).
1777 * However, procedures and DO blocks can commit or abort the transaction,
1778 * and thereby destroy all snapshots. This function can be called to
1779 * re-establish the Portal-level snapshot when none exists.
1780 */
1781 void
1782 EnsurePortalSnapshotExists(void)
1783 {
1784 Portal portal;
1785
1786 /*
1787 * Nothing to do if a snapshot is set. (We take it on faith that the
1788 * outermost active snapshot belongs to some Portal; or if there is no
1789 * Portal, it's somebody else's responsibility to manage things.)
1790 */
1791 if (ActiveSnapshotSet())
1792 return;
1793
1794 /* Otherwise, we'd better have an active Portal */
1795 portal = ActivePortal;
1796 if (unlikely(portal == NULL))
1797 elog(ERROR, "cannot execute SQL without an outer snapshot or portal");
1798 Assert(portal->portalSnapshot == NULL);
1799
1800 /*
1801 * Create a new snapshot, make it active, and remember it in portal.
1802 * Because the portal now references the snapshot, we must tell snapmgr.c
1803 * that the snapshot belongs to the portal's transaction level, else we
1804 * risk portalSnapshot becoming a dangling pointer.
1805 */
1806 PushActiveSnapshotWithLevel(GetTransactionSnapshot(), portal->createLevel);
1807 /* PushActiveSnapshotWithLevel might have copied the snapshot */
1808 portal->portalSnapshot = GetActiveSnapshot();
1809 }
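
/*
 * A minimal sketch of the intended call pattern, e.g. for code that runs
 * SQL inside a procedure which may just have committed (and thereby
 * destroyed all snapshots):
 *
 *		EnsurePortalSnapshotExists();
 *		... plan and execute SQL that expects an active snapshot ...
 */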
1810