1 /*-------------------------------------------------------------------------
2 *
3 * pquery.c
4 * POSTGRES process query command code
5 *
6 * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 *
10 * IDENTIFICATION
11 * src/backend/tcop/pquery.c
12 *
13 *-------------------------------------------------------------------------
14 */
15
16 #include "postgres.h"
17
18 #include <limits.h>
19
20 #include "access/xact.h"
21 #include "commands/prepare.h"
22 #include "executor/tstoreReceiver.h"
23 #include "miscadmin.h"
24 #include "pg_trace.h"
25 #include "tcop/pquery.h"
26 #include "tcop/utility.h"
27 #include "utils/memutils.h"
28 #include "utils/snapmgr.h"
29
30
31 /*
32 * ActivePortal is the currently executing Portal (the most closely nested,
33 * if there are several).
34 */
35 Portal ActivePortal = NULL;
36
37
38 static void ProcessQuery(PlannedStmt *plan,
39 const char *sourceText,
40 ParamListInfo params,
41 QueryEnvironment *queryEnv,
42 DestReceiver *dest,
43 char *completionTag);
44 static void FillPortalStore(Portal portal, bool isTopLevel);
45 static uint64 RunFromStore(Portal portal, ScanDirection direction, uint64 count,
46 DestReceiver *dest);
47 static uint64 PortalRunSelect(Portal portal, bool forward, long count,
48 DestReceiver *dest);
49 static void PortalRunUtility(Portal portal, PlannedStmt *pstmt,
50 bool isTopLevel, bool setHoldSnapshot,
51 DestReceiver *dest, char *completionTag);
52 static void PortalRunMulti(Portal portal,
53 bool isTopLevel, bool setHoldSnapshot,
54 DestReceiver *dest, DestReceiver *altdest,
55 char *completionTag);
56 static uint64 DoPortalRunFetch(Portal portal,
57 FetchDirection fdirection,
58 long count,
59 DestReceiver *dest);
60 static void DoPortalRewind(Portal portal);
61
62
63 /*
64 * CreateQueryDesc
65 */
66 QueryDesc *
67 CreateQueryDesc(PlannedStmt *plannedstmt,
68 const char *sourceText,
69 Snapshot snapshot,
70 Snapshot crosscheck_snapshot,
71 DestReceiver *dest,
72 ParamListInfo params,
73 QueryEnvironment *queryEnv,
74 int instrument_options)
75 {
76 QueryDesc *qd = (QueryDesc *) palloc(sizeof(QueryDesc));
77
78 qd->operation = plannedstmt->commandType; /* operation */
79 qd->plannedstmt = plannedstmt; /* plan */
80 qd->sourceText = sourceText; /* query text */
81 qd->snapshot = RegisterSnapshot(snapshot); /* snapshot */
82 /* RI check snapshot */
83 qd->crosscheck_snapshot = RegisterSnapshot(crosscheck_snapshot);
84 qd->dest = dest; /* output dest */
85 qd->params = params; /* parameter values passed into query */
86 qd->queryEnv = queryEnv;
87 qd->instrument_options = instrument_options; /* instrumentation wanted? */
88
89 /* null these fields until set by ExecutorStart */
90 qd->tupDesc = NULL;
91 qd->estate = NULL;
92 qd->planstate = NULL;
93 qd->totaltime = NULL;
94
95 /* not yet executed */
96 qd->already_executed = false;
97
98 return qd;
99 }
100
101 /*
102 * FreeQueryDesc
103 */
104 void
105 FreeQueryDesc(QueryDesc *qdesc)
106 {
107 /* Can't be a live query */
108 Assert(qdesc->estate == NULL);
109
110 /* forget our snapshots */
111 UnregisterSnapshot(qdesc->snapshot);
112 UnregisterSnapshot(qdesc->crosscheck_snapshot);
113
114 /* Only the QueryDesc itself need be freed */
115 pfree(qdesc);
116 }
117
118
119 /*
120 * ProcessQuery
121 * Execute a single plannable query within a PORTAL_MULTI_QUERY,
122 * PORTAL_ONE_RETURNING, or PORTAL_ONE_MOD_WITH portal
123 *
124 * plan: the plan tree for the query
125 * sourceText: the source text of the query
126 * params: any parameters needed
127 * dest: where to send results
128 * completionTag: points to a buffer of size COMPLETION_TAG_BUFSIZE
129 * in which to store a command completion status string.
130 *
131 * completionTag may be NULL if caller doesn't want a status string.
132 *
133 * Must be called in a memory context that will be reset or deleted on
134 * error; otherwise the executor's memory usage will be leaked.
135 */
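/*
 * For illustration (not exhaustive): the status strings built below look
 * like "SELECT 5", "INSERT 0 5", "UPDATE 3", or "DELETE 0"; the middle
 * number of the INSERT tag is the new row's OID when exactly one row was
 * inserted into a table WITH OIDS, and 0 otherwise.
 */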
136 static void
137 ProcessQuery(PlannedStmt *plan,
138 const char *sourceText,
139 ParamListInfo params,
140 QueryEnvironment *queryEnv,
141 DestReceiver *dest,
142 char *completionTag)
143 {
144 QueryDesc *queryDesc;
145
146 /*
147 * Create the QueryDesc object
148 */
149 queryDesc = CreateQueryDesc(plan, sourceText,
150 GetActiveSnapshot(), InvalidSnapshot,
151 dest, params, queryEnv, 0);
152
153 /*
154 * Call ExecutorStart to prepare the plan for execution
155 */
156 ExecutorStart(queryDesc, 0);
157
158 /*
159 * Run the plan to completion.
160 */
161 ExecutorRun(queryDesc, ForwardScanDirection, 0L, true);
162
163 /*
164 * Build command completion status string, if caller wants one.
165 */
166 if (completionTag)
167 {
168 Oid lastOid;
169
170 switch (queryDesc->operation)
171 {
172 case CMD_SELECT:
173 snprintf(completionTag, COMPLETION_TAG_BUFSIZE,
174 "SELECT " UINT64_FORMAT,
175 queryDesc->estate->es_processed);
176 break;
177 case CMD_INSERT:
178 if (queryDesc->estate->es_processed == 1)
179 lastOid = queryDesc->estate->es_lastoid;
180 else
181 lastOid = InvalidOid;
182 snprintf(completionTag, COMPLETION_TAG_BUFSIZE,
183 "INSERT %u " UINT64_FORMAT,
184 lastOid, queryDesc->estate->es_processed);
185 break;
186 case CMD_UPDATE:
187 snprintf(completionTag, COMPLETION_TAG_BUFSIZE,
188 "UPDATE " UINT64_FORMAT,
189 queryDesc->estate->es_processed);
190 break;
191 case CMD_DELETE:
192 snprintf(completionTag, COMPLETION_TAG_BUFSIZE,
193 "DELETE " UINT64_FORMAT,
194 queryDesc->estate->es_processed);
195 break;
196 default:
197 strcpy(completionTag, "???");
198 break;
199 }
200 }
201
202 /*
203 * Now, we close down all the scans and free allocated resources.
204 */
205 ExecutorFinish(queryDesc);
206 ExecutorEnd(queryDesc);
207
208 FreeQueryDesc(queryDesc);
209 }
210
211 /*
212 * ChoosePortalStrategy
213 * Select portal execution strategy given the intended statement list.
214 *
215 * The list elements can be Querys or PlannedStmts.
216 * That's more general than portals need, but plancache.c uses this too.
217 *
218 * See the comments in portal.h.
219 */
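/*
 * A rough illustration of the mapping, assuming no unusual rewrite rules:
 * a plain SELECT maps to PORTAL_ONE_SELECT; a SELECT with a data-modifying
 * CTE to PORTAL_ONE_MOD_WITH; a single INSERT/UPDATE/DELETE ... RETURNING
 * to PORTAL_ONE_RETURNING; a tuple-returning utility command such as
 * EXPLAIN, SHOW, or FETCH to PORTAL_UTIL_SELECT; and everything else,
 * including multi-statement rewrite products, to PORTAL_MULTI_QUERY.
 */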
220 PortalStrategy
221 ChoosePortalStrategy(List *stmts)
222 {
223 int nSetTag;
224 ListCell *lc;
225
226 /*
227 * PORTAL_ONE_SELECT and PORTAL_UTIL_SELECT need only consider the
228 * single-statement case, since there are no rewrite rules that can add
229 * auxiliary queries to a SELECT or a utility command. PORTAL_ONE_MOD_WITH
230 * likewise allows only one top-level statement.
231 */
232 if (list_length(stmts) == 1)
233 {
234 Node *stmt = (Node *) linitial(stmts);
235
236 if (IsA(stmt, Query))
237 {
238 Query *query = (Query *) stmt;
239
240 if (query->canSetTag)
241 {
242 if (query->commandType == CMD_SELECT)
243 {
244 if (query->hasModifyingCTE)
245 return PORTAL_ONE_MOD_WITH;
246 else
247 return PORTAL_ONE_SELECT;
248 }
249 if (query->commandType == CMD_UTILITY)
250 {
251 if (UtilityReturnsTuples(query->utilityStmt))
252 return PORTAL_UTIL_SELECT;
253 /* it can't be ONE_RETURNING, so give up */
254 return PORTAL_MULTI_QUERY;
255 }
256 }
257 }
258 else if (IsA(stmt, PlannedStmt))
259 {
260 PlannedStmt *pstmt = (PlannedStmt *) stmt;
261
262 if (pstmt->canSetTag)
263 {
264 if (pstmt->commandType == CMD_SELECT)
265 {
266 if (pstmt->hasModifyingCTE)
267 return PORTAL_ONE_MOD_WITH;
268 else
269 return PORTAL_ONE_SELECT;
270 }
271 if (pstmt->commandType == CMD_UTILITY)
272 {
273 if (UtilityReturnsTuples(pstmt->utilityStmt))
274 return PORTAL_UTIL_SELECT;
275 /* it can't be ONE_RETURNING, so give up */
276 return PORTAL_MULTI_QUERY;
277 }
278 }
279 }
280 else
281 elog(ERROR, "unrecognized node type: %d", (int) nodeTag(stmt));
282 }
283
284 /*
285 * PORTAL_ONE_RETURNING has to allow auxiliary queries added by rewrite.
286 * Choose PORTAL_ONE_RETURNING if there is exactly one canSetTag query and
287 * it has a RETURNING list.
288 */
289 nSetTag = 0;
290 foreach(lc, stmts)
291 {
292 Node *stmt = (Node *) lfirst(lc);
293
294 if (IsA(stmt, Query))
295 {
296 Query *query = (Query *) stmt;
297
298 if (query->canSetTag)
299 {
300 if (++nSetTag > 1)
301 return PORTAL_MULTI_QUERY; /* no need to look further */
302 if (query->commandType == CMD_UTILITY ||
303 query->returningList == NIL)
304 return PORTAL_MULTI_QUERY; /* no need to look further */
305 }
306 }
307 else if (IsA(stmt, PlannedStmt))
308 {
309 PlannedStmt *pstmt = (PlannedStmt *) stmt;
310
311 if (pstmt->canSetTag)
312 {
313 if (++nSetTag > 1)
314 return PORTAL_MULTI_QUERY; /* no need to look further */
315 if (pstmt->commandType == CMD_UTILITY ||
316 !pstmt->hasReturning)
317 return PORTAL_MULTI_QUERY; /* no need to look further */
318 }
319 }
320 else
321 elog(ERROR, "unrecognized node type: %d", (int) nodeTag(stmt));
322 }
323 if (nSetTag == 1)
324 return PORTAL_ONE_RETURNING;
325
326 /* Else, it's the general case... */
327 return PORTAL_MULTI_QUERY;
328 }
329
330 /*
331 * FetchPortalTargetList
332 * Given a portal that returns tuples, extract the query targetlist.
333 * Returns NIL if the portal doesn't have a determinable targetlist.
334 *
335 * Note: do not modify the result.
336 */
337 List *
338 FetchPortalTargetList(Portal portal)
339 {
340 /* no point in looking if we determined it doesn't return tuples */
341 if (portal->strategy == PORTAL_MULTI_QUERY)
342 return NIL;
343 /* get the primary statement and find out what it returns */
344 return FetchStatementTargetList((Node *) PortalGetPrimaryStmt(portal));
345 }
346
347 /*
348 * FetchStatementTargetList
349 * Given a statement that returns tuples, extract the query targetlist.
350 * Returns NIL if the statement doesn't have a determinable targetlist.
351 *
352 * This can be applied to a Query or a PlannedStmt.
353 * That's more general than portals need, but plancache.c uses this too.
354 *
355 * Note: do not modify the result.
356 *
357 * XXX be careful to keep this in sync with UtilityReturnsTuples.
358 */
359 List *
360 FetchStatementTargetList(Node *stmt)
361 {
362 if (stmt == NULL)
363 return NIL;
364 if (IsA(stmt, Query))
365 {
366 Query *query = (Query *) stmt;
367
368 if (query->commandType == CMD_UTILITY)
369 {
370 /* transfer attention to utility statement */
371 stmt = query->utilityStmt;
372 }
373 else
374 {
375 if (query->commandType == CMD_SELECT)
376 return query->targetList;
377 if (query->returningList)
378 return query->returningList;
379 return NIL;
380 }
381 }
382 if (IsA(stmt, PlannedStmt))
383 {
384 PlannedStmt *pstmt = (PlannedStmt *) stmt;
385
386 if (pstmt->commandType == CMD_UTILITY)
387 {
388 /* transfer attention to utility statement */
389 stmt = pstmt->utilityStmt;
390 }
391 else
392 {
393 if (pstmt->commandType == CMD_SELECT)
394 return pstmt->planTree->targetlist;
395 if (pstmt->hasReturning)
396 return pstmt->planTree->targetlist;
397 return NIL;
398 }
399 }
400 if (IsA(stmt, FetchStmt))
401 {
402 FetchStmt *fstmt = (FetchStmt *) stmt;
403 Portal subportal;
404
405 Assert(!fstmt->ismove);
406 subportal = GetPortalByName(fstmt->portalname);
407 Assert(PortalIsValid(subportal));
408 return FetchPortalTargetList(subportal);
409 }
410 if (IsA(stmt, ExecuteStmt))
411 {
412 ExecuteStmt *estmt = (ExecuteStmt *) stmt;
413 PreparedStatement *entry;
414
415 entry = FetchPreparedStatement(estmt->name, true);
416 return FetchPreparedStatementTargetList(entry);
417 }
418 return NIL;
419 }
420
421 /*
422 * PortalStart
423 * Prepare a portal for execution.
424 *
425 * Caller must already have created the portal, done PortalDefineQuery(),
426 * and adjusted portal options if needed.
427 *
428 * If parameters are needed by the query, they must be passed in "params"
429 * (caller is responsible for giving them appropriate lifetime).
430 *
431 * The caller can also provide an initial set of "eflags" to be passed to
432 * ExecutorStart (but note these can be modified internally, and they are
433 * currently only honored for PORTAL_ONE_SELECT portals). Most callers
434 * should simply pass zero.
435 *
436 * The caller can optionally pass a snapshot to be used; pass InvalidSnapshot
437 * for the normal behavior of setting a new snapshot. This parameter is
438 * presently ignored for non-PORTAL_ONE_SELECT portals (it's only intended
439 * to be used for cursors).
440 *
441 * On return, portal is ready to accept PortalRun() calls, and the result
442 * tupdesc (if any) is known.
443 */
444 void
445 PortalStart(Portal portal, ParamListInfo params,
446 int eflags, Snapshot snapshot)
447 {
448 Portal saveActivePortal;
449 ResourceOwner saveResourceOwner;
450 MemoryContext savePortalContext;
451 MemoryContext oldContext;
452 QueryDesc *queryDesc;
453 int myeflags;
454
455 AssertArg(PortalIsValid(portal));
456 AssertState(portal->status == PORTAL_DEFINED);
457
458 /*
459 * Set up global portal context pointers.
460 */
461 saveActivePortal = ActivePortal;
462 saveResourceOwner = CurrentResourceOwner;
463 savePortalContext = PortalContext;
464 PG_TRY();
465 {
466 ActivePortal = portal;
467 if (portal->resowner)
468 CurrentResourceOwner = portal->resowner;
469 PortalContext = portal->portalContext;
470
471 oldContext = MemoryContextSwitchTo(PortalContext);
472
473 /* Must remember portal param list, if any */
474 portal->portalParams = params;
475
476 /*
477 * Determine the portal execution strategy
478 */
479 portal->strategy = ChoosePortalStrategy(portal->stmts);
480
481 /*
482 * Fire her up according to the strategy
483 */
484 switch (portal->strategy)
485 {
486 case PORTAL_ONE_SELECT:
487
488 /* Must set snapshot before starting executor. */
489 if (snapshot)
490 PushActiveSnapshot(snapshot);
491 else
492 PushActiveSnapshot(GetTransactionSnapshot());
493
494 /*
495 * We could remember the snapshot in portal->portalSnapshot,
496 * but presently there seems no need to, as this code path
497 * cannot be used for non-atomic execution. Hence there can't
498 * be any commit/abort that might destroy the snapshot. Since
499 * we don't do that, there's also no need to force a
500 * non-default nesting level for the snapshot.
501 */
502
503 /*
504 * Create QueryDesc in portal's context; for the moment, set
505 * the destination to DestNone.
506 */
507 queryDesc = CreateQueryDesc(linitial_node(PlannedStmt, portal->stmts),
508 portal->sourceText,
509 GetActiveSnapshot(),
510 InvalidSnapshot,
511 None_Receiver,
512 params,
513 portal->queryEnv,
514 0);
515
516 /*
517 * If it's a scrollable cursor, executor needs to support
518 * REWIND and backwards scan, as well as whatever the caller
519 * might've asked for.
520 */
521 if (portal->cursorOptions & CURSOR_OPT_SCROLL)
522 myeflags = eflags | EXEC_FLAG_REWIND | EXEC_FLAG_BACKWARD;
523 else
524 myeflags = eflags;
525
526 /*
527 * Call ExecutorStart to prepare the plan for execution
528 */
529 ExecutorStart(queryDesc, myeflags);
530
531 /*
532 * This tells PortalCleanup to shut down the executor
533 */
534 portal->queryDesc = queryDesc;
535
536 /*
537 * Remember tuple descriptor (computed by ExecutorStart)
538 */
539 portal->tupDesc = queryDesc->tupDesc;
540
541 /*
542 * Reset cursor position data to "start of query"
543 */
544 portal->atStart = true;
545 portal->atEnd = false; /* allow fetches */
546 portal->portalPos = 0;
547
548 PopActiveSnapshot();
549 break;
550
551 case PORTAL_ONE_RETURNING:
552 case PORTAL_ONE_MOD_WITH:
553
554 /*
555 * We don't start the executor until we are told to run the
556 * portal. We do need to set up the result tupdesc.
557 */
558 {
559 PlannedStmt *pstmt;
560
561 pstmt = PortalGetPrimaryStmt(portal);
562 portal->tupDesc =
563 ExecCleanTypeFromTL(pstmt->planTree->targetlist,
564 false);
565 }
566
567 /*
568 * Reset cursor position data to "start of query"
569 */
570 portal->atStart = true;
571 portal->atEnd = false; /* allow fetches */
572 portal->portalPos = 0;
573 break;
574
575 case PORTAL_UTIL_SELECT:
576
577 /*
578 * We don't set snapshot here, because PortalRunUtility will
579 * take care of it if needed.
580 */
581 {
582 PlannedStmt *pstmt = PortalGetPrimaryStmt(portal);
583
584 Assert(pstmt->commandType == CMD_UTILITY);
585 portal->tupDesc = UtilityTupleDescriptor(pstmt->utilityStmt);
586 }
587
588 /*
589 * Reset cursor position data to "start of query"
590 */
591 portal->atStart = true;
592 portal->atEnd = false; /* allow fetches */
593 portal->portalPos = 0;
594 break;
595
596 case PORTAL_MULTI_QUERY:
597 /* Need do nothing now */
598 portal->tupDesc = NULL;
599 break;
600 }
601 }
602 PG_CATCH();
603 {
604 /* Uncaught error while executing portal: mark it dead */
605 MarkPortalFailed(portal);
606
607 /* Restore global vars and propagate error */
608 ActivePortal = saveActivePortal;
609 CurrentResourceOwner = saveResourceOwner;
610 PortalContext = savePortalContext;
611
612 PG_RE_THROW();
613 }
614 PG_END_TRY();
615
616 MemoryContextSwitchTo(oldContext);
617
618 ActivePortal = saveActivePortal;
619 CurrentResourceOwner = saveResourceOwner;
620 PortalContext = savePortalContext;
621
622 portal->status = PORTAL_READY;
623 }
624
625 /*
626 * PortalSetResultFormat
627 * Select the format codes for a portal's output.
628 *
629 * This must be run after PortalStart for a portal that will be read by
630 * a DestRemote or DestRemoteExecute destination. It is not presently needed
631 * for other destination types.
632 *
633 * formats[] is the client format request, as per Bind message conventions.
634 */
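/*
 * For example, a Bind message carrying zero format codes requests text
 * output for every column, a single code of 1 requests binary output for
 * every column, and otherwise exactly one code must be supplied per
 * result column.
 */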
635 void
636 PortalSetResultFormat(Portal portal, int nFormats, int16 *formats)
637 {
638 int natts;
639 int i;
640
641 /* Do nothing if portal won't return tuples */
642 if (portal->tupDesc == NULL)
643 return;
644 natts = portal->tupDesc->natts;
645 portal->formats = (int16 *)
646 MemoryContextAlloc(portal->portalContext,
647 natts * sizeof(int16));
648 if (nFormats > 1)
649 {
650 /* format specified for each column */
651 if (nFormats != natts)
652 ereport(ERROR,
653 (errcode(ERRCODE_PROTOCOL_VIOLATION),
654 errmsg("bind message has %d result formats but query has %d columns",
655 nFormats, natts)));
656 memcpy(portal->formats, formats, natts * sizeof(int16));
657 }
658 else if (nFormats > 0)
659 {
660 /* single format specified, use for all columns */
661 int16 format1 = formats[0];
662
663 for (i = 0; i < natts; i++)
664 portal->formats[i] = format1;
665 }
666 else
667 {
668 /* use default format for all columns */
669 for (i = 0; i < natts; i++)
670 portal->formats[i] = 0;
671 }
672 }
673
674 /*
675 * PortalRun
676 * Run a portal's query or queries.
677 *
678 * count <= 0 is interpreted as a no-op: the destination gets started up
679 * and shut down, but nothing else happens. Also, count == FETCH_ALL is
680 * interpreted as "all rows". Note that count is ignored in multi-query
681 * situations, where we always run the portal to completion.
682 *
683 * isTopLevel: true if query is being executed at backend "top level"
684 * (that is, directly from a client command message)
685 *
686 * dest: where to send output of primary (canSetTag) query
687 *
688 * altdest: where to send output of non-primary queries
689 *
690 * completionTag: points to a buffer of size COMPLETION_TAG_BUFSIZE
691 * in which to store a command completion status string.
692 * May be NULL if caller doesn't want a status string.
693 *
694 * Returns true if the portal's execution is complete, false if it was
695 * suspended due to exhaustion of the count parameter.
696 */
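/*
 * For instance, exec_simple_query passes count = FETCH_ALL so each portal
 * runs to completion, whereas exec_execute_message passes the row limit
 * from the client's Execute message, so an extended-protocol portal may be
 * suspended here and resumed by a later Execute message.
 */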
697 bool
698 PortalRun(Portal portal, long count, bool isTopLevel, bool run_once,
699 DestReceiver *dest, DestReceiver *altdest,
700 char *completionTag)
701 {
702 bool result;
703 uint64 nprocessed;
704 ResourceOwner saveTopTransactionResourceOwner;
705 MemoryContext saveTopTransactionContext;
706 Portal saveActivePortal;
707 ResourceOwner saveResourceOwner;
708 MemoryContext savePortalContext;
709 MemoryContext saveMemoryContext;
710
711 AssertArg(PortalIsValid(portal));
712
713 TRACE_POSTGRESQL_QUERY_EXECUTE_START();
714
715 /* Initialize completion tag to empty string */
716 if (completionTag)
717 completionTag[0] = '\0';
718
719 if (log_executor_stats && portal->strategy != PORTAL_MULTI_QUERY)
720 {
721 elog(DEBUG3, "PortalRun");
722 /* PORTAL_MULTI_QUERY logs its own stats per query */
723 ResetUsage();
724 }
725
726 /*
727 * Check for improper portal use, and mark portal active.
728 */
729 MarkPortalActive(portal);
730
731 /* Set run_once flag. Shouldn't be clear if previously set. */
732 Assert(!portal->run_once || run_once);
733 portal->run_once = run_once;
734
735 /*
736 * Set up global portal context pointers.
737 *
738 * We have to play a special game here to support utility commands like
739 * VACUUM and CLUSTER, which internally start and commit transactions.
740 * When we are called to execute such a command, CurrentResourceOwner will
741 * be pointing to the TopTransactionResourceOwner --- which will be
742 * destroyed and replaced in the course of the internal commit and
743 * restart. So we need to be prepared to restore it as pointing to the
744 * exit-time TopTransactionResourceOwner. (Ain't that ugly? This idea of
745 * internally starting whole new transactions is not good.)
746 * CurrentMemoryContext has a similar problem, but the other pointers we
747 * save here will be NULL or pointing to longer-lived objects.
748 */
749 saveTopTransactionResourceOwner = TopTransactionResourceOwner;
750 saveTopTransactionContext = TopTransactionContext;
751 saveActivePortal = ActivePortal;
752 saveResourceOwner = CurrentResourceOwner;
753 savePortalContext = PortalContext;
754 saveMemoryContext = CurrentMemoryContext;
755 PG_TRY();
756 {
757 ActivePortal = portal;
758 if (portal->resowner)
759 CurrentResourceOwner = portal->resowner;
760 PortalContext = portal->portalContext;
761
762 MemoryContextSwitchTo(PortalContext);
763
764 switch (portal->strategy)
765 {
766 case PORTAL_ONE_SELECT:
767 case PORTAL_ONE_RETURNING:
768 case PORTAL_ONE_MOD_WITH:
769 case PORTAL_UTIL_SELECT:
770
771 /*
772 * If we have not yet run the command, do so, storing its
773 * results in the portal's tuplestore. But we don't do that
774 * for the PORTAL_ONE_SELECT case.
775 */
776 if (portal->strategy != PORTAL_ONE_SELECT && !portal->holdStore)
777 FillPortalStore(portal, isTopLevel);
778
779 /*
780 * Now fetch desired portion of results.
781 */
782 nprocessed = PortalRunSelect(portal, true, count, dest);
783
784 /*
785 * If the portal result contains a command tag and the caller
786 * gave us a pointer to store it, copy it. Patch the "SELECT"
787 * tag to also provide the rowcount.
788 */
789 if (completionTag && portal->commandTag)
790 {
791 if (strcmp(portal->commandTag, "SELECT") == 0)
792 snprintf(completionTag, COMPLETION_TAG_BUFSIZE,
793 "SELECT " UINT64_FORMAT, nprocessed);
794 else
795 strcpy(completionTag, portal->commandTag);
796 }
797
798 /* Mark portal not active */
799 portal->status = PORTAL_READY;
800
801 /*
802 * Since it's a forward fetch, say DONE iff atEnd is now true.
803 */
804 result = portal->atEnd;
805 break;
806
807 case PORTAL_MULTI_QUERY:
808 PortalRunMulti(portal, isTopLevel, false,
809 dest, altdest, completionTag);
810
811 /* Prevent portal's commands from being re-executed */
812 MarkPortalDone(portal);
813
814 /* Always complete at end of RunMulti */
815 result = true;
816 break;
817
818 default:
819 elog(ERROR, "unrecognized portal strategy: %d",
820 (int) portal->strategy);
821 result = false; /* keep compiler quiet */
822 break;
823 }
824 }
825 PG_CATCH();
826 {
827 /* Uncaught error while executing portal: mark it dead */
828 MarkPortalFailed(portal);
829
830 /* Restore global vars and propagate error */
831 if (saveMemoryContext == saveTopTransactionContext)
832 MemoryContextSwitchTo(TopTransactionContext);
833 else
834 MemoryContextSwitchTo(saveMemoryContext);
835 ActivePortal = saveActivePortal;
836 if (saveResourceOwner == saveTopTransactionResourceOwner)
837 CurrentResourceOwner = TopTransactionResourceOwner;
838 else
839 CurrentResourceOwner = saveResourceOwner;
840 PortalContext = savePortalContext;
841
842 PG_RE_THROW();
843 }
844 PG_END_TRY();
845
846 if (saveMemoryContext == saveTopTransactionContext)
847 MemoryContextSwitchTo(TopTransactionContext);
848 else
849 MemoryContextSwitchTo(saveMemoryContext);
850 ActivePortal = saveActivePortal;
851 if (saveResourceOwner == saveTopTransactionResourceOwner)
852 CurrentResourceOwner = TopTransactionResourceOwner;
853 else
854 CurrentResourceOwner = saveResourceOwner;
855 PortalContext = savePortalContext;
856
857 if (log_executor_stats && portal->strategy != PORTAL_MULTI_QUERY)
858 ShowUsage("EXECUTOR STATISTICS");
859
860 TRACE_POSTGRESQL_QUERY_EXECUTE_DONE();
861
862 return result;
863 }
864
865 /*
866 * PortalRunSelect
867 * Execute a portal's query in PORTAL_ONE_SELECT mode, and also
868 * when fetching from a completed holdStore in PORTAL_ONE_RETURNING,
869 * PORTAL_ONE_MOD_WITH, and PORTAL_UTIL_SELECT cases.
870 *
871 * This handles simple N-rows-forward-or-backward cases. For more complex
872 * nonsequential access to a portal, see PortalRunFetch.
873 *
874 * count <= 0 is interpreted as a no-op: the destination gets started up
875 * and shut down, but nothing else happens. Also, count == FETCH_ALL is
876 * interpreted as "all rows". (cf FetchStmt.howMany)
877 *
878 * Caller must already have validated the Portal and done appropriate
879 * setup (cf. PortalRun).
880 *
881 * Returns number of rows processed (suitable for use in result tag)
882 */
883 static uint64
884 PortalRunSelect(Portal portal,
885 bool forward,
886 long count,
887 DestReceiver *dest)
888 {
889 QueryDesc *queryDesc;
890 ScanDirection direction;
891 uint64 nprocessed;
892
893 /*
894 * NB: queryDesc will be NULL if we are fetching from a held cursor or a
895 * completed utility query; can't use it in that path.
896 */
897 queryDesc = portal->queryDesc;
898
899 /* Caller messed up if we have neither a ready query nor held data. */
900 Assert(queryDesc || portal->holdStore);
901
902 /*
903 * Force the queryDesc destination to the right thing. This supports
904 * MOVE, for example, which will pass in dest = DestNone. This is okay to
905 * change as long as we do it on every fetch. (The Executor must not
906 * assume that dest never changes.)
907 */
908 if (queryDesc)
909 queryDesc->dest = dest;
910
911 /*
912 * Determine which direction to go in, and check to see if we're already
913 * at the end of the available tuples in that direction. If so, set the
914 * direction to NoMovement to avoid trying to fetch any tuples. (This
915 * check exists because not all plan node types are robust about being
916 * called again if they've already returned NULL once.) Then call the
917 * executor (we must not skip this, because the destination needs to see a
918 * setup and shutdown even if no tuples are available). Finally, update
919 * the portal position state depending on the number of tuples that were
920 * retrieved.
921 */
922 if (forward)
923 {
924 if (portal->atEnd || count <= 0)
925 {
926 direction = NoMovementScanDirection;
927 count = 0; /* don't pass negative count to executor */
928 }
929 else
930 direction = ForwardScanDirection;
931
932 /* In the executor, zero count processes all rows */
933 if (count == FETCH_ALL)
934 count = 0;
935
936 if (portal->holdStore)
937 nprocessed = RunFromStore(portal, direction, (uint64) count, dest);
938 else
939 {
940 PushActiveSnapshot(queryDesc->snapshot);
941 ExecutorRun(queryDesc, direction, (uint64) count,
942 portal->run_once);
943 nprocessed = queryDesc->estate->es_processed;
944 PopActiveSnapshot();
945 }
946
947 if (!ScanDirectionIsNoMovement(direction))
948 {
949 if (nprocessed > 0)
950 portal->atStart = false; /* OK to go backward now */
951 if (count == 0 || nprocessed < (uint64) count)
952 portal->atEnd = true; /* we retrieved 'em all */
953 portal->portalPos += nprocessed;
954 }
955 }
956 else
957 {
958 if (portal->cursorOptions & CURSOR_OPT_NO_SCROLL)
959 ereport(ERROR,
960 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
961 errmsg("cursor can only scan forward"),
962 errhint("Declare it with SCROLL option to enable backward scan.")));
963
964 if (portal->atStart || count <= 0)
965 {
966 direction = NoMovementScanDirection;
967 count = 0; /* don't pass negative count to executor */
968 }
969 else
970 direction = BackwardScanDirection;
971
972 /* In the executor, zero count processes all rows */
973 if (count == FETCH_ALL)
974 count = 0;
975
976 if (portal->holdStore)
977 nprocessed = RunFromStore(portal, direction, (uint64) count, dest);
978 else
979 {
980 PushActiveSnapshot(queryDesc->snapshot);
981 ExecutorRun(queryDesc, direction, (uint64) count,
982 portal->run_once);
983 nprocessed = queryDesc->estate->es_processed;
984 PopActiveSnapshot();
985 }
986
987 if (!ScanDirectionIsNoMovement(direction))
988 {
989 if (nprocessed > 0 && portal->atEnd)
990 {
991 portal->atEnd = false; /* OK to go forward now */
992 portal->portalPos++; /* adjust for endpoint case */
993 }
994 if (count == 0 || nprocessed < (uint64) count)
995 {
996 portal->atStart = true; /* we retrieved 'em all */
997 portal->portalPos = 0;
998 }
999 else
1000 {
1001 portal->portalPos -= nprocessed;
1002 }
1003 }
1004 }
1005
1006 return nprocessed;
1007 }
1008
1009 /*
1010 * FillPortalStore
1011 * Run the query and load result tuples into the portal's tuple store.
1012 *
1013 * This is used for PORTAL_ONE_RETURNING, PORTAL_ONE_MOD_WITH, and
1014 * PORTAL_UTIL_SELECT cases only.
1015 */
1016 static void
1017 FillPortalStore(Portal portal, bool isTopLevel)
1018 {
1019 DestReceiver *treceiver;
1020 char completionTag[COMPLETION_TAG_BUFSIZE];
1021
1022 PortalCreateHoldStore(portal);
1023 treceiver = CreateDestReceiver(DestTuplestore);
1024 SetTuplestoreDestReceiverParams(treceiver,
1025 portal->holdStore,
1026 portal->holdContext,
1027 false);
1028
1029 completionTag[0] = '\0';
1030
1031 switch (portal->strategy)
1032 {
1033 case PORTAL_ONE_RETURNING:
1034 case PORTAL_ONE_MOD_WITH:
1035
1036 /*
1037 * Run the portal to completion just as for the default
1038 * MULTI_QUERY case, but send the primary query's output to the
1039 * tuplestore. Auxiliary query outputs are discarded. Set the
1040 * portal's holdSnapshot to the snapshot used (or a copy of it).
1041 */
1042 PortalRunMulti(portal, isTopLevel, true,
1043 treceiver, None_Receiver, completionTag);
1044 break;
1045
1046 case PORTAL_UTIL_SELECT:
1047 PortalRunUtility(portal, linitial_node(PlannedStmt, portal->stmts),
1048 isTopLevel, true, treceiver, completionTag);
1049 break;
1050
1051 default:
1052 elog(ERROR, "unsupported portal strategy: %d",
1053 (int) portal->strategy);
1054 break;
1055 }
1056
1057 /* Override default completion tag with actual command result */
1058 if (completionTag[0] != '\0')
1059 portal->commandTag = pstrdup(completionTag);
1060
1061 treceiver->rDestroy(treceiver);
1062 }
1063
1064 /*
1065 * RunFromStore
1066 * Fetch tuples from the portal's tuple store.
1067 *
1068 * Calling conventions are similar to ExecutorRun, except that we
1069 * do not depend on having a queryDesc or estate. Therefore we return the
1070 * number of tuples processed as the result, not in estate->es_processed.
1071 *
1072 * One difference from ExecutorRun is that the destination receiver functions
1073 * are run in the caller's memory context (since we have no estate). Watch
1074 * out for memory leaks.
1075 */
1076 static uint64
1077 RunFromStore(Portal portal, ScanDirection direction, uint64 count,
1078 DestReceiver *dest)
1079 {
1080 uint64 current_tuple_count = 0;
1081 TupleTableSlot *slot;
1082
1083 slot = MakeSingleTupleTableSlot(portal->tupDesc);
1084
1085 dest->rStartup(dest, CMD_SELECT, portal->tupDesc);
1086
1087 if (ScanDirectionIsNoMovement(direction))
1088 {
1089 /* do nothing except start/stop the destination */
1090 }
1091 else
1092 {
1093 bool forward = ScanDirectionIsForward(direction);
1094
1095 for (;;)
1096 {
1097 MemoryContext oldcontext;
1098 bool ok;
1099
1100 oldcontext = MemoryContextSwitchTo(portal->holdContext);
1101
1102 ok = tuplestore_gettupleslot(portal->holdStore, forward, false,
1103 slot);
1104
1105 MemoryContextSwitchTo(oldcontext);
1106
1107 if (!ok)
1108 break;
1109
1110 /*
1111 * If we are not able to send the tuple, we assume the destination
1112 * has closed and no more tuples can be sent. If that's the case,
1113 * end the loop.
1114 */
1115 if (!dest->receiveSlot(slot, dest))
1116 break;
1117
1118 ExecClearTuple(slot);
1119
1120 /*
1121 * check our tuple count.. if we've processed the proper number
1122 * then quit, else loop again and process more tuples. Zero count
1123 * means no limit.
1124 */
1125 current_tuple_count++;
1126 if (count && count == current_tuple_count)
1127 break;
1128 }
1129 }
1130
1131 dest->rShutdown(dest);
1132
1133 ExecDropSingleTupleTableSlot(slot);
1134
1135 return current_tuple_count;
1136 }
1137
1138 /*
1139 * PortalRunUtility
1140 * Execute a utility statement inside a portal.
1141 */
1142 static void
1143 PortalRunUtility(Portal portal, PlannedStmt *pstmt,
1144 bool isTopLevel, bool setHoldSnapshot,
1145 DestReceiver *dest, char *completionTag)
1146 {
1147 /*
1148 * Set snapshot if utility stmt needs one.
1149 */
1150 if (PlannedStmtRequiresSnapshot(pstmt))
1151 {
1152 Snapshot snapshot = GetTransactionSnapshot();
1153
1154 /* If told to, register the snapshot we're using and save in portal */
1155 if (setHoldSnapshot)
1156 {
1157 snapshot = RegisterSnapshot(snapshot);
1158 portal->holdSnapshot = snapshot;
1159 }
1160
1161 /*
1162 * In any case, make the snapshot active and remember it in portal.
1163 * Because the portal now references the snapshot, we must tell
1164 * snapmgr.c that the snapshot belongs to the portal's transaction
1165 * level, else we risk portalSnapshot becoming a dangling pointer.
1166 */
1167 PushActiveSnapshotWithLevel(snapshot, portal->createLevel);
1168 /* PushActiveSnapshotWithLevel might have copied the snapshot */
1169 portal->portalSnapshot = GetActiveSnapshot();
1170 }
1171 else
1172 portal->portalSnapshot = NULL;
1173
1174 ProcessUtility(pstmt,
1175 portal->sourceText,
1176 isTopLevel ? PROCESS_UTILITY_TOPLEVEL : PROCESS_UTILITY_QUERY,
1177 portal->portalParams,
1178 portal->queryEnv,
1179 dest,
1180 completionTag);
1181
1182 /* Some utility statements may change context on us */
1183 MemoryContextSwitchTo(portal->portalContext);
1184
1185 /*
1186 * Some utility commands (e.g., VACUUM) pop the ActiveSnapshot stack from
1187 * under us, so don't complain if it's now empty. Otherwise, our snapshot
1188 * should be the top one; pop it. Note that this could be a different
1189 * snapshot from the one we made above; see EnsurePortalSnapshotExists.
1190 */
1191 if (portal->portalSnapshot != NULL && ActiveSnapshotSet())
1192 {
1193 Assert(portal->portalSnapshot == GetActiveSnapshot());
1194 PopActiveSnapshot();
1195 }
1196 portal->portalSnapshot = NULL;
1197 }
1198
1199 /*
1200 * PortalRunMulti
1201 * Execute a portal's queries in the general case (multi queries
1202 * or non-SELECT-like queries)
1203 */
1204 static void
1205 PortalRunMulti(Portal portal,
1206 bool isTopLevel, bool setHoldSnapshot,
1207 DestReceiver *dest, DestReceiver *altdest,
1208 char *completionTag)
1209 {
1210 bool active_snapshot_set = false;
1211 ListCell *stmtlist_item;
1212
1213 /*
1214 * If the destination is DestRemoteExecute, change to DestNone. The
1215 * reason is that the client won't be expecting any tuples, and indeed has
1216 * no way to know what they are, since there is no provision for Describe
1217 * to send a RowDescription message when this portal execution strategy is
1218 * in effect. This presently will only affect SELECT commands added to
1219 * non-SELECT queries by rewrite rules: such commands will be executed,
1220 * but the results will be discarded unless you use "simple Query"
1221 * protocol.
1222 */
1223 if (dest->mydest == DestRemoteExecute)
1224 dest = None_Receiver;
1225 if (altdest->mydest == DestRemoteExecute)
1226 altdest = None_Receiver;
1227
1228 /*
1229 * Loop to handle the individual queries generated from a single parsetree
1230 * by analysis and rewrite.
1231 */
1232 foreach(stmtlist_item, portal->stmts)
1233 {
1234 PlannedStmt *pstmt = lfirst_node(PlannedStmt, stmtlist_item);
1235
1236 /*
1237 * If we got a cancel signal in prior command, quit
1238 */
1239 CHECK_FOR_INTERRUPTS();
1240
1241 if (pstmt->utilityStmt == NULL)
1242 {
1243 /*
1244 * process a plannable query.
1245 */
1246 TRACE_POSTGRESQL_QUERY_EXECUTE_START();
1247
1248 if (log_executor_stats)
1249 ResetUsage();
1250
1251 /*
1252 * Must always have a snapshot for plannable queries. First time
1253 * through, take a new snapshot; for subsequent queries in the
1254 * same portal, just update the snapshot's copy of the command
1255 * counter.
1256 */
1257 if (!active_snapshot_set)
1258 {
1259 Snapshot snapshot = GetTransactionSnapshot();
1260
1261 /* If told to, register the snapshot and save in portal */
1262 if (setHoldSnapshot)
1263 {
1264 snapshot = RegisterSnapshot(snapshot);
1265 portal->holdSnapshot = snapshot;
1266 }
1267
1268 /*
1269 * We can't have the holdSnapshot also be the active one,
1270 * because UpdateActiveSnapshotCommandId would complain. So
1271 * force an extra snapshot copy. Plain PushActiveSnapshot
1272 * would have copied the transaction snapshot anyway, so this
1273 * only adds a copy step when setHoldSnapshot is true. (It's
1274 * okay for the command ID of the active snapshot to diverge
1275 * from what holdSnapshot has.)
1276 */
1277 PushCopiedSnapshot(snapshot);
1278
1279 /*
1280 * As for PORTAL_ONE_SELECT portals, it does not seem
1281 * necessary to maintain portal->portalSnapshot here.
1282 */
1283
1284 active_snapshot_set = true;
1285 }
1286 else
1287 UpdateActiveSnapshotCommandId();
1288
1289 if (pstmt->canSetTag)
1290 {
1291 /* statement can set tag string */
1292 ProcessQuery(pstmt,
1293 portal->sourceText,
1294 portal->portalParams,
1295 portal->queryEnv,
1296 dest, completionTag);
1297 }
1298 else
1299 {
1300 /* stmt added by rewrite cannot set tag */
1301 ProcessQuery(pstmt,
1302 portal->sourceText,
1303 portal->portalParams,
1304 portal->queryEnv,
1305 altdest, NULL);
1306 }
1307
1308 if (log_executor_stats)
1309 ShowUsage("EXECUTOR STATISTICS");
1310
1311 TRACE_POSTGRESQL_QUERY_EXECUTE_DONE();
1312 }
1313 else
1314 {
1315 /*
1316 * process utility functions (create, destroy, etc..)
1317 *
1318 * We must not set a snapshot here for utility commands (if one is
1319 * needed, PortalRunUtility will do it). If a utility command is
1320 * alone in a portal then everything's fine. The only case where
1321 * a utility command can be part of a longer list is that rules
1322 * are allowed to include NotifyStmt. NotifyStmt doesn't care
1323 * whether it has a snapshot or not, so we just leave the current
1324 * snapshot alone if we have one.
1325 */
1326 if (pstmt->canSetTag)
1327 {
1328 Assert(!active_snapshot_set);
1329 /* statement can set tag string */
1330 PortalRunUtility(portal, pstmt, isTopLevel, false,
1331 dest, completionTag);
1332 }
1333 else
1334 {
1335 Assert(IsA(pstmt->utilityStmt, NotifyStmt));
1336 /* stmt added by rewrite cannot set tag */
1337 PortalRunUtility(portal, pstmt, isTopLevel, false,
1338 altdest, NULL);
1339 }
1340 }
1341
1342 /*
1343 * Clear subsidiary contexts to recover temporary memory.
1344 */
1345 Assert(portal->portalContext == CurrentMemoryContext);
1346
1347 MemoryContextDeleteChildren(portal->portalContext);
1348
1349 /*
1350 * Avoid crashing if portal->stmts has been reset. This can only
1351 * occur if a CALL or DO utility statement executed an internal
1352 * COMMIT/ROLLBACK (cf PortalReleaseCachedPlan). The CALL or DO must
1353 * have been the only statement in the portal, so there's nothing left
1354 * for us to do; but we don't want to dereference a now-dangling list
1355 * pointer.
1356 */
1357 if (portal->stmts == NIL)
1358 break;
1359
1360 /*
1361 * Increment command counter between queries, but not after the last
1362 * one.
1363 */
1364 if (lnext(stmtlist_item) != NULL)
1365 CommandCounterIncrement();
1366 }
1367
1368 /* Pop the snapshot if we pushed one. */
1369 if (active_snapshot_set)
1370 PopActiveSnapshot();
1371
1372 /*
1373 * If a command completion tag was supplied, use it. Otherwise use the
1374 * portal's commandTag as the default completion tag.
1375 *
1376 * Exception: Clients expect INSERT/UPDATE/DELETE tags to have counts, so
1377 * fake them with zeros. This can happen with DO INSTEAD rules if there
1378 * is no replacement query of the same type as the original. We print "0
1379 * 0" here because technically there is no query of the matching tag type,
1380 * and printing a non-zero count for a different query type seems wrong,
1381 * e.g. an INSERT that does an UPDATE instead should not print "0 1" if
1382 * one row was updated. See QueryRewrite(), step 3, for details.
1383 */
1384 if (completionTag && completionTag[0] == '\0')
1385 {
1386 if (portal->commandTag)
1387 strcpy(completionTag, portal->commandTag);
1388 if (strcmp(completionTag, "SELECT") == 0)
1389 sprintf(completionTag, "SELECT 0 0");
1390 else if (strcmp(completionTag, "INSERT") == 0)
1391 strcpy(completionTag, "INSERT 0 0");
1392 else if (strcmp(completionTag, "UPDATE") == 0)
1393 strcpy(completionTag, "UPDATE 0");
1394 else if (strcmp(completionTag, "DELETE") == 0)
1395 strcpy(completionTag, "DELETE 0");
1396 }
1397 }
1398
1399 /*
1400 * PortalRunFetch
1401 * Variant form of PortalRun that supports SQL FETCH directions.
1402 *
1403 * Note: we presently assume that no callers of this want isTopLevel = true.
1404 *
1405 * count <= 0 is interpreted as a no-op: the destination gets started up
1406 * and shut down, but nothing else happens. Also, count == FETCH_ALL is
1407 * interpreted as "all rows". (cf FetchStmt.howMany)
1408 *
1409 * Returns number of rows processed (suitable for use in result tag)
1410 */
1411 uint64
1412 PortalRunFetch(Portal portal,
1413 FetchDirection fdirection,
1414 long count,
1415 DestReceiver *dest)
1416 {
1417 uint64 result;
1418 Portal saveActivePortal;
1419 ResourceOwner saveResourceOwner;
1420 MemoryContext savePortalContext;
1421 MemoryContext oldContext;
1422
1423 AssertArg(PortalIsValid(portal));
1424
1425 /*
1426 * Check for improper portal use, and mark portal active.
1427 */
1428 MarkPortalActive(portal);
1429
1430 /* If supporting FETCH, portal can't be run-once. */
1431 Assert(!portal->run_once);
1432
1433 /*
1434 * Set up global portal context pointers.
1435 */
1436 saveActivePortal = ActivePortal;
1437 saveResourceOwner = CurrentResourceOwner;
1438 savePortalContext = PortalContext;
1439 PG_TRY();
1440 {
1441 ActivePortal = portal;
1442 if (portal->resowner)
1443 CurrentResourceOwner = portal->resowner;
1444 PortalContext = portal->portalContext;
1445
1446 oldContext = MemoryContextSwitchTo(PortalContext);
1447
1448 switch (portal->strategy)
1449 {
1450 case PORTAL_ONE_SELECT:
1451 result = DoPortalRunFetch(portal, fdirection, count, dest);
1452 break;
1453
1454 case PORTAL_ONE_RETURNING:
1455 case PORTAL_ONE_MOD_WITH:
1456 case PORTAL_UTIL_SELECT:
1457
1458 /*
1459 * If we have not yet run the command, do so, storing its
1460 * results in the portal's tuplestore.
1461 */
1462 if (!portal->holdStore)
1463 FillPortalStore(portal, false /* isTopLevel */ );
1464
1465 /*
1466 * Now fetch desired portion of results.
1467 */
1468 result = DoPortalRunFetch(portal, fdirection, count, dest);
1469 break;
1470
1471 default:
1472 elog(ERROR, "unsupported portal strategy");
1473 result = 0; /* keep compiler quiet */
1474 break;
1475 }
1476 }
1477 PG_CATCH();
1478 {
1479 /* Uncaught error while executing portal: mark it dead */
1480 MarkPortalFailed(portal);
1481
1482 /* Restore global vars and propagate error */
1483 ActivePortal = saveActivePortal;
1484 CurrentResourceOwner = saveResourceOwner;
1485 PortalContext = savePortalContext;
1486
1487 PG_RE_THROW();
1488 }
1489 PG_END_TRY();
1490
1491 MemoryContextSwitchTo(oldContext);
1492
1493 /* Mark portal not active */
1494 portal->status = PORTAL_READY;
1495
1496 ActivePortal = saveActivePortal;
1497 CurrentResourceOwner = saveResourceOwner;
1498 PortalContext = savePortalContext;
1499
1500 return result;
1501 }
1502
1503 /*
1504 * DoPortalRunFetch
1505 * Guts of PortalRunFetch --- the portal context is already set up
1506 *
1507 * Here, count < 0 typically reverses the direction. Also, count == FETCH_ALL
1508 * is interpreted as "all rows". (cf FetchStmt.howMany)
1509 *
1510 * Returns number of rows processed (suitable for use in result tag)
1511 */
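/*
 * To illustrate the cases handled below: FETCH ABSOLUTE 5 repositions as
 * needed and returns the fifth row; FETCH ABSOLUTE -2 runs to the end and
 * returns the next-to-last row; FETCH RELATIVE -3 returns the row three
 * before the current one; and MOVE BACKWARD ALL is recognized further down
 * and optimized into a simple rewind.
 */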
1512 static uint64
1513 DoPortalRunFetch(Portal portal,
1514 FetchDirection fdirection,
1515 long count,
1516 DestReceiver *dest)
1517 {
1518 bool forward;
1519
1520 Assert(portal->strategy == PORTAL_ONE_SELECT ||
1521 portal->strategy == PORTAL_ONE_RETURNING ||
1522 portal->strategy == PORTAL_ONE_MOD_WITH ||
1523 portal->strategy == PORTAL_UTIL_SELECT);
1524
1525 /*
1526 * Note: we disallow backwards fetch (including re-fetch of current row)
1527 * for NO SCROLL cursors, but we interpret that very loosely: you can use
1528 * any of the FetchDirection options, so long as the end result is to move
1529 * forwards by at least one row. Currently it's sufficient to check for
1530 * NO SCROLL in DoPortalRewind() and in the forward == false path in
1531 * PortalRunSelect(); but someday we might prefer to account for that
1532 * restriction explicitly here.
1533 */
1534 switch (fdirection)
1535 {
1536 case FETCH_FORWARD:
1537 if (count < 0)
1538 {
1539 fdirection = FETCH_BACKWARD;
1540 count = -count;
1541 }
1542 /* fall out of switch to share code with FETCH_BACKWARD */
1543 break;
1544 case FETCH_BACKWARD:
1545 if (count < 0)
1546 {
1547 fdirection = FETCH_FORWARD;
1548 count = -count;
1549 }
1550 /* fall out of switch to share code with FETCH_FORWARD */
1551 break;
1552 case FETCH_ABSOLUTE:
1553 if (count > 0)
1554 {
1555 /*
1556 * Definition: Rewind to start, advance count-1 rows, return
1557 * next row (if any).
1558 *
1559 * In practice, if the goal is less than halfway back to the
1560 * start, it's better to scan from where we are.
1561 *
1562 * Also, if current portalPos is outside the range of "long",
1563 * do it the hard way to avoid possible overflow of the count
1564 * argument to PortalRunSelect. We must exclude exactly
1565 * LONG_MAX, as well, lest the count look like FETCH_ALL.
1566 *
1567 * In any case, we arrange to fetch the target row going
1568 * forwards.
1569 */
1570 if ((uint64) (count - 1) <= portal->portalPos / 2 ||
1571 portal->portalPos >= (uint64) LONG_MAX)
1572 {
1573 DoPortalRewind(portal);
1574 if (count > 1)
1575 PortalRunSelect(portal, true, count - 1,
1576 None_Receiver);
1577 }
1578 else
1579 {
1580 long pos = (long) portal->portalPos;
1581
1582 if (portal->atEnd)
1583 pos++; /* need one extra fetch if off end */
1584 if (count <= pos)
1585 PortalRunSelect(portal, false, pos - count + 1,
1586 None_Receiver);
1587 else if (count > pos + 1)
1588 PortalRunSelect(portal, true, count - pos - 1,
1589 None_Receiver);
1590 }
1591 return PortalRunSelect(portal, true, 1L, dest);
1592 }
1593 else if (count < 0)
1594 {
1595 /*
1596 * Definition: Advance to end, back up abs(count)-1 rows,
1597 * return prior row (if any). We could optimize this if we
1598 * knew in advance where the end was, but typically we won't.
1599 * (Is it worth considering case where count > half of size of
1600 * query? We could rewind once we know the size ...)
1601 */
1602 PortalRunSelect(portal, true, FETCH_ALL, None_Receiver);
1603 if (count < -1)
1604 PortalRunSelect(portal, false, -count - 1, None_Receiver);
1605 return PortalRunSelect(portal, false, 1L, dest);
1606 }
1607 else
1608 {
1609 /* count == 0 */
1610 /* Rewind to start, return zero rows */
1611 DoPortalRewind(portal);
1612 return PortalRunSelect(portal, true, 0L, dest);
1613 }
1614 break;
1615 case FETCH_RELATIVE:
1616 if (count > 0)
1617 {
1618 /*
1619 * Definition: advance count-1 rows, return next row (if any).
1620 */
1621 if (count > 1)
1622 PortalRunSelect(portal, true, count - 1, None_Receiver);
1623 return PortalRunSelect(portal, true, 1L, dest);
1624 }
1625 else if (count < 0)
1626 {
1627 /*
1628 * Definition: back up abs(count)-1 rows, return prior row (if
1629 * any).
1630 */
1631 if (count < -1)
1632 PortalRunSelect(portal, false, -count - 1, None_Receiver);
1633 return PortalRunSelect(portal, false, 1L, dest);
1634 }
1635 else
1636 {
1637 /* count == 0 */
1638 /* Same as FETCH FORWARD 0, so fall out of switch */
1639 fdirection = FETCH_FORWARD;
1640 }
1641 break;
1642 default:
1643 elog(ERROR, "bogus direction");
1644 break;
1645 }
1646
1647 /*
1648 * Get here with fdirection == FETCH_FORWARD or FETCH_BACKWARD, and count
1649 * >= 0.
1650 */
1651 forward = (fdirection == FETCH_FORWARD);
1652
1653 /*
1654 * Zero count means to re-fetch the current row, if any (per SQL)
1655 */
1656 if (count == 0)
1657 {
1658 bool on_row;
1659
1660 /* Are we sitting on a row? */
1661 on_row = (!portal->atStart && !portal->atEnd);
1662
1663 if (dest->mydest == DestNone)
1664 {
1665 /* MOVE 0 returns 0/1 based on if FETCH 0 would return a row */
1666 return on_row ? 1 : 0;
1667 }
1668 else
1669 {
1670 /*
1671 * If we are sitting on a row, back up one so we can re-fetch it.
1672 * If we are not sitting on a row, we still have to start up and
1673 * shut down the executor so that the destination is initialized
1674 * and shut down correctly; so keep going. To PortalRunSelect,
1675 * count == 0 means we will retrieve no row.
1676 */
1677 if (on_row)
1678 {
1679 PortalRunSelect(portal, false, 1L, None_Receiver);
1680 /* Set up to fetch one row forward */
1681 count = 1;
1682 forward = true;
1683 }
1684 }
1685 }
1686
1687 /*
1688 * Optimize MOVE BACKWARD ALL into a Rewind.
1689 */
1690 if (!forward && count == FETCH_ALL && dest->mydest == DestNone)
1691 {
1692 uint64 result = portal->portalPos;
1693
1694 if (result > 0 && !portal->atEnd)
1695 result--;
1696 DoPortalRewind(portal);
1697 return result;
1698 }
1699
1700 return PortalRunSelect(portal, forward, count, dest);
1701 }
1702
1703 /*
1704 * DoPortalRewind - rewind a Portal to starting point
1705 */
1706 static void
1707 DoPortalRewind(Portal portal)
1708 {
1709 QueryDesc *queryDesc;
1710
1711 /*
1712 * No work is needed if we've not advanced nor attempted to advance the
1713 * cursor (and we don't want to throw a NO SCROLL error in this case).
1714 */
1715 if (portal->atStart && !portal->atEnd)
1716 return;
1717
1718 /*
1719 * Otherwise, cursor should allow scrolling. However, we're only going to
1720 * enforce that policy fully beginning in v15. In older branches, insist
1721 * on this only if the portal has a holdStore. That prevents users from
1722 * seeing that the holdStore may not have all the rows of the query.
1723 */
1724 if ((portal->cursorOptions & CURSOR_OPT_NO_SCROLL) && portal->holdStore)
1725 ereport(ERROR,
1726 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1727 errmsg("cursor can only scan forward"),
1728 errhint("Declare it with SCROLL option to enable backward scan.")));
1729
1730 /* Rewind holdStore, if we have one */
1731 if (portal->holdStore)
1732 {
1733 MemoryContext oldcontext;
1734
1735 oldcontext = MemoryContextSwitchTo(portal->holdContext);
1736 tuplestore_rescan(portal->holdStore);
1737 MemoryContextSwitchTo(oldcontext);
1738 }
1739
1740 /* Rewind executor, if active */
1741 queryDesc = portal->queryDesc;
1742 if (queryDesc)
1743 {
1744 PushActiveSnapshot(queryDesc->snapshot);
1745 ExecutorRewind(queryDesc);
1746 PopActiveSnapshot();
1747 }
1748
1749 portal->atStart = true;
1750 portal->atEnd = false;
1751 portal->portalPos = 0;
1752 }
1753
1754 /*
1755 * PlannedStmtRequiresSnapshot - what it says on the tin
1756 */
1757 bool
1758 PlannedStmtRequiresSnapshot(PlannedStmt *pstmt)
1759 {
1760 Node *utilityStmt = pstmt->utilityStmt;
1761
1762 /* If it's not a utility statement, it definitely needs a snapshot */
1763 if (utilityStmt == NULL)
1764 return true;
1765
1766 /*
1767 * Most utility statements need a snapshot, and the default presumption
1768 * about new ones should be that they do too. Hence, enumerate those that
1769 * do not need one.
1770 *
1771 * Transaction control, LOCK, and SET must *not* set a snapshot, since
1772 * they need to be executable at the start of a transaction-snapshot-mode
1773 * transaction without freezing a snapshot. By extension we allow SHOW
1774 * not to set a snapshot. The other stmts listed are just efficiency
1775 * hacks. Beware of listing anything that can modify the database --- if,
1776 * say, it has to update an index with expressions that invoke
1777 * user-defined functions, then it had better have a snapshot.
1778 */
1779 if (IsA(utilityStmt, TransactionStmt) ||
1780 IsA(utilityStmt, LockStmt) ||
1781 IsA(utilityStmt, VariableSetStmt) ||
1782 IsA(utilityStmt, VariableShowStmt) ||
1783 IsA(utilityStmt, ConstraintsSetStmt) ||
1784 /* efficiency hacks from here down */
1785 IsA(utilityStmt, FetchStmt) ||
1786 IsA(utilityStmt, ListenStmt) ||
1787 IsA(utilityStmt, NotifyStmt) ||
1788 IsA(utilityStmt, UnlistenStmt) ||
1789 IsA(utilityStmt, CheckPointStmt))
1790 return false;
1791
1792 return true;
1793 }
1794
1795 /*
1796 * EnsurePortalSnapshotExists - recreate Portal-level snapshot, if needed
1797 *
1798 * Generally, we will have an active snapshot whenever we are executing
1799 * inside a Portal, unless the Portal's query is one of the utility
1800 * statements exempted from that rule (see PlannedStmtRequiresSnapshot).
1801 * However, procedures and DO blocks can commit or abort the transaction,
1802 * and thereby destroy all snapshots. This function can be called to
1803 * re-establish the Portal-level snapshot when none exists.
1804 */
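/*
 * A sketch of the scenario this guards against: a CALL statement runs a
 * procedure inside a portal; the procedure issues COMMIT, which pops every
 * active snapshot; when the procedure then goes on to execute more SQL
 * (for example via SPI), the executing code is expected to call this
 * function first so that a fresh portal-level snapshot is in place.
 */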
1805 void
1806 EnsurePortalSnapshotExists(void)
1807 {
1808 Portal portal;
1809
1810 /*
1811 * Nothing to do if a snapshot is set. (We take it on faith that the
1812 * outermost active snapshot belongs to some Portal; or if there is no
1813 * Portal, it's somebody else's responsibility to manage things.)
1814 */
1815 if (ActiveSnapshotSet())
1816 return;
1817
1818 /* Otherwise, we'd better have an active Portal */
1819 portal = ActivePortal;
1820 if (unlikely(portal == NULL))
1821 elog(ERROR, "cannot execute SQL without an outer snapshot or portal");
1822 Assert(portal->portalSnapshot == NULL);
1823
1824 /*
1825 * Create a new snapshot, make it active, and remember it in portal.
1826 * Because the portal now references the snapshot, we must tell snapmgr.c
1827 * that the snapshot belongs to the portal's transaction level, else we
1828 * risk portalSnapshot becoming a dangling pointer.
1829 */
1830 PushActiveSnapshotWithLevel(GetTransactionSnapshot(), portal->createLevel);
1831 /* PushActiveSnapshotWithLevel might have copied the snapshot */
1832 portal->portalSnapshot = GetActiveSnapshot();
1833 }
1834