/*-------------------------------------------------------------------------
 *
 * portalmem.c
 *	  backend portal memory management
 *
 * Portals are objects representing the execution state of a query.
 * This module provides memory management services for portals, but it
 * doesn't actually run the executor for them.
 *
 *
 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/utils/mmgr/portalmem.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/xact.h"
#include "catalog/pg_type.h"
#include "commands/portalcmds.h"
#include "miscadmin.h"
#include "storage/ipc.h"
#include "utils/builtins.h"
#include "utils/memutils.h"
#include "utils/snapmgr.h"
#include "utils/timestamp.h"

/*
 * Estimate of the maximum number of open portals a user would have,
 * used in initially sizing the PortalHashTable in EnablePortalManager().
 * Since the hash table can expand, there's no need to make this overly
 * generous, and keeping it small avoids unnecessary overhead in the
 * hash_seq_search() calls executed during transaction end.
 */
#define PORTALS_PER_USER	   16


/* ----------------
 *		Global state
 * ----------------
 */

#define MAX_PORTALNAME_LEN		NAMEDATALEN

typedef struct portalhashent
{
	char		portalname[MAX_PORTALNAME_LEN];
	Portal		portal;
} PortalHashEnt;

static HTAB *PortalHashTable = NULL;

#define PortalHashTableLookup(NAME, PORTAL) \
do { \
	PortalHashEnt *hentry; \
	\
	hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
										   (NAME), HASH_FIND, NULL); \
	if (hentry) \
		PORTAL = hentry->portal; \
	else \
		PORTAL = NULL; \
} while(0)

#define PortalHashTableInsert(PORTAL, NAME) \
do { \
	PortalHashEnt *hentry; bool found; \
	\
	hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
										   (NAME), HASH_ENTER, &found); \
	if (found) \
		elog(ERROR, "duplicate portal name"); \
	hentry->portal = PORTAL; \
	/* To avoid duplicate storage, make PORTAL->name point to htab entry */ \
	PORTAL->name = hentry->portalname; \
} while(0)

#define PortalHashTableDelete(PORTAL) \
do { \
	PortalHashEnt *hentry; \
	\
	hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
										   PORTAL->name, HASH_REMOVE, NULL); \
	if (hentry == NULL) \
		elog(WARNING, "trying to delete portal name that does not exist"); \
} while(0)

static MemoryContext TopPortalContext = NULL;


/* ----------------------------------------------------------------
 *				   public portal interface functions
 * ----------------------------------------------------------------
 */

/*
 * EnablePortalManager
 *		Enables the portal management module at backend startup.
 */
void
EnablePortalManager(void)
{
	HASHCTL		ctl;

	Assert(TopPortalContext == NULL);

	TopPortalContext = AllocSetContextCreate(TopMemoryContext,
											 "TopPortalContext",
											 ALLOCSET_DEFAULT_SIZES);

	ctl.keysize = MAX_PORTALNAME_LEN;
	ctl.entrysize = sizeof(PortalHashEnt);

	/*
	 * use PORTALS_PER_USER as a guess of how many hash table entries to
	 * create, initially
	 */
	PortalHashTable = hash_create("Portal hash", PORTALS_PER_USER,
								  &ctl, HASH_ELEM);
}

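/*
 * For illustration: a backend is expected to call EnablePortalManager()
 * exactly once during startup, before any portal is created (in PostgreSQL
 * this happens during InitPostgres).  The Assert on TopPortalContext above
 * guards against a second call.
 */
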
/*
 * GetPortalByName
 *		Returns a portal given a portal name, or NULL if name not found.
 */
Portal
GetPortalByName(const char *name)
{
	Portal		portal;

	if (PointerIsValid(name))
		PortalHashTableLookup(name, portal);
	else
		portal = NULL;

	return portal;
}

/*
 * PortalGetPrimaryStmt
 *		Get the "primary" stmt within a portal, ie, the one marked canSetTag.
 *
 * Returns NULL if no such stmt.  If multiple PlannedStmt structs within the
 * portal are marked canSetTag, returns the first one.  Neither of these
 * cases should occur in present usages of this function.
 */
PlannedStmt *
PortalGetPrimaryStmt(Portal portal)
{
	ListCell   *lc;

	foreach(lc, portal->stmts)
	{
		PlannedStmt *stmt = lfirst_node(PlannedStmt, lc);

		if (stmt->canSetTag)
			return stmt;
	}
	return NULL;
}

/*
 * CreatePortal
 *		Returns a new portal given a name.
 *
 * allowDup: if true, automatically drop any pre-existing portal of the
 * same name (if false, an error is raised).
 *
 * dupSilent: if true, don't even emit a WARNING.
 */
Portal
CreatePortal(const char *name, bool allowDup, bool dupSilent)
{
	Portal		portal;

	AssertArg(PointerIsValid(name));

	portal = GetPortalByName(name);
	if (PortalIsValid(portal))
	{
		if (!allowDup)
			ereport(ERROR,
					(errcode(ERRCODE_DUPLICATE_CURSOR),
					 errmsg("cursor \"%s\" already exists", name)));
		if (!dupSilent)
			ereport(WARNING,
					(errcode(ERRCODE_DUPLICATE_CURSOR),
					 errmsg("closing existing cursor \"%s\"",
							name)));
		PortalDrop(portal, false);
	}

	/* make new portal structure */
	portal = (Portal) MemoryContextAllocZero(TopPortalContext, sizeof *portal);

	/* initialize portal context; typically it won't store much */
	portal->portalContext = AllocSetContextCreate(TopPortalContext,
												  "PortalContext",
												  ALLOCSET_SMALL_SIZES);

	/* create a resource owner for the portal */
	portal->resowner = ResourceOwnerCreate(CurTransactionResourceOwner,
										   "Portal");

	/* initialize portal fields that don't start off zero */
	portal->status = PORTAL_NEW;
	portal->cleanup = PortalCleanup;
	portal->createSubid = GetCurrentSubTransactionId();
	portal->activeSubid = portal->createSubid;
	portal->createLevel = GetCurrentTransactionNestLevel();
	portal->strategy = PORTAL_MULTI_QUERY;
	portal->cursorOptions = CURSOR_OPT_NO_SCROLL;
	portal->atStart = true;
	portal->atEnd = true;		/* disallow fetches until query is set */
	portal->visible = true;
	portal->creation_time = GetCurrentStatementStartTimestamp();

	/* put portal in table (sets portal->name) */
	PortalHashTableInsert(portal, name);

	/* reuse portal->name copy */
	MemoryContextSetIdentifier(portal->portalContext, portal->name);

	return portal;
}

/*
 * CreateNewPortal
 *		Create a new portal, assigning it a random nonconflicting name.
 */
Portal
CreateNewPortal(void)
{
	static unsigned int unnamed_portal_count = 0;

	char		portalname[MAX_PORTALNAME_LEN];

	/* Select a nonconflicting name */
	for (;;)
	{
		unnamed_portal_count++;
		sprintf(portalname, "<unnamed portal %u>", unnamed_portal_count);
		if (GetPortalByName(portalname) == NULL)
			break;
	}

	return CreatePortal(portalname, false, false);
}

/*
 * PortalDefineQuery
 *		A simple subroutine to establish a portal's query.
 *
 * Notes: as of PG 8.4, caller MUST supply a sourceText string; it is not
 * allowed anymore to pass NULL.  (If you really don't have source text,
 * you can pass a constant string, perhaps "(query not available)".)
 *
 * commandTag shall be NULL if and only if the original query string
 * (before rewriting) was an empty string.  Also, the passed commandTag must
 * be a pointer to a constant string, since it is not copied.
 *
 * If cplan is provided, then it is a cached plan containing the stmts, and
 * the caller must have done GetCachedPlan(), causing a refcount increment.
 * The refcount will be released when the portal is destroyed.
 *
 * If cplan is NULL, then it is the caller's responsibility to ensure that
 * the passed plan trees have adequate lifetime.  Typically this is done by
 * copying them into the portal's context.
 *
 * The caller is also responsible for ensuring that the passed prepStmtName
 * (if not NULL) and sourceText have adequate lifetime.
 *
 * NB: this function mustn't do much beyond storing the passed values; in
 * particular don't do anything that risks elog(ERROR).  If that were to
 * happen here before storing the cplan reference, we'd leak the plancache
 * refcount that the caller is trying to hand off to us.
 */
void
PortalDefineQuery(Portal portal,
				  const char *prepStmtName,
				  const char *sourceText,
				  const char *commandTag,
				  List *stmts,
				  CachedPlan *cplan)
{
	AssertArg(PortalIsValid(portal));
	AssertState(portal->status == PORTAL_NEW);

	AssertArg(sourceText != NULL);
	AssertArg(commandTag != NULL || stmts == NIL);

	portal->prepStmtName = prepStmtName;
	portal->sourceText = sourceText;
	portal->commandTag = commandTag;
	portal->stmts = stmts;
	portal->cplan = cplan;
	portal->status = PORTAL_DEFINED;
}

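/*
 * For illustration, a simplified caller sequence (modeled on
 * exec_simple_query in postgres.c; snapshot and DestReceiver setup are
 * omitted here):
 *
 *		portal = CreatePortal("", true, true);
 *		PortalDefineQuery(portal, NULL, query_string, commandTag,
 *						  plantree_list, NULL);
 *		PortalStart(portal, NULL, 0, InvalidSnapshot);
 *		(void) PortalRun(portal, FETCH_ALL, true, true,
 *						 receiver, receiver, completionTag);
 *		PortalDrop(portal, false);
 *
 * PortalStart and PortalRun live in pquery.c; this module only manages the
 * portal's memory and lifecycle state.
 */
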
/*
 * PortalReleaseCachedPlan
 *		Release a portal's reference to its cached plan, if any.
 */
static void
PortalReleaseCachedPlan(Portal portal)
{
	if (portal->cplan)
	{
		ReleaseCachedPlan(portal->cplan, false);
		portal->cplan = NULL;

		/*
		 * We must also clear portal->stmts which is now a dangling reference
		 * to the cached plan's plan list.  This protects any code that might
		 * try to examine the Portal later.
		 */
		portal->stmts = NIL;
	}
}

/*
 * PortalCreateHoldStore
 *		Create the tuplestore for a portal.
 */
void
PortalCreateHoldStore(Portal portal)
{
	MemoryContext oldcxt;

	Assert(portal->holdContext == NULL);
	Assert(portal->holdStore == NULL);
	Assert(portal->holdSnapshot == NULL);

	/*
	 * Create the memory context that is used for storage of the tuple set.
	 * Note this is NOT a child of the portal's portalContext.
	 */
	portal->holdContext =
		AllocSetContextCreate(TopPortalContext,
							  "PortalHoldContext",
							  ALLOCSET_DEFAULT_SIZES);

	/*
	 * Create the tuple store, selecting cross-transaction temp files, and
	 * enabling random access only if cursor requires scrolling.
	 *
	 * XXX: Should maintenance_work_mem be used for the portal size?
	 */
	oldcxt = MemoryContextSwitchTo(portal->holdContext);

	portal->holdStore =
		tuplestore_begin_heap(portal->cursorOptions & CURSOR_OPT_SCROLL,
							  true, work_mem);

	MemoryContextSwitchTo(oldcxt);
}

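/*
 * For illustration: the hold store created above is filled by
 * PersistHoldablePortal() in portalcmds.c (see HoldPortal below), which
 * runs the portal's executor to completion and materializes the result set
 * so that it remains readable after the creating transaction ends.
 */
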
/*
 * PinPortal
 *		Protect a portal from dropping.
 *
 * A pinned portal is still unpinned and dropped at transaction or
 * subtransaction abort.
 */
void
PinPortal(Portal portal)
{
	if (portal->portalPinned)
		elog(ERROR, "portal already pinned");

	portal->portalPinned = true;
}

void
UnpinPortal(Portal portal)
{
	if (!portal->portalPinned)
		elog(ERROR, "portal not pinned");

	portal->portalPinned = false;
}

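/*
 * For illustration, a pin is used as a bracket around code that relies on
 * the portal staying alive; e.g. PL/pgSQL's cursor FOR loops (exec_for_query
 * in pl_exec.c) do roughly:
 *
 *		PinPortal(portal);
 *		... repeatedly fetch from the portal ...
 *		UnpinPortal(portal);
 *		SPI_cursor_close(portal);
 */
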
/*
 * MarkPortalActive
 *		Transition a portal from READY to ACTIVE state.
 *
 * NOTE: never set portal->status = PORTAL_ACTIVE directly; call this instead.
 */
void
MarkPortalActive(Portal portal)
{
	/* For safety, this is a runtime test not just an Assert */
	if (portal->status != PORTAL_READY)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("portal \"%s\" cannot be run", portal->name)));
	/* Perform the state transition */
	portal->status = PORTAL_ACTIVE;
	portal->activeSubid = GetCurrentSubTransactionId();
}

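/*
 * For reference, the normal status progression (see PortalStatus in
 * utils/portal.h) is:
 *
 *		PORTAL_NEW -> PORTAL_DEFINED -> PORTAL_READY -> PORTAL_ACTIVE
 *			-> PORTAL_DONE (or PORTAL_FAILED on error)
 *
 * CreatePortal yields NEW, PortalDefineQuery moves to DEFINED, PortalStart
 * (in pquery.c) moves to READY, and MarkPortalActive above plus
 * MarkPortalDone/MarkPortalFailed below handle the rest.
 */
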
/*
 * MarkPortalDone
 *		Transition a portal from ACTIVE to DONE state.
 *
 * NOTE: never set portal->status = PORTAL_DONE directly; call this instead.
 */
void
MarkPortalDone(Portal portal)
{
	/* Perform the state transition */
	Assert(portal->status == PORTAL_ACTIVE);
	portal->status = PORTAL_DONE;

	/*
	 * Allow portalcmds.c to clean up the state it knows about.  We might as
	 * well do that now, since the portal can't be executed any more.
	 *
	 * In some cases involving execution of a ROLLBACK command in an already
	 * aborted transaction, this is necessary, or we'd reach AtCleanup_Portals
	 * with the cleanup hook still unexecuted.
	 */
	if (PointerIsValid(portal->cleanup))
	{
		portal->cleanup(portal);
		portal->cleanup = NULL;
	}
}

/*
 * MarkPortalFailed
 *		Transition a portal into FAILED state.
 *
 * NOTE: never set portal->status = PORTAL_FAILED directly; call this instead.
 */
void
MarkPortalFailed(Portal portal)
{
	/* Perform the state transition */
	Assert(portal->status != PORTAL_DONE);
	portal->status = PORTAL_FAILED;

	/*
	 * Allow portalcmds.c to clean up the state it knows about.  We might as
	 * well do that now, since the portal can't be executed any more.
	 *
	 * In some cases involving cleanup of an already aborted transaction, this
	 * is necessary, or we'd reach AtCleanup_Portals with the cleanup hook
	 * still unexecuted.
	 */
	if (PointerIsValid(portal->cleanup))
	{
		portal->cleanup(portal);
		portal->cleanup = NULL;
	}
}

/*
 * PortalDrop
 *		Destroy the portal.
 */
void
PortalDrop(Portal portal, bool isTopCommit)
{
	AssertArg(PortalIsValid(portal));

	/*
	 * Don't allow dropping a pinned portal, it's still needed by whoever
	 * pinned it.
	 */
	if (portal->portalPinned)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_CURSOR_STATE),
				 errmsg("cannot drop pinned portal \"%s\"", portal->name)));

	/*
	 * Not sure if the PORTAL_ACTIVE case can validly happen or not...
	 */
	if (portal->status == PORTAL_ACTIVE)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_CURSOR_STATE),
				 errmsg("cannot drop active portal \"%s\"", portal->name)));

	/*
	 * Allow portalcmds.c to clean up the state it knows about, in particular
	 * shutting down the executor if still active.  This step potentially runs
	 * user-defined code so failure has to be expected.  It's the cleanup
	 * hook's responsibility to not try to do that more than once, in the case
	 * that failure occurs and then we come back to drop the portal again
	 * during transaction abort.
	 *
	 * Note: in most paths of control, this will have been done already in
	 * MarkPortalDone or MarkPortalFailed.  We're just making sure.
	 */
	if (PointerIsValid(portal->cleanup))
	{
		portal->cleanup(portal);
		portal->cleanup = NULL;
	}

	/* There shouldn't be an active snapshot anymore, except after error */
	Assert(portal->portalSnapshot == NULL || !isTopCommit);

	/*
	 * Remove portal from hash table.  Because we do this here, we will not
	 * come back to try to remove the portal again if there's any error in the
	 * subsequent steps.  Better to leak a little memory than to get into an
	 * infinite error-recovery loop.
	 */
	PortalHashTableDelete(portal);

	/* drop cached plan reference, if any */
	PortalReleaseCachedPlan(portal);

	/*
	 * If portal has a snapshot protecting its data, release that.  This needs
	 * a little care since the registration will be attached to the portal's
	 * resowner; if the portal failed, we will already have released the
	 * resowner (and the snapshot) during transaction abort.
	 */
	if (portal->holdSnapshot)
	{
		if (portal->resowner)
			UnregisterSnapshotFromOwner(portal->holdSnapshot,
										portal->resowner);
		portal->holdSnapshot = NULL;
	}

	/*
	 * Release any resources still attached to the portal.  There are several
	 * cases being covered here:
	 *
	 * Top transaction commit (indicated by isTopCommit): normally we should
	 * do nothing here and let the regular end-of-transaction resource
	 * releasing mechanism handle these resources too.  However, if we have a
	 * FAILED portal (eg, a cursor that got an error), we'd better clean up
	 * its resources to avoid resource-leakage warning messages.
	 *
	 * Sub transaction commit: never comes here at all, since we don't kill
	 * any portals in AtSubCommit_Portals().
	 *
	 * Main or sub transaction abort: we will do nothing here because
	 * portal->resowner was already set NULL; the resources were already
	 * cleaned up in transaction abort.
	 *
	 * Ordinary portal drop: must release resources.  However, if the portal
	 * is not FAILED then we do not release its locks.  The locks become the
	 * responsibility of the transaction's ResourceOwner (since it is the
	 * parent of the portal's owner) and will be released when the transaction
	 * eventually ends.
	 */
	if (portal->resowner &&
		(!isTopCommit || portal->status == PORTAL_FAILED))
	{
		bool		isCommit = (portal->status != PORTAL_FAILED);

		ResourceOwnerRelease(portal->resowner,
							 RESOURCE_RELEASE_BEFORE_LOCKS,
							 isCommit, false);
		ResourceOwnerRelease(portal->resowner,
							 RESOURCE_RELEASE_LOCKS,
							 isCommit, false);
		ResourceOwnerRelease(portal->resowner,
							 RESOURCE_RELEASE_AFTER_LOCKS,
							 isCommit, false);
		ResourceOwnerDelete(portal->resowner);
	}
	portal->resowner = NULL;

	/*
	 * Delete tuplestore if present.  We should do this even under error
	 * conditions; since the tuplestore would have been using cross-
	 * transaction storage, its temp files need to be explicitly deleted.
	 */
	if (portal->holdStore)
	{
		MemoryContext oldcontext;

		oldcontext = MemoryContextSwitchTo(portal->holdContext);
		tuplestore_end(portal->holdStore);
		MemoryContextSwitchTo(oldcontext);
		portal->holdStore = NULL;
	}

	/* delete tuplestore storage, if any */
	if (portal->holdContext)
		MemoryContextDelete(portal->holdContext);

	/* release subsidiary storage */
	MemoryContextDelete(portal->portalContext);

	/* release portal struct (it's in TopPortalContext) */
	pfree(portal);
}

/*
 * Delete all declared cursors.
 *
 * Used by commands: CLOSE ALL, DISCARD ALL
 */
void
PortalHashTableDeleteAll(void)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	if (PortalHashTable == NULL)
		return;

	hash_seq_init(&status, PortalHashTable);
	while ((hentry = hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		/* Can't close the active portal (the one running the command) */
		if (portal->status == PORTAL_ACTIVE)
			continue;

		PortalDrop(portal, false);

		/* Restart the iteration in case that led to other drops */
		hash_seq_term(&status);
		hash_seq_init(&status, PortalHashTable);
	}
}

/*
 * "Hold" a portal.  Prepare it for access by later transactions.
 */
static void
HoldPortal(Portal portal)
{
	/*
	 * Note that PersistHoldablePortal() must release all resources used by
	 * the portal that are local to the creating transaction.
	 */
	PortalCreateHoldStore(portal);
	PersistHoldablePortal(portal);

	/* drop cached plan reference, if any */
	PortalReleaseCachedPlan(portal);

	/*
	 * Any resources belonging to the portal will be released in the upcoming
	 * transaction-wide cleanup; the portal will no longer have its own
	 * resources.
	 */
	portal->resowner = NULL;

	/*
	 * Having successfully exported the holdable cursor, mark it as not
	 * belonging to this transaction.
	 */
	portal->createSubid = InvalidSubTransactionId;
	portal->activeSubid = InvalidSubTransactionId;
	portal->createLevel = 0;
}

/*
 * Pre-commit processing for portals.
 *
 * Holdable cursors created in this transaction need to be converted to
 * materialized form, since we are going to close down the executor and
 * release locks.  Non-holdable portals created in this transaction are
 * simply removed.  Portals remaining from prior transactions should be
 * left untouched.
 *
 * Returns true if any portals changed state (possibly causing user-defined
 * code to be run), false if not.
 */
bool
PreCommit_Portals(bool isPrepare)
{
	bool		result = false;
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		/*
		 * There should be no pinned portals anymore. Complain if someone
		 * leaked one. Auto-held portals are allowed; we assume that whoever
		 * pinned them is managing them.
		 */
		if (portal->portalPinned && !portal->autoHeld)
			elog(ERROR, "cannot commit while a portal is pinned");

		/*
		 * Do not touch active portals --- this can only happen in the case of
		 * a multi-transaction utility command, such as VACUUM, or a commit in
		 * a procedure.
		 *
		 * Note however that any resource owner attached to such a portal is
		 * still going to go away, so don't leave a dangling pointer.  Also
		 * unregister any snapshots held by the portal, mainly to avoid
		 * snapshot leak warnings from ResourceOwnerRelease().
		 */
		if (portal->status == PORTAL_ACTIVE)
		{
			if (portal->holdSnapshot)
			{
				if (portal->resowner)
					UnregisterSnapshotFromOwner(portal->holdSnapshot,
												portal->resowner);
				portal->holdSnapshot = NULL;
			}
			portal->resowner = NULL;
			/* Clear portalSnapshot too, for cleanliness */
			portal->portalSnapshot = NULL;
			continue;
		}

		/* Is it a holdable portal created in the current xact? */
		if ((portal->cursorOptions & CURSOR_OPT_HOLD) &&
			portal->createSubid != InvalidSubTransactionId &&
			portal->status == PORTAL_READY)
		{
			/*
			 * We are exiting the transaction that created a holdable cursor.
			 * Instead of dropping the portal, prepare it for access by later
			 * transactions.
			 *
			 * However, if this is PREPARE TRANSACTION rather than COMMIT,
			 * refuse PREPARE, because the semantics seem pretty unclear.
			 */
			if (isPrepare)
				ereport(ERROR,
						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
						 errmsg("cannot PREPARE a transaction that has created a cursor WITH HOLD")));

			HoldPortal(portal);

			/* Report we changed state */
			result = true;
		}
		else if (portal->createSubid == InvalidSubTransactionId)
		{
			/*
			 * Do nothing to cursors held over from a previous transaction
			 * (including ones we just froze in a previous cycle of this loop)
			 */
			continue;
		}
		else
		{
			/* Zap all non-holdable portals */
			PortalDrop(portal, true);

			/* Report we changed state */
			result = true;
		}

		/*
		 * After either freezing or dropping a portal, we have to restart the
		 * iteration, because we could have invoked user-defined code that
		 * caused a drop of the next portal in the hash chain.
		 */
		hash_seq_term(&status);
		hash_seq_init(&status, PortalHashTable);
	}

	return result;
}

/*
 * Abort processing for portals.
 *
 * At this point we run the cleanup hook if present, but we can't release the
 * portal's memory until the cleanup call.
 */
void
AtAbort_Portals(void)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		/*
		 * When elog(FATAL) is in progress, we need to set the active portal to
		 * failed, so that PortalCleanup() doesn't run the executor shutdown.
		 */
		if (portal->status == PORTAL_ACTIVE && shmem_exit_inprogress)
			MarkPortalFailed(portal);

		/*
		 * Do nothing else to cursors held over from a previous transaction.
		 */
		if (portal->createSubid == InvalidSubTransactionId)
			continue;

		/*
		 * Do nothing to auto-held cursors.  This is similar to the case of a
		 * cursor from a previous transaction, but it could also be that the
		 * cursor was auto-held in this transaction, so it wants to live on.
		 */
		if (portal->autoHeld)
			continue;

		/*
		 * If it was created in the current transaction, we can't do normal
		 * shutdown on a READY portal either; it might refer to objects
		 * created in the failed transaction.  See comments in
		 * AtSubAbort_Portals.
		 */
		if (portal->status == PORTAL_READY)
			MarkPortalFailed(portal);

		/*
		 * Allow portalcmds.c to clean up the state it knows about, if we
		 * haven't already.
		 */
		if (PointerIsValid(portal->cleanup))
		{
			portal->cleanup(portal);
			portal->cleanup = NULL;
		}

		/* drop cached plan reference, if any */
		PortalReleaseCachedPlan(portal);

		/*
		 * Any resources belonging to the portal will be released in the
		 * upcoming transaction-wide cleanup; they will be gone before we run
		 * PortalDrop.
		 */
		portal->resowner = NULL;

		/*
		 * Although we can't delete the portal data structure proper, we can
		 * release any memory in subsidiary contexts, such as executor state.
		 * The cleanup hook was the last thing that might have needed data
		 * there.  But leave active portals alone.
		 */
		if (portal->status != PORTAL_ACTIVE)
			MemoryContextDeleteChildren(portal->portalContext);
	}
}

/*
 * Post-abort cleanup for portals.
 *
 * Delete all portals not held over from prior transactions.
 */
void
AtCleanup_Portals(void)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		/*
		 * Do not touch active portals --- this can only happen in the case of
		 * a multi-transaction command.
		 */
		if (portal->status == PORTAL_ACTIVE)
			continue;

		/*
		 * Do nothing to cursors held over from a previous transaction or
		 * auto-held ones.
		 */
		if (portal->createSubid == InvalidSubTransactionId || portal->autoHeld)
		{
			Assert(portal->status != PORTAL_ACTIVE);
			Assert(portal->resowner == NULL);
			continue;
		}

		/*
		 * If a portal is still pinned, forcibly unpin it. PortalDrop will not
		 * let us drop the portal otherwise. Whoever pinned the portal was
		 * interrupted by the abort too and won't try to use it anymore.
		 */
		if (portal->portalPinned)
			portal->portalPinned = false;

		/*
		 * We had better not call any user-defined code during cleanup, so if
		 * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
		 */
		if (PointerIsValid(portal->cleanup))
		{
			elog(WARNING, "skipping cleanup for portal \"%s\"", portal->name);
			portal->cleanup = NULL;
		}

		/* Zap it. */
		PortalDrop(portal, false);
	}
}

/*
 * Portal-related cleanup when we return to the main loop on error.
 *
 * This is different from the cleanup at transaction abort.  Auto-held portals
 * are cleaned up on error but not on transaction abort.
 */
void
PortalErrorCleanup(void)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		if (portal->autoHeld)
		{
			portal->portalPinned = false;
			PortalDrop(portal, false);
		}
	}
}

/*
 * Pre-subcommit processing for portals.
 *
 * Reassign portals created or used in the current subtransaction to the
 * parent subtransaction.
 */
void
AtSubCommit_Portals(SubTransactionId mySubid,
					SubTransactionId parentSubid,
					int parentLevel,
					ResourceOwner parentXactOwner)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		if (portal->createSubid == mySubid)
		{
			portal->createSubid = parentSubid;
			portal->createLevel = parentLevel;
			if (portal->resowner)
				ResourceOwnerNewParent(portal->resowner, parentXactOwner);
		}
		if (portal->activeSubid == mySubid)
			portal->activeSubid = parentSubid;
	}
}

/*
 * Subtransaction abort handling for portals.
 *
 * Deactivate portals created or used during the failed subtransaction.
 * Note that per AtSubCommit_Portals, this will catch portals created/used
 * in descendants of the subtransaction too.
 *
 * We don't destroy any portals here; that's done in AtSubCleanup_Portals.
 */
void
AtSubAbort_Portals(SubTransactionId mySubid,
				   SubTransactionId parentSubid,
				   ResourceOwner myXactOwner,
				   ResourceOwner parentXactOwner)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		/* Was it created in this subtransaction? */
		if (portal->createSubid != mySubid)
		{
			/* No, but maybe it was used in this subtransaction? */
			if (portal->activeSubid == mySubid)
			{
				/* Maintain activeSubid until the portal is removed */
				portal->activeSubid = parentSubid;

				/*
				 * A MarkPortalActive() caller ran an upper-level portal in
				 * this subtransaction and left the portal ACTIVE.  This can't
				 * happen, but force the portal into FAILED state for the same
				 * reasons discussed below.
				 *
				 * We assume we can get away without forcing upper-level READY
				 * portals to fail, even if they were run and then suspended.
				 * In theory a suspended upper-level portal could have
				 * acquired some references to objects that are about to be
				 * destroyed, but there should be sufficient defenses against
				 * such cases: the portal's original query cannot contain such
				 * references, and any references within, say, cached plans of
				 * PL/pgSQL functions are not from active queries and should
				 * be protected by revalidation logic.
				 */
				if (portal->status == PORTAL_ACTIVE)
					MarkPortalFailed(portal);

				/*
				 * Also, if we failed it during the current subtransaction
				 * (either just above, or earlier), reattach its resource
				 * owner to the current subtransaction's resource owner, so
				 * that any resources it still holds will be released while
				 * cleaning up this subtransaction.  This prevents some corner
				 * cases wherein we might get Asserts or worse while cleaning
				 * up objects created during the current subtransaction
				 * (because they're still referenced within this portal).
				 */
				if (portal->status == PORTAL_FAILED && portal->resowner)
				{
					ResourceOwnerNewParent(portal->resowner, myXactOwner);
					portal->resowner = NULL;
				}
			}
			/* Done if it wasn't created in this subtransaction */
			continue;
		}

		/*
		 * Force any live portals of my own subtransaction into FAILED state.
		 * We have to do this because they might refer to objects created or
		 * changed in the failed subtransaction, leading to crashes within
		 * ExecutorEnd when portalcmds.c tries to close down the portal.
		 * Currently, every MarkPortalActive() caller ensures it updates the
		 * portal status again before relinquishing control, so ACTIVE can't
		 * happen here.  If it does happen, dispose the portal like existing
		 * MarkPortalActive() callers would.
		 */
		if (portal->status == PORTAL_READY ||
			portal->status == PORTAL_ACTIVE)
			MarkPortalFailed(portal);

		/*
		 * Allow portalcmds.c to clean up the state it knows about, if we
		 * haven't already.
		 */
		if (PointerIsValid(portal->cleanup))
		{
			portal->cleanup(portal);
			portal->cleanup = NULL;
		}

		/* drop cached plan reference, if any */
		PortalReleaseCachedPlan(portal);

		/*
		 * Any resources belonging to the portal will be released in the
		 * upcoming transaction-wide cleanup; they will be gone before we run
		 * PortalDrop.
		 */
		portal->resowner = NULL;

		/*
		 * Although we can't delete the portal data structure proper, we can
		 * release any memory in subsidiary contexts, such as executor state.
		 * The cleanup hook was the last thing that might have needed data
		 * there.
		 */
		MemoryContextDeleteChildren(portal->portalContext);
	}
}

/*
 * Post-subabort cleanup for portals.
 *
 * Drop all portals created in the failed subtransaction (but note that
 * we will not drop any that were reassigned to the parent above).
 */
void
AtSubCleanup_Portals(SubTransactionId mySubid)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		if (portal->createSubid != mySubid)
			continue;

		/*
		 * If a portal is still pinned, forcibly unpin it. PortalDrop will not
		 * let us drop the portal otherwise. Whoever pinned the portal was
		 * interrupted by the abort too and won't try to use it anymore.
		 */
		if (portal->portalPinned)
			portal->portalPinned = false;

		/*
		 * We had better not call any user-defined code during cleanup, so if
		 * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
		 */
		if (PointerIsValid(portal->cleanup))
		{
			elog(WARNING, "skipping cleanup for portal \"%s\"", portal->name);
			portal->cleanup = NULL;
		}

		/* Zap it. */
		PortalDrop(portal, false);
	}
}

/* Find all available cursors */
Datum
pg_cursor(PG_FUNCTION_ARGS)
{
	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
	TupleDesc	tupdesc;
	Tuplestorestate *tupstore;
	MemoryContext per_query_ctx;
	MemoryContext oldcontext;
	HASH_SEQ_STATUS hash_seq;
	PortalHashEnt *hentry;

	/* check to see if caller supports us returning a tuplestore */
	if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("set-valued function called in context that cannot accept a set")));
	if (!(rsinfo->allowedModes & SFRM_Materialize))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("materialize mode required, but it is not " \
						"allowed in this context")));

	/* need to build tuplestore in query context */
	per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
	oldcontext = MemoryContextSwitchTo(per_query_ctx);

	/*
	 * build tupdesc for result tuples. This must match the definition of the
	 * pg_cursors view in system_views.sql
	 */
	tupdesc = CreateTemplateTupleDesc(6);
	TupleDescInitEntry(tupdesc, (AttrNumber) 1, "name",
					   TEXTOID, -1, 0);
	TupleDescInitEntry(tupdesc, (AttrNumber) 2, "statement",
					   TEXTOID, -1, 0);
	TupleDescInitEntry(tupdesc, (AttrNumber) 3, "is_holdable",
					   BOOLOID, -1, 0);
	TupleDescInitEntry(tupdesc, (AttrNumber) 4, "is_binary",
					   BOOLOID, -1, 0);
	TupleDescInitEntry(tupdesc, (AttrNumber) 5, "is_scrollable",
					   BOOLOID, -1, 0);
	TupleDescInitEntry(tupdesc, (AttrNumber) 6, "creation_time",
					   TIMESTAMPTZOID, -1, 0);

	/*
	 * We put all the tuples into a tuplestore in one scan of the hashtable.
	 * This avoids any issue of the hashtable possibly changing between calls.
	 */
	tupstore =
		tuplestore_begin_heap(rsinfo->allowedModes & SFRM_Materialize_Random,
							  false, work_mem);

	/* generate junk in short-term context */
	MemoryContextSwitchTo(oldcontext);

	hash_seq_init(&hash_seq, PortalHashTable);
	while ((hentry = hash_seq_search(&hash_seq)) != NULL)
	{
		Portal		portal = hentry->portal;
		Datum		values[6];
		bool		nulls[6];

		/* report only "visible" entries */
		if (!portal->visible)
			continue;

		MemSet(nulls, 0, sizeof(nulls));

		values[0] = CStringGetTextDatum(portal->name);
		values[1] = CStringGetTextDatum(portal->sourceText);
		values[2] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_HOLD);
		values[3] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_BINARY);
		values[4] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_SCROLL);
		values[5] = TimestampTzGetDatum(portal->creation_time);

		tuplestore_putvalues(tupstore, tupdesc, values, nulls);
	}

	/* clean up and return the tuplestore */
	tuplestore_donestoring(tupstore);

	rsinfo->returnMode = SFRM_Materialize;
	rsinfo->setResult = tupstore;
	rsinfo->setDesc = tupdesc;

	return (Datum) 0;
}

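/*
 * For illustration: pg_cursor() implements the pg_cursors system view
 * (defined in system_views.sql), so a session's visible cursors can be
 * inspected with, e.g.:
 *
 *		SELECT name, statement, is_holdable FROM pg_cursors;
 */
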
bool
ThereAreNoReadyPortals(void)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		if (portal->status == PORTAL_READY)
			return false;
	}

	return true;
}

/*
 * Hold all pinned portals.
 *
 * When initiating a COMMIT or ROLLBACK inside a procedure, this must be
 * called to protect internally-generated cursors from being dropped during
 * the transaction shutdown.  Currently, SPI calls this automatically; PLs
 * that initiate COMMIT or ROLLBACK some other way are on the hook to do it
 * themselves.  (Note that we couldn't do this in, say, AtAbort_Portals
 * because we need to run user-defined code while persisting a portal.
 * It's too late to do that once transaction abort has started.)
 *
 * We protect such portals by converting them to held cursors.  We mark them
 * as "auto-held" so that exception exit knows to clean them up.  (In normal,
 * non-exception code paths, the PL needs to clean such portals itself, since
 * transaction end won't do it anymore; but that should be normal practice
 * anyway.)
 */
void
HoldPinnedPortals(void)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		if (portal->portalPinned && !portal->autoHeld)
		{
			/*
			 * Doing transaction control, especially abort, inside a cursor
			 * loop that is not read-only, for example using UPDATE ...
			 * RETURNING, has weird semantics issues.  Also, this
			 * implementation wouldn't work, because such portals cannot be
			 * held.  (The core grammar enforces that only SELECT statements
			 * can drive a cursor, but for example PL/pgSQL does not restrict
			 * it.)
			 */
			if (portal->strategy != PORTAL_ONE_SELECT)
				ereport(ERROR,
						(errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION),
						 errmsg("cannot perform transaction commands inside a cursor loop that is not read-only")));

			/* Verify it's in a suitable state to be held */
			if (portal->status != PORTAL_READY)
				elog(ERROR, "pinned portal is not ready to be auto-held");

			HoldPortal(portal);
			portal->autoHeld = true;
		}
	}
}

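/*
 * For illustration, this supports PL-level code such as the following
 * PL/pgSQL fragment, where the FOR loop's internal (pinned) cursor must
 * survive the COMMIT:
 *
 *		FOR r IN SELECT ... LOOP
 *			...
 *			COMMIT;
 *		END LOOP;
 *
 * HoldPinnedPortals converts that cursor to a held one before the
 * transaction is closed.
 */
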
/*
 * Drop the outer active snapshots for all portals, so that no snapshots
 * remain active.
 *
 * Like HoldPinnedPortals, this must be called when initiating a COMMIT or
 * ROLLBACK inside a procedure.  This has to be separate from that since it
 * should not be run until we're done with steps that are likely to fail.
 *
 * It's tempting to fold this into PreCommit_Portals, but to do so, we'd
 * need to clean up snapshot management in VACUUM and perhaps other places.
 */
void
ForgetPortalSnapshots(void)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;
	int			numPortalSnaps = 0;
	int			numActiveSnaps = 0;

	/* First, scan PortalHashTable and clear portalSnapshot fields */
	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		if (portal->portalSnapshot != NULL)
		{
			portal->portalSnapshot = NULL;
			numPortalSnaps++;
		}
		/* portal->holdSnapshot will be cleaned up in PreCommit_Portals */
	}

	/*
	 * Now, pop all the active snapshots, which should be just those that were
	 * portal snapshots.  Ideally we'd drive this directly off the portal
	 * scan, but there's no good way to visit the portals in the correct
	 * order.  So just cross-check after the fact.
	 */
	while (ActiveSnapshotSet())
	{
		PopActiveSnapshot();
		numActiveSnaps++;
	}

	if (numPortalSnaps != numActiveSnaps)
		elog(ERROR, "portal snapshots (%d) did not account for all active snapshots (%d)",
			 numPortalSnaps, numActiveSnaps);
}