1 /*-------------------------------------------------------------------------
2 *
3 * spi.c
4 * Server Programming Interface
5 *
6 * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 *
10 * IDENTIFICATION
11 * src/backend/executor/spi.c
12 *
13 *-------------------------------------------------------------------------
14 */
15 #include "postgres.h"
16
17 #include "access/htup_details.h"
18 #include "access/printtup.h"
19 #include "access/sysattr.h"
20 #include "access/xact.h"
21 #include "catalog/heap.h"
22 #include "catalog/pg_type.h"
23 #include "commands/trigger.h"
24 #include "executor/executor.h"
25 #include "executor/spi_priv.h"
26 #include "miscadmin.h"
27 #include "tcop/pquery.h"
28 #include "tcop/utility.h"
29 #include "utils/builtins.h"
30 #include "utils/datum.h"
31 #include "utils/lsyscache.h"
32 #include "utils/memutils.h"
33 #include "utils/rel.h"
34 #include "utils/snapmgr.h"
35 #include "utils/syscache.h"
36 #include "utils/typcache.h"
37
38
39 /*
40 * These global variables are part of the API for various SPI functions
41 * (a horrible API choice, but it's too late now). To reduce the risk of
42 * interference between different SPI callers, we save and restore them
43 * when entering/exiting a SPI nesting level.
44 */
45 uint64 SPI_processed = 0;
46 SPITupleTable *SPI_tuptable = NULL;
47 int SPI_result = 0;
48
49 static _SPI_connection *_SPI_stack = NULL;
50 static _SPI_connection *_SPI_current = NULL;
51 static int _SPI_stack_depth = 0; /* allocated size of _SPI_stack */
52 static int _SPI_connected = -1; /* current stack index */
53
54 static Portal SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
55 ParamListInfo paramLI, bool read_only);
56
57 static void _SPI_prepare_plan(const char *src, SPIPlanPtr plan);
58
59 static void _SPI_prepare_oneshot_plan(const char *src, SPIPlanPtr plan);
60
61 static int _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
62 Snapshot snapshot, Snapshot crosscheck_snapshot,
63 bool read_only, bool fire_triggers, uint64 tcount);
64
65 static ParamListInfo _SPI_convert_params(int nargs, Oid *argtypes,
66 Datum *Values, const char *Nulls);
67
68 static int _SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount);
69
70 static void _SPI_error_callback(void *arg);
71
72 static void _SPI_cursor_operation(Portal portal,
73 FetchDirection direction, long count,
74 DestReceiver *dest);
75
76 static SPIPlanPtr _SPI_make_plan_non_temp(SPIPlanPtr plan);
77 static SPIPlanPtr _SPI_save_plan(SPIPlanPtr plan);
78
79 static int _SPI_begin_call(bool use_exec);
80 static int _SPI_end_call(bool use_exec);
81 static MemoryContext _SPI_execmem(void);
82 static MemoryContext _SPI_procmem(void);
83 static bool _SPI_checktuples(void);
84
85
86 /* =================== interface functions =================== */
87
/*
 * SPI_connect -- connect to SPI with default behavior.
 *
 * Convenience wrapper: identical to SPI_connect_ext() with no option
 * flags, i.e. an atomic SPI connection.
 */
int
SPI_connect(void)
{
	return SPI_connect_ext(0);
}
93
/*
 * SPI_connect_ext -- connect to SPI, entering a new nesting level.
 *
 * options is a bitmask; SPI_OPT_NONATOMIC requests a nonatomic (procedure)
 * context in which SPI transaction-control calls are permitted.
 *
 * Pushes a new entry on the SPI connection stack, saves the caller's
 * SPI_processed/SPI_tuptable/SPI_result for restoration by SPI_finish(),
 * creates per-procedure memory contexts, and switches into the procedure
 * context.  Always returns SPI_OK_CONNECT; internal failures elog(ERROR).
 */
int
SPI_connect_ext(int options)
{
	int			newdepth;

	/* Enlarge stack if necessary */
	if (_SPI_stack == NULL)
	{
		/* First connection ever: allocate the stack in TopMemoryContext */
		if (_SPI_connected != -1 || _SPI_stack_depth != 0)
			elog(ERROR, "SPI stack corrupted");
		newdepth = 16;
		_SPI_stack = (_SPI_connection *)
			MemoryContextAlloc(TopMemoryContext,
							   newdepth * sizeof(_SPI_connection));
		_SPI_stack_depth = newdepth;
	}
	else
	{
		if (_SPI_stack_depth <= 0 || _SPI_stack_depth <= _SPI_connected)
			elog(ERROR, "SPI stack corrupted");
		if (_SPI_stack_depth == _SPI_connected + 1)
		{
			/* Stack full: double its size */
			newdepth = _SPI_stack_depth * 2;
			_SPI_stack = (_SPI_connection *)
				repalloc(_SPI_stack,
						 newdepth * sizeof(_SPI_connection));
			_SPI_stack_depth = newdepth;
		}
	}

	/* Enter new stack level */
	_SPI_connected++;
	Assert(_SPI_connected >= 0 && _SPI_connected < _SPI_stack_depth);

	_SPI_current = &(_SPI_stack[_SPI_connected]);
	_SPI_current->processed = 0;
	_SPI_current->tuptable = NULL;
	_SPI_current->execSubid = InvalidSubTransactionId;
	slist_init(&_SPI_current->tuptables);
	_SPI_current->procCxt = NULL;	/* in case we fail to create 'em */
	_SPI_current->execCxt = NULL;
	_SPI_current->connectSubid = GetCurrentSubTransactionId();
	_SPI_current->queryEnv = NULL;
	_SPI_current->atomic = (options & SPI_OPT_NONATOMIC ? false : true);
	_SPI_current->internal_xact = false;
	/* Save caller's API globals so SPI_finish() can restore them */
	_SPI_current->outer_processed = SPI_processed;
	_SPI_current->outer_tuptable = SPI_tuptable;
	_SPI_current->outer_result = SPI_result;

	/*
	 * Create memory contexts for this procedure
	 *
	 * In atomic contexts (the normal case), we use TopTransactionContext,
	 * otherwise PortalContext, so that it lives across transaction
	 * boundaries.
	 *
	 * XXX It could be better to use PortalContext as the parent context in
	 * all cases, but we may not be inside a portal (consider deferred-trigger
	 * execution).  Perhaps CurTransactionContext could be an option?  For now
	 * it doesn't matter because we clean up explicitly in AtEOSubXact_SPI().
	 */
	_SPI_current->procCxt = AllocSetContextCreate(_SPI_current->atomic ? TopTransactionContext : PortalContext,
												  "SPI Proc",
												  ALLOCSET_DEFAULT_SIZES);
	_SPI_current->execCxt = AllocSetContextCreate(_SPI_current->atomic ? TopTransactionContext : _SPI_current->procCxt,
												  "SPI Exec",
												  ALLOCSET_DEFAULT_SIZES);
	/* ... and switch to procedure's context */
	_SPI_current->savedcxt = MemoryContextSwitchTo(_SPI_current->procCxt);

	/*
	 * Reset API global variables so that current caller cannot accidentally
	 * depend on state of an outer caller.
	 */
	SPI_processed = 0;
	SPI_tuptable = NULL;
	SPI_result = 0;

	return SPI_OK_CONNECT;
}
174
/*
 * SPI_finish -- disconnect from SPI, popping one nesting level.
 *
 * Deletes the procedure/executor memory contexts created by SPI_connect
 * (which frees all tuple tables allocated under them), restores the outer
 * caller's API globals, and pops the connection stack.  Returns
 * SPI_OK_FINISH, or a negative error code if not connected.
 */
int
SPI_finish(void)
{
	int			res;

	res = _SPI_begin_call(false);	/* just check we're connected */
	if (res < 0)
		return res;

	/* Restore memory context as it was before procedure call */
	MemoryContextSwitchTo(_SPI_current->savedcxt);

	/* Release memory used in procedure call (including tuptables) */
	MemoryContextDelete(_SPI_current->execCxt);
	_SPI_current->execCxt = NULL;
	MemoryContextDelete(_SPI_current->procCxt);
	_SPI_current->procCxt = NULL;

	/*
	 * Restore outer API variables, especially SPI_tuptable which is probably
	 * pointing at a just-deleted tuptable
	 */
	SPI_processed = _SPI_current->outer_processed;
	SPI_tuptable = _SPI_current->outer_tuptable;
	SPI_result = _SPI_current->outer_result;

	/* Exit stack level */
	_SPI_connected--;
	if (_SPI_connected < 0)
		_SPI_current = NULL;
	else
		_SPI_current = &(_SPI_stack[_SPI_connected]);

	return SPI_OK_FINISH;
}
210
211 void
SPI_start_transaction(void)212 SPI_start_transaction(void)
213 {
214 MemoryContext oldcontext = CurrentMemoryContext;
215
216 StartTransactionCommand();
217 MemoryContextSwitchTo(oldcontext);
218 }
219
/*
 * Commit the current transaction on behalf of a SPI caller.
 *
 * If "chain" is true, immediately start a new transaction with the same
 * transaction characteristics (COMMIT AND CHAIN semantics).  Errors out
 * if the SPI connection is atomic or a subtransaction is active.
 */
static void
_SPI_commit(bool chain)
{
	MemoryContext oldcontext = CurrentMemoryContext;

	/* Transaction control is only allowed in nonatomic (procedure) calls */
	if (_SPI_current->atomic)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION),
				 errmsg("invalid transaction termination")));

	/*
	 * This restriction is required by PLs implemented on top of SPI.  They
	 * use subtransactions to establish exception blocks that are supposed to
	 * be rolled back together if there is an error.  Terminating the
	 * top-level transaction in such a block violates that idea.  A future PL
	 * implementation might have different ideas about this, in which case
	 * this restriction would have to be refined or the check possibly be
	 * moved out of SPI into the PLs.
	 */
	if (IsSubTransaction())
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION),
				 errmsg("cannot commit while a subtransaction is active")));

	/*
	 * Hold any pinned portals that any PLs might be using.  We have to do
	 * this before changing transaction state, since this will run
	 * user-defined code that might throw an error.
	 */
	HoldPinnedPortals();

	/* Start the actual commit */
	_SPI_current->internal_xact = true;

	/* Release snapshots associated with portals */
	ForgetPortalSnapshots();

	/* Remember characteristics so the chained transaction can reuse them */
	if (chain)
		SaveTransactionCharacteristics();

	CommitTransactionCommand();

	if (chain)
	{
		StartTransactionCommand();
		RestoreTransactionCharacteristics();
	}

	/* Restore caller's memory context (commit switched contexts) */
	MemoryContextSwitchTo(oldcontext);

	_SPI_current->internal_xact = false;
}
272
273 void
SPI_commit(void)274 SPI_commit(void)
275 {
276 _SPI_commit(false);
277 }
278
279 void
SPI_commit_and_chain(void)280 SPI_commit_and_chain(void)
281 {
282 _SPI_commit(true);
283 }
284
/*
 * Roll back the current transaction on behalf of a SPI caller.
 *
 * If "chain" is true, immediately start a new transaction with the same
 * transaction characteristics (ROLLBACK AND CHAIN semantics).  Errors out
 * if the SPI connection is atomic or a subtransaction is active.
 */
static void
_SPI_rollback(bool chain)
{
	MemoryContext oldcontext = CurrentMemoryContext;

	/* Transaction control is only allowed in nonatomic (procedure) calls */
	if (_SPI_current->atomic)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION),
				 errmsg("invalid transaction termination")));

	/* see under SPI_commit() */
	if (IsSubTransaction())
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION),
				 errmsg("cannot roll back while a subtransaction is active")));

	/*
	 * Hold any pinned portals that any PLs might be using.  We have to do
	 * this before changing transaction state, since this will run
	 * user-defined code that might throw an error, and in any case couldn't
	 * be run in an already-aborted transaction.
	 */
	HoldPinnedPortals();

	/* Start the actual rollback */
	_SPI_current->internal_xact = true;

	/* Release snapshots associated with portals */
	ForgetPortalSnapshots();

	/* Remember characteristics so the chained transaction can reuse them */
	if (chain)
		SaveTransactionCharacteristics();

	AbortCurrentTransaction();

	if (chain)
	{
		StartTransactionCommand();
		RestoreTransactionCharacteristics();
	}

	/* Restore caller's memory context (abort switched contexts) */
	MemoryContextSwitchTo(oldcontext);

	_SPI_current->internal_xact = false;
}
330
331 void
SPI_rollback(void)332 SPI_rollback(void)
333 {
334 _SPI_rollback(false);
335 }
336
337 void
SPI_rollback_and_chain(void)338 SPI_rollback_and_chain(void)
339 {
340 _SPI_rollback(true);
341 }
342
343 /*
344 * Clean up SPI state. Called on transaction end (of non-SPI-internal
345 * transactions) and when returning to the main loop on error.
346 */
347 void
SPICleanup(void)348 SPICleanup(void)
349 {
350 _SPI_current = NULL;
351 _SPI_connected = -1;
352 /* Reset API global variables, too */
353 SPI_processed = 0;
354 SPI_tuptable = NULL;
355 SPI_result = 0;
356 }
357
358 /*
359 * Clean up SPI state at transaction commit or abort.
360 */
361 void
AtEOXact_SPI(bool isCommit)362 AtEOXact_SPI(bool isCommit)
363 {
364 /* Do nothing if the transaction end was initiated by SPI. */
365 if (_SPI_current && _SPI_current->internal_xact)
366 return;
367
368 if (isCommit && _SPI_connected != -1)
369 ereport(WARNING,
370 (errcode(ERRCODE_WARNING),
371 errmsg("transaction left non-empty SPI stack"),
372 errhint("Check for missing \"SPI_finish\" calls.")));
373
374 SPICleanup();
375 }
376
/*
 * Clean up SPI state at subtransaction commit or abort.
 *
 * During commit, there shouldn't be any unclosed entries remaining from
 * the current subtransaction; we emit a warning if any are found.
 */
void
AtEOSubXact_SPI(bool isCommit, SubTransactionId mySubid)
{
	bool		found = false;

	/* Pop all connection-stack entries made within this subtransaction */
	while (_SPI_connected >= 0)
	{
		_SPI_connection *connection = &(_SPI_stack[_SPI_connected]);

		if (connection->connectSubid != mySubid)
			break;				/* couldn't be any underneath it either */

		/* Leave entries alone if SPI itself is driving this (sub)xact end */
		if (connection->internal_xact)
			break;

		found = true;

		/*
		 * Release procedure memory explicitly (see note in SPI_connect)
		 */
		if (connection->execCxt)
		{
			MemoryContextDelete(connection->execCxt);
			connection->execCxt = NULL;
		}
		if (connection->procCxt)
		{
			MemoryContextDelete(connection->procCxt);
			connection->procCxt = NULL;
		}

		/*
		 * Restore outer global variables and pop the stack entry.  Unlike
		 * SPI_finish(), we don't risk switching to memory contexts that might
		 * be already gone.
		 */
		SPI_processed = connection->outer_processed;
		SPI_tuptable = connection->outer_tuptable;
		SPI_result = connection->outer_result;

		_SPI_connected--;
		if (_SPI_connected < 0)
			_SPI_current = NULL;
		else
			_SPI_current = &(_SPI_stack[_SPI_connected]);
	}

	if (found && isCommit)
		ereport(WARNING,
				(errcode(ERRCODE_WARNING),
				 errmsg("subtransaction left non-empty SPI stack"),
				 errhint("Check for missing \"SPI_finish\" calls.")));

	/*
	 * If we are aborting a subtransaction and there is an open SPI context
	 * surrounding the subxact, clean up to prevent memory leakage.
	 */
	if (_SPI_current && !isCommit)
	{
		slist_mutable_iter siter;

		/*
		 * Throw away executor state if current executor operation was started
		 * within current subxact (essentially, force a _SPI_end_call(true)).
		 */
		if (_SPI_current->execSubid >= mySubid)
		{
			_SPI_current->execSubid = InvalidSubTransactionId;
			MemoryContextResetAndDeleteChildren(_SPI_current->execCxt);
		}

		/* throw away any tuple tables created within current subxact */
		slist_foreach_modify(siter, &_SPI_current->tuptables)
		{
			SPITupleTable *tuptable;

			tuptable = slist_container(SPITupleTable, next, siter.cur);
			if (tuptable->subid >= mySubid)
			{
				/*
				 * If we used SPI_freetuptable() here, its internal search of
				 * the tuptables list would make this operation O(N^2).
				 * Instead, just free the tuptable manually.  This should
				 * match what SPI_freetuptable() does.
				 */
				slist_delete_current(&siter);
				/* Clear any dangling pointers to the doomed tuptable */
				if (tuptable == _SPI_current->tuptable)
					_SPI_current->tuptable = NULL;
				if (tuptable == SPI_tuptable)
					SPI_tuptable = NULL;
				MemoryContextDelete(tuptable->tuptabcxt);
			}
		}
	}
}
478
479 /*
480 * Are we executing inside a procedure (that is, a nonatomic SPI context)?
481 */
482 bool
SPI_inside_nonatomic_context(void)483 SPI_inside_nonatomic_context(void)
484 {
485 if (_SPI_current == NULL)
486 return false; /* not in any SPI context at all */
487 if (_SPI_current->atomic)
488 return false; /* it's atomic (ie function not procedure) */
489 return true;
490 }
491
492
/*
 * Parse, plan, and execute a query string.
 *
 * src is the SQL text; read_only requests read-only execution; tcount is
 * the maximum number of tuples to process (0 means no limit).  Uses a
 * stack-local one-shot plan, so nothing is cached across calls.  Returns
 * a SPI_OK_* result code, or a negative SPI_ERROR_* code.
 */
int
SPI_execute(const char *src, bool read_only, long tcount)
{
	_SPI_plan	plan;
	int			res;

	if (src == NULL || tcount < 0)
		return SPI_ERROR_ARGUMENT;

	res = _SPI_begin_call(true);
	if (res < 0)
		return res;

	/* Build a transient plan in stack/exec-context storage */
	memset(&plan, 0, sizeof(_SPI_plan));
	plan.magic = _SPI_PLAN_MAGIC;
	plan.cursor_options = CURSOR_OPT_PARALLEL_OK;

	_SPI_prepare_oneshot_plan(src, &plan);

	res = _SPI_execute_plan(&plan, NULL,
							InvalidSnapshot, InvalidSnapshot,
							read_only, true, tcount);

	_SPI_end_call(true);
	return res;
}
520
521 /* Obsolete version of SPI_execute */
522 int
SPI_exec(const char * src,long tcount)523 SPI_exec(const char *src, long tcount)
524 {
525 return SPI_execute(src, false, tcount);
526 }
527
/*
 * Execute a previously prepared plan.
 *
 * Values/Nulls supply actual parameter values for plan->nargs parameters;
 * per historical SPI convention, Nulls[i] == 'n' marks a null (Nulls may
 * be NULL if no parameter is null).  read_only requests read-only
 * execution; tcount limits the number of tuples processed (0 = no limit).
 */
int
SPI_execute_plan(SPIPlanPtr plan, Datum *Values, const char *Nulls,
				 bool read_only, long tcount)
{
	int			res;

	if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC || tcount < 0)
		return SPI_ERROR_ARGUMENT;

	if (plan->nargs > 0 && Values == NULL)
		return SPI_ERROR_PARAM;

	res = _SPI_begin_call(true);
	if (res < 0)
		return res;

	res = _SPI_execute_plan(plan,
							_SPI_convert_params(plan->nargs, plan->argtypes,
												Values, Nulls),
							InvalidSnapshot, InvalidSnapshot,
							read_only, true, tcount);

	_SPI_end_call(true);
	return res;
}
554
555 /* Obsolete version of SPI_execute_plan */
556 int
SPI_execp(SPIPlanPtr plan,Datum * Values,const char * Nulls,long tcount)557 SPI_execp(SPIPlanPtr plan, Datum *Values, const char *Nulls, long tcount)
558 {
559 return SPI_execute_plan(plan, Values, Nulls, false, tcount);
560 }
561
562 /* Execute a previously prepared plan */
563 int
SPI_execute_plan_with_paramlist(SPIPlanPtr plan,ParamListInfo params,bool read_only,long tcount)564 SPI_execute_plan_with_paramlist(SPIPlanPtr plan, ParamListInfo params,
565 bool read_only, long tcount)
566 {
567 int res;
568
569 if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC || tcount < 0)
570 return SPI_ERROR_ARGUMENT;
571
572 res = _SPI_begin_call(true);
573 if (res < 0)
574 return res;
575
576 res = _SPI_execute_plan(plan, params,
577 InvalidSnapshot, InvalidSnapshot,
578 read_only, true, tcount);
579
580 _SPI_end_call(true);
581 return res;
582 }
583
/*
 * SPI_execute_snapshot -- identical to SPI_execute_plan, except that we allow
 * the caller to specify exactly which snapshots to use, which will be
 * registered here.  Also, the caller may specify that AFTER triggers should be
 * queued as part of the outer query rather than being fired immediately at the
 * end of the command.
 *
 * This is currently not documented in spi.sgml because it is only intended
 * for use by RI triggers.
 *
 * Passing snapshot == InvalidSnapshot will select the normal behavior of
 * fetching a new snapshot for each query.
 */
int
SPI_execute_snapshot(SPIPlanPtr plan,
					 Datum *Values, const char *Nulls,
					 Snapshot snapshot, Snapshot crosscheck_snapshot,
					 bool read_only, bool fire_triggers, long tcount)
{
	int			res;

	if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC || tcount < 0)
		return SPI_ERROR_ARGUMENT;

	/* Values is required whenever the plan has parameters */
	if (plan->nargs > 0 && Values == NULL)
		return SPI_ERROR_PARAM;

	res = _SPI_begin_call(true);
	if (res < 0)
		return res;

	res = _SPI_execute_plan(plan,
							_SPI_convert_params(plan->nargs, plan->argtypes,
												Values, Nulls),
							snapshot, crosscheck_snapshot,
							read_only, fire_triggers, tcount);

	_SPI_end_call(true);
	return res;
}
624
/*
 * SPI_execute_with_args -- plan and execute a query with supplied arguments
 *
 * This is functionally equivalent to SPI_prepare followed by
 * SPI_execute_plan.
 *
 * nargs/argtypes describe the parameter symbols ($1..$n) in src;
 * Values/Nulls give their actual values ('n' in Nulls marks a null).
 */
int
SPI_execute_with_args(const char *src,
					  int nargs, Oid *argtypes,
					  Datum *Values, const char *Nulls,
					  bool read_only, long tcount)
{
	int			res;
	_SPI_plan	plan;
	ParamListInfo paramLI;

	if (src == NULL || nargs < 0 || tcount < 0)
		return SPI_ERROR_ARGUMENT;

	if (nargs > 0 && (argtypes == NULL || Values == NULL))
		return SPI_ERROR_PARAM;

	res = _SPI_begin_call(true);
	if (res < 0)
		return res;

	/* Set up a transient one-shot plan describing the parameters */
	memset(&plan, 0, sizeof(_SPI_plan));
	plan.magic = _SPI_PLAN_MAGIC;
	plan.cursor_options = CURSOR_OPT_PARALLEL_OK;
	plan.nargs = nargs;
	plan.argtypes = argtypes;
	plan.parserSetup = NULL;
	plan.parserSetupArg = NULL;

	/* Convert parameters before planning, while still in exec context */
	paramLI = _SPI_convert_params(nargs, argtypes,
								  Values, Nulls);

	_SPI_prepare_oneshot_plan(src, &plan);

	res = _SPI_execute_plan(&plan, paramLI,
							InvalidSnapshot, InvalidSnapshot,
							read_only, true, tcount);

	_SPI_end_call(true);
	return res;
}
671
672 SPIPlanPtr
SPI_prepare(const char * src,int nargs,Oid * argtypes)673 SPI_prepare(const char *src, int nargs, Oid *argtypes)
674 {
675 return SPI_prepare_cursor(src, nargs, argtypes, 0);
676 }
677
/*
 * SPI_prepare_cursor -- prepare a plan, allowing cursor options.
 *
 * nargs/argtypes describe the parameter symbols ($1..$n) in src;
 * cursorOptions is a bitmask of CURSOR_OPT_* flags.  Returns a plan
 * pointer allocated in the current SPI procedure context (use
 * SPI_keepplan or SPI_saveplan to retain it longer), or NULL with
 * SPI_result set on failure.
 */
SPIPlanPtr
SPI_prepare_cursor(const char *src, int nargs, Oid *argtypes,
				   int cursorOptions)
{
	_SPI_plan	plan;
	SPIPlanPtr	result;

	if (src == NULL || nargs < 0 || (nargs > 0 && argtypes == NULL))
	{
		SPI_result = SPI_ERROR_ARGUMENT;
		return NULL;
	}

	SPI_result = _SPI_begin_call(true);
	if (SPI_result < 0)
		return NULL;

	/* Build a temporary plan header in local storage */
	memset(&plan, 0, sizeof(_SPI_plan));
	plan.magic = _SPI_PLAN_MAGIC;
	plan.cursor_options = cursorOptions;
	plan.nargs = nargs;
	plan.argtypes = argtypes;
	plan.parserSetup = NULL;
	plan.parserSetupArg = NULL;

	_SPI_prepare_plan(src, &plan);

	/* copy plan to procedure context */
	result = _SPI_make_plan_non_temp(&plan);

	_SPI_end_call(true);

	return result;
}
712
/*
 * SPI_prepare_params -- prepare a plan with parameters resolved by a
 * parser-setup hook instead of a fixed argtypes array.
 *
 * parserSetup/parserSetupArg install parameter-resolution callbacks on
 * the parse state (as used by PL/pgSQL).  cursorOptions is a bitmask of
 * CURSOR_OPT_* flags.  Returns a plan in the current SPI procedure
 * context, or NULL with SPI_result set on failure.
 */
SPIPlanPtr
SPI_prepare_params(const char *src,
				   ParserSetupHook parserSetup,
				   void *parserSetupArg,
				   int cursorOptions)
{
	_SPI_plan	plan;
	SPIPlanPtr	result;

	if (src == NULL)
	{
		SPI_result = SPI_ERROR_ARGUMENT;
		return NULL;
	}

	SPI_result = _SPI_begin_call(true);
	if (SPI_result < 0)
		return NULL;

	/* Build a temporary plan header; parameters come from the hook */
	memset(&plan, 0, sizeof(_SPI_plan));
	plan.magic = _SPI_PLAN_MAGIC;
	plan.cursor_options = cursorOptions;
	plan.nargs = 0;
	plan.argtypes = NULL;
	plan.parserSetup = parserSetup;
	plan.parserSetupArg = parserSetupArg;

	_SPI_prepare_plan(src, &plan);

	/* copy plan to procedure context */
	result = _SPI_make_plan_non_temp(&plan);

	_SPI_end_call(true);

	return result;
}
749
/*
 * SPI_keepplan -- retain a prepared plan beyond SPI_finish().
 *
 * Reparents the plan into CacheMemoryContext and marks its cached plans
 * as saved, so the plan survives until explicitly freed with
 * SPI_freeplan.  Returns 0 on success, or SPI_ERROR_ARGUMENT for an
 * invalid, already-saved, or one-shot plan.
 */
int
SPI_keepplan(SPIPlanPtr plan)
{
	ListCell   *lc;

	if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC ||
		plan->saved || plan->oneshot)
		return SPI_ERROR_ARGUMENT;

	/*
	 * Mark it saved, reparent it under CacheMemoryContext, and mark all the
	 * component CachedPlanSources as saved.  This sequence cannot fail
	 * partway through, so there's no risk of long-term memory leakage.
	 */
	plan->saved = true;
	MemoryContextSetParent(plan->plancxt, CacheMemoryContext);

	foreach(lc, plan->plancache_list)
	{
		CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc);

		SaveCachedPlan(plansource);
	}

	return 0;
}
776
777 SPIPlanPtr
SPI_saveplan(SPIPlanPtr plan)778 SPI_saveplan(SPIPlanPtr plan)
779 {
780 SPIPlanPtr newplan;
781
782 if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC)
783 {
784 SPI_result = SPI_ERROR_ARGUMENT;
785 return NULL;
786 }
787
788 SPI_result = _SPI_begin_call(false); /* don't change context */
789 if (SPI_result < 0)
790 return NULL;
791
792 newplan = _SPI_save_plan(plan);
793
794 SPI_result = _SPI_end_call(false);
795
796 return newplan;
797 }
798
799 int
SPI_freeplan(SPIPlanPtr plan)800 SPI_freeplan(SPIPlanPtr plan)
801 {
802 ListCell *lc;
803
804 if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC)
805 return SPI_ERROR_ARGUMENT;
806
807 /* Release the plancache entries */
808 foreach(lc, plan->plancache_list)
809 {
810 CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc);
811
812 DropCachedPlan(plansource);
813 }
814
815 /* Now get rid of the _SPI_plan and subsidiary data in its plancxt */
816 MemoryContextDelete(plan->plancxt);
817
818 return 0;
819 }
820
821 HeapTuple
SPI_copytuple(HeapTuple tuple)822 SPI_copytuple(HeapTuple tuple)
823 {
824 MemoryContext oldcxt;
825 HeapTuple ctuple;
826
827 if (tuple == NULL)
828 {
829 SPI_result = SPI_ERROR_ARGUMENT;
830 return NULL;
831 }
832
833 if (_SPI_current == NULL)
834 {
835 SPI_result = SPI_ERROR_UNCONNECTED;
836 return NULL;
837 }
838
839 oldcxt = MemoryContextSwitchTo(_SPI_current->savedcxt);
840
841 ctuple = heap_copytuple(tuple);
842
843 MemoryContextSwitchTo(oldcxt);
844
845 return ctuple;
846 }
847
/*
 * SPI_returntuple -- convert a tuple into a composite Datum suitable for
 * returning from a function, copied into the SPI caller's (upper) memory
 * context.
 *
 * Returns NULL with SPI_result set on bad arguments or if not connected.
 */
HeapTupleHeader
SPI_returntuple(HeapTuple tuple, TupleDesc tupdesc)
{
	MemoryContext oldcxt;
	HeapTupleHeader dtup;

	if (tuple == NULL || tupdesc == NULL)
	{
		SPI_result = SPI_ERROR_ARGUMENT;
		return NULL;
	}

	if (_SPI_current == NULL)
	{
		SPI_result = SPI_ERROR_UNCONNECTED;
		return NULL;
	}

	/* For RECORD results, make sure a typmod has been assigned */
	if (tupdesc->tdtypeid == RECORDOID &&
		tupdesc->tdtypmod < 0)
		assign_record_type_typmod(tupdesc);

	oldcxt = MemoryContextSwitchTo(_SPI_current->savedcxt);

	/* heap_copy_tuple_as_datum also stamps the type id/typmod */
	dtup = DatumGetHeapTupleHeader(heap_copy_tuple_as_datum(tuple, tupdesc));

	MemoryContextSwitchTo(oldcxt);

	return dtup;
}
879
/*
 * SPI_modifytuple -- build a new tuple that is a copy of "tuple" with the
 * columns listed in attnum[0..natts-1] replaced by Values/Nulls.
 *
 * attnum entries are 1-based column numbers; Nulls[i] == 'n' marks a null
 * (Nulls may be NULL if no replacement value is null).  The result is
 * allocated in the SPI caller's (upper) context.  Returns NULL with
 * SPI_result set on bad arguments, if not connected, or if any attnum is
 * out of range (SPI_ERROR_NOATTRIBUTE).
 */
HeapTuple
SPI_modifytuple(Relation rel, HeapTuple tuple, int natts, int *attnum,
				Datum *Values, const char *Nulls)
{
	MemoryContext oldcxt;
	HeapTuple	mtuple;
	int			numberOfAttributes;
	Datum	   *v;
	bool	   *n;
	int			i;

	if (rel == NULL || tuple == NULL || natts < 0 || attnum == NULL || Values == NULL)
	{
		SPI_result = SPI_ERROR_ARGUMENT;
		return NULL;
	}

	if (_SPI_current == NULL)
	{
		SPI_result = SPI_ERROR_UNCONNECTED;
		return NULL;
	}

	/* Build everything in the upper context so the result survives */
	oldcxt = MemoryContextSwitchTo(_SPI_current->savedcxt);

	SPI_result = 0;

	numberOfAttributes = rel->rd_att->natts;
	v = (Datum *) palloc(numberOfAttributes * sizeof(Datum));
	n = (bool *) palloc(numberOfAttributes * sizeof(bool));

	/* fetch old values and nulls */
	heap_deform_tuple(tuple, rel->rd_att, v, n);

	/* replace values and nulls */
	for (i = 0; i < natts; i++)
	{
		if (attnum[i] <= 0 || attnum[i] > numberOfAttributes)
			break;				/* out-of-range column number */
		v[attnum[i] - 1] = Values[i];
		n[attnum[i] - 1] = (Nulls && Nulls[i] == 'n') ? true : false;
	}

	if (i == natts)				/* no errors in *attnum */
	{
		mtuple = heap_form_tuple(rel->rd_att, v, n);

		/*
		 * copy the identification info of the old tuple: t_ctid, t_self, and
		 * OID (if any)
		 */
		mtuple->t_data->t_ctid = tuple->t_data->t_ctid;
		mtuple->t_self = tuple->t_self;
		mtuple->t_tableOid = tuple->t_tableOid;
	}
	else
	{
		mtuple = NULL;
		SPI_result = SPI_ERROR_NOATTRIBUTE;
	}

	pfree(v);
	pfree(n);

	MemoryContextSwitchTo(oldcxt);

	return mtuple;
}
948
949 int
SPI_fnumber(TupleDesc tupdesc,const char * fname)950 SPI_fnumber(TupleDesc tupdesc, const char *fname)
951 {
952 int res;
953 const FormData_pg_attribute *sysatt;
954
955 for (res = 0; res < tupdesc->natts; res++)
956 {
957 Form_pg_attribute attr = TupleDescAttr(tupdesc, res);
958
959 if (namestrcmp(&attr->attname, fname) == 0 &&
960 !attr->attisdropped)
961 return res + 1;
962 }
963
964 sysatt = SystemAttributeByName(fname);
965 if (sysatt != NULL)
966 return sysatt->attnum;
967
968 /* SPI_ERROR_NOATTRIBUTE is different from all sys column numbers */
969 return SPI_ERROR_NOATTRIBUTE;
970 }
971
972 char *
SPI_fname(TupleDesc tupdesc,int fnumber)973 SPI_fname(TupleDesc tupdesc, int fnumber)
974 {
975 const FormData_pg_attribute *att;
976
977 SPI_result = 0;
978
979 if (fnumber > tupdesc->natts || fnumber == 0 ||
980 fnumber <= FirstLowInvalidHeapAttributeNumber)
981 {
982 SPI_result = SPI_ERROR_NOATTRIBUTE;
983 return NULL;
984 }
985
986 if (fnumber > 0)
987 att = TupleDescAttr(tupdesc, fnumber - 1);
988 else
989 att = SystemAttributeDefinition(fnumber);
990
991 return pstrdup(NameStr(att->attname));
992 }
993
994 char *
SPI_getvalue(HeapTuple tuple,TupleDesc tupdesc,int fnumber)995 SPI_getvalue(HeapTuple tuple, TupleDesc tupdesc, int fnumber)
996 {
997 Datum val;
998 bool isnull;
999 Oid typoid,
1000 foutoid;
1001 bool typisvarlena;
1002
1003 SPI_result = 0;
1004
1005 if (fnumber > tupdesc->natts || fnumber == 0 ||
1006 fnumber <= FirstLowInvalidHeapAttributeNumber)
1007 {
1008 SPI_result = SPI_ERROR_NOATTRIBUTE;
1009 return NULL;
1010 }
1011
1012 val = heap_getattr(tuple, fnumber, tupdesc, &isnull);
1013 if (isnull)
1014 return NULL;
1015
1016 if (fnumber > 0)
1017 typoid = TupleDescAttr(tupdesc, fnumber - 1)->atttypid;
1018 else
1019 typoid = (SystemAttributeDefinition(fnumber))->atttypid;
1020
1021 getTypeOutputInfo(typoid, &foutoid, &typisvarlena);
1022
1023 return OidOutputFunctionCall(foutoid, val);
1024 }
1025
1026 Datum
SPI_getbinval(HeapTuple tuple,TupleDesc tupdesc,int fnumber,bool * isnull)1027 SPI_getbinval(HeapTuple tuple, TupleDesc tupdesc, int fnumber, bool *isnull)
1028 {
1029 SPI_result = 0;
1030
1031 if (fnumber > tupdesc->natts || fnumber == 0 ||
1032 fnumber <= FirstLowInvalidHeapAttributeNumber)
1033 {
1034 SPI_result = SPI_ERROR_NOATTRIBUTE;
1035 *isnull = true;
1036 return (Datum) NULL;
1037 }
1038
1039 return heap_getattr(tuple, fnumber, tupdesc, isnull);
1040 }
1041
1042 char *
SPI_gettype(TupleDesc tupdesc,int fnumber)1043 SPI_gettype(TupleDesc tupdesc, int fnumber)
1044 {
1045 Oid typoid;
1046 HeapTuple typeTuple;
1047 char *result;
1048
1049 SPI_result = 0;
1050
1051 if (fnumber > tupdesc->natts || fnumber == 0 ||
1052 fnumber <= FirstLowInvalidHeapAttributeNumber)
1053 {
1054 SPI_result = SPI_ERROR_NOATTRIBUTE;
1055 return NULL;
1056 }
1057
1058 if (fnumber > 0)
1059 typoid = TupleDescAttr(tupdesc, fnumber - 1)->atttypid;
1060 else
1061 typoid = (SystemAttributeDefinition(fnumber))->atttypid;
1062
1063 typeTuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typoid));
1064
1065 if (!HeapTupleIsValid(typeTuple))
1066 {
1067 SPI_result = SPI_ERROR_TYPUNKNOWN;
1068 return NULL;
1069 }
1070
1071 result = pstrdup(NameStr(((Form_pg_type) GETSTRUCT(typeTuple))->typname));
1072 ReleaseSysCache(typeTuple);
1073 return result;
1074 }
1075
1076 /*
1077 * Get the data type OID for a column.
1078 *
1079 * There's nothing similar for typmod and typcollation. The rare consumers
1080 * thereof should inspect the TupleDesc directly.
1081 */
1082 Oid
SPI_gettypeid(TupleDesc tupdesc,int fnumber)1083 SPI_gettypeid(TupleDesc tupdesc, int fnumber)
1084 {
1085 SPI_result = 0;
1086
1087 if (fnumber > tupdesc->natts || fnumber == 0 ||
1088 fnumber <= FirstLowInvalidHeapAttributeNumber)
1089 {
1090 SPI_result = SPI_ERROR_NOATTRIBUTE;
1091 return InvalidOid;
1092 }
1093
1094 if (fnumber > 0)
1095 return TupleDescAttr(tupdesc, fnumber - 1)->atttypid;
1096 else
1097 return (SystemAttributeDefinition(fnumber))->atttypid;
1098 }
1099
1100 char *
SPI_getrelname(Relation rel)1101 SPI_getrelname(Relation rel)
1102 {
1103 return pstrdup(RelationGetRelationName(rel));
1104 }
1105
1106 char *
SPI_getnspname(Relation rel)1107 SPI_getnspname(Relation rel)
1108 {
1109 return get_namespace_name(RelationGetNamespace(rel));
1110 }
1111
1112 void *
SPI_palloc(Size size)1113 SPI_palloc(Size size)
1114 {
1115 if (_SPI_current == NULL)
1116 elog(ERROR, "SPI_palloc called while not connected to SPI");
1117
1118 return MemoryContextAlloc(_SPI_current->savedcxt, size);
1119 }
1120
1121 void *
SPI_repalloc(void * pointer,Size size)1122 SPI_repalloc(void *pointer, Size size)
1123 {
1124 /* No longer need to worry which context chunk was in... */
1125 return repalloc(pointer, size);
1126 }
1127
/*
 * SPI_pfree -- free a chunk previously allocated via SPI.
 */
void
SPI_pfree(void *pointer)
{
	/* No longer need to worry which context chunk was in... */
	pfree(pointer);
}
1134
1135 Datum
SPI_datumTransfer(Datum value,bool typByVal,int typLen)1136 SPI_datumTransfer(Datum value, bool typByVal, int typLen)
1137 {
1138 MemoryContext oldcxt;
1139 Datum result;
1140
1141 if (_SPI_current == NULL)
1142 elog(ERROR, "SPI_datumTransfer called while not connected to SPI");
1143
1144 oldcxt = MemoryContextSwitchTo(_SPI_current->savedcxt);
1145
1146 result = datumTransfer(value, typByVal, typLen);
1147
1148 MemoryContextSwitchTo(oldcxt);
1149
1150 return result;
1151 }
1152
1153 void
SPI_freetuple(HeapTuple tuple)1154 SPI_freetuple(HeapTuple tuple)
1155 {
1156 /* No longer need to worry which context tuple was in... */
1157 heap_freetuple(tuple);
1158 }
1159
/*
 * SPI_freetuptable()
 *
 *	Release a SPITupleTable (and all tuples within it) that was returned
 *	by SPI query execution.  A NULL argument is silently ignored.
 *
 *	Only tuple tables belonging to the topmost (current) SPI context are
 *	accepted; anything else draws a WARNING and is left alone, which
 *	guards against double deletion at the cost of (at worst) a transient
 *	memory leak.
 */
void
SPI_freetuptable(SPITupleTable *tuptable)
{
	bool		found = false;

	/* ignore call if NULL pointer */
	if (tuptable == NULL)
		return;

	/*
	 * Search only the topmost SPI context for a matching tuple table.
	 */
	if (_SPI_current != NULL)
	{
		slist_mutable_iter siter;

		/* find tuptable in active list, then remove it */
		slist_foreach_modify(siter, &_SPI_current->tuptables)
		{
			SPITupleTable *tt;

			tt = slist_container(SPITupleTable, next, siter.cur);
			if (tt == tuptable)
			{
				/* unlink first, so a later failure can't double-free it */
				slist_delete_current(&siter);
				found = true;
				break;
			}
		}
	}

	/*
	 * Refuse the deletion if we didn't find it in the topmost SPI context.
	 * This is primarily a guard against double deletion, but might prevent
	 * other errors as well.  Since the worst consequence of not deleting a
	 * tuptable would be a transient memory leak, this is just a WARNING.
	 */
	if (!found)
	{
		elog(WARNING, "attempt to delete invalid SPITupleTable %p", tuptable);
		return;
	}

	/* for safety, reset global variables that might point at tuptable */
	if (tuptable == _SPI_current->tuptable)
		_SPI_current->tuptable = NULL;
	if (tuptable == SPI_tuptable)
		SPI_tuptable = NULL;

	/* release all memory belonging to tuptable (it has its own context) */
	MemoryContextDelete(tuptable->tuptabcxt);
}
1212
1213
1214 /*
1215 * SPI_cursor_open()
1216 *
1217 * Open a prepared SPI plan as a portal
1218 */
1219 Portal
SPI_cursor_open(const char * name,SPIPlanPtr plan,Datum * Values,const char * Nulls,bool read_only)1220 SPI_cursor_open(const char *name, SPIPlanPtr plan,
1221 Datum *Values, const char *Nulls,
1222 bool read_only)
1223 {
1224 Portal portal;
1225 ParamListInfo paramLI;
1226
1227 /* build transient ParamListInfo in caller's context */
1228 paramLI = _SPI_convert_params(plan->nargs, plan->argtypes,
1229 Values, Nulls);
1230
1231 portal = SPI_cursor_open_internal(name, plan, paramLI, read_only);
1232
1233 /* done with the transient ParamListInfo */
1234 if (paramLI)
1235 pfree(paramLI);
1236
1237 return portal;
1238 }
1239
1240
1241 /*
1242 * SPI_cursor_open_with_args()
1243 *
1244 * Parse and plan a query and open it as a portal.
1245 */
1246 Portal
SPI_cursor_open_with_args(const char * name,const char * src,int nargs,Oid * argtypes,Datum * Values,const char * Nulls,bool read_only,int cursorOptions)1247 SPI_cursor_open_with_args(const char *name,
1248 const char *src,
1249 int nargs, Oid *argtypes,
1250 Datum *Values, const char *Nulls,
1251 bool read_only, int cursorOptions)
1252 {
1253 Portal result;
1254 _SPI_plan plan;
1255 ParamListInfo paramLI;
1256
1257 if (src == NULL || nargs < 0)
1258 elog(ERROR, "SPI_cursor_open_with_args called with invalid arguments");
1259
1260 if (nargs > 0 && (argtypes == NULL || Values == NULL))
1261 elog(ERROR, "SPI_cursor_open_with_args called with missing parameters");
1262
1263 SPI_result = _SPI_begin_call(true);
1264 if (SPI_result < 0)
1265 elog(ERROR, "SPI_cursor_open_with_args called while not connected");
1266
1267 memset(&plan, 0, sizeof(_SPI_plan));
1268 plan.magic = _SPI_PLAN_MAGIC;
1269 plan.cursor_options = cursorOptions;
1270 plan.nargs = nargs;
1271 plan.argtypes = argtypes;
1272 plan.parserSetup = NULL;
1273 plan.parserSetupArg = NULL;
1274
1275 /* build transient ParamListInfo in executor context */
1276 paramLI = _SPI_convert_params(nargs, argtypes,
1277 Values, Nulls);
1278
1279 _SPI_prepare_plan(src, &plan);
1280
1281 /* We needn't copy the plan; SPI_cursor_open_internal will do so */
1282
1283 result = SPI_cursor_open_internal(name, &plan, paramLI, read_only);
1284
1285 /* And clean up */
1286 _SPI_end_call(true);
1287
1288 return result;
1289 }
1290
1291
1292 /*
1293 * SPI_cursor_open_with_paramlist()
1294 *
1295 * Same as SPI_cursor_open except that parameters (if any) are passed
1296 * as a ParamListInfo, which supports dynamic parameter set determination
1297 */
1298 Portal
SPI_cursor_open_with_paramlist(const char * name,SPIPlanPtr plan,ParamListInfo params,bool read_only)1299 SPI_cursor_open_with_paramlist(const char *name, SPIPlanPtr plan,
1300 ParamListInfo params, bool read_only)
1301 {
1302 return SPI_cursor_open_internal(name, plan, params, read_only);
1303 }
1304
1305
1306 /*
1307 * SPI_cursor_open_internal()
1308 *
1309 * Common code for SPI_cursor_open variants
1310 */
1311 static Portal
SPI_cursor_open_internal(const char * name,SPIPlanPtr plan,ParamListInfo paramLI,bool read_only)1312 SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
1313 ParamListInfo paramLI, bool read_only)
1314 {
1315 CachedPlanSource *plansource;
1316 CachedPlan *cplan;
1317 List *stmt_list;
1318 char *query_string;
1319 Snapshot snapshot;
1320 MemoryContext oldcontext;
1321 Portal portal;
1322 ErrorContextCallback spierrcontext;
1323
1324 /*
1325 * Check that the plan is something the Portal code will special-case as
1326 * returning one tupleset.
1327 */
1328 if (!SPI_is_cursor_plan(plan))
1329 {
1330 /* try to give a good error message */
1331 if (list_length(plan->plancache_list) != 1)
1332 ereport(ERROR,
1333 (errcode(ERRCODE_INVALID_CURSOR_DEFINITION),
1334 errmsg("cannot open multi-query plan as cursor")));
1335 plansource = (CachedPlanSource *) linitial(plan->plancache_list);
1336 ereport(ERROR,
1337 (errcode(ERRCODE_INVALID_CURSOR_DEFINITION),
1338 /* translator: %s is name of a SQL command, eg INSERT */
1339 errmsg("cannot open %s query as cursor",
1340 GetCommandTagName(plansource->commandTag))));
1341 }
1342
1343 Assert(list_length(plan->plancache_list) == 1);
1344 plansource = (CachedPlanSource *) linitial(plan->plancache_list);
1345
1346 /* Push the SPI stack */
1347 if (_SPI_begin_call(true) < 0)
1348 elog(ERROR, "SPI_cursor_open called while not connected");
1349
1350 /* Reset SPI result (note we deliberately don't touch lastoid) */
1351 SPI_processed = 0;
1352 SPI_tuptable = NULL;
1353 _SPI_current->processed = 0;
1354 _SPI_current->tuptable = NULL;
1355
1356 /* Create the portal */
1357 if (name == NULL || name[0] == '\0')
1358 {
1359 /* Use a random nonconflicting name */
1360 portal = CreateNewPortal();
1361 }
1362 else
1363 {
1364 /* In this path, error if portal of same name already exists */
1365 portal = CreatePortal(name, false, false);
1366 }
1367
1368 /* Copy the plan's query string into the portal */
1369 query_string = MemoryContextStrdup(portal->portalContext,
1370 plansource->query_string);
1371
1372 /*
1373 * Setup error traceback support for ereport(), in case GetCachedPlan
1374 * throws an error.
1375 */
1376 spierrcontext.callback = _SPI_error_callback;
1377 spierrcontext.arg = unconstify(char *, plansource->query_string);
1378 spierrcontext.previous = error_context_stack;
1379 error_context_stack = &spierrcontext;
1380
1381 /*
1382 * Note: for a saved plan, we mustn't have any failure occur between
1383 * GetCachedPlan and PortalDefineQuery; that would result in leaking our
1384 * plancache refcount.
1385 */
1386
1387 /* Replan if needed, and increment plan refcount for portal */
1388 cplan = GetCachedPlan(plansource, paramLI, false, _SPI_current->queryEnv);
1389 stmt_list = cplan->stmt_list;
1390
1391 if (!plan->saved)
1392 {
1393 /*
1394 * We don't want the portal to depend on an unsaved CachedPlanSource,
1395 * so must copy the plan into the portal's context. An error here
1396 * will result in leaking our refcount on the plan, but it doesn't
1397 * matter because the plan is unsaved and hence transient anyway.
1398 */
1399 oldcontext = MemoryContextSwitchTo(portal->portalContext);
1400 stmt_list = copyObject(stmt_list);
1401 MemoryContextSwitchTo(oldcontext);
1402 ReleaseCachedPlan(cplan, false);
1403 cplan = NULL; /* portal shouldn't depend on cplan */
1404 }
1405
1406 /*
1407 * Set up the portal.
1408 */
1409 PortalDefineQuery(portal,
1410 NULL, /* no statement name */
1411 query_string,
1412 plansource->commandTag,
1413 stmt_list,
1414 cplan);
1415
1416 /*
1417 * Set up options for portal. Default SCROLL type is chosen the same way
1418 * as PerformCursorOpen does it.
1419 */
1420 portal->cursorOptions = plan->cursor_options;
1421 if (!(portal->cursorOptions & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL)))
1422 {
1423 if (list_length(stmt_list) == 1 &&
1424 linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY &&
1425 linitial_node(PlannedStmt, stmt_list)->rowMarks == NIL &&
1426 ExecSupportsBackwardScan(linitial_node(PlannedStmt, stmt_list)->planTree))
1427 portal->cursorOptions |= CURSOR_OPT_SCROLL;
1428 else
1429 portal->cursorOptions |= CURSOR_OPT_NO_SCROLL;
1430 }
1431
1432 /*
1433 * Disallow SCROLL with SELECT FOR UPDATE. This is not redundant with the
1434 * check in transformDeclareCursorStmt because the cursor options might
1435 * not have come through there.
1436 */
1437 if (portal->cursorOptions & CURSOR_OPT_SCROLL)
1438 {
1439 if (list_length(stmt_list) == 1 &&
1440 linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY &&
1441 linitial_node(PlannedStmt, stmt_list)->rowMarks != NIL)
1442 ereport(ERROR,
1443 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1444 errmsg("DECLARE SCROLL CURSOR ... FOR UPDATE/SHARE is not supported"),
1445 errdetail("Scrollable cursors must be READ ONLY.")));
1446 }
1447
1448 /* Make current query environment available to portal at execution time. */
1449 portal->queryEnv = _SPI_current->queryEnv;
1450
1451 /*
1452 * If told to be read-only, we'd better check for read-only queries. This
1453 * can't be done earlier because we need to look at the finished, planned
1454 * queries. (In particular, we don't want to do it between GetCachedPlan
1455 * and PortalDefineQuery, because throwing an error between those steps
1456 * would result in leaking our plancache refcount.)
1457 */
1458 if (read_only)
1459 {
1460 ListCell *lc;
1461
1462 foreach(lc, stmt_list)
1463 {
1464 PlannedStmt *pstmt = lfirst_node(PlannedStmt, lc);
1465
1466 if (!CommandIsReadOnly(pstmt))
1467 ereport(ERROR,
1468 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1469 /* translator: %s is a SQL statement name */
1470 errmsg("%s is not allowed in a non-volatile function",
1471 CreateCommandName((Node *) pstmt))));
1472 }
1473 }
1474
1475 /* Set up the snapshot to use. */
1476 if (read_only)
1477 snapshot = GetActiveSnapshot();
1478 else
1479 {
1480 CommandCounterIncrement();
1481 snapshot = GetTransactionSnapshot();
1482 }
1483
1484 /*
1485 * If the plan has parameters, copy them into the portal. Note that this
1486 * must be done after revalidating the plan, because in dynamic parameter
1487 * cases the set of parameters could have changed during re-parsing.
1488 */
1489 if (paramLI)
1490 {
1491 oldcontext = MemoryContextSwitchTo(portal->portalContext);
1492 paramLI = copyParamList(paramLI);
1493 MemoryContextSwitchTo(oldcontext);
1494 }
1495
1496 /*
1497 * Start portal execution.
1498 */
1499 PortalStart(portal, paramLI, 0, snapshot);
1500
1501 Assert(portal->strategy != PORTAL_MULTI_QUERY);
1502
1503 /* Pop the error context stack */
1504 error_context_stack = spierrcontext.previous;
1505
1506 /* Pop the SPI stack */
1507 _SPI_end_call(true);
1508
1509 /* Return the created portal */
1510 return portal;
1511 }
1512
1513
1514 /*
1515 * SPI_cursor_find()
1516 *
1517 * Find the portal of an existing open cursor
1518 */
1519 Portal
SPI_cursor_find(const char * name)1520 SPI_cursor_find(const char *name)
1521 {
1522 return GetPortalByName(name);
1523 }
1524
1525
1526 /*
1527 * SPI_cursor_fetch()
1528 *
1529 * Fetch rows in a cursor
1530 */
1531 void
SPI_cursor_fetch(Portal portal,bool forward,long count)1532 SPI_cursor_fetch(Portal portal, bool forward, long count)
1533 {
1534 _SPI_cursor_operation(portal,
1535 forward ? FETCH_FORWARD : FETCH_BACKWARD, count,
1536 CreateDestReceiver(DestSPI));
1537 /* we know that the DestSPI receiver doesn't need a destroy call */
1538 }
1539
1540
1541 /*
1542 * SPI_cursor_move()
1543 *
1544 * Move in a cursor
1545 */
1546 void
SPI_cursor_move(Portal portal,bool forward,long count)1547 SPI_cursor_move(Portal portal, bool forward, long count)
1548 {
1549 _SPI_cursor_operation(portal,
1550 forward ? FETCH_FORWARD : FETCH_BACKWARD, count,
1551 None_Receiver);
1552 }
1553
1554
1555 /*
1556 * SPI_scroll_cursor_fetch()
1557 *
1558 * Fetch rows in a scrollable cursor
1559 */
1560 void
SPI_scroll_cursor_fetch(Portal portal,FetchDirection direction,long count)1561 SPI_scroll_cursor_fetch(Portal portal, FetchDirection direction, long count)
1562 {
1563 _SPI_cursor_operation(portal,
1564 direction, count,
1565 CreateDestReceiver(DestSPI));
1566 /* we know that the DestSPI receiver doesn't need a destroy call */
1567 }
1568
1569
1570 /*
1571 * SPI_scroll_cursor_move()
1572 *
1573 * Move in a scrollable cursor
1574 */
1575 void
SPI_scroll_cursor_move(Portal portal,FetchDirection direction,long count)1576 SPI_scroll_cursor_move(Portal portal, FetchDirection direction, long count)
1577 {
1578 _SPI_cursor_operation(portal, direction, count, None_Receiver);
1579 }
1580
1581
1582 /*
1583 * SPI_cursor_close()
1584 *
1585 * Close a cursor
1586 */
1587 void
SPI_cursor_close(Portal portal)1588 SPI_cursor_close(Portal portal)
1589 {
1590 if (!PortalIsValid(portal))
1591 elog(ERROR, "invalid portal in SPI cursor operation");
1592
1593 PortalDrop(portal, false);
1594 }
1595
1596 /*
1597 * Returns the Oid representing the type id for argument at argIndex. First
1598 * parameter is at index zero.
1599 */
1600 Oid
SPI_getargtypeid(SPIPlanPtr plan,int argIndex)1601 SPI_getargtypeid(SPIPlanPtr plan, int argIndex)
1602 {
1603 if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC ||
1604 argIndex < 0 || argIndex >= plan->nargs)
1605 {
1606 SPI_result = SPI_ERROR_ARGUMENT;
1607 return InvalidOid;
1608 }
1609 return plan->argtypes[argIndex];
1610 }
1611
1612 /*
1613 * Returns the number of arguments for the prepared plan.
1614 */
1615 int
SPI_getargcount(SPIPlanPtr plan)1616 SPI_getargcount(SPIPlanPtr plan)
1617 {
1618 if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC)
1619 {
1620 SPI_result = SPI_ERROR_ARGUMENT;
1621 return -1;
1622 }
1623 return plan->nargs;
1624 }
1625
1626 /*
1627 * Returns true if the plan contains exactly one command
1628 * and that command returns tuples to the caller (eg, SELECT or
1629 * INSERT ... RETURNING, but not SELECT ... INTO). In essence,
1630 * the result indicates if the command can be used with SPI_cursor_open
1631 *
1632 * Parameters
1633 * plan: A plan previously prepared using SPI_prepare
1634 */
1635 bool
SPI_is_cursor_plan(SPIPlanPtr plan)1636 SPI_is_cursor_plan(SPIPlanPtr plan)
1637 {
1638 CachedPlanSource *plansource;
1639
1640 if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC)
1641 {
1642 SPI_result = SPI_ERROR_ARGUMENT;
1643 return false;
1644 }
1645
1646 if (list_length(plan->plancache_list) != 1)
1647 {
1648 SPI_result = 0;
1649 return false; /* not exactly 1 pre-rewrite command */
1650 }
1651 plansource = (CachedPlanSource *) linitial(plan->plancache_list);
1652
1653 /*
1654 * We used to force revalidation of the cached plan here, but that seems
1655 * unnecessary: invalidation could mean a change in the rowtype of the
1656 * tuples returned by a plan, but not whether it returns tuples at all.
1657 */
1658 SPI_result = 0;
1659
1660 /* Does it return tuples? */
1661 if (plansource->resultDesc)
1662 return true;
1663
1664 return false;
1665 }
1666
1667 /*
1668 * SPI_plan_is_valid --- test whether a SPI plan is currently valid
1669 * (that is, not marked as being in need of revalidation).
1670 *
1671 * See notes for CachedPlanIsValid before using this.
1672 */
1673 bool
SPI_plan_is_valid(SPIPlanPtr plan)1674 SPI_plan_is_valid(SPIPlanPtr plan)
1675 {
1676 ListCell *lc;
1677
1678 Assert(plan->magic == _SPI_PLAN_MAGIC);
1679
1680 foreach(lc, plan->plancache_list)
1681 {
1682 CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc);
1683
1684 if (!CachedPlanIsValid(plansource))
1685 return false;
1686 }
1687 return true;
1688 }
1689
1690 /*
1691 * SPI_result_code_string --- convert any SPI return code to a string
1692 *
1693 * This is often useful in error messages. Most callers will probably
1694 * only pass negative (error-case) codes, but for generality we recognize
1695 * the success codes too.
1696 */
1697 const char *
SPI_result_code_string(int code)1698 SPI_result_code_string(int code)
1699 {
1700 static char buf[64];
1701
1702 switch (code)
1703 {
1704 case SPI_ERROR_CONNECT:
1705 return "SPI_ERROR_CONNECT";
1706 case SPI_ERROR_COPY:
1707 return "SPI_ERROR_COPY";
1708 case SPI_ERROR_OPUNKNOWN:
1709 return "SPI_ERROR_OPUNKNOWN";
1710 case SPI_ERROR_UNCONNECTED:
1711 return "SPI_ERROR_UNCONNECTED";
1712 case SPI_ERROR_ARGUMENT:
1713 return "SPI_ERROR_ARGUMENT";
1714 case SPI_ERROR_PARAM:
1715 return "SPI_ERROR_PARAM";
1716 case SPI_ERROR_TRANSACTION:
1717 return "SPI_ERROR_TRANSACTION";
1718 case SPI_ERROR_NOATTRIBUTE:
1719 return "SPI_ERROR_NOATTRIBUTE";
1720 case SPI_ERROR_NOOUTFUNC:
1721 return "SPI_ERROR_NOOUTFUNC";
1722 case SPI_ERROR_TYPUNKNOWN:
1723 return "SPI_ERROR_TYPUNKNOWN";
1724 case SPI_ERROR_REL_DUPLICATE:
1725 return "SPI_ERROR_REL_DUPLICATE";
1726 case SPI_ERROR_REL_NOT_FOUND:
1727 return "SPI_ERROR_REL_NOT_FOUND";
1728 case SPI_OK_CONNECT:
1729 return "SPI_OK_CONNECT";
1730 case SPI_OK_FINISH:
1731 return "SPI_OK_FINISH";
1732 case SPI_OK_FETCH:
1733 return "SPI_OK_FETCH";
1734 case SPI_OK_UTILITY:
1735 return "SPI_OK_UTILITY";
1736 case SPI_OK_SELECT:
1737 return "SPI_OK_SELECT";
1738 case SPI_OK_SELINTO:
1739 return "SPI_OK_SELINTO";
1740 case SPI_OK_INSERT:
1741 return "SPI_OK_INSERT";
1742 case SPI_OK_DELETE:
1743 return "SPI_OK_DELETE";
1744 case SPI_OK_UPDATE:
1745 return "SPI_OK_UPDATE";
1746 case SPI_OK_CURSOR:
1747 return "SPI_OK_CURSOR";
1748 case SPI_OK_INSERT_RETURNING:
1749 return "SPI_OK_INSERT_RETURNING";
1750 case SPI_OK_DELETE_RETURNING:
1751 return "SPI_OK_DELETE_RETURNING";
1752 case SPI_OK_UPDATE_RETURNING:
1753 return "SPI_OK_UPDATE_RETURNING";
1754 case SPI_OK_REWRITTEN:
1755 return "SPI_OK_REWRITTEN";
1756 case SPI_OK_REL_REGISTER:
1757 return "SPI_OK_REL_REGISTER";
1758 case SPI_OK_REL_UNREGISTER:
1759 return "SPI_OK_REL_UNREGISTER";
1760 }
1761 /* Unrecognized code ... return something useful ... */
1762 sprintf(buf, "Unrecognized SPI code %d", code);
1763 return buf;
1764 }
1765
1766 /*
1767 * SPI_plan_get_plan_sources --- get a SPI plan's underlying list of
1768 * CachedPlanSources.
1769 *
1770 * This is exported so that PL/pgSQL can use it (this beats letting PL/pgSQL
1771 * look directly into the SPIPlan for itself). It's not documented in
1772 * spi.sgml because we'd just as soon not have too many places using this.
1773 */
1774 List *
SPI_plan_get_plan_sources(SPIPlanPtr plan)1775 SPI_plan_get_plan_sources(SPIPlanPtr plan)
1776 {
1777 Assert(plan->magic == _SPI_PLAN_MAGIC);
1778 return plan->plancache_list;
1779 }
1780
1781 /*
1782 * SPI_plan_get_cached_plan --- get a SPI plan's generic CachedPlan,
1783 * if the SPI plan contains exactly one CachedPlanSource. If not,
1784 * return NULL. Caller is responsible for doing ReleaseCachedPlan().
1785 *
1786 * This is exported so that PL/pgSQL can use it (this beats letting PL/pgSQL
1787 * look directly into the SPIPlan for itself). It's not documented in
1788 * spi.sgml because we'd just as soon not have too many places using this.
1789 */
1790 CachedPlan *
SPI_plan_get_cached_plan(SPIPlanPtr plan)1791 SPI_plan_get_cached_plan(SPIPlanPtr plan)
1792 {
1793 CachedPlanSource *plansource;
1794 CachedPlan *cplan;
1795 ErrorContextCallback spierrcontext;
1796
1797 Assert(plan->magic == _SPI_PLAN_MAGIC);
1798
1799 /* Can't support one-shot plans here */
1800 if (plan->oneshot)
1801 return NULL;
1802
1803 /* Must have exactly one CachedPlanSource */
1804 if (list_length(plan->plancache_list) != 1)
1805 return NULL;
1806 plansource = (CachedPlanSource *) linitial(plan->plancache_list);
1807
1808 /* Setup error traceback support for ereport() */
1809 spierrcontext.callback = _SPI_error_callback;
1810 spierrcontext.arg = unconstify(char *, plansource->query_string);
1811 spierrcontext.previous = error_context_stack;
1812 error_context_stack = &spierrcontext;
1813
1814 /* Get the generic plan for the query */
1815 cplan = GetCachedPlan(plansource, NULL, plan->saved,
1816 _SPI_current->queryEnv);
1817 Assert(cplan == plansource->gplan);
1818
1819 /* Pop the error context stack */
1820 error_context_stack = spierrcontext.previous;
1821
1822 return cplan;
1823 }
1824
1825
1826 /* =================== private functions =================== */
1827
1828 /*
1829 * spi_dest_startup
1830 * Initialize to receive tuples from Executor into SPITupleTable
1831 * of current SPI procedure
1832 */
1833 void
spi_dest_startup(DestReceiver * self,int operation,TupleDesc typeinfo)1834 spi_dest_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
1835 {
1836 SPITupleTable *tuptable;
1837 MemoryContext oldcxt;
1838 MemoryContext tuptabcxt;
1839
1840 if (_SPI_current == NULL)
1841 elog(ERROR, "spi_dest_startup called while not connected to SPI");
1842
1843 if (_SPI_current->tuptable != NULL)
1844 elog(ERROR, "improper call to spi_dest_startup");
1845
1846 /* We create the tuple table context as a child of procCxt */
1847
1848 oldcxt = _SPI_procmem(); /* switch to procedure memory context */
1849
1850 tuptabcxt = AllocSetContextCreate(CurrentMemoryContext,
1851 "SPI TupTable",
1852 ALLOCSET_DEFAULT_SIZES);
1853 MemoryContextSwitchTo(tuptabcxt);
1854
1855 _SPI_current->tuptable = tuptable = (SPITupleTable *)
1856 palloc0(sizeof(SPITupleTable));
1857 tuptable->tuptabcxt = tuptabcxt;
1858 tuptable->subid = GetCurrentSubTransactionId();
1859
1860 /*
1861 * The tuptable is now valid enough to be freed by AtEOSubXact_SPI, so put
1862 * it onto the SPI context's tuptables list. This will ensure it's not
1863 * leaked even in the unlikely event the following few lines fail.
1864 */
1865 slist_push_head(&_SPI_current->tuptables, &tuptable->next);
1866
1867 /* set up initial allocations */
1868 tuptable->alloced = 128;
1869 tuptable->vals = (HeapTuple *) palloc(tuptable->alloced * sizeof(HeapTuple));
1870 tuptable->numvals = 0;
1871 tuptable->tupdesc = CreateTupleDescCopy(typeinfo);
1872
1873 MemoryContextSwitchTo(oldcxt);
1874 }
1875
1876 /*
1877 * spi_printtup
1878 * store tuple retrieved by Executor into SPITupleTable
1879 * of current SPI procedure
1880 */
1881 bool
spi_printtup(TupleTableSlot * slot,DestReceiver * self)1882 spi_printtup(TupleTableSlot *slot, DestReceiver *self)
1883 {
1884 SPITupleTable *tuptable;
1885 MemoryContext oldcxt;
1886
1887 if (_SPI_current == NULL)
1888 elog(ERROR, "spi_printtup called while not connected to SPI");
1889
1890 tuptable = _SPI_current->tuptable;
1891 if (tuptable == NULL)
1892 elog(ERROR, "improper call to spi_printtup");
1893
1894 oldcxt = MemoryContextSwitchTo(tuptable->tuptabcxt);
1895
1896 if (tuptable->numvals >= tuptable->alloced)
1897 {
1898 /* Double the size of the pointer array */
1899 uint64 newalloced = tuptable->alloced * 2;
1900
1901 tuptable->vals = (HeapTuple *) repalloc_huge(tuptable->vals,
1902 newalloced * sizeof(HeapTuple));
1903 tuptable->alloced = newalloced;
1904 }
1905
1906 tuptable->vals[tuptable->numvals] = ExecCopySlotHeapTuple(slot);
1907 (tuptable->numvals)++;
1908
1909 MemoryContextSwitchTo(oldcxt);
1910
1911 return true;
1912 }
1913
1914 /*
1915 * Static functions
1916 */
1917
1918 /*
1919 * Parse and analyze a querystring.
1920 *
1921 * At entry, plan->argtypes and plan->nargs (or alternatively plan->parserSetup
1922 * and plan->parserSetupArg) must be valid, as must plan->cursor_options.
1923 *
1924 * Results are stored into *plan (specifically, plan->plancache_list).
1925 * Note that the result data is all in CurrentMemoryContext or child contexts
1926 * thereof; in practice this means it is in the SPI executor context, and
1927 * what we are creating is a "temporary" SPIPlan. Cruft generated during
1928 * parsing is also left in CurrentMemoryContext.
1929 */
1930 static void
_SPI_prepare_plan(const char * src,SPIPlanPtr plan)1931 _SPI_prepare_plan(const char *src, SPIPlanPtr plan)
1932 {
1933 List *raw_parsetree_list;
1934 List *plancache_list;
1935 ListCell *list_item;
1936 ErrorContextCallback spierrcontext;
1937
1938 /*
1939 * Setup error traceback support for ereport()
1940 */
1941 spierrcontext.callback = _SPI_error_callback;
1942 spierrcontext.arg = unconstify(char *, src);
1943 spierrcontext.previous = error_context_stack;
1944 error_context_stack = &spierrcontext;
1945
1946 /*
1947 * Parse the request string into a list of raw parse trees.
1948 */
1949 raw_parsetree_list = pg_parse_query(src);
1950
1951 /*
1952 * Do parse analysis and rule rewrite for each raw parsetree, storing the
1953 * results into unsaved plancache entries.
1954 */
1955 plancache_list = NIL;
1956
1957 foreach(list_item, raw_parsetree_list)
1958 {
1959 RawStmt *parsetree = lfirst_node(RawStmt, list_item);
1960 List *stmt_list;
1961 CachedPlanSource *plansource;
1962
1963 /*
1964 * Create the CachedPlanSource before we do parse analysis, since it
1965 * needs to see the unmodified raw parse tree.
1966 */
1967 plansource = CreateCachedPlan(parsetree,
1968 src,
1969 CreateCommandTag(parsetree->stmt));
1970
1971 /*
1972 * Parameter datatypes are driven by parserSetup hook if provided,
1973 * otherwise we use the fixed parameter list.
1974 */
1975 if (plan->parserSetup != NULL)
1976 {
1977 Assert(plan->nargs == 0);
1978 stmt_list = pg_analyze_and_rewrite_params(parsetree,
1979 src,
1980 plan->parserSetup,
1981 plan->parserSetupArg,
1982 _SPI_current->queryEnv);
1983 }
1984 else
1985 {
1986 stmt_list = pg_analyze_and_rewrite(parsetree,
1987 src,
1988 plan->argtypes,
1989 plan->nargs,
1990 _SPI_current->queryEnv);
1991 }
1992
1993 /* Finish filling in the CachedPlanSource */
1994 CompleteCachedPlan(plansource,
1995 stmt_list,
1996 NULL,
1997 plan->argtypes,
1998 plan->nargs,
1999 plan->parserSetup,
2000 plan->parserSetupArg,
2001 plan->cursor_options,
2002 false); /* not fixed result */
2003
2004 plancache_list = lappend(plancache_list, plansource);
2005 }
2006
2007 plan->plancache_list = plancache_list;
2008 plan->oneshot = false;
2009
2010 /*
2011 * Pop the error context stack
2012 */
2013 error_context_stack = spierrcontext.previous;
2014 }
2015
2016 /*
2017 * Parse, but don't analyze, a querystring.
2018 *
2019 * This is a stripped-down version of _SPI_prepare_plan that only does the
2020 * initial raw parsing. It creates "one shot" CachedPlanSources
2021 * that still require parse analysis before execution is possible.
2022 *
2023 * The advantage of using the "one shot" form of CachedPlanSource is that
2024 * we eliminate data copying and invalidation overhead. Postponing parse
2025 * analysis also prevents issues if some of the raw parsetrees are DDL
2026 * commands that affect validity of later parsetrees. Both of these
2027 * attributes are good things for SPI_execute() and similar cases.
2028 *
2029 * Results are stored into *plan (specifically, plan->plancache_list).
2030 * Note that the result data is all in CurrentMemoryContext or child contexts
2031 * thereof; in practice this means it is in the SPI executor context, and
2032 * what we are creating is a "temporary" SPIPlan. Cruft generated during
2033 * parsing is also left in CurrentMemoryContext.
2034 */
2035 static void
_SPI_prepare_oneshot_plan(const char * src,SPIPlanPtr plan)2036 _SPI_prepare_oneshot_plan(const char *src, SPIPlanPtr plan)
2037 {
2038 List *raw_parsetree_list;
2039 List *plancache_list;
2040 ListCell *list_item;
2041 ErrorContextCallback spierrcontext;
2042
2043 /*
2044 * Setup error traceback support for ereport()
2045 */
2046 spierrcontext.callback = _SPI_error_callback;
2047 spierrcontext.arg = unconstify(char *, src);
2048 spierrcontext.previous = error_context_stack;
2049 error_context_stack = &spierrcontext;
2050
2051 /*
2052 * Parse the request string into a list of raw parse trees.
2053 */
2054 raw_parsetree_list = pg_parse_query(src);
2055
2056 /*
2057 * Construct plancache entries, but don't do parse analysis yet.
2058 */
2059 plancache_list = NIL;
2060
2061 foreach(list_item, raw_parsetree_list)
2062 {
2063 RawStmt *parsetree = lfirst_node(RawStmt, list_item);
2064 CachedPlanSource *plansource;
2065
2066 plansource = CreateOneShotCachedPlan(parsetree,
2067 src,
2068 CreateCommandTag(parsetree->stmt));
2069
2070 plancache_list = lappend(plancache_list, plansource);
2071 }
2072
2073 plan->plancache_list = plancache_list;
2074 plan->oneshot = true;
2075
2076 /*
2077 * Pop the error context stack
2078 */
2079 error_context_stack = spierrcontext.previous;
2080 }
2081
2082 /*
2083 * Execute the given plan with the given parameter values
2084 *
2085 * snapshot: query snapshot to use, or InvalidSnapshot for the normal
2086 * behavior of taking a new snapshot for each query.
2087 * crosscheck_snapshot: for RI use, all others pass InvalidSnapshot
2088 * read_only: true for read-only execution (no CommandCounterIncrement)
2089 * fire_triggers: true to fire AFTER triggers at end of query (normal case);
2090 * false means any AFTER triggers are postponed to end of outer query
2091 * tcount: execution tuple-count limit, or 0 for none
2092 */
2093 static int
_SPI_execute_plan(SPIPlanPtr plan,ParamListInfo paramLI,Snapshot snapshot,Snapshot crosscheck_snapshot,bool read_only,bool fire_triggers,uint64 tcount)2094 _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
2095 Snapshot snapshot, Snapshot crosscheck_snapshot,
2096 bool read_only, bool fire_triggers, uint64 tcount)
2097 {
2098 int my_res = 0;
2099 uint64 my_processed = 0;
2100 SPITupleTable *my_tuptable = NULL;
2101 int res = 0;
2102 bool allow_nonatomic = plan->no_snapshots; /* legacy API name */
2103 bool pushed_active_snap = false;
2104 ErrorContextCallback spierrcontext;
2105 CachedPlan *cplan = NULL;
2106 ListCell *lc1;
2107
2108 /*
2109 * Setup error traceback support for ereport()
2110 */
2111 spierrcontext.callback = _SPI_error_callback;
2112 spierrcontext.arg = NULL; /* we'll fill this below */
2113 spierrcontext.previous = error_context_stack;
2114 error_context_stack = &spierrcontext;
2115
2116 /*
2117 * We support four distinct snapshot management behaviors:
2118 *
2119 * snapshot != InvalidSnapshot, read_only = true: use exactly the given
2120 * snapshot.
2121 *
2122 * snapshot != InvalidSnapshot, read_only = false: use the given snapshot,
2123 * modified by advancing its command ID before each querytree.
2124 *
2125 * snapshot == InvalidSnapshot, read_only = true: use the entry-time
2126 * ActiveSnapshot, if any (if there isn't one, we run with no snapshot).
2127 *
2128 * snapshot == InvalidSnapshot, read_only = false: take a full new
2129 * snapshot for each user command, and advance its command ID before each
2130 * querytree within the command.
2131 *
2132 * In the first two cases, we can just push the snap onto the stack once
2133 * for the whole plan list.
2134 *
2135 * Note that snapshot != InvalidSnapshot implies an atomic execution
2136 * context.
2137 */
2138 if (snapshot != InvalidSnapshot)
2139 {
2140 Assert(!allow_nonatomic);
2141 if (read_only)
2142 {
2143 PushActiveSnapshot(snapshot);
2144 pushed_active_snap = true;
2145 }
2146 else
2147 {
2148 /* Make sure we have a private copy of the snapshot to modify */
2149 PushCopiedSnapshot(snapshot);
2150 pushed_active_snap = true;
2151 }
2152 }
2153
2154 foreach(lc1, plan->plancache_list)
2155 {
2156 CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc1);
2157 List *stmt_list;
2158 ListCell *lc2;
2159
2160 spierrcontext.arg = unconstify(char *, plansource->query_string);
2161
2162 /*
2163 * If this is a one-shot plan, we still need to do parse analysis.
2164 */
2165 if (plan->oneshot)
2166 {
2167 RawStmt *parsetree = plansource->raw_parse_tree;
2168 const char *src = plansource->query_string;
2169 List *stmt_list;
2170
2171 /*
2172 * Parameter datatypes are driven by parserSetup hook if provided,
2173 * otherwise we use the fixed parameter list.
2174 */
2175 if (parsetree == NULL)
2176 stmt_list = NIL;
2177 else if (plan->parserSetup != NULL)
2178 {
2179 Assert(plan->nargs == 0);
2180 stmt_list = pg_analyze_and_rewrite_params(parsetree,
2181 src,
2182 plan->parserSetup,
2183 plan->parserSetupArg,
2184 _SPI_current->queryEnv);
2185 }
2186 else
2187 {
2188 stmt_list = pg_analyze_and_rewrite(parsetree,
2189 src,
2190 plan->argtypes,
2191 plan->nargs,
2192 _SPI_current->queryEnv);
2193 }
2194
2195 /* Finish filling in the CachedPlanSource */
2196 CompleteCachedPlan(plansource,
2197 stmt_list,
2198 NULL,
2199 plan->argtypes,
2200 plan->nargs,
2201 plan->parserSetup,
2202 plan->parserSetupArg,
2203 plan->cursor_options,
2204 false); /* not fixed result */
2205 }
2206
2207 /*
2208 * Replan if needed, and increment plan refcount. If it's a saved
2209 * plan, the refcount must be backed by the CurrentResourceOwner.
2210 */
2211 cplan = GetCachedPlan(plansource, paramLI, plan->saved, _SPI_current->queryEnv);
2212 stmt_list = cplan->stmt_list;
2213
2214 /*
2215 * If we weren't given a specific snapshot to use, and the statement
2216 * list requires a snapshot, set that up.
2217 */
2218 if (snapshot == InvalidSnapshot &&
2219 (list_length(stmt_list) > 1 ||
2220 (list_length(stmt_list) == 1 &&
2221 PlannedStmtRequiresSnapshot(linitial_node(PlannedStmt,
2222 stmt_list)))))
2223 {
2224 /*
2225 * First, ensure there's a Portal-level snapshot. This back-fills
2226 * the snapshot stack in case the previous operation was a COMMIT
2227 * or ROLLBACK inside a procedure or DO block. (We can't put back
2228 * the Portal snapshot any sooner, or we'd break cases like doing
2229 * SET or LOCK just after COMMIT.) It's enough to check once per
2230 * statement list, since COMMIT/ROLLBACK/CALL/DO can't appear
2231 * within a multi-statement list.
2232 */
2233 EnsurePortalSnapshotExists();
2234
2235 /*
2236 * In the default non-read-only case, get a new per-statement-list
2237 * snapshot, replacing any that we pushed in a previous cycle.
2238 * Skip it when doing non-atomic execution, though (we rely
2239 * entirely on the Portal snapshot in that case).
2240 */
2241 if (!read_only && !allow_nonatomic)
2242 {
2243 if (pushed_active_snap)
2244 PopActiveSnapshot();
2245 PushActiveSnapshot(GetTransactionSnapshot());
2246 pushed_active_snap = true;
2247 }
2248 }
2249
2250 foreach(lc2, stmt_list)
2251 {
2252 PlannedStmt *stmt = lfirst_node(PlannedStmt, lc2);
2253 bool canSetTag = stmt->canSetTag;
2254 DestReceiver *dest;
2255
2256 _SPI_current->processed = 0;
2257 _SPI_current->tuptable = NULL;
2258
2259 /* Check for unsupported cases. */
2260 if (stmt->utilityStmt)
2261 {
2262 if (IsA(stmt->utilityStmt, CopyStmt))
2263 {
2264 CopyStmt *cstmt = (CopyStmt *) stmt->utilityStmt;
2265
2266 if (cstmt->filename == NULL)
2267 {
2268 my_res = SPI_ERROR_COPY;
2269 goto fail;
2270 }
2271 }
2272 else if (IsA(stmt->utilityStmt, TransactionStmt))
2273 {
2274 my_res = SPI_ERROR_TRANSACTION;
2275 goto fail;
2276 }
2277 }
2278
2279 if (read_only && !CommandIsReadOnly(stmt))
2280 ereport(ERROR,
2281 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2282 /* translator: %s is a SQL statement name */
2283 errmsg("%s is not allowed in a non-volatile function",
2284 CreateCommandName((Node *) stmt))));
2285
2286 /*
2287 * If not read-only mode, advance the command counter before each
2288 * command and update the snapshot. (But skip it if the snapshot
2289 * isn't under our control.)
2290 */
2291 if (!read_only && pushed_active_snap)
2292 {
2293 CommandCounterIncrement();
2294 UpdateActiveSnapshotCommandId();
2295 }
2296
2297 dest = CreateDestReceiver(canSetTag ? DestSPI : DestNone);
2298
2299 if (stmt->utilityStmt == NULL)
2300 {
2301 QueryDesc *qdesc;
2302 Snapshot snap;
2303
2304 if (ActiveSnapshotSet())
2305 snap = GetActiveSnapshot();
2306 else
2307 snap = InvalidSnapshot;
2308
2309 qdesc = CreateQueryDesc(stmt,
2310 plansource->query_string,
2311 snap, crosscheck_snapshot,
2312 dest,
2313 paramLI, _SPI_current->queryEnv,
2314 0);
2315 res = _SPI_pquery(qdesc, fire_triggers,
2316 canSetTag ? tcount : 0);
2317 FreeQueryDesc(qdesc);
2318 }
2319 else
2320 {
2321 ProcessUtilityContext context;
2322 QueryCompletion qc;
2323
2324 /*
2325 * If the SPI context is atomic, or we were not told to allow
2326 * nonatomic operations, tell ProcessUtility this is an atomic
2327 * execution context.
2328 */
2329 if (_SPI_current->atomic || !allow_nonatomic)
2330 context = PROCESS_UTILITY_QUERY;
2331 else
2332 context = PROCESS_UTILITY_QUERY_NONATOMIC;
2333
2334 InitializeQueryCompletion(&qc);
2335 ProcessUtility(stmt,
2336 plansource->query_string,
2337 context,
2338 paramLI,
2339 _SPI_current->queryEnv,
2340 dest,
2341 &qc);
2342
2343 /* Update "processed" if stmt returned tuples */
2344 if (_SPI_current->tuptable)
2345 _SPI_current->processed = _SPI_current->tuptable->numvals;
2346
2347 res = SPI_OK_UTILITY;
2348
2349 /*
2350 * Some utility statements return a row count, even though the
2351 * tuples are not returned to the caller.
2352 */
2353 if (IsA(stmt->utilityStmt, CreateTableAsStmt))
2354 {
2355 CreateTableAsStmt *ctastmt = (CreateTableAsStmt *) stmt->utilityStmt;
2356
2357 if (qc.commandTag == CMDTAG_SELECT)
2358 _SPI_current->processed = qc.nprocessed;
2359 else
2360 {
2361 /*
2362 * Must be an IF NOT EXISTS that did nothing, or a
2363 * CREATE ... WITH NO DATA.
2364 */
2365 Assert(ctastmt->if_not_exists ||
2366 ctastmt->into->skipData);
2367 _SPI_current->processed = 0;
2368 }
2369
2370 /*
2371 * For historical reasons, if CREATE TABLE AS was spelled
2372 * as SELECT INTO, return a special return code.
2373 */
2374 if (ctastmt->is_select_into)
2375 res = SPI_OK_SELINTO;
2376 }
2377 else if (IsA(stmt->utilityStmt, CopyStmt))
2378 {
2379 Assert(qc.commandTag == CMDTAG_COPY);
2380 _SPI_current->processed = qc.nprocessed;
2381 }
2382 }
2383
2384 /*
2385 * The last canSetTag query sets the status values returned to the
2386 * caller. Be careful to free any tuptables not returned, to
2387 * avoid intra-transaction memory leak.
2388 */
2389 if (canSetTag)
2390 {
2391 my_processed = _SPI_current->processed;
2392 SPI_freetuptable(my_tuptable);
2393 my_tuptable = _SPI_current->tuptable;
2394 my_res = res;
2395 }
2396 else
2397 {
2398 SPI_freetuptable(_SPI_current->tuptable);
2399 _SPI_current->tuptable = NULL;
2400 }
2401 /* we know that the receiver doesn't need a destroy call */
2402 if (res < 0)
2403 {
2404 my_res = res;
2405 goto fail;
2406 }
2407 }
2408
2409 /* Done with this plan, so release refcount */
2410 ReleaseCachedPlan(cplan, plan->saved);
2411 cplan = NULL;
2412
2413 /*
2414 * If not read-only mode, advance the command counter after the last
2415 * command. This ensures that its effects are visible, in case it was
2416 * DDL that would affect the next CachedPlanSource.
2417 */
2418 if (!read_only)
2419 CommandCounterIncrement();
2420 }
2421
2422 fail:
2423
2424 /* Pop the snapshot off the stack if we pushed one */
2425 if (pushed_active_snap)
2426 PopActiveSnapshot();
2427
2428 /* We no longer need the cached plan refcount, if any */
2429 if (cplan)
2430 ReleaseCachedPlan(cplan, plan->saved);
2431
2432 /*
2433 * Pop the error context stack
2434 */
2435 error_context_stack = spierrcontext.previous;
2436
2437 /* Save results for caller */
2438 SPI_processed = my_processed;
2439 SPI_tuptable = my_tuptable;
2440
2441 /* tuptable now is caller's responsibility, not SPI's */
2442 _SPI_current->tuptable = NULL;
2443
2444 /*
2445 * If none of the queries had canSetTag, return SPI_OK_REWRITTEN. Prior to
2446 * 8.4, we used return the last query's result code, but not its auxiliary
2447 * results, but that's confusing.
2448 */
2449 if (my_res == 0)
2450 my_res = SPI_OK_REWRITTEN;
2451
2452 return my_res;
2453 }
2454
2455 /*
2456 * Convert arrays of query parameters to form wanted by planner and executor
2457 */
2458 static ParamListInfo
_SPI_convert_params(int nargs,Oid * argtypes,Datum * Values,const char * Nulls)2459 _SPI_convert_params(int nargs, Oid *argtypes,
2460 Datum *Values, const char *Nulls)
2461 {
2462 ParamListInfo paramLI;
2463
2464 if (nargs > 0)
2465 {
2466 paramLI = makeParamList(nargs);
2467
2468 for (int i = 0; i < nargs; i++)
2469 {
2470 ParamExternData *prm = ¶mLI->params[i];
2471
2472 prm->value = Values[i];
2473 prm->isnull = (Nulls && Nulls[i] == 'n');
2474 prm->pflags = PARAM_FLAG_CONST;
2475 prm->ptype = argtypes[i];
2476 }
2477 }
2478 else
2479 paramLI = NULL;
2480 return paramLI;
2481 }
2482
2483 static int
_SPI_pquery(QueryDesc * queryDesc,bool fire_triggers,uint64 tcount)2484 _SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount)
2485 {
2486 int operation = queryDesc->operation;
2487 int eflags;
2488 int res;
2489
2490 switch (operation)
2491 {
2492 case CMD_SELECT:
2493 if (queryDesc->dest->mydest != DestSPI)
2494 {
2495 /* Don't return SPI_OK_SELECT if we're discarding result */
2496 res = SPI_OK_UTILITY;
2497 }
2498 else
2499 res = SPI_OK_SELECT;
2500 break;
2501 case CMD_INSERT:
2502 if (queryDesc->plannedstmt->hasReturning)
2503 res = SPI_OK_INSERT_RETURNING;
2504 else
2505 res = SPI_OK_INSERT;
2506 break;
2507 case CMD_DELETE:
2508 if (queryDesc->plannedstmt->hasReturning)
2509 res = SPI_OK_DELETE_RETURNING;
2510 else
2511 res = SPI_OK_DELETE;
2512 break;
2513 case CMD_UPDATE:
2514 if (queryDesc->plannedstmt->hasReturning)
2515 res = SPI_OK_UPDATE_RETURNING;
2516 else
2517 res = SPI_OK_UPDATE;
2518 break;
2519 default:
2520 return SPI_ERROR_OPUNKNOWN;
2521 }
2522
2523 #ifdef SPI_EXECUTOR_STATS
2524 if (ShowExecutorStats)
2525 ResetUsage();
2526 #endif
2527
2528 /* Select execution options */
2529 if (fire_triggers)
2530 eflags = 0; /* default run-to-completion flags */
2531 else
2532 eflags = EXEC_FLAG_SKIP_TRIGGERS;
2533
2534 ExecutorStart(queryDesc, eflags);
2535
2536 ExecutorRun(queryDesc, ForwardScanDirection, tcount, true);
2537
2538 _SPI_current->processed = queryDesc->estate->es_processed;
2539
2540 if ((res == SPI_OK_SELECT || queryDesc->plannedstmt->hasReturning) &&
2541 queryDesc->dest->mydest == DestSPI)
2542 {
2543 if (_SPI_checktuples())
2544 elog(ERROR, "consistency check on SPI tuple count failed");
2545 }
2546
2547 ExecutorFinish(queryDesc);
2548 ExecutorEnd(queryDesc);
2549 /* FreeQueryDesc is done by the caller */
2550
2551 #ifdef SPI_EXECUTOR_STATS
2552 if (ShowExecutorStats)
2553 ShowUsage("SPI EXECUTOR STATS");
2554 #endif
2555
2556 return res;
2557 }
2558
2559 /*
2560 * _SPI_error_callback
2561 *
2562 * Add context information when a query invoked via SPI fails
2563 */
2564 static void
_SPI_error_callback(void * arg)2565 _SPI_error_callback(void *arg)
2566 {
2567 const char *query = (const char *) arg;
2568 int syntaxerrposition;
2569
2570 if (query == NULL) /* in case arg wasn't set yet */
2571 return;
2572
2573 /*
2574 * If there is a syntax error position, convert to internal syntax error;
2575 * otherwise treat the query as an item of context stack
2576 */
2577 syntaxerrposition = geterrposition();
2578 if (syntaxerrposition > 0)
2579 {
2580 errposition(0);
2581 internalerrposition(syntaxerrposition);
2582 internalerrquery(query);
2583 }
2584 else
2585 errcontext("SQL statement \"%s\"", query);
2586 }
2587
2588 /*
2589 * _SPI_cursor_operation()
2590 *
2591 * Do a FETCH or MOVE in a cursor
2592 */
2593 static void
_SPI_cursor_operation(Portal portal,FetchDirection direction,long count,DestReceiver * dest)2594 _SPI_cursor_operation(Portal portal, FetchDirection direction, long count,
2595 DestReceiver *dest)
2596 {
2597 uint64 nfetched;
2598
2599 /* Check that the portal is valid */
2600 if (!PortalIsValid(portal))
2601 elog(ERROR, "invalid portal in SPI cursor operation");
2602
2603 /* Push the SPI stack */
2604 if (_SPI_begin_call(true) < 0)
2605 elog(ERROR, "SPI cursor operation called while not connected");
2606
2607 /* Reset the SPI result (note we deliberately don't touch lastoid) */
2608 SPI_processed = 0;
2609 SPI_tuptable = NULL;
2610 _SPI_current->processed = 0;
2611 _SPI_current->tuptable = NULL;
2612
2613 /* Run the cursor */
2614 nfetched = PortalRunFetch(portal,
2615 direction,
2616 count,
2617 dest);
2618
2619 /*
2620 * Think not to combine this store with the preceding function call. If
2621 * the portal contains calls to functions that use SPI, then _SPI_stack is
2622 * likely to move around while the portal runs. When control returns,
2623 * _SPI_current will point to the correct stack entry... but the pointer
2624 * may be different than it was beforehand. So we must be sure to re-fetch
2625 * the pointer after the function call completes.
2626 */
2627 _SPI_current->processed = nfetched;
2628
2629 if (dest->mydest == DestSPI && _SPI_checktuples())
2630 elog(ERROR, "consistency check on SPI tuple count failed");
2631
2632 /* Put the result into place for access by caller */
2633 SPI_processed = _SPI_current->processed;
2634 SPI_tuptable = _SPI_current->tuptable;
2635
2636 /* tuptable now is caller's responsibility, not SPI's */
2637 _SPI_current->tuptable = NULL;
2638
2639 /* Pop the SPI stack */
2640 _SPI_end_call(true);
2641 }
2642
2643
2644 static MemoryContext
_SPI_execmem(void)2645 _SPI_execmem(void)
2646 {
2647 return MemoryContextSwitchTo(_SPI_current->execCxt);
2648 }
2649
2650 static MemoryContext
_SPI_procmem(void)2651 _SPI_procmem(void)
2652 {
2653 return MemoryContextSwitchTo(_SPI_current->procCxt);
2654 }
2655
2656 /*
2657 * _SPI_begin_call: begin a SPI operation within a connected procedure
2658 *
2659 * use_exec is true if we intend to make use of the procedure's execCxt
2660 * during this SPI operation. We'll switch into that context, and arrange
2661 * for it to be cleaned up at _SPI_end_call or if an error occurs.
2662 */
2663 static int
_SPI_begin_call(bool use_exec)2664 _SPI_begin_call(bool use_exec)
2665 {
2666 if (_SPI_current == NULL)
2667 return SPI_ERROR_UNCONNECTED;
2668
2669 if (use_exec)
2670 {
2671 /* remember when the Executor operation started */
2672 _SPI_current->execSubid = GetCurrentSubTransactionId();
2673 /* switch to the Executor memory context */
2674 _SPI_execmem();
2675 }
2676
2677 return 0;
2678 }
2679
2680 /*
2681 * _SPI_end_call: end a SPI operation within a connected procedure
2682 *
2683 * use_exec must be the same as in the previous _SPI_begin_call
2684 *
2685 * Note: this currently has no failure return cases, so callers don't check
2686 */
2687 static int
_SPI_end_call(bool use_exec)2688 _SPI_end_call(bool use_exec)
2689 {
2690 if (use_exec)
2691 {
2692 /* switch to the procedure memory context */
2693 _SPI_procmem();
2694 /* mark Executor context no longer in use */
2695 _SPI_current->execSubid = InvalidSubTransactionId;
2696 /* and free Executor memory */
2697 MemoryContextResetAndDeleteChildren(_SPI_current->execCxt);
2698 }
2699
2700 return 0;
2701 }
2702
2703 static bool
_SPI_checktuples(void)2704 _SPI_checktuples(void)
2705 {
2706 uint64 processed = _SPI_current->processed;
2707 SPITupleTable *tuptable = _SPI_current->tuptable;
2708 bool failed = false;
2709
2710 if (tuptable == NULL) /* spi_dest_startup was not called */
2711 failed = true;
2712 else if (processed != tuptable->numvals)
2713 failed = true;
2714
2715 return failed;
2716 }
2717
2718 /*
2719 * Convert a "temporary" SPIPlan into an "unsaved" plan.
2720 *
2721 * The passed _SPI_plan struct is on the stack, and all its subsidiary data
2722 * is in or under the current SPI executor context. Copy the plan into the
2723 * SPI procedure context so it will survive _SPI_end_call(). To minimize
2724 * data copying, this destructively modifies the input plan, by taking the
2725 * plancache entries away from it and reparenting them to the new SPIPlan.
2726 */
2727 static SPIPlanPtr
_SPI_make_plan_non_temp(SPIPlanPtr plan)2728 _SPI_make_plan_non_temp(SPIPlanPtr plan)
2729 {
2730 SPIPlanPtr newplan;
2731 MemoryContext parentcxt = _SPI_current->procCxt;
2732 MemoryContext plancxt;
2733 MemoryContext oldcxt;
2734 ListCell *lc;
2735
2736 /* Assert the input is a temporary SPIPlan */
2737 Assert(plan->magic == _SPI_PLAN_MAGIC);
2738 Assert(plan->plancxt == NULL);
2739 /* One-shot plans can't be saved */
2740 Assert(!plan->oneshot);
2741
2742 /*
2743 * Create a memory context for the plan, underneath the procedure context.
2744 * We don't expect the plan to be very large.
2745 */
2746 plancxt = AllocSetContextCreate(parentcxt,
2747 "SPI Plan",
2748 ALLOCSET_SMALL_SIZES);
2749 oldcxt = MemoryContextSwitchTo(plancxt);
2750
2751 /* Copy the _SPI_plan struct and subsidiary data into the new context */
2752 newplan = (SPIPlanPtr) palloc0(sizeof(_SPI_plan));
2753 newplan->magic = _SPI_PLAN_MAGIC;
2754 newplan->plancxt = plancxt;
2755 newplan->cursor_options = plan->cursor_options;
2756 newplan->nargs = plan->nargs;
2757 if (plan->nargs > 0)
2758 {
2759 newplan->argtypes = (Oid *) palloc(plan->nargs * sizeof(Oid));
2760 memcpy(newplan->argtypes, plan->argtypes, plan->nargs * sizeof(Oid));
2761 }
2762 else
2763 newplan->argtypes = NULL;
2764 newplan->parserSetup = plan->parserSetup;
2765 newplan->parserSetupArg = plan->parserSetupArg;
2766
2767 /*
2768 * Reparent all the CachedPlanSources into the procedure context. In
2769 * theory this could fail partway through due to the pallocs, but we don't
2770 * care too much since both the procedure context and the executor context
2771 * would go away on error.
2772 */
2773 foreach(lc, plan->plancache_list)
2774 {
2775 CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc);
2776
2777 CachedPlanSetParentContext(plansource, parentcxt);
2778
2779 /* Build new list, with list cells in plancxt */
2780 newplan->plancache_list = lappend(newplan->plancache_list, plansource);
2781 }
2782
2783 MemoryContextSwitchTo(oldcxt);
2784
2785 /* For safety, unlink the CachedPlanSources from the temporary plan */
2786 plan->plancache_list = NIL;
2787
2788 return newplan;
2789 }
2790
2791 /*
2792 * Make a "saved" copy of the given plan.
2793 */
2794 static SPIPlanPtr
_SPI_save_plan(SPIPlanPtr plan)2795 _SPI_save_plan(SPIPlanPtr plan)
2796 {
2797 SPIPlanPtr newplan;
2798 MemoryContext plancxt;
2799 MemoryContext oldcxt;
2800 ListCell *lc;
2801
2802 /* One-shot plans can't be saved */
2803 Assert(!plan->oneshot);
2804
2805 /*
2806 * Create a memory context for the plan. We don't expect the plan to be
2807 * very large, so use smaller-than-default alloc parameters. It's a
2808 * transient context until we finish copying everything.
2809 */
2810 plancxt = AllocSetContextCreate(CurrentMemoryContext,
2811 "SPI Plan",
2812 ALLOCSET_SMALL_SIZES);
2813 oldcxt = MemoryContextSwitchTo(plancxt);
2814
2815 /* Copy the SPI plan into its own context */
2816 newplan = (SPIPlanPtr) palloc0(sizeof(_SPI_plan));
2817 newplan->magic = _SPI_PLAN_MAGIC;
2818 newplan->plancxt = plancxt;
2819 newplan->cursor_options = plan->cursor_options;
2820 newplan->nargs = plan->nargs;
2821 if (plan->nargs > 0)
2822 {
2823 newplan->argtypes = (Oid *) palloc(plan->nargs * sizeof(Oid));
2824 memcpy(newplan->argtypes, plan->argtypes, plan->nargs * sizeof(Oid));
2825 }
2826 else
2827 newplan->argtypes = NULL;
2828 newplan->parserSetup = plan->parserSetup;
2829 newplan->parserSetupArg = plan->parserSetupArg;
2830
2831 /* Copy all the plancache entries */
2832 foreach(lc, plan->plancache_list)
2833 {
2834 CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc);
2835 CachedPlanSource *newsource;
2836
2837 newsource = CopyCachedPlan(plansource);
2838 newplan->plancache_list = lappend(newplan->plancache_list, newsource);
2839 }
2840
2841 MemoryContextSwitchTo(oldcxt);
2842
2843 /*
2844 * Mark it saved, reparent it under CacheMemoryContext, and mark all the
2845 * component CachedPlanSources as saved. This sequence cannot fail
2846 * partway through, so there's no risk of long-term memory leakage.
2847 */
2848 newplan->saved = true;
2849 MemoryContextSetParent(newplan->plancxt, CacheMemoryContext);
2850
2851 foreach(lc, newplan->plancache_list)
2852 {
2853 CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc);
2854
2855 SaveCachedPlan(plansource);
2856 }
2857
2858 return newplan;
2859 }
2860
2861 /*
2862 * Internal lookup of ephemeral named relation by name.
2863 */
2864 static EphemeralNamedRelation
_SPI_find_ENR_by_name(const char * name)2865 _SPI_find_ENR_by_name(const char *name)
2866 {
2867 /* internal static function; any error is bug in SPI itself */
2868 Assert(name != NULL);
2869
2870 /* fast exit if no tuplestores have been added */
2871 if (_SPI_current->queryEnv == NULL)
2872 return NULL;
2873
2874 return get_ENR(_SPI_current->queryEnv, name);
2875 }
2876
2877 /*
2878 * Register an ephemeral named relation for use by the planner and executor on
2879 * subsequent calls using this SPI connection.
2880 */
2881 int
SPI_register_relation(EphemeralNamedRelation enr)2882 SPI_register_relation(EphemeralNamedRelation enr)
2883 {
2884 EphemeralNamedRelation match;
2885 int res;
2886
2887 if (enr == NULL || enr->md.name == NULL)
2888 return SPI_ERROR_ARGUMENT;
2889
2890 res = _SPI_begin_call(false); /* keep current memory context */
2891 if (res < 0)
2892 return res;
2893
2894 match = _SPI_find_ENR_by_name(enr->md.name);
2895 if (match)
2896 res = SPI_ERROR_REL_DUPLICATE;
2897 else
2898 {
2899 if (_SPI_current->queryEnv == NULL)
2900 _SPI_current->queryEnv = create_queryEnv();
2901
2902 register_ENR(_SPI_current->queryEnv, enr);
2903 res = SPI_OK_REL_REGISTER;
2904 }
2905
2906 _SPI_end_call(false);
2907
2908 return res;
2909 }
2910
2911 /*
2912 * Unregister an ephemeral named relation by name. This will probably be a
2913 * rarely used function, since SPI_finish will clear it automatically.
2914 */
2915 int
SPI_unregister_relation(const char * name)2916 SPI_unregister_relation(const char *name)
2917 {
2918 EphemeralNamedRelation match;
2919 int res;
2920
2921 if (name == NULL)
2922 return SPI_ERROR_ARGUMENT;
2923
2924 res = _SPI_begin_call(false); /* keep current memory context */
2925 if (res < 0)
2926 return res;
2927
2928 match = _SPI_find_ENR_by_name(name);
2929 if (match)
2930 {
2931 unregister_ENR(_SPI_current->queryEnv, match->md.name);
2932 res = SPI_OK_REL_UNREGISTER;
2933 }
2934 else
2935 res = SPI_ERROR_REL_NOT_FOUND;
2936
2937 _SPI_end_call(false);
2938
2939 return res;
2940 }
2941
2942 /*
2943 * Register the transient relations from 'tdata' using this SPI connection.
2944 * This should be called by PL implementations' trigger handlers after
2945 * connecting, in order to make transition tables visible to any queries run
2946 * in this connection.
2947 */
2948 int
SPI_register_trigger_data(TriggerData * tdata)2949 SPI_register_trigger_data(TriggerData *tdata)
2950 {
2951 if (tdata == NULL)
2952 return SPI_ERROR_ARGUMENT;
2953
2954 if (tdata->tg_newtable)
2955 {
2956 EphemeralNamedRelation enr =
2957 palloc(sizeof(EphemeralNamedRelationData));
2958 int rc;
2959
2960 enr->md.name = tdata->tg_trigger->tgnewtable;
2961 enr->md.reliddesc = tdata->tg_relation->rd_id;
2962 enr->md.tupdesc = NULL;
2963 enr->md.enrtype = ENR_NAMED_TUPLESTORE;
2964 enr->md.enrtuples = tuplestore_tuple_count(tdata->tg_newtable);
2965 enr->reldata = tdata->tg_newtable;
2966 rc = SPI_register_relation(enr);
2967 if (rc != SPI_OK_REL_REGISTER)
2968 return rc;
2969 }
2970
2971 if (tdata->tg_oldtable)
2972 {
2973 EphemeralNamedRelation enr =
2974 palloc(sizeof(EphemeralNamedRelationData));
2975 int rc;
2976
2977 enr->md.name = tdata->tg_trigger->tgoldtable;
2978 enr->md.reliddesc = tdata->tg_relation->rd_id;
2979 enr->md.tupdesc = NULL;
2980 enr->md.enrtype = ENR_NAMED_TUPLESTORE;
2981 enr->md.enrtuples = tuplestore_tuple_count(tdata->tg_oldtable);
2982 enr->reldata = tdata->tg_oldtable;
2983 rc = SPI_register_relation(enr);
2984 if (rc != SPI_OK_REL_REGISTER)
2985 return rc;
2986 }
2987
2988 return SPI_OK_TD_REGISTER;
2989 }
2990