1 /*-------------------------------------------------------------------------
2 *
3 * spi.c
4 * Server Programming Interface
5 *
6 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 *
10 * IDENTIFICATION
11 * src/backend/executor/spi.c
12 *
13 *-------------------------------------------------------------------------
14 */
15 #include "postgres.h"
16
17 #include "access/htup_details.h"
18 #include "access/printtup.h"
19 #include "access/sysattr.h"
20 #include "access/xact.h"
21 #include "catalog/heap.h"
22 #include "catalog/pg_type.h"
23 #include "commands/trigger.h"
24 #include "executor/executor.h"
25 #include "executor/spi_priv.h"
26 #include "miscadmin.h"
27 #include "tcop/pquery.h"
28 #include "tcop/utility.h"
29 #include "utils/builtins.h"
30 #include "utils/datum.h"
31 #include "utils/lsyscache.h"
32 #include "utils/memutils.h"
33 #include "utils/rel.h"
34 #include "utils/snapmgr.h"
35 #include "utils/syscache.h"
36 #include "utils/typcache.h"
37
38
39 /*
40 * These global variables are part of the API for various SPI functions
41 * (a horrible API choice, but it's too late now). To reduce the risk of
42 * interference between different SPI callers, we save and restore them
43 * when entering/exiting a SPI nesting level.
44 */
/* Current values of the client-visible API globals (see comment above) */
uint64		SPI_processed = 0;
SPITupleTable *SPI_tuptable = NULL;
int			SPI_result = 0;

/* Stack of active SPI connections, one entry per nesting level */
static _SPI_connection *_SPI_stack = NULL;
static _SPI_connection *_SPI_current = NULL;	/* topmost stack entry */
static int	_SPI_stack_depth = 0;	/* allocated size of _SPI_stack */
static int	_SPI_connected = -1;	/* current stack index */

/* Cursor support */
static Portal SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
									   ParamListInfo paramLI, bool read_only);

/* Plan preparation: cached and one-shot variants */
static void _SPI_prepare_plan(const char *src, SPIPlanPtr plan);

static void _SPI_prepare_oneshot_plan(const char *src, SPIPlanPtr plan);

/* Plan execution */
static int	_SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
							  Snapshot snapshot, Snapshot crosscheck_snapshot,
							  bool read_only, bool fire_triggers, uint64 tcount);

static ParamListInfo _SPI_convert_params(int nargs, Oid *argtypes,
										 Datum *Values, const char *Nulls);

static int	_SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount);

static void _SPI_error_callback(void *arg);

static void _SPI_cursor_operation(Portal portal,
								  FetchDirection direction, long count,
								  DestReceiver *dest);

/* Copying/saving plans into longer-lived memory contexts */
static SPIPlanPtr _SPI_make_plan_non_temp(SPIPlanPtr plan);
static SPIPlanPtr _SPI_save_plan(SPIPlanPtr plan);

/* Per-call bookkeeping and small utilities */
static int	_SPI_begin_call(bool use_exec);
static int	_SPI_end_call(bool use_exec);
static MemoryContext _SPI_execmem(void);
static MemoryContext _SPI_procmem(void);
static bool _SPI_checktuples(void);
85
86 /* =================== interface functions =================== */
87
int
SPI_connect(void)
{
	/* Plain connect: same as SPI_connect_ext() with no option flags set. */
	return SPI_connect_ext(0);
}
93
/*
 * SPI_connect_ext -- open a new SPI nesting level.
 *
 * options is a bitmask; SPI_OPT_NONATOMIC requests a nonatomic (procedure)
 * connection in which transaction control via SPI_commit/SPI_rollback is
 * permitted.  Pushes a new entry onto the SPI connection stack, saves the
 * caller-visible API globals, creates procedure and executor memory
 * contexts, and switches into the procedure context.  Always returns
 * SPI_OK_CONNECT (failures are reported via elog/ereport).
 */
int
SPI_connect_ext(int options)
{
	int			newdepth;

	/* Enlarge stack if necessary */
	if (_SPI_stack == NULL)
	{
		/* first use in this backend: sanity-check and allocate the stack */
		if (_SPI_connected != -1 || _SPI_stack_depth != 0)
			elog(ERROR, "SPI stack corrupted");
		newdepth = 16;
		_SPI_stack = (_SPI_connection *)
			MemoryContextAlloc(TopMemoryContext,
							   newdepth * sizeof(_SPI_connection));
		_SPI_stack_depth = newdepth;
	}
	else
	{
		if (_SPI_stack_depth <= 0 || _SPI_stack_depth <= _SPI_connected)
			elog(ERROR, "SPI stack corrupted");
		if (_SPI_stack_depth == _SPI_connected + 1)
		{
			/* stack full: double its size */
			newdepth = _SPI_stack_depth * 2;
			_SPI_stack = (_SPI_connection *)
				repalloc(_SPI_stack,
						 newdepth * sizeof(_SPI_connection));
			_SPI_stack_depth = newdepth;
		}
	}

	/* Enter new stack level */
	_SPI_connected++;
	Assert(_SPI_connected >= 0 && _SPI_connected < _SPI_stack_depth);

	_SPI_current = &(_SPI_stack[_SPI_connected]);
	_SPI_current->processed = 0;
	_SPI_current->tuptable = NULL;
	_SPI_current->execSubid = InvalidSubTransactionId;
	slist_init(&_SPI_current->tuptables);
	_SPI_current->procCxt = NULL;	/* in case we fail to create 'em */
	_SPI_current->execCxt = NULL;
	_SPI_current->connectSubid = GetCurrentSubTransactionId();
	_SPI_current->queryEnv = NULL;
	_SPI_current->atomic = (options & SPI_OPT_NONATOMIC ? false : true);
	_SPI_current->internal_xact = false;
	/* save caller's API globals so SPI_finish() can restore them */
	_SPI_current->outer_processed = SPI_processed;
	_SPI_current->outer_tuptable = SPI_tuptable;
	_SPI_current->outer_result = SPI_result;

	/*
	 * Create memory contexts for this procedure
	 *
	 * In atomic contexts (the normal case), we use TopTransactionContext,
	 * otherwise PortalContext, so that it lives across transaction
	 * boundaries.
	 *
	 * XXX It could be better to use PortalContext as the parent context in
	 * all cases, but we may not be inside a portal (consider deferred-trigger
	 * execution).  Perhaps CurTransactionContext could be an option?  For now
	 * it doesn't matter because we clean up explicitly in AtEOSubXact_SPI().
	 */
	_SPI_current->procCxt = AllocSetContextCreate(_SPI_current->atomic ? TopTransactionContext : PortalContext,
												  "SPI Proc",
												  ALLOCSET_DEFAULT_SIZES);
	_SPI_current->execCxt = AllocSetContextCreate(_SPI_current->atomic ? TopTransactionContext : _SPI_current->procCxt,
												  "SPI Exec",
												  ALLOCSET_DEFAULT_SIZES);
	/* ... and switch to procedure's context */
	_SPI_current->savedcxt = MemoryContextSwitchTo(_SPI_current->procCxt);

	/*
	 * Reset API global variables so that current caller cannot accidentally
	 * depend on state of an outer caller.
	 */
	SPI_processed = 0;
	SPI_tuptable = NULL;
	SPI_result = 0;

	return SPI_OK_CONNECT;
}
174
/*
 * SPI_finish -- close the current SPI nesting level.
 *
 * Undoes the matching SPI_connect(): releases the procedure and executor
 * memory contexts (which frees any tuple tables built at this level),
 * restores the caller's saved API globals, and pops the connection stack.
 * Returns SPI_OK_FINISH, or a negative error code if not connected.
 */
int
SPI_finish(void)
{
	int			res;

	res = _SPI_begin_call(false);	/* just check we're connected */
	if (res < 0)
		return res;

	/* Restore memory context as it was before procedure call */
	MemoryContextSwitchTo(_SPI_current->savedcxt);

	/* Release memory used in procedure call (including tuptables) */
	MemoryContextDelete(_SPI_current->execCxt);
	_SPI_current->execCxt = NULL;
	MemoryContextDelete(_SPI_current->procCxt);
	_SPI_current->procCxt = NULL;

	/*
	 * Restore outer API variables, especially SPI_tuptable which is probably
	 * pointing at a just-deleted tuptable
	 */
	SPI_processed = _SPI_current->outer_processed;
	SPI_tuptable = _SPI_current->outer_tuptable;
	SPI_result = _SPI_current->outer_result;

	/* Exit stack level */
	_SPI_connected--;
	if (_SPI_connected < 0)
		_SPI_current = NULL;
	else
		_SPI_current = &(_SPI_stack[_SPI_connected]);

	return SPI_OK_FINISH;
}
210
211 void
SPI_start_transaction(void)212 SPI_start_transaction(void)
213 {
214 MemoryContext oldcontext = CurrentMemoryContext;
215
216 StartTransactionCommand();
217 MemoryContextSwitchTo(oldcontext);
218 }
219
/*
 * _SPI_commit -- shared guts of SPI_commit() and SPI_commit_and_chain().
 *
 * Commits the current top-level transaction on behalf of a nonatomic SPI
 * caller.  If chain is true, immediately starts a new transaction that
 * inherits the previous transaction's characteristics.  Errors out if the
 * SPI connection is atomic or a subtransaction is open.
 */
static void
_SPI_commit(bool chain)
{
	MemoryContext oldcontext = CurrentMemoryContext;

	/* transaction control is only legal in a nonatomic (procedure) context */
	if (_SPI_current->atomic)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION),
				 errmsg("invalid transaction termination")));

	/*
	 * This restriction is required by PLs implemented on top of SPI.  They
	 * use subtransactions to establish exception blocks that are supposed to
	 * be rolled back together if there is an error.  Terminating the
	 * top-level transaction in such a block violates that idea.  A future PL
	 * implementation might have different ideas about this, in which case
	 * this restriction would have to be refined or the check possibly be
	 * moved out of SPI into the PLs.
	 */
	if (IsSubTransaction())
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION),
				 errmsg("cannot commit while a subtransaction is active")));

	/*
	 * Hold any pinned portals that any PLs might be using.  We have to do
	 * this before changing transaction state, since this will run
	 * user-defined code that might throw an error.
	 */
	HoldPinnedPortals();

	/* Start the actual commit */
	_SPI_current->internal_xact = true;

	/* Release snapshots associated with portals */
	ForgetPortalSnapshots();

	/* remember transaction characteristics before they're destroyed */
	if (chain)
		SaveTransactionCharacteristics();

	CommitTransactionCommand();

	if (chain)
	{
		StartTransactionCommand();
		RestoreTransactionCharacteristics();
	}

	/* come back to the memory context we were called in */
	MemoryContextSwitchTo(oldcontext);

	_SPI_current->internal_xact = false;
}
272
273 void
SPI_commit(void)274 SPI_commit(void)
275 {
276 _SPI_commit(false);
277 }
278
279 void
SPI_commit_and_chain(void)280 SPI_commit_and_chain(void)
281 {
282 _SPI_commit(true);
283 }
284
/*
 * _SPI_rollback -- shared guts of SPI_rollback() and SPI_rollback_and_chain().
 *
 * Aborts the current top-level transaction on behalf of a nonatomic SPI
 * caller.  If chain is true, immediately starts a new transaction that
 * inherits the previous transaction's characteristics.  Errors out if the
 * SPI connection is atomic or a subtransaction is open.
 */
static void
_SPI_rollback(bool chain)
{
	MemoryContext oldcontext = CurrentMemoryContext;

	/* transaction control is only legal in a nonatomic (procedure) context */
	if (_SPI_current->atomic)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION),
				 errmsg("invalid transaction termination")));

	/* see under SPI_commit() */
	if (IsSubTransaction())
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION),
				 errmsg("cannot roll back while a subtransaction is active")));

	/*
	 * Hold any pinned portals that any PLs might be using.  We have to do
	 * this before changing transaction state, since this will run
	 * user-defined code that might throw an error, and in any case couldn't
	 * be run in an already-aborted transaction.
	 */
	HoldPinnedPortals();

	/* Start the actual rollback */
	_SPI_current->internal_xact = true;

	/* Release snapshots associated with portals */
	ForgetPortalSnapshots();

	/* remember transaction characteristics before they're destroyed */
	if (chain)
		SaveTransactionCharacteristics();

	AbortCurrentTransaction();

	if (chain)
	{
		StartTransactionCommand();
		RestoreTransactionCharacteristics();
	}

	/* come back to the memory context we were called in */
	MemoryContextSwitchTo(oldcontext);

	_SPI_current->internal_xact = false;
}
330
331 void
SPI_rollback(void)332 SPI_rollback(void)
333 {
334 _SPI_rollback(false);
335 }
336
337 void
SPI_rollback_and_chain(void)338 SPI_rollback_and_chain(void)
339 {
340 _SPI_rollback(true);
341 }
342
343 /*
344 * Clean up SPI state. Called on transaction end (of non-SPI-internal
345 * transactions) and when returning to the main loop on error.
346 */
void
SPICleanup(void)
{
	/* Throw away the whole connection stack ... */
	_SPI_current = NULL;
	_SPI_connected = -1;
	/* Reset API global variables, too */
	SPI_processed = 0;
	SPI_tuptable = NULL;
	SPI_result = 0;
}
357
358 /*
359 * Clean up SPI state at transaction commit or abort.
360 */
/*
 * AtEOXact_SPI -- clean up SPI state at main-transaction commit or abort.
 *
 * isCommit is true at commit.  A non-empty SPI stack at commit indicates
 * the procedure forgot an SPI_finish() call, so warn about it; on abort
 * any leftover entries are expected and are silently discarded.
 */
void
AtEOXact_SPI(bool isCommit)
{
	/* Do nothing if the transaction end was initiated by SPI. */
	if (_SPI_current && _SPI_current->internal_xact)
		return;

	if (isCommit && _SPI_connected != -1)
		ereport(WARNING,
				(errcode(ERRCODE_WARNING),
				 errmsg("transaction left non-empty SPI stack"),
				 errhint("Check for missing \"SPI_finish\" calls.")));

	SPICleanup();
}
376
377 /*
378 * Clean up SPI state at subtransaction commit or abort.
379 *
380 * During commit, there shouldn't be any unclosed entries remaining from
381 * the current subtransaction; we emit a warning if any are found.
382 */
/*
 * AtEOSubXact_SPI -- clean up SPI state at subtransaction commit or abort.
 *
 * Pops any SPI connections that were opened inside the subtransaction
 * identified by mySubid (warning at commit, since those represent missing
 * SPI_finish() calls).  If an SPI connection from an outer level is still
 * open and we are aborting, discards executor state and tuple tables that
 * were created within the ended subtransaction to avoid memory leaks.
 */
void
AtEOSubXact_SPI(bool isCommit, SubTransactionId mySubid)
{
	bool		found = false;

	/* Pop stack entries that belong to the ending subtransaction */
	while (_SPI_connected >= 0)
	{
		_SPI_connection *connection = &(_SPI_stack[_SPI_connected]);

		if (connection->connectSubid != mySubid)
			break;				/* couldn't be any underneath it either */

		if (connection->internal_xact)
			break;

		found = true;

		/*
		 * Release procedure memory explicitly (see note in SPI_connect)
		 */
		if (connection->execCxt)
		{
			MemoryContextDelete(connection->execCxt);
			connection->execCxt = NULL;
		}
		if (connection->procCxt)
		{
			MemoryContextDelete(connection->procCxt);
			connection->procCxt = NULL;
		}

		/*
		 * Restore outer global variables and pop the stack entry.  Unlike
		 * SPI_finish(), we don't risk switching to memory contexts that might
		 * be already gone.
		 */
		SPI_processed = connection->outer_processed;
		SPI_tuptable = connection->outer_tuptable;
		SPI_result = connection->outer_result;

		_SPI_connected--;
		if (_SPI_connected < 0)
			_SPI_current = NULL;
		else
			_SPI_current = &(_SPI_stack[_SPI_connected]);
	}

	if (found && isCommit)
		ereport(WARNING,
				(errcode(ERRCODE_WARNING),
				 errmsg("subtransaction left non-empty SPI stack"),
				 errhint("Check for missing \"SPI_finish\" calls.")));

	/*
	 * If we are aborting a subtransaction and there is an open SPI context
	 * surrounding the subxact, clean up to prevent memory leakage.
	 */
	if (_SPI_current && !isCommit)
	{
		slist_mutable_iter siter;

		/*
		 * Throw away executor state if current executor operation was started
		 * within current subxact (essentially, force a _SPI_end_call(true)).
		 */
		if (_SPI_current->execSubid >= mySubid)
		{
			_SPI_current->execSubid = InvalidSubTransactionId;
			MemoryContextResetAndDeleteChildren(_SPI_current->execCxt);
		}

		/* throw away any tuple tables created within current subxact */
		slist_foreach_modify(siter, &_SPI_current->tuptables)
		{
			SPITupleTable *tuptable;

			tuptable = slist_container(SPITupleTable, next, siter.cur);
			if (tuptable->subid >= mySubid)
			{
				/*
				 * If we used SPI_freetuptable() here, its internal search of
				 * the tuptables list would make this operation O(N^2).
				 * Instead, just free the tuptable manually.  This should
				 * match what SPI_freetuptable() does.
				 */
				slist_delete_current(&siter);
				if (tuptable == _SPI_current->tuptable)
					_SPI_current->tuptable = NULL;
				if (tuptable == SPI_tuptable)
					SPI_tuptable = NULL;
				MemoryContextDelete(tuptable->tuptabcxt);
			}
		}
	}
}
478
479 /*
480 * Are we executing inside a procedure (that is, a nonatomic SPI context)?
481 */
482 bool
SPI_inside_nonatomic_context(void)483 SPI_inside_nonatomic_context(void)
484 {
485 if (_SPI_current == NULL)
486 return false; /* not in any SPI context at all */
487 if (_SPI_current->atomic)
488 return false; /* it's atomic (ie function not procedure) */
489 return true;
490 }
491
492
493 /* Parse, plan, and execute a query string */
/*
 * SPI_execute -- parse, plan, and execute a query string.
 *
 * The query is planned as a one-shot (uncached) plan.  read_only requests
 * read-only execution; tcount limits the number of tuples processed
 * (0 = no limit).  Returns a SPI_OK_* code, or a negative SPI_ERROR_* code.
 */
int
SPI_execute(const char *src, bool read_only, long tcount)
{
	_SPI_plan	plan;
	int			res;

	if (src == NULL || tcount < 0)
		return SPI_ERROR_ARGUMENT;

	res = _SPI_begin_call(true);
	if (res < 0)
		return res;

	/* set up a throwaway one-shot plan on the stack */
	memset(&plan, 0, sizeof(_SPI_plan));
	plan.magic = _SPI_PLAN_MAGIC;
	plan.cursor_options = CURSOR_OPT_PARALLEL_OK;

	_SPI_prepare_oneshot_plan(src, &plan);

	res = _SPI_execute_plan(&plan, NULL,
							InvalidSnapshot, InvalidSnapshot,
							read_only, true, tcount);

	_SPI_end_call(true);
	return res;
}
520
521 /* Obsolete version of SPI_execute */
522 int
SPI_exec(const char * src,long tcount)523 SPI_exec(const char *src, long tcount)
524 {
525 return SPI_execute(src, false, tcount);
526 }
527
528 /* Execute a previously prepared plan */
/*
 * SPI_execute_plan -- execute a previously prepared plan.
 *
 * Values/Nulls supply the plan's parameters (Nulls[i] == 'n' marks a null;
 * Nulls may be NULL if no parameter is null).  read_only and tcount are as
 * for SPI_execute().  Returns a SPI_OK_* or negative SPI_ERROR_* code.
 */
int
SPI_execute_plan(SPIPlanPtr plan, Datum *Values, const char *Nulls,
				 bool read_only, long tcount)
{
	int			res;

	if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC || tcount < 0)
		return SPI_ERROR_ARGUMENT;

	if (plan->nargs > 0 && Values == NULL)
		return SPI_ERROR_PARAM;

	res = _SPI_begin_call(true);
	if (res < 0)
		return res;

	res = _SPI_execute_plan(plan,
							_SPI_convert_params(plan->nargs, plan->argtypes,
												Values, Nulls),
							InvalidSnapshot, InvalidSnapshot,
							read_only, true, tcount);

	_SPI_end_call(true);
	return res;
}
554
555 /* Obsolete version of SPI_execute_plan */
556 int
SPI_execp(SPIPlanPtr plan,Datum * Values,const char * Nulls,long tcount)557 SPI_execp(SPIPlanPtr plan, Datum *Values, const char *Nulls, long tcount)
558 {
559 return SPI_execute_plan(plan, Values, Nulls, false, tcount);
560 }
561
562 /* Execute a previously prepared plan */
/*
 * SPI_execute_plan_with_paramlist -- execute a prepared plan, passing
 * parameter values via a ParamListInfo rather than Values/Nulls arrays.
 * read_only and tcount are as for SPI_execute().
 */
int
SPI_execute_plan_with_paramlist(SPIPlanPtr plan, ParamListInfo params,
								bool read_only, long tcount)
{
	int			res;

	if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC || tcount < 0)
		return SPI_ERROR_ARGUMENT;

	res = _SPI_begin_call(true);
	if (res < 0)
		return res;

	res = _SPI_execute_plan(plan, params,
							InvalidSnapshot, InvalidSnapshot,
							read_only, true, tcount);

	_SPI_end_call(true);
	return res;
}
583
584 /*
585 * SPI_execute_snapshot -- identical to SPI_execute_plan, except that we allow
586 * the caller to specify exactly which snapshots to use, which will be
587 * registered here. Also, the caller may specify that AFTER triggers should be
588 * queued as part of the outer query rather than being fired immediately at the
589 * end of the command.
590 *
591 * This is currently not documented in spi.sgml because it is only intended
592 * for use by RI triggers.
593 *
594 * Passing snapshot == InvalidSnapshot will select the normal behavior of
595 * fetching a new snapshot for each query.
596 */
int
SPI_execute_snapshot(SPIPlanPtr plan,
					 Datum *Values, const char *Nulls,
					 Snapshot snapshot, Snapshot crosscheck_snapshot,
					 bool read_only, bool fire_triggers, long tcount)
{
	int			res;

	/* validate the plan and its parameter array (see header comment above) */
	if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC || tcount < 0)
		return SPI_ERROR_ARGUMENT;

	if (plan->nargs > 0 && Values == NULL)
		return SPI_ERROR_PARAM;

	res = _SPI_begin_call(true);
	if (res < 0)
		return res;

	res = _SPI_execute_plan(plan,
							_SPI_convert_params(plan->nargs, plan->argtypes,
												Values, Nulls),
							snapshot, crosscheck_snapshot,
							read_only, fire_triggers, tcount);

	_SPI_end_call(true);
	return res;
}
624
625 /*
626 * SPI_execute_with_args -- plan and execute a query with supplied arguments
627 *
628 * This is functionally equivalent to SPI_prepare followed by
629 * SPI_execute_plan.
630 */
/*
 * SPI_execute_with_args -- plan and execute a parameterized query in one
 * call, equivalent to SPI_prepare + SPI_execute_plan but using a one-shot
 * (uncached) plan.  argtypes gives the parameter type OIDs; Values/Nulls
 * supply the values (Nulls[i] == 'n' marks a null).
 */
int
SPI_execute_with_args(const char *src,
					  int nargs, Oid *argtypes,
					  Datum *Values, const char *Nulls,
					  bool read_only, long tcount)
{
	int			res;
	_SPI_plan	plan;
	ParamListInfo paramLI;

	if (src == NULL || nargs < 0 || tcount < 0)
		return SPI_ERROR_ARGUMENT;

	if (nargs > 0 && (argtypes == NULL || Values == NULL))
		return SPI_ERROR_PARAM;

	res = _SPI_begin_call(true);
	if (res < 0)
		return res;

	/* set up a throwaway one-shot plan on the stack */
	memset(&plan, 0, sizeof(_SPI_plan));
	plan.magic = _SPI_PLAN_MAGIC;
	plan.cursor_options = CURSOR_OPT_PARALLEL_OK;
	plan.nargs = nargs;
	plan.argtypes = argtypes;
	plan.parserSetup = NULL;
	plan.parserSetupArg = NULL;

	/* convert parameters before planning, while in the executor context */
	paramLI = _SPI_convert_params(nargs, argtypes,
								  Values, Nulls);

	_SPI_prepare_oneshot_plan(src, &plan);

	res = _SPI_execute_plan(&plan, paramLI,
							InvalidSnapshot, InvalidSnapshot,
							read_only, true, tcount);

	_SPI_end_call(true);
	return res;
}
671
672 SPIPlanPtr
SPI_prepare(const char * src,int nargs,Oid * argtypes)673 SPI_prepare(const char *src, int nargs, Oid *argtypes)
674 {
675 return SPI_prepare_cursor(src, nargs, argtypes, 0);
676 }
677
/*
 * SPI_prepare_cursor -- prepare a plan, specifying cursor options.
 *
 * Parses and analyzes the query with nargs parameters of the given type
 * OIDs, then copies the resulting plan into the procedure's memory context
 * so it survives until SPI_finish().  On failure returns NULL with
 * SPI_result set to the error code.
 */
SPIPlanPtr
SPI_prepare_cursor(const char *src, int nargs, Oid *argtypes,
				   int cursorOptions)
{
	_SPI_plan	plan;
	SPIPlanPtr	result;

	if (src == NULL || nargs < 0 || (nargs > 0 && argtypes == NULL))
	{
		SPI_result = SPI_ERROR_ARGUMENT;
		return NULL;
	}

	SPI_result = _SPI_begin_call(true);
	if (SPI_result < 0)
		return NULL;

	/* build a temporary plan struct on the stack */
	memset(&plan, 0, sizeof(_SPI_plan));
	plan.magic = _SPI_PLAN_MAGIC;
	plan.cursor_options = cursorOptions;
	plan.nargs = nargs;
	plan.argtypes = argtypes;
	plan.parserSetup = NULL;
	plan.parserSetupArg = NULL;

	_SPI_prepare_plan(src, &plan);

	/* copy plan to procedure context */
	result = _SPI_make_plan_non_temp(&plan);

	_SPI_end_call(true);

	return result;
}
712
/*
 * SPI_prepare_params -- prepare a plan whose parameters are resolved by a
 * caller-supplied parser setup hook (parserSetup/parserSetupArg) instead
 * of an explicit argument-type array.  On failure returns NULL with
 * SPI_result set to the error code.
 */
SPIPlanPtr
SPI_prepare_params(const char *src,
				   ParserSetupHook parserSetup,
				   void *parserSetupArg,
				   int cursorOptions)
{
	_SPI_plan	plan;
	SPIPlanPtr	result;

	if (src == NULL)
	{
		SPI_result = SPI_ERROR_ARGUMENT;
		return NULL;
	}

	SPI_result = _SPI_begin_call(true);
	if (SPI_result < 0)
		return NULL;

	/* build a temporary plan struct on the stack */
	memset(&plan, 0, sizeof(_SPI_plan));
	plan.magic = _SPI_PLAN_MAGIC;
	plan.cursor_options = cursorOptions;
	plan.nargs = 0;
	plan.argtypes = NULL;
	plan.parserSetup = parserSetup;
	plan.parserSetupArg = parserSetupArg;

	_SPI_prepare_plan(src, &plan);

	/* copy plan to procedure context */
	result = _SPI_make_plan_non_temp(&plan);

	_SPI_end_call(true);

	return result;
}
749
/*
 * SPI_keepplan -- promote a prepared plan to "saved" status so it survives
 * past SPI_finish().  Not usable on already-saved or one-shot plans.
 * Returns 0 on success, SPI_ERROR_ARGUMENT otherwise.
 */
int
SPI_keepplan(SPIPlanPtr plan)
{
	ListCell   *lc;

	if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC ||
		plan->saved || plan->oneshot)
		return SPI_ERROR_ARGUMENT;

	/*
	 * Mark it saved, reparent it under CacheMemoryContext, and mark all the
	 * component CachedPlanSources as saved.  This sequence cannot fail
	 * partway through, so there's no risk of long-term memory leakage.
	 */
	plan->saved = true;
	MemoryContextSetParent(plan->plancxt, CacheMemoryContext);

	foreach(lc, plan->plancache_list)
	{
		CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc);

		SaveCachedPlan(plansource);
	}

	return 0;
}
776
/*
 * SPI_saveplan -- make a long-lived copy of a prepared plan (older API;
 * new code should prefer SPI_keepplan, which avoids the copy).  Returns
 * the copy, or NULL with SPI_result set on failure.
 */
SPIPlanPtr
SPI_saveplan(SPIPlanPtr plan)
{
	SPIPlanPtr	newplan;

	if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC)
	{
		SPI_result = SPI_ERROR_ARGUMENT;
		return NULL;
	}

	SPI_result = _SPI_begin_call(false);	/* don't change context */
	if (SPI_result < 0)
		return NULL;

	newplan = _SPI_save_plan(plan);

	SPI_result = _SPI_end_call(false);

	return newplan;
}
798
/*
 * SPI_freeplan -- release a prepared plan and all memory it owns.
 * Returns 0 on success, SPI_ERROR_ARGUMENT for an invalid plan pointer.
 */
int
SPI_freeplan(SPIPlanPtr plan)
{
	ListCell   *lc;

	if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC)
		return SPI_ERROR_ARGUMENT;

	/* Release the plancache entries */
	foreach(lc, plan->plancache_list)
	{
		CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc);

		DropCachedPlan(plansource);
	}

	/* Now get rid of the _SPI_plan and subsidiary data in its plancxt */
	MemoryContextDelete(plan->plancxt);

	return 0;
}
820
821 HeapTuple
SPI_copytuple(HeapTuple tuple)822 SPI_copytuple(HeapTuple tuple)
823 {
824 MemoryContext oldcxt;
825 HeapTuple ctuple;
826
827 if (tuple == NULL)
828 {
829 SPI_result = SPI_ERROR_ARGUMENT;
830 return NULL;
831 }
832
833 if (_SPI_current == NULL)
834 {
835 SPI_result = SPI_ERROR_UNCONNECTED;
836 return NULL;
837 }
838
839 oldcxt = MemoryContextSwitchTo(_SPI_current->savedcxt);
840
841 ctuple = heap_copytuple(tuple);
842
843 MemoryContextSwitchTo(oldcxt);
844
845 return ctuple;
846 }
847
/*
 * SPI_returntuple -- convert a tuple into a composite Datum suitable for
 * returning from a function, allocated in the upper executor context.
 * Returns NULL with SPI_result set on bad arguments or when unconnected.
 */
HeapTupleHeader
SPI_returntuple(HeapTuple tuple, TupleDesc tupdesc)
{
	MemoryContext oldcxt;
	HeapTupleHeader dtup;

	if (tuple == NULL || tupdesc == NULL)
	{
		SPI_result = SPI_ERROR_ARGUMENT;
		return NULL;
	}

	if (_SPI_current == NULL)
	{
		SPI_result = SPI_ERROR_UNCONNECTED;
		return NULL;
	}

	/* For RECORD results, make sure a typmod has been assigned */
	if (tupdesc->tdtypeid == RECORDOID &&
		tupdesc->tdtypmod < 0)
		assign_record_type_typmod(tupdesc);

	/* build the datum in the caller's (upper executor) context */
	oldcxt = MemoryContextSwitchTo(_SPI_current->savedcxt);

	dtup = DatumGetHeapTupleHeader(heap_copy_tuple_as_datum(tuple, tupdesc));

	MemoryContextSwitchTo(oldcxt);

	return dtup;
}
879
/*
 * SPI_modifytuple -- build a new tuple replacing selected columns.
 *
 * Forms a copy of "tuple" (described by rel's tuple descriptor) in the
 * upper executor context, substituting Values[i] (null if Nulls[i] == 'n')
 * for the 1-based column number attnum[i], for i in 0..natts-1.  The copy
 * carries over the original's t_ctid, t_self, and t_tableOid.  Returns
 * NULL with SPI_result set on bad arguments, when unconnected, or
 * (SPI_ERROR_NOATTRIBUTE) when any attnum is out of range.
 */
HeapTuple
SPI_modifytuple(Relation rel, HeapTuple tuple, int natts, int *attnum,
				Datum *Values, const char *Nulls)
{
	MemoryContext oldcxt;
	HeapTuple	mtuple;
	int			numberOfAttributes;
	Datum	   *v;
	bool	   *n;
	int			i;

	if (rel == NULL || tuple == NULL || natts < 0 || attnum == NULL || Values == NULL)
	{
		SPI_result = SPI_ERROR_ARGUMENT;
		return NULL;
	}

	if (_SPI_current == NULL)
	{
		SPI_result = SPI_ERROR_UNCONNECTED;
		return NULL;
	}

	/* build the new tuple in the caller's (upper executor) context */
	oldcxt = MemoryContextSwitchTo(_SPI_current->savedcxt);

	SPI_result = 0;

	numberOfAttributes = rel->rd_att->natts;
	v = (Datum *) palloc(numberOfAttributes * sizeof(Datum));
	n = (bool *) palloc(numberOfAttributes * sizeof(bool));

	/* fetch old values and nulls */
	heap_deform_tuple(tuple, rel->rd_att, v, n);

	/* replace values and nulls */
	for (i = 0; i < natts; i++)
	{
		if (attnum[i] <= 0 || attnum[i] > numberOfAttributes)
			break;				/* out-of-range column number */
		v[attnum[i] - 1] = Values[i];
		n[attnum[i] - 1] = (Nulls && Nulls[i] == 'n') ? true : false;
	}

	if (i == natts)				/* no errors in *attnum */
	{
		mtuple = heap_form_tuple(rel->rd_att, v, n);

		/*
		 * copy the identification info of the old tuple: t_ctid, t_self, and
		 * OID (if any)
		 */
		mtuple->t_data->t_ctid = tuple->t_data->t_ctid;
		mtuple->t_self = tuple->t_self;
		mtuple->t_tableOid = tuple->t_tableOid;
	}
	else
	{
		mtuple = NULL;
		SPI_result = SPI_ERROR_NOATTRIBUTE;
	}

	pfree(v);
	pfree(n);

	MemoryContextSwitchTo(oldcxt);

	return mtuple;
}
948
949 int
SPI_fnumber(TupleDesc tupdesc,const char * fname)950 SPI_fnumber(TupleDesc tupdesc, const char *fname)
951 {
952 int res;
953 const FormData_pg_attribute *sysatt;
954
955 for (res = 0; res < tupdesc->natts; res++)
956 {
957 Form_pg_attribute attr = TupleDescAttr(tupdesc, res);
958
959 if (namestrcmp(&attr->attname, fname) == 0 &&
960 !attr->attisdropped)
961 return res + 1;
962 }
963
964 sysatt = SystemAttributeByName(fname);
965 if (sysatt != NULL)
966 return sysatt->attnum;
967
968 /* SPI_ERROR_NOATTRIBUTE is different from all sys column numbers */
969 return SPI_ERROR_NOATTRIBUTE;
970 }
971
972 char *
SPI_fname(TupleDesc tupdesc,int fnumber)973 SPI_fname(TupleDesc tupdesc, int fnumber)
974 {
975 const FormData_pg_attribute *att;
976
977 SPI_result = 0;
978
979 if (fnumber > tupdesc->natts || fnumber == 0 ||
980 fnumber <= FirstLowInvalidHeapAttributeNumber)
981 {
982 SPI_result = SPI_ERROR_NOATTRIBUTE;
983 return NULL;
984 }
985
986 if (fnumber > 0)
987 att = TupleDescAttr(tupdesc, fnumber - 1);
988 else
989 att = SystemAttributeDefinition(fnumber);
990
991 return pstrdup(NameStr(att->attname));
992 }
993
/*
 * SPI_getvalue -- fetch a column of a tuple as a palloc'd output string,
 * using the type's output function.  Returns NULL for a NULL column value,
 * or NULL with SPI_result = SPI_ERROR_NOATTRIBUTE for a bad column number.
 */
char *
SPI_getvalue(HeapTuple tuple, TupleDesc tupdesc, int fnumber)
{
	Datum		val;
	bool		isnull;
	Oid			typoid,
				foutoid;
	bool		typisvarlena;

	SPI_result = 0;

	if (fnumber > tupdesc->natts || fnumber == 0 ||
		fnumber <= FirstLowInvalidHeapAttributeNumber)
	{
		SPI_result = SPI_ERROR_NOATTRIBUTE;
		return NULL;
	}

	val = heap_getattr(tuple, fnumber, tupdesc, &isnull);
	if (isnull)
		return NULL;

	/* positive numbers are user columns; negative ones are system columns */
	if (fnumber > 0)
		typoid = TupleDescAttr(tupdesc, fnumber - 1)->atttypid;
	else
		typoid = (SystemAttributeDefinition(fnumber))->atttypid;

	getTypeOutputInfo(typoid, &foutoid, &typisvarlena);

	return OidOutputFunctionCall(foutoid, val);
}
1025
1026 Datum
SPI_getbinval(HeapTuple tuple,TupleDesc tupdesc,int fnumber,bool * isnull)1027 SPI_getbinval(HeapTuple tuple, TupleDesc tupdesc, int fnumber, bool *isnull)
1028 {
1029 SPI_result = 0;
1030
1031 if (fnumber > tupdesc->natts || fnumber == 0 ||
1032 fnumber <= FirstLowInvalidHeapAttributeNumber)
1033 {
1034 SPI_result = SPI_ERROR_NOATTRIBUTE;
1035 *isnull = true;
1036 return (Datum) NULL;
1037 }
1038
1039 return heap_getattr(tuple, fnumber, tupdesc, isnull);
1040 }
1041
/*
 * SPI_gettype -- return a palloc'd copy of the type name of a column.
 * Returns NULL with SPI_result set for a bad column number
 * (SPI_ERROR_NOATTRIBUTE) or an unknown type OID (SPI_ERROR_TYPUNKNOWN).
 */
char *
SPI_gettype(TupleDesc tupdesc, int fnumber)
{
	Oid			typoid;
	HeapTuple	typeTuple;
	char	   *result;

	SPI_result = 0;

	if (fnumber > tupdesc->natts || fnumber == 0 ||
		fnumber <= FirstLowInvalidHeapAttributeNumber)
	{
		SPI_result = SPI_ERROR_NOATTRIBUTE;
		return NULL;
	}

	/* positive numbers are user columns; negative ones are system columns */
	if (fnumber > 0)
		typoid = TupleDescAttr(tupdesc, fnumber - 1)->atttypid;
	else
		typoid = (SystemAttributeDefinition(fnumber))->atttypid;

	typeTuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typoid));

	if (!HeapTupleIsValid(typeTuple))
	{
		SPI_result = SPI_ERROR_TYPUNKNOWN;
		return NULL;
	}

	result = pstrdup(NameStr(((Form_pg_type) GETSTRUCT(typeTuple))->typname));
	ReleaseSysCache(typeTuple);
	return result;
}
1075
1076 /*
1077 * Get the data type OID for a column.
1078 *
1079 * There's nothing similar for typmod and typcollation. The rare consumers
1080 * thereof should inspect the TupleDesc directly.
1081 */
1082 Oid
SPI_gettypeid(TupleDesc tupdesc,int fnumber)1083 SPI_gettypeid(TupleDesc tupdesc, int fnumber)
1084 {
1085 SPI_result = 0;
1086
1087 if (fnumber > tupdesc->natts || fnumber == 0 ||
1088 fnumber <= FirstLowInvalidHeapAttributeNumber)
1089 {
1090 SPI_result = SPI_ERROR_NOATTRIBUTE;
1091 return InvalidOid;
1092 }
1093
1094 if (fnumber > 0)
1095 return TupleDescAttr(tupdesc, fnumber - 1)->atttypid;
1096 else
1097 return (SystemAttributeDefinition(fnumber))->atttypid;
1098 }
1099
1100 char *
SPI_getrelname(Relation rel)1101 SPI_getrelname(Relation rel)
1102 {
1103 return pstrdup(RelationGetRelationName(rel));
1104 }
1105
1106 char *
SPI_getnspname(Relation rel)1107 SPI_getnspname(Relation rel)
1108 {
1109 return get_namespace_name(RelationGetNamespace(rel));
1110 }
1111
1112 void *
SPI_palloc(Size size)1113 SPI_palloc(Size size)
1114 {
1115 if (_SPI_current == NULL)
1116 elog(ERROR, "SPI_palloc called while not connected to SPI");
1117
1118 return MemoryContextAlloc(_SPI_current->savedcxt, size);
1119 }
1120
1121 void *
SPI_repalloc(void * pointer,Size size)1122 SPI_repalloc(void *pointer, Size size)
1123 {
1124 /* No longer need to worry which context chunk was in... */
1125 return repalloc(pointer, size);
1126 }
1127
void
SPI_pfree(void *pointer)
{
	/* pfree finds the owning context itself; nothing more to do here */
	pfree(pointer);
}
1134
1135 Datum
SPI_datumTransfer(Datum value,bool typByVal,int typLen)1136 SPI_datumTransfer(Datum value, bool typByVal, int typLen)
1137 {
1138 MemoryContext oldcxt;
1139 Datum result;
1140
1141 if (_SPI_current == NULL)
1142 elog(ERROR, "SPI_datumTransfer called while not connected to SPI");
1143
1144 oldcxt = MemoryContextSwitchTo(_SPI_current->savedcxt);
1145
1146 result = datumTransfer(value, typByVal, typLen);
1147
1148 MemoryContextSwitchTo(oldcxt);
1149
1150 return result;
1151 }
1152
1153 void
SPI_freetuple(HeapTuple tuple)1154 SPI_freetuple(HeapTuple tuple)
1155 {
1156 /* No longer need to worry which context tuple was in... */
1157 heap_freetuple(tuple);
1158 }
1159
1160 void
SPI_freetuptable(SPITupleTable * tuptable)1161 SPI_freetuptable(SPITupleTable *tuptable)
1162 {
1163 bool found = false;
1164
1165 /* ignore call if NULL pointer */
1166 if (tuptable == NULL)
1167 return;
1168
1169 /*
1170 * Search only the topmost SPI context for a matching tuple table.
1171 */
1172 if (_SPI_current != NULL)
1173 {
1174 slist_mutable_iter siter;
1175
1176 /* find tuptable in active list, then remove it */
1177 slist_foreach_modify(siter, &_SPI_current->tuptables)
1178 {
1179 SPITupleTable *tt;
1180
1181 tt = slist_container(SPITupleTable, next, siter.cur);
1182 if (tt == tuptable)
1183 {
1184 slist_delete_current(&siter);
1185 found = true;
1186 break;
1187 }
1188 }
1189 }
1190
1191 /*
1192 * Refuse the deletion if we didn't find it in the topmost SPI context.
1193 * This is primarily a guard against double deletion, but might prevent
1194 * other errors as well. Since the worst consequence of not deleting a
1195 * tuptable would be a transient memory leak, this is just a WARNING.
1196 */
1197 if (!found)
1198 {
1199 elog(WARNING, "attempt to delete invalid SPITupleTable %p", tuptable);
1200 return;
1201 }
1202
1203 /* for safety, reset global variables that might point at tuptable */
1204 if (tuptable == _SPI_current->tuptable)
1205 _SPI_current->tuptable = NULL;
1206 if (tuptable == SPI_tuptable)
1207 SPI_tuptable = NULL;
1208
1209 /* release all memory belonging to tuptable */
1210 MemoryContextDelete(tuptable->tuptabcxt);
1211 }
1212
1213
1214 /*
1215 * SPI_cursor_open()
1216 *
1217 * Open a prepared SPI plan as a portal
1218 */
1219 Portal
SPI_cursor_open(const char * name,SPIPlanPtr plan,Datum * Values,const char * Nulls,bool read_only)1220 SPI_cursor_open(const char *name, SPIPlanPtr plan,
1221 Datum *Values, const char *Nulls,
1222 bool read_only)
1223 {
1224 Portal portal;
1225 ParamListInfo paramLI;
1226
1227 /* build transient ParamListInfo in caller's context */
1228 paramLI = _SPI_convert_params(plan->nargs, plan->argtypes,
1229 Values, Nulls);
1230
1231 portal = SPI_cursor_open_internal(name, plan, paramLI, read_only);
1232
1233 /* done with the transient ParamListInfo */
1234 if (paramLI)
1235 pfree(paramLI);
1236
1237 return portal;
1238 }
1239
1240
1241 /*
1242 * SPI_cursor_open_with_args()
1243 *
1244 * Parse and plan a query and open it as a portal.
1245 */
1246 Portal
SPI_cursor_open_with_args(const char * name,const char * src,int nargs,Oid * argtypes,Datum * Values,const char * Nulls,bool read_only,int cursorOptions)1247 SPI_cursor_open_with_args(const char *name,
1248 const char *src,
1249 int nargs, Oid *argtypes,
1250 Datum *Values, const char *Nulls,
1251 bool read_only, int cursorOptions)
1252 {
1253 Portal result;
1254 _SPI_plan plan;
1255 ParamListInfo paramLI;
1256
1257 if (src == NULL || nargs < 0)
1258 elog(ERROR, "SPI_cursor_open_with_args called with invalid arguments");
1259
1260 if (nargs > 0 && (argtypes == NULL || Values == NULL))
1261 elog(ERROR, "SPI_cursor_open_with_args called with missing parameters");
1262
1263 SPI_result = _SPI_begin_call(true);
1264 if (SPI_result < 0)
1265 elog(ERROR, "SPI_cursor_open_with_args called while not connected");
1266
1267 memset(&plan, 0, sizeof(_SPI_plan));
1268 plan.magic = _SPI_PLAN_MAGIC;
1269 plan.cursor_options = cursorOptions;
1270 plan.nargs = nargs;
1271 plan.argtypes = argtypes;
1272 plan.parserSetup = NULL;
1273 plan.parserSetupArg = NULL;
1274
1275 /* build transient ParamListInfo in executor context */
1276 paramLI = _SPI_convert_params(nargs, argtypes,
1277 Values, Nulls);
1278
1279 _SPI_prepare_plan(src, &plan);
1280
1281 /* We needn't copy the plan; SPI_cursor_open_internal will do so */
1282
1283 result = SPI_cursor_open_internal(name, &plan, paramLI, read_only);
1284
1285 /* And clean up */
1286 _SPI_end_call(true);
1287
1288 return result;
1289 }
1290
1291
1292 /*
1293 * SPI_cursor_open_with_paramlist()
1294 *
1295 * Same as SPI_cursor_open except that parameters (if any) are passed
1296 * as a ParamListInfo, which supports dynamic parameter set determination
1297 */
1298 Portal
SPI_cursor_open_with_paramlist(const char * name,SPIPlanPtr plan,ParamListInfo params,bool read_only)1299 SPI_cursor_open_with_paramlist(const char *name, SPIPlanPtr plan,
1300 ParamListInfo params, bool read_only)
1301 {
1302 return SPI_cursor_open_internal(name, plan, params, read_only);
1303 }
1304
1305
1306 /*
1307 * SPI_cursor_open_internal()
1308 *
1309 * Common code for SPI_cursor_open variants
1310 */
static Portal
SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
						 ParamListInfo paramLI, bool read_only)
{
	CachedPlanSource *plansource;
	CachedPlan *cplan;
	List	   *stmt_list;
	char	   *query_string;
	Snapshot	snapshot;
	MemoryContext oldcontext;
	Portal		portal;
	ErrorContextCallback spierrcontext;

	/*
	 * Check that the plan is something the Portal code will special-case as
	 * returning one tupleset.
	 */
	if (!SPI_is_cursor_plan(plan))
	{
		/* try to give a good error message */
		if (list_length(plan->plancache_list) != 1)
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_CURSOR_DEFINITION),
					 errmsg("cannot open multi-query plan as cursor")));
		plansource = (CachedPlanSource *) linitial(plan->plancache_list);
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_CURSOR_DEFINITION),
		/* translator: %s is name of a SQL command, eg INSERT */
				 errmsg("cannot open %s query as cursor",
						plansource->commandTag)));
	}

	Assert(list_length(plan->plancache_list) == 1);
	plansource = (CachedPlanSource *) linitial(plan->plancache_list);

	/* Push the SPI stack */
	if (_SPI_begin_call(true) < 0)
		elog(ERROR, "SPI_cursor_open called while not connected");

	/* Reset SPI result (note we deliberately don't touch lastoid) */
	SPI_processed = 0;
	SPI_tuptable = NULL;
	_SPI_current->processed = 0;
	_SPI_current->tuptable = NULL;

	/* Create the portal */
	if (name == NULL || name[0] == '\0')
	{
		/* Use a random nonconflicting name */
		portal = CreateNewPortal();
	}
	else
	{
		/* In this path, error if portal of same name already exists */
		portal = CreatePortal(name, false, false);
	}

	/* Copy the plan's query string into the portal */
	query_string = MemoryContextStrdup(portal->portalContext,
									   plansource->query_string);

	/*
	 * Setup error traceback support for ereport(), in case GetCachedPlan
	 * throws an error.
	 */
	spierrcontext.callback = _SPI_error_callback;
	spierrcontext.arg = unconstify(char *, plansource->query_string);
	spierrcontext.previous = error_context_stack;
	error_context_stack = &spierrcontext;

	/*
	 * Note: for a saved plan, we mustn't have any failure occur between
	 * GetCachedPlan and PortalDefineQuery; that would result in leaking our
	 * plancache refcount.
	 */

	/* Replan if needed, and increment plan refcount for portal */
	cplan = GetCachedPlan(plansource, paramLI, false, _SPI_current->queryEnv);
	stmt_list = cplan->stmt_list;

	if (!plan->saved)
	{
		/*
		 * We don't want the portal to depend on an unsaved CachedPlanSource,
		 * so must copy the plan into the portal's context.  An error here
		 * will result in leaking our refcount on the plan, but it doesn't
		 * matter because the plan is unsaved and hence transient anyway.
		 */
		oldcontext = MemoryContextSwitchTo(portal->portalContext);
		stmt_list = copyObject(stmt_list);
		MemoryContextSwitchTo(oldcontext);
		ReleaseCachedPlan(cplan, false);
		cplan = NULL;			/* portal shouldn't depend on cplan */
	}

	/*
	 * Set up the portal.  Passing cplan (possibly NULL, per above) transfers
	 * our plancache refcount, if any, to the portal.
	 */
	PortalDefineQuery(portal,
					  NULL,		/* no statement name */
					  query_string,
					  plansource->commandTag,
					  stmt_list,
					  cplan);

	/*
	 * Set up options for portal.  Default SCROLL type is chosen the same way
	 * as PerformCursorOpen does it.
	 */
	portal->cursorOptions = plan->cursor_options;
	if (!(portal->cursorOptions & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL)))
	{
		/* Allow backward fetch only when the executor can support it */
		if (list_length(stmt_list) == 1 &&
			linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY &&
			linitial_node(PlannedStmt, stmt_list)->rowMarks == NIL &&
			ExecSupportsBackwardScan(linitial_node(PlannedStmt, stmt_list)->planTree))
			portal->cursorOptions |= CURSOR_OPT_SCROLL;
		else
			portal->cursorOptions |= CURSOR_OPT_NO_SCROLL;
	}

	/*
	 * Disallow SCROLL with SELECT FOR UPDATE.  This is not redundant with the
	 * check in transformDeclareCursorStmt because the cursor options might
	 * not have come through there.
	 */
	if (portal->cursorOptions & CURSOR_OPT_SCROLL)
	{
		if (list_length(stmt_list) == 1 &&
			linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY &&
			linitial_node(PlannedStmt, stmt_list)->rowMarks != NIL)
			ereport(ERROR,
					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					 errmsg("DECLARE SCROLL CURSOR ... FOR UPDATE/SHARE is not supported"),
					 errdetail("Scrollable cursors must be READ ONLY.")));
	}

	/* Make current query environment available to portal at execution time. */
	portal->queryEnv = _SPI_current->queryEnv;

	/*
	 * If told to be read-only, or in parallel mode, verify that this query is
	 * in fact read-only.  This can't be done earlier because we need to look
	 * at the finished, planned queries.  (In particular, we don't want to do
	 * it between GetCachedPlan and PortalDefineQuery, because throwing an
	 * error between those steps would result in leaking our plancache
	 * refcount.)
	 */
	if (read_only || IsInParallelMode())
	{
		ListCell   *lc;

		foreach(lc, stmt_list)
		{
			PlannedStmt *pstmt = lfirst_node(PlannedStmt, lc);

			if (!CommandIsReadOnly(pstmt))
			{
				if (read_only)
					ereport(ERROR,
							(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					/* translator: %s is a SQL statement name */
							 errmsg("%s is not allowed in a non-volatile function",
									CreateCommandTag((Node *) pstmt))));
				else
					PreventCommandIfParallelMode(CreateCommandTag((Node *) pstmt));
			}
		}
	}

	/* Set up the snapshot to use. */
	if (read_only)
		snapshot = GetActiveSnapshot();
	else
	{
		/* Non-read-only queries must see effects of prior commands */
		CommandCounterIncrement();
		snapshot = GetTransactionSnapshot();
	}

	/*
	 * If the plan has parameters, copy them into the portal.  Note that this
	 * must be done after revalidating the plan, because in dynamic parameter
	 * cases the set of parameters could have changed during re-parsing.
	 */
	if (paramLI)
	{
		oldcontext = MemoryContextSwitchTo(portal->portalContext);
		paramLI = copyParamList(paramLI);
		MemoryContextSwitchTo(oldcontext);
	}

	/*
	 * Start portal execution.
	 */
	PortalStart(portal, paramLI, 0, snapshot);

	Assert(portal->strategy != PORTAL_MULTI_QUERY);

	/* Pop the error context stack */
	error_context_stack = spierrcontext.previous;

	/* Pop the SPI stack */
	_SPI_end_call(true);

	/* Return the created portal */
	return portal;
}
1518
1519
1520 /*
1521 * SPI_cursor_find()
1522 *
1523 * Find the portal of an existing open cursor
1524 */
1525 Portal
SPI_cursor_find(const char * name)1526 SPI_cursor_find(const char *name)
1527 {
1528 return GetPortalByName(name);
1529 }
1530
1531
1532 /*
1533 * SPI_cursor_fetch()
1534 *
1535 * Fetch rows in a cursor
1536 */
1537 void
SPI_cursor_fetch(Portal portal,bool forward,long count)1538 SPI_cursor_fetch(Portal portal, bool forward, long count)
1539 {
1540 _SPI_cursor_operation(portal,
1541 forward ? FETCH_FORWARD : FETCH_BACKWARD, count,
1542 CreateDestReceiver(DestSPI));
1543 /* we know that the DestSPI receiver doesn't need a destroy call */
1544 }
1545
1546
1547 /*
1548 * SPI_cursor_move()
1549 *
1550 * Move in a cursor
1551 */
1552 void
SPI_cursor_move(Portal portal,bool forward,long count)1553 SPI_cursor_move(Portal portal, bool forward, long count)
1554 {
1555 _SPI_cursor_operation(portal,
1556 forward ? FETCH_FORWARD : FETCH_BACKWARD, count,
1557 None_Receiver);
1558 }
1559
1560
1561 /*
1562 * SPI_scroll_cursor_fetch()
1563 *
1564 * Fetch rows in a scrollable cursor
1565 */
1566 void
SPI_scroll_cursor_fetch(Portal portal,FetchDirection direction,long count)1567 SPI_scroll_cursor_fetch(Portal portal, FetchDirection direction, long count)
1568 {
1569 _SPI_cursor_operation(portal,
1570 direction, count,
1571 CreateDestReceiver(DestSPI));
1572 /* we know that the DestSPI receiver doesn't need a destroy call */
1573 }
1574
1575
1576 /*
1577 * SPI_scroll_cursor_move()
1578 *
1579 * Move in a scrollable cursor
1580 */
1581 void
SPI_scroll_cursor_move(Portal portal,FetchDirection direction,long count)1582 SPI_scroll_cursor_move(Portal portal, FetchDirection direction, long count)
1583 {
1584 _SPI_cursor_operation(portal, direction, count, None_Receiver);
1585 }
1586
1587
1588 /*
1589 * SPI_cursor_close()
1590 *
1591 * Close a cursor
1592 */
1593 void
SPI_cursor_close(Portal portal)1594 SPI_cursor_close(Portal portal)
1595 {
1596 if (!PortalIsValid(portal))
1597 elog(ERROR, "invalid portal in SPI cursor operation");
1598
1599 PortalDrop(portal, false);
1600 }
1601
1602 /*
1603 * Returns the Oid representing the type id for argument at argIndex. First
1604 * parameter is at index zero.
1605 */
1606 Oid
SPI_getargtypeid(SPIPlanPtr plan,int argIndex)1607 SPI_getargtypeid(SPIPlanPtr plan, int argIndex)
1608 {
1609 if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC ||
1610 argIndex < 0 || argIndex >= plan->nargs)
1611 {
1612 SPI_result = SPI_ERROR_ARGUMENT;
1613 return InvalidOid;
1614 }
1615 return plan->argtypes[argIndex];
1616 }
1617
1618 /*
1619 * Returns the number of arguments for the prepared plan.
1620 */
1621 int
SPI_getargcount(SPIPlanPtr plan)1622 SPI_getargcount(SPIPlanPtr plan)
1623 {
1624 if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC)
1625 {
1626 SPI_result = SPI_ERROR_ARGUMENT;
1627 return -1;
1628 }
1629 return plan->nargs;
1630 }
1631
1632 /*
1633 * Returns true if the plan contains exactly one command
1634 * and that command returns tuples to the caller (eg, SELECT or
1635 * INSERT ... RETURNING, but not SELECT ... INTO). In essence,
1636 * the result indicates if the command can be used with SPI_cursor_open
1637 *
1638 * Parameters
1639 * plan: A plan previously prepared using SPI_prepare
1640 */
1641 bool
SPI_is_cursor_plan(SPIPlanPtr plan)1642 SPI_is_cursor_plan(SPIPlanPtr plan)
1643 {
1644 CachedPlanSource *plansource;
1645
1646 if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC)
1647 {
1648 SPI_result = SPI_ERROR_ARGUMENT;
1649 return false;
1650 }
1651
1652 if (list_length(plan->plancache_list) != 1)
1653 {
1654 SPI_result = 0;
1655 return false; /* not exactly 1 pre-rewrite command */
1656 }
1657 plansource = (CachedPlanSource *) linitial(plan->plancache_list);
1658
1659 /*
1660 * We used to force revalidation of the cached plan here, but that seems
1661 * unnecessary: invalidation could mean a change in the rowtype of the
1662 * tuples returned by a plan, but not whether it returns tuples at all.
1663 */
1664 SPI_result = 0;
1665
1666 /* Does it return tuples? */
1667 if (plansource->resultDesc)
1668 return true;
1669
1670 return false;
1671 }
1672
1673 /*
1674 * SPI_plan_is_valid --- test whether a SPI plan is currently valid
1675 * (that is, not marked as being in need of revalidation).
1676 *
1677 * See notes for CachedPlanIsValid before using this.
1678 */
1679 bool
SPI_plan_is_valid(SPIPlanPtr plan)1680 SPI_plan_is_valid(SPIPlanPtr plan)
1681 {
1682 ListCell *lc;
1683
1684 Assert(plan->magic == _SPI_PLAN_MAGIC);
1685
1686 foreach(lc, plan->plancache_list)
1687 {
1688 CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc);
1689
1690 if (!CachedPlanIsValid(plansource))
1691 return false;
1692 }
1693 return true;
1694 }
1695
1696 /*
1697 * SPI_result_code_string --- convert any SPI return code to a string
1698 *
1699 * This is often useful in error messages. Most callers will probably
1700 * only pass negative (error-case) codes, but for generality we recognize
1701 * the success codes too.
1702 */
1703 const char *
SPI_result_code_string(int code)1704 SPI_result_code_string(int code)
1705 {
1706 static char buf[64];
1707
1708 switch (code)
1709 {
1710 case SPI_ERROR_CONNECT:
1711 return "SPI_ERROR_CONNECT";
1712 case SPI_ERROR_COPY:
1713 return "SPI_ERROR_COPY";
1714 case SPI_ERROR_OPUNKNOWN:
1715 return "SPI_ERROR_OPUNKNOWN";
1716 case SPI_ERROR_UNCONNECTED:
1717 return "SPI_ERROR_UNCONNECTED";
1718 case SPI_ERROR_ARGUMENT:
1719 return "SPI_ERROR_ARGUMENT";
1720 case SPI_ERROR_PARAM:
1721 return "SPI_ERROR_PARAM";
1722 case SPI_ERROR_TRANSACTION:
1723 return "SPI_ERROR_TRANSACTION";
1724 case SPI_ERROR_NOATTRIBUTE:
1725 return "SPI_ERROR_NOATTRIBUTE";
1726 case SPI_ERROR_NOOUTFUNC:
1727 return "SPI_ERROR_NOOUTFUNC";
1728 case SPI_ERROR_TYPUNKNOWN:
1729 return "SPI_ERROR_TYPUNKNOWN";
1730 case SPI_ERROR_REL_DUPLICATE:
1731 return "SPI_ERROR_REL_DUPLICATE";
1732 case SPI_ERROR_REL_NOT_FOUND:
1733 return "SPI_ERROR_REL_NOT_FOUND";
1734 case SPI_OK_CONNECT:
1735 return "SPI_OK_CONNECT";
1736 case SPI_OK_FINISH:
1737 return "SPI_OK_FINISH";
1738 case SPI_OK_FETCH:
1739 return "SPI_OK_FETCH";
1740 case SPI_OK_UTILITY:
1741 return "SPI_OK_UTILITY";
1742 case SPI_OK_SELECT:
1743 return "SPI_OK_SELECT";
1744 case SPI_OK_SELINTO:
1745 return "SPI_OK_SELINTO";
1746 case SPI_OK_INSERT:
1747 return "SPI_OK_INSERT";
1748 case SPI_OK_DELETE:
1749 return "SPI_OK_DELETE";
1750 case SPI_OK_UPDATE:
1751 return "SPI_OK_UPDATE";
1752 case SPI_OK_CURSOR:
1753 return "SPI_OK_CURSOR";
1754 case SPI_OK_INSERT_RETURNING:
1755 return "SPI_OK_INSERT_RETURNING";
1756 case SPI_OK_DELETE_RETURNING:
1757 return "SPI_OK_DELETE_RETURNING";
1758 case SPI_OK_UPDATE_RETURNING:
1759 return "SPI_OK_UPDATE_RETURNING";
1760 case SPI_OK_REWRITTEN:
1761 return "SPI_OK_REWRITTEN";
1762 case SPI_OK_REL_REGISTER:
1763 return "SPI_OK_REL_REGISTER";
1764 case SPI_OK_REL_UNREGISTER:
1765 return "SPI_OK_REL_UNREGISTER";
1766 }
1767 /* Unrecognized code ... return something useful ... */
1768 sprintf(buf, "Unrecognized SPI code %d", code);
1769 return buf;
1770 }
1771
1772 /*
1773 * SPI_plan_get_plan_sources --- get a SPI plan's underlying list of
1774 * CachedPlanSources.
1775 *
1776 * This is exported so that PL/pgSQL can use it (this beats letting PL/pgSQL
1777 * look directly into the SPIPlan for itself). It's not documented in
1778 * spi.sgml because we'd just as soon not have too many places using this.
1779 */
1780 List *
SPI_plan_get_plan_sources(SPIPlanPtr plan)1781 SPI_plan_get_plan_sources(SPIPlanPtr plan)
1782 {
1783 Assert(plan->magic == _SPI_PLAN_MAGIC);
1784 return plan->plancache_list;
1785 }
1786
1787 /*
1788 * SPI_plan_get_cached_plan --- get a SPI plan's generic CachedPlan,
1789 * if the SPI plan contains exactly one CachedPlanSource. If not,
1790 * return NULL. Caller is responsible for doing ReleaseCachedPlan().
1791 *
1792 * This is exported so that PL/pgSQL can use it (this beats letting PL/pgSQL
1793 * look directly into the SPIPlan for itself). It's not documented in
1794 * spi.sgml because we'd just as soon not have too many places using this.
1795 */
CachedPlan *
SPI_plan_get_cached_plan(SPIPlanPtr plan)
{
	CachedPlanSource *plansource;
	CachedPlan *cplan;
	ErrorContextCallback spierrcontext;

	Assert(plan->magic == _SPI_PLAN_MAGIC);

	/* Can't support one-shot plans here */
	if (plan->oneshot)
		return NULL;

	/* Must have exactly one CachedPlanSource */
	if (list_length(plan->plancache_list) != 1)
		return NULL;
	plansource = (CachedPlanSource *) linitial(plan->plancache_list);

	/* Setup error traceback support for ereport() */
	spierrcontext.callback = _SPI_error_callback;
	spierrcontext.arg = unconstify(char *, plansource->query_string);
	spierrcontext.previous = error_context_stack;
	error_context_stack = &spierrcontext;

	/*
	 * Get the generic plan for the query; passing NULL boundParams forces a
	 * generic (rather than custom) plan.  This also increments the plan's
	 * refcount, which the caller must release via ReleaseCachedPlan().
	 */
	cplan = GetCachedPlan(plansource, NULL, plan->saved,
						  _SPI_current->queryEnv);
	Assert(cplan == plansource->gplan);

	/* Pop the error context stack */
	error_context_stack = spierrcontext.previous;

	return cplan;
}
1830
1831
1832 /* =================== private functions =================== */
1833
1834 /*
1835 * spi_dest_startup
1836 * Initialize to receive tuples from Executor into SPITupleTable
1837 * of current SPI procedure
1838 */
void
spi_dest_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
{
	SPITupleTable *tuptable;
	MemoryContext oldcxt;
	MemoryContext tuptabcxt;

	if (_SPI_current == NULL)
		elog(ERROR, "spi_dest_startup called while not connected to SPI");

	/* A previous result must have been claimed or freed before a new one */
	if (_SPI_current->tuptable != NULL)
		elog(ERROR, "improper call to spi_dest_startup");

	/* We create the tuple table context as a child of procCxt */

	oldcxt = _SPI_procmem();	/* switch to procedure memory context */

	tuptabcxt = AllocSetContextCreate(CurrentMemoryContext,
									  "SPI TupTable",
									  ALLOCSET_DEFAULT_SIZES);
	MemoryContextSwitchTo(tuptabcxt);

	/* palloc0 zeroes all fields, notably vals/tupdesc, before we fill them */
	_SPI_current->tuptable = tuptable = (SPITupleTable *)
		palloc0(sizeof(SPITupleTable));
	tuptable->tuptabcxt = tuptabcxt;
	tuptable->subid = GetCurrentSubTransactionId();

	/*
	 * The tuptable is now valid enough to be freed by AtEOSubXact_SPI, so put
	 * it onto the SPI context's tuptables list.  This will ensure it's not
	 * leaked even in the unlikely event the following few lines fail.
	 */
	slist_push_head(&_SPI_current->tuptables, &tuptable->next);

	/* set up initial allocations */
	tuptable->alloced = tuptable->free = 128;
	tuptable->vals = (HeapTuple *) palloc(tuptable->alloced * sizeof(HeapTuple));
	tuptable->tupdesc = CreateTupleDescCopy(typeinfo);

	MemoryContextSwitchTo(oldcxt);
}
1880
1881 /*
1882 * spi_printtup
1883 * store tuple retrieved by Executor into SPITupleTable
1884 * of current SPI procedure
1885 */
1886 bool
spi_printtup(TupleTableSlot * slot,DestReceiver * self)1887 spi_printtup(TupleTableSlot *slot, DestReceiver *self)
1888 {
1889 SPITupleTable *tuptable;
1890 MemoryContext oldcxt;
1891
1892 if (_SPI_current == NULL)
1893 elog(ERROR, "spi_printtup called while not connected to SPI");
1894
1895 tuptable = _SPI_current->tuptable;
1896 if (tuptable == NULL)
1897 elog(ERROR, "improper call to spi_printtup");
1898
1899 oldcxt = MemoryContextSwitchTo(tuptable->tuptabcxt);
1900
1901 if (tuptable->free == 0)
1902 {
1903 /* Double the size of the pointer array */
1904 tuptable->free = tuptable->alloced;
1905 tuptable->alloced += tuptable->free;
1906 tuptable->vals = (HeapTuple *) repalloc_huge(tuptable->vals,
1907 tuptable->alloced * sizeof(HeapTuple));
1908 }
1909
1910 tuptable->vals[tuptable->alloced - tuptable->free] =
1911 ExecCopySlotHeapTuple(slot);
1912 (tuptable->free)--;
1913
1914 MemoryContextSwitchTo(oldcxt);
1915
1916 return true;
1917 }
1918
1919 /*
1920 * Static functions
1921 */
1922
1923 /*
1924 * Parse and analyze a querystring.
1925 *
1926 * At entry, plan->argtypes and plan->nargs (or alternatively plan->parserSetup
1927 * and plan->parserSetupArg) must be valid, as must plan->cursor_options.
1928 *
1929 * Results are stored into *plan (specifically, plan->plancache_list).
1930 * Note that the result data is all in CurrentMemoryContext or child contexts
1931 * thereof; in practice this means it is in the SPI executor context, and
1932 * what we are creating is a "temporary" SPIPlan. Cruft generated during
1933 * parsing is also left in CurrentMemoryContext.
1934 */
static void
_SPI_prepare_plan(const char *src, SPIPlanPtr plan)
{
	List	   *raw_parsetree_list;
	List	   *plancache_list;
	ListCell   *list_item;
	ErrorContextCallback spierrcontext;

	/*
	 * Setup error traceback support for ereport()
	 */
	spierrcontext.callback = _SPI_error_callback;
	spierrcontext.arg = unconstify(char *, src);
	spierrcontext.previous = error_context_stack;
	error_context_stack = &spierrcontext;

	/*
	 * Parse the request string into a list of raw parse trees.
	 */
	raw_parsetree_list = pg_parse_query(src);

	/*
	 * Do parse analysis and rule rewrite for each raw parsetree, storing the
	 * results into unsaved plancache entries.
	 */
	plancache_list = NIL;

	foreach(list_item, raw_parsetree_list)
	{
		RawStmt    *parsetree = lfirst_node(RawStmt, list_item);
		List	   *stmt_list;
		CachedPlanSource *plansource;

		/*
		 * Create the CachedPlanSource before we do parse analysis, since it
		 * needs to see the unmodified raw parse tree.
		 */
		plansource = CreateCachedPlan(parsetree,
									  src,
									  CreateCommandTag(parsetree->stmt));

		/*
		 * Parameter datatypes are driven by parserSetup hook if provided,
		 * otherwise we use the fixed parameter list.
		 */
		if (plan->parserSetup != NULL)
		{
			/* The hook supplies parameter types; the fixed list must be empty */
			Assert(plan->nargs == 0);
			stmt_list = pg_analyze_and_rewrite_params(parsetree,
													  src,
													  plan->parserSetup,
													  plan->parserSetupArg,
													  _SPI_current->queryEnv);
		}
		else
		{
			stmt_list = pg_analyze_and_rewrite(parsetree,
											   src,
											   plan->argtypes,
											   plan->nargs,
											   _SPI_current->queryEnv);
		}

		/* Finish filling in the CachedPlanSource */
		CompleteCachedPlan(plansource,
						   stmt_list,
						   NULL,
						   plan->argtypes,
						   plan->nargs,
						   plan->parserSetup,
						   plan->parserSetupArg,
						   plan->cursor_options,
						   false);	/* not fixed result */

		plancache_list = lappend(plancache_list, plansource);
	}

	plan->plancache_list = plancache_list;
	plan->oneshot = false;

	/*
	 * Pop the error context stack
	 */
	error_context_stack = spierrcontext.previous;
}
2020
2021 /*
2022 * Parse, but don't analyze, a querystring.
2023 *
2024 * This is a stripped-down version of _SPI_prepare_plan that only does the
2025 * initial raw parsing. It creates "one shot" CachedPlanSources
2026 * that still require parse analysis before execution is possible.
2027 *
2028 * The advantage of using the "one shot" form of CachedPlanSource is that
2029 * we eliminate data copying and invalidation overhead. Postponing parse
2030 * analysis also prevents issues if some of the raw parsetrees are DDL
2031 * commands that affect validity of later parsetrees. Both of these
2032 * attributes are good things for SPI_execute() and similar cases.
2033 *
2034 * Results are stored into *plan (specifically, plan->plancache_list).
2035 * Note that the result data is all in CurrentMemoryContext or child contexts
2036 * thereof; in practice this means it is in the SPI executor context, and
2037 * what we are creating is a "temporary" SPIPlan. Cruft generated during
2038 * parsing is also left in CurrentMemoryContext.
2039 */
2040 static void
_SPI_prepare_oneshot_plan(const char * src,SPIPlanPtr plan)2041 _SPI_prepare_oneshot_plan(const char *src, SPIPlanPtr plan)
2042 {
2043 List *raw_parsetree_list;
2044 List *plancache_list;
2045 ListCell *list_item;
2046 ErrorContextCallback spierrcontext;
2047
2048 /*
2049 * Setup error traceback support for ereport()
2050 */
2051 spierrcontext.callback = _SPI_error_callback;
2052 spierrcontext.arg = unconstify(char *, src);
2053 spierrcontext.previous = error_context_stack;
2054 error_context_stack = &spierrcontext;
2055
2056 /*
2057 * Parse the request string into a list of raw parse trees.
2058 */
2059 raw_parsetree_list = pg_parse_query(src);
2060
2061 /*
2062 * Construct plancache entries, but don't do parse analysis yet.
2063 */
2064 plancache_list = NIL;
2065
2066 foreach(list_item, raw_parsetree_list)
2067 {
2068 RawStmt *parsetree = lfirst_node(RawStmt, list_item);
2069 CachedPlanSource *plansource;
2070
2071 plansource = CreateOneShotCachedPlan(parsetree,
2072 src,
2073 CreateCommandTag(parsetree->stmt));
2074
2075 plancache_list = lappend(plancache_list, plansource);
2076 }
2077
2078 plan->plancache_list = plancache_list;
2079 plan->oneshot = true;
2080
2081 /*
2082 * Pop the error context stack
2083 */
2084 error_context_stack = spierrcontext.previous;
2085 }
2086
2087 /*
2088 * Execute the given plan with the given parameter values
2089 *
2090 * snapshot: query snapshot to use, or InvalidSnapshot for the normal
2091 * behavior of taking a new snapshot for each query.
2092 * crosscheck_snapshot: for RI use, all others pass InvalidSnapshot
2093 * read_only: true for read-only execution (no CommandCounterIncrement)
2094 * fire_triggers: true to fire AFTER triggers at end of query (normal case);
2095 * false means any AFTER triggers are postponed to end of outer query
2096 * tcount: execution tuple-count limit, or 0 for none
2097 */
2098 static int
_SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
				  Snapshot snapshot, Snapshot crosscheck_snapshot,
				  bool read_only, bool fire_triggers, uint64 tcount)
{
	int			my_res = 0;		/* result code reported to caller */
	uint64		my_processed = 0;	/* row count reported to caller */
	SPITupleTable *my_tuptable = NULL;	/* tuple table reported to caller */
	int			res = 0;		/* result code of current statement */
	bool		allow_nonatomic = plan->no_snapshots;	/* legacy API name */
	bool		pushed_active_snap = false; /* did we push onto snap stack? */
	ErrorContextCallback spierrcontext;
	CachedPlan *cplan = NULL;	/* currently-held plan refcount, if any */
	ListCell   *lc1;

	/*
	 * Setup error traceback support for ereport()
	 */
	spierrcontext.callback = _SPI_error_callback;
	spierrcontext.arg = NULL;	/* we'll fill this below */
	spierrcontext.previous = error_context_stack;
	error_context_stack = &spierrcontext;

	/*
	 * We support four distinct snapshot management behaviors:
	 *
	 * snapshot != InvalidSnapshot, read_only = true: use exactly the given
	 * snapshot.
	 *
	 * snapshot != InvalidSnapshot, read_only = false: use the given snapshot,
	 * modified by advancing its command ID before each querytree.
	 *
	 * snapshot == InvalidSnapshot, read_only = true: use the entry-time
	 * ActiveSnapshot, if any (if there isn't one, we run with no snapshot).
	 *
	 * snapshot == InvalidSnapshot, read_only = false: take a full new
	 * snapshot for each user command, and advance its command ID before each
	 * querytree within the command.
	 *
	 * In the first two cases, we can just push the snap onto the stack once
	 * for the whole plan list.
	 *
	 * Note that snapshot != InvalidSnapshot implies an atomic execution
	 * context.
	 */
	if (snapshot != InvalidSnapshot)
	{
		Assert(!allow_nonatomic);
		if (read_only)
		{
			PushActiveSnapshot(snapshot);
			pushed_active_snap = true;
		}
		else
		{
			/* Make sure we have a private copy of the snapshot to modify */
			PushCopiedSnapshot(snapshot);
			pushed_active_snap = true;
		}
	}

	foreach(lc1, plan->plancache_list)
	{
		CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc1);
		List	   *stmt_list;
		ListCell   *lc2;

		/* Point the error callback at this statement's source text */
		spierrcontext.arg = unconstify(char *, plansource->query_string);

		/*
		 * If this is a one-shot plan, we still need to do parse analysis.
		 */
		if (plan->oneshot)
		{
			RawStmt    *parsetree = plansource->raw_parse_tree;
			const char *src = plansource->query_string;
			List	   *stmt_list;	/* shadows outer stmt_list; used only to
									 * fill in the CachedPlanSource below */

			/*
			 * Parameter datatypes are driven by parserSetup hook if provided,
			 * otherwise we use the fixed parameter list.
			 */
			if (parsetree == NULL)
				stmt_list = NIL;
			else if (plan->parserSetup != NULL)
			{
				Assert(plan->nargs == 0);
				stmt_list = pg_analyze_and_rewrite_params(parsetree,
														  src,
														  plan->parserSetup,
														  plan->parserSetupArg,
														  _SPI_current->queryEnv);
			}
			else
			{
				stmt_list = pg_analyze_and_rewrite(parsetree,
												   src,
												   plan->argtypes,
												   plan->nargs,
												   _SPI_current->queryEnv);
			}

			/* Finish filling in the CachedPlanSource */
			CompleteCachedPlan(plansource,
							   stmt_list,
							   NULL,
							   plan->argtypes,
							   plan->nargs,
							   plan->parserSetup,
							   plan->parserSetupArg,
							   plan->cursor_options,
							   false);	/* not fixed result */
		}

		/*
		 * Replan if needed, and increment plan refcount.  If it's a saved
		 * plan, the refcount must be backed by the CurrentResourceOwner.
		 */
		cplan = GetCachedPlan(plansource, paramLI, plan->saved, _SPI_current->queryEnv);
		stmt_list = cplan->stmt_list;

		/*
		 * If we weren't given a specific snapshot to use, and the statement
		 * list requires a snapshot, set that up.
		 */
		if (snapshot == InvalidSnapshot &&
			(list_length(stmt_list) > 1 ||
			 (list_length(stmt_list) == 1 &&
			  PlannedStmtRequiresSnapshot(linitial_node(PlannedStmt,
														stmt_list)))))
		{
			/*
			 * First, ensure there's a Portal-level snapshot.  This back-fills
			 * the snapshot stack in case the previous operation was a COMMIT
			 * or ROLLBACK inside a procedure or DO block.  (We can't put back
			 * the Portal snapshot any sooner, or we'd break cases like doing
			 * SET or LOCK just after COMMIT.)  It's enough to check once per
			 * statement list, since COMMIT/ROLLBACK/CALL/DO can't appear
			 * within a multi-statement list.
			 */
			EnsurePortalSnapshotExists();

			/*
			 * In the default non-read-only case, get a new per-statement-list
			 * snapshot, replacing any that we pushed in a previous cycle.
			 * Skip it when doing non-atomic execution, though (we rely
			 * entirely on the Portal snapshot in that case).
			 */
			if (!read_only && !allow_nonatomic)
			{
				if (pushed_active_snap)
					PopActiveSnapshot();
				PushActiveSnapshot(GetTransactionSnapshot());
				pushed_active_snap = true;
			}
		}

		foreach(lc2, stmt_list)
		{
			PlannedStmt *stmt = lfirst_node(PlannedStmt, lc2);
			bool		canSetTag = stmt->canSetTag;
			DestReceiver *dest;

			_SPI_current->processed = 0;
			_SPI_current->tuptable = NULL;

			/* Check for unsupported cases. */
			if (stmt->utilityStmt)
			{
				if (IsA(stmt->utilityStmt, CopyStmt))
				{
					CopyStmt   *cstmt = (CopyStmt *) stmt->utilityStmt;

					/* COPY to/from stdin is not supported inside SPI */
					if (cstmt->filename == NULL)
					{
						my_res = SPI_ERROR_COPY;
						goto fail;
					}
				}
				else if (IsA(stmt->utilityStmt, TransactionStmt))
				{
					my_res = SPI_ERROR_TRANSACTION;
					goto fail;
				}
			}

			if (read_only && !CommandIsReadOnly(stmt))
				ereport(ERROR,
						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				/* translator: %s is a SQL statement name */
						 errmsg("%s is not allowed in a non-volatile function",
								CreateCommandTag((Node *) stmt))));

			if (IsInParallelMode() && !CommandIsReadOnly(stmt))
				PreventCommandIfParallelMode(CreateCommandTag((Node *) stmt));

			/*
			 * If not read-only mode, advance the command counter before each
			 * command and update the snapshot.  (But skip it if the snapshot
			 * isn't under our control.)
			 */
			if (!read_only && pushed_active_snap)
			{
				CommandCounterIncrement();
				UpdateActiveSnapshotCommandId();
			}

			/* Only the canSetTag query's tuples are collected for the caller */
			dest = CreateDestReceiver(canSetTag ? DestSPI : DestNone);

			if (stmt->utilityStmt == NULL)
			{
				/* Plannable statement: run it through the executor */
				QueryDesc  *qdesc;
				Snapshot	snap;

				if (ActiveSnapshotSet())
					snap = GetActiveSnapshot();
				else
					snap = InvalidSnapshot;

				qdesc = CreateQueryDesc(stmt,
										plansource->query_string,
										snap, crosscheck_snapshot,
										dest,
										paramLI, _SPI_current->queryEnv,
										0);
				res = _SPI_pquery(qdesc, fire_triggers,
								  canSetTag ? tcount : 0);
				FreeQueryDesc(qdesc);
			}
			else
			{
				/* Utility statement: hand it to ProcessUtility */
				char		completionTag[COMPLETION_TAG_BUFSIZE];
				ProcessUtilityContext context;

				/*
				 * If the SPI context is atomic, or we were not told to allow
				 * nonatomic operations, tell ProcessUtility this is an atomic
				 * execution context.
				 */
				if (_SPI_current->atomic || !allow_nonatomic)
					context = PROCESS_UTILITY_QUERY;
				else
					context = PROCESS_UTILITY_QUERY_NONATOMIC;

				ProcessUtility(stmt,
							   plansource->query_string,
							   context,
							   paramLI,
							   _SPI_current->queryEnv,
							   dest,
							   completionTag);

				/* Update "processed" if stmt returned tuples */
				if (_SPI_current->tuptable)
					_SPI_current->processed = _SPI_current->tuptable->alloced -
						_SPI_current->tuptable->free;

				res = SPI_OK_UTILITY;

				/*
				 * Some utility statements return a row count, even though the
				 * tuples are not returned to the caller.
				 */
				if (IsA(stmt->utilityStmt, CreateTableAsStmt))
				{
					CreateTableAsStmt *ctastmt = (CreateTableAsStmt *) stmt->utilityStmt;

					/* Row count is parsed out of the completion tag text */
					if (strncmp(completionTag, "SELECT ", 7) == 0)
						_SPI_current->processed =
							pg_strtouint64(completionTag + 7, NULL, 10);
					else
					{
						/*
						 * Must be an IF NOT EXISTS that did nothing, or a
						 * CREATE ... WITH NO DATA.
						 */
						Assert(ctastmt->if_not_exists ||
							   ctastmt->into->skipData);
						_SPI_current->processed = 0;
					}

					/*
					 * For historical reasons, if CREATE TABLE AS was spelled
					 * as SELECT INTO, return a special return code.
					 */
					if (ctastmt->is_select_into)
						res = SPI_OK_SELINTO;
				}
				else if (IsA(stmt->utilityStmt, CopyStmt))
				{
					Assert(strncmp(completionTag, "COPY ", 5) == 0);
					_SPI_current->processed = pg_strtouint64(completionTag + 5,
															 NULL, 10);
				}
			}

			/*
			 * The last canSetTag query sets the status values returned to the
			 * caller.  Be careful to free any tuptables not returned, to
			 * avoid intratransaction memory leak.
			 */
			if (canSetTag)
			{
				my_processed = _SPI_current->processed;
				SPI_freetuptable(my_tuptable);
				my_tuptable = _SPI_current->tuptable;
				my_res = res;
			}
			else
			{
				SPI_freetuptable(_SPI_current->tuptable);
				_SPI_current->tuptable = NULL;
			}
			/* we know that the receiver doesn't need a destroy call */
			if (res < 0)
			{
				my_res = res;
				goto fail;
			}
		}

		/* Done with this plan, so release refcount */
		ReleaseCachedPlan(cplan, plan->saved);
		cplan = NULL;

		/*
		 * If not read-only mode, advance the command counter after the last
		 * command.  This ensures that its effects are visible, in case it was
		 * DDL that would affect the next CachedPlanSource.
		 */
		if (!read_only)
			CommandCounterIncrement();
	}

	/* Both the success path and error exits fall through to here */
fail:

	/* Pop the snapshot off the stack if we pushed one */
	if (pushed_active_snap)
		PopActiveSnapshot();

	/* We no longer need the cached plan refcount, if any */
	if (cplan)
		ReleaseCachedPlan(cplan, plan->saved);

	/*
	 * Pop the error context stack
	 */
	error_context_stack = spierrcontext.previous;

	/* Save results for caller */
	SPI_processed = my_processed;
	SPI_tuptable = my_tuptable;

	/* tuptable now is caller's responsibility, not SPI's */
	_SPI_current->tuptable = NULL;

	/*
	 * If none of the queries had canSetTag, return SPI_OK_REWRITTEN.  Prior
	 * to 8.4, we used to return the last query's result code, but not its
	 * auxiliary results, but that's confusing.
	 */
	if (my_res == 0)
		my_res = SPI_OK_REWRITTEN;

	return my_res;
}
2464
2465 /*
2466 * Convert arrays of query parameters to form wanted by planner and executor
2467 */
2468 static ParamListInfo
_SPI_convert_params(int nargs,Oid * argtypes,Datum * Values,const char * Nulls)2469 _SPI_convert_params(int nargs, Oid *argtypes,
2470 Datum *Values, const char *Nulls)
2471 {
2472 ParamListInfo paramLI;
2473
2474 if (nargs > 0)
2475 {
2476 paramLI = makeParamList(nargs);
2477
2478 for (int i = 0; i < nargs; i++)
2479 {
2480 ParamExternData *prm = ¶mLI->params[i];
2481
2482 prm->value = Values[i];
2483 prm->isnull = (Nulls && Nulls[i] == 'n');
2484 prm->pflags = PARAM_FLAG_CONST;
2485 prm->ptype = argtypes[i];
2486 }
2487 }
2488 else
2489 paramLI = NULL;
2490 return paramLI;
2491 }
2492
2493 static int
_SPI_pquery(QueryDesc * queryDesc,bool fire_triggers,uint64 tcount)2494 _SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount)
2495 {
2496 int operation = queryDesc->operation;
2497 int eflags;
2498 int res;
2499
2500 switch (operation)
2501 {
2502 case CMD_SELECT:
2503 if (queryDesc->dest->mydest != DestSPI)
2504 {
2505 /* Don't return SPI_OK_SELECT if we're discarding result */
2506 res = SPI_OK_UTILITY;
2507 }
2508 else
2509 res = SPI_OK_SELECT;
2510 break;
2511 case CMD_INSERT:
2512 if (queryDesc->plannedstmt->hasReturning)
2513 res = SPI_OK_INSERT_RETURNING;
2514 else
2515 res = SPI_OK_INSERT;
2516 break;
2517 case CMD_DELETE:
2518 if (queryDesc->plannedstmt->hasReturning)
2519 res = SPI_OK_DELETE_RETURNING;
2520 else
2521 res = SPI_OK_DELETE;
2522 break;
2523 case CMD_UPDATE:
2524 if (queryDesc->plannedstmt->hasReturning)
2525 res = SPI_OK_UPDATE_RETURNING;
2526 else
2527 res = SPI_OK_UPDATE;
2528 break;
2529 default:
2530 return SPI_ERROR_OPUNKNOWN;
2531 }
2532
2533 #ifdef SPI_EXECUTOR_STATS
2534 if (ShowExecutorStats)
2535 ResetUsage();
2536 #endif
2537
2538 /* Select execution options */
2539 if (fire_triggers)
2540 eflags = 0; /* default run-to-completion flags */
2541 else
2542 eflags = EXEC_FLAG_SKIP_TRIGGERS;
2543
2544 ExecutorStart(queryDesc, eflags);
2545
2546 ExecutorRun(queryDesc, ForwardScanDirection, tcount, true);
2547
2548 _SPI_current->processed = queryDesc->estate->es_processed;
2549
2550 if ((res == SPI_OK_SELECT || queryDesc->plannedstmt->hasReturning) &&
2551 queryDesc->dest->mydest == DestSPI)
2552 {
2553 if (_SPI_checktuples())
2554 elog(ERROR, "consistency check on SPI tuple count failed");
2555 }
2556
2557 ExecutorFinish(queryDesc);
2558 ExecutorEnd(queryDesc);
2559 /* FreeQueryDesc is done by the caller */
2560
2561 #ifdef SPI_EXECUTOR_STATS
2562 if (ShowExecutorStats)
2563 ShowUsage("SPI EXECUTOR STATS");
2564 #endif
2565
2566 return res;
2567 }
2568
2569 /*
2570 * _SPI_error_callback
2571 *
2572 * Add context information when a query invoked via SPI fails
2573 */
2574 static void
_SPI_error_callback(void * arg)2575 _SPI_error_callback(void *arg)
2576 {
2577 const char *query = (const char *) arg;
2578 int syntaxerrposition;
2579
2580 if (query == NULL) /* in case arg wasn't set yet */
2581 return;
2582
2583 /*
2584 * If there is a syntax error position, convert to internal syntax error;
2585 * otherwise treat the query as an item of context stack
2586 */
2587 syntaxerrposition = geterrposition();
2588 if (syntaxerrposition > 0)
2589 {
2590 errposition(0);
2591 internalerrposition(syntaxerrposition);
2592 internalerrquery(query);
2593 }
2594 else
2595 errcontext("SQL statement \"%s\"", query);
2596 }
2597
2598 /*
2599 * _SPI_cursor_operation()
2600 *
2601 * Do a FETCH or MOVE in a cursor
2602 */
static void
_SPI_cursor_operation(Portal portal, FetchDirection direction, long count,
					  DestReceiver *dest)
{
	uint64		nfetched;		/* number of rows fetched or moved */

	/* Check that the portal is valid */
	if (!PortalIsValid(portal))
		elog(ERROR, "invalid portal in SPI cursor operation");

	/* Push the SPI stack */
	if (_SPI_begin_call(true) < 0)
		elog(ERROR, "SPI cursor operation called while not connected");

	/* Reset the SPI result (note we deliberately don't touch lastoid) */
	SPI_processed = 0;
	SPI_tuptable = NULL;
	_SPI_current->processed = 0;
	_SPI_current->tuptable = NULL;

	/* Run the cursor */
	nfetched = PortalRunFetch(portal,
							  direction,
							  count,
							  dest);

	/*
	 * Think not to combine this store with the preceding function call.  If
	 * the portal contains calls to functions that use SPI, then SPI_stack is
	 * likely to move around while the portal runs.  When control returns,
	 * _SPI_current will point to the correct stack entry... but the pointer
	 * may be different than it was beforehand.  So we must be sure to re-fetch
	 * the pointer after the function call completes.
	 */
	_SPI_current->processed = nfetched;

	/* For a FETCH into SPI's tuple table, verify the stored tuple count */
	if (dest->mydest == DestSPI && _SPI_checktuples())
		elog(ERROR, "consistency check on SPI tuple count failed");

	/* Put the result into place for access by caller */
	SPI_processed = _SPI_current->processed;
	SPI_tuptable = _SPI_current->tuptable;

	/* tuptable now is caller's responsibility, not SPI's */
	_SPI_current->tuptable = NULL;

	/* Pop the SPI stack */
	_SPI_end_call(true);
}
2652
2653
2654 static MemoryContext
_SPI_execmem(void)2655 _SPI_execmem(void)
2656 {
2657 return MemoryContextSwitchTo(_SPI_current->execCxt);
2658 }
2659
2660 static MemoryContext
_SPI_procmem(void)2661 _SPI_procmem(void)
2662 {
2663 return MemoryContextSwitchTo(_SPI_current->procCxt);
2664 }
2665
2666 /*
2667 * _SPI_begin_call: begin a SPI operation within a connected procedure
2668 *
2669 * use_exec is true if we intend to make use of the procedure's execCxt
2670 * during this SPI operation. We'll switch into that context, and arrange
2671 * for it to be cleaned up at _SPI_end_call or if an error occurs.
2672 */
2673 static int
_SPI_begin_call(bool use_exec)2674 _SPI_begin_call(bool use_exec)
2675 {
2676 if (_SPI_current == NULL)
2677 return SPI_ERROR_UNCONNECTED;
2678
2679 if (use_exec)
2680 {
2681 /* remember when the Executor operation started */
2682 _SPI_current->execSubid = GetCurrentSubTransactionId();
2683 /* switch to the Executor memory context */
2684 _SPI_execmem();
2685 }
2686
2687 return 0;
2688 }
2689
2690 /*
2691 * _SPI_end_call: end a SPI operation within a connected procedure
2692 *
2693 * use_exec must be the same as in the previous _SPI_begin_call
2694 *
2695 * Note: this currently has no failure return cases, so callers don't check
2696 */
2697 static int
_SPI_end_call(bool use_exec)2698 _SPI_end_call(bool use_exec)
2699 {
2700 if (use_exec)
2701 {
2702 /* switch to the procedure memory context */
2703 _SPI_procmem();
2704 /* mark Executor context no longer in use */
2705 _SPI_current->execSubid = InvalidSubTransactionId;
2706 /* and free Executor memory */
2707 MemoryContextResetAndDeleteChildren(_SPI_current->execCxt);
2708 }
2709
2710 return 0;
2711 }
2712
2713 static bool
_SPI_checktuples(void)2714 _SPI_checktuples(void)
2715 {
2716 uint64 processed = _SPI_current->processed;
2717 SPITupleTable *tuptable = _SPI_current->tuptable;
2718 bool failed = false;
2719
2720 if (tuptable == NULL) /* spi_dest_startup was not called */
2721 failed = true;
2722 else if (processed != (tuptable->alloced - tuptable->free))
2723 failed = true;
2724
2725 return failed;
2726 }
2727
2728 /*
2729 * Convert a "temporary" SPIPlan into an "unsaved" plan.
2730 *
2731 * The passed _SPI_plan struct is on the stack, and all its subsidiary data
2732 * is in or under the current SPI executor context. Copy the plan into the
2733 * SPI procedure context so it will survive _SPI_end_call(). To minimize
2734 * data copying, this destructively modifies the input plan, by taking the
2735 * plancache entries away from it and reparenting them to the new SPIPlan.
2736 */
static SPIPlanPtr
_SPI_make_plan_non_temp(SPIPlanPtr plan)
{
	SPIPlanPtr	newplan;
	MemoryContext parentcxt = _SPI_current->procCxt;
	MemoryContext plancxt;
	MemoryContext oldcxt;
	ListCell   *lc;

	/* Assert the input is a temporary SPIPlan */
	Assert(plan->magic == _SPI_PLAN_MAGIC);
	Assert(plan->plancxt == NULL);
	/* One-shot plans can't be saved */
	Assert(!plan->oneshot);

	/*
	 * Create a memory context for the plan, underneath the procedure context.
	 * We don't expect the plan to be very large.
	 */
	plancxt = AllocSetContextCreate(parentcxt,
									"SPI Plan",
									ALLOCSET_SMALL_SIZES);
	oldcxt = MemoryContextSwitchTo(plancxt);

	/* Copy the SPI_plan struct and subsidiary data into the new context */
	newplan = (SPIPlanPtr) palloc0(sizeof(_SPI_plan));
	newplan->magic = _SPI_PLAN_MAGIC;
	newplan->plancxt = plancxt;
	newplan->cursor_options = plan->cursor_options;
	newplan->nargs = plan->nargs;
	if (plan->nargs > 0)
	{
		newplan->argtypes = (Oid *) palloc(plan->nargs * sizeof(Oid));
		memcpy(newplan->argtypes, plan->argtypes, plan->nargs * sizeof(Oid));
	}
	else
		newplan->argtypes = NULL;
	/* Hook pointers are copied as-is; their referents belong to the caller */
	newplan->parserSetup = plan->parserSetup;
	newplan->parserSetupArg = plan->parserSetupArg;

	/*
	 * Reparent all the CachedPlanSources into the procedure context.  In
	 * theory this could fail partway through due to the pallocs, but we don't
	 * care too much since both the procedure context and the executor context
	 * would go away on error.
	 */
	foreach(lc, plan->plancache_list)
	{
		CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc);

		CachedPlanSetParentContext(plansource, parentcxt);

		/* Build new list, with list cells in plancxt */
		newplan->plancache_list = lappend(newplan->plancache_list, plansource);
	}

	MemoryContextSwitchTo(oldcxt);

	/* For safety, unlink the CachedPlanSources from the temporary plan */
	plan->plancache_list = NIL;

	return newplan;
}
2800
2801 /*
2802 * Make a "saved" copy of the given plan.
2803 */
static SPIPlanPtr
_SPI_save_plan(SPIPlanPtr plan)
{
	SPIPlanPtr	newplan;
	MemoryContext plancxt;
	MemoryContext oldcxt;
	ListCell   *lc;

	/* One-shot plans can't be saved */
	Assert(!plan->oneshot);

	/*
	 * Create a memory context for the plan.  We don't expect the plan to be
	 * very large, so use smaller-than-default alloc parameters.  It's a
	 * transient context until we finish copying everything.
	 */
	plancxt = AllocSetContextCreate(CurrentMemoryContext,
									"SPI Plan",
									ALLOCSET_SMALL_SIZES);
	oldcxt = MemoryContextSwitchTo(plancxt);

	/* Copy the SPI plan into its own context */
	newplan = (SPIPlanPtr) palloc0(sizeof(_SPI_plan));
	newplan->magic = _SPI_PLAN_MAGIC;
	newplan->plancxt = plancxt;
	newplan->cursor_options = plan->cursor_options;
	newplan->nargs = plan->nargs;
	if (plan->nargs > 0)
	{
		newplan->argtypes = (Oid *) palloc(plan->nargs * sizeof(Oid));
		memcpy(newplan->argtypes, plan->argtypes, plan->nargs * sizeof(Oid));
	}
	else
		newplan->argtypes = NULL;
	/* Hook pointers are copied as-is; their referents belong to the caller */
	newplan->parserSetup = plan->parserSetup;
	newplan->parserSetupArg = plan->parserSetupArg;

	/* Copy all the plancache entries (deep copies, unlike non_temp case) */
	foreach(lc, plan->plancache_list)
	{
		CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc);
		CachedPlanSource *newsource;

		newsource = CopyCachedPlan(plansource);
		newplan->plancache_list = lappend(newplan->plancache_list, newsource);
	}

	MemoryContextSwitchTo(oldcxt);

	/*
	 * Mark it saved, reparent it under CacheMemoryContext, and mark all the
	 * component CachedPlanSources as saved.  This sequence cannot fail
	 * partway through, so there's no risk of long-term memory leakage.
	 */
	newplan->saved = true;
	MemoryContextSetParent(newplan->plancxt, CacheMemoryContext);

	foreach(lc, newplan->plancache_list)
	{
		CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc);

		SaveCachedPlan(plansource);
	}

	return newplan;
}
2870
2871 /*
2872 * Internal lookup of ephemeral named relation by name.
2873 */
2874 static EphemeralNamedRelation
_SPI_find_ENR_by_name(const char * name)2875 _SPI_find_ENR_by_name(const char *name)
2876 {
2877 /* internal static function; any error is bug in SPI itself */
2878 Assert(name != NULL);
2879
2880 /* fast exit if no tuplestores have been added */
2881 if (_SPI_current->queryEnv == NULL)
2882 return NULL;
2883
2884 return get_ENR(_SPI_current->queryEnv, name);
2885 }
2886
2887 /*
2888 * Register an ephemeral named relation for use by the planner and executor on
2889 * subsequent calls using this SPI connection.
2890 */
2891 int
SPI_register_relation(EphemeralNamedRelation enr)2892 SPI_register_relation(EphemeralNamedRelation enr)
2893 {
2894 EphemeralNamedRelation match;
2895 int res;
2896
2897 if (enr == NULL || enr->md.name == NULL)
2898 return SPI_ERROR_ARGUMENT;
2899
2900 res = _SPI_begin_call(false); /* keep current memory context */
2901 if (res < 0)
2902 return res;
2903
2904 match = _SPI_find_ENR_by_name(enr->md.name);
2905 if (match)
2906 res = SPI_ERROR_REL_DUPLICATE;
2907 else
2908 {
2909 if (_SPI_current->queryEnv == NULL)
2910 _SPI_current->queryEnv = create_queryEnv();
2911
2912 register_ENR(_SPI_current->queryEnv, enr);
2913 res = SPI_OK_REL_REGISTER;
2914 }
2915
2916 _SPI_end_call(false);
2917
2918 return res;
2919 }
2920
2921 /*
2922 * Unregister an ephemeral named relation by name. This will probably be a
2923 * rarely used function, since SPI_finish will clear it automatically.
2924 */
2925 int
SPI_unregister_relation(const char * name)2926 SPI_unregister_relation(const char *name)
2927 {
2928 EphemeralNamedRelation match;
2929 int res;
2930
2931 if (name == NULL)
2932 return SPI_ERROR_ARGUMENT;
2933
2934 res = _SPI_begin_call(false); /* keep current memory context */
2935 if (res < 0)
2936 return res;
2937
2938 match = _SPI_find_ENR_by_name(name);
2939 if (match)
2940 {
2941 unregister_ENR(_SPI_current->queryEnv, match->md.name);
2942 res = SPI_OK_REL_UNREGISTER;
2943 }
2944 else
2945 res = SPI_ERROR_REL_NOT_FOUND;
2946
2947 _SPI_end_call(false);
2948
2949 return res;
2950 }
2951
2952 /*
2953 * Register the transient relations from 'tdata' using this SPI connection.
2954 * This should be called by PL implementations' trigger handlers after
2955 * connecting, in order to make transition tables visible to any queries run
2956 * in this connection.
2957 */
2958 int
SPI_register_trigger_data(TriggerData * tdata)2959 SPI_register_trigger_data(TriggerData *tdata)
2960 {
2961 if (tdata == NULL)
2962 return SPI_ERROR_ARGUMENT;
2963
2964 if (tdata->tg_newtable)
2965 {
2966 EphemeralNamedRelation enr =
2967 palloc(sizeof(EphemeralNamedRelationData));
2968 int rc;
2969
2970 enr->md.name = tdata->tg_trigger->tgnewtable;
2971 enr->md.reliddesc = tdata->tg_relation->rd_id;
2972 enr->md.tupdesc = NULL;
2973 enr->md.enrtype = ENR_NAMED_TUPLESTORE;
2974 enr->md.enrtuples = tuplestore_tuple_count(tdata->tg_newtable);
2975 enr->reldata = tdata->tg_newtable;
2976 rc = SPI_register_relation(enr);
2977 if (rc != SPI_OK_REL_REGISTER)
2978 return rc;
2979 }
2980
2981 if (tdata->tg_oldtable)
2982 {
2983 EphemeralNamedRelation enr =
2984 palloc(sizeof(EphemeralNamedRelationData));
2985 int rc;
2986
2987 enr->md.name = tdata->tg_trigger->tgoldtable;
2988 enr->md.reliddesc = tdata->tg_relation->rd_id;
2989 enr->md.tupdesc = NULL;
2990 enr->md.enrtype = ENR_NAMED_TUPLESTORE;
2991 enr->md.enrtuples = tuplestore_tuple_count(tdata->tg_oldtable);
2992 enr->reldata = tdata->tg_oldtable;
2993 rc = SPI_register_relation(enr);
2994 if (rc != SPI_OK_REL_REGISTER)
2995 return rc;
2996 }
2997
2998 return SPI_OK_TD_REGISTER;
2999 }
3000