1 /*-------------------------------------------------------------------------
2 *
3 * spi.c
4 * Server Programming Interface
5 *
6 * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 *
10 * IDENTIFICATION
11 * src/backend/executor/spi.c
12 *
13 *-------------------------------------------------------------------------
14 */
15 #include "postgres.h"
16
17 #include "access/htup_details.h"
18 #include "access/printtup.h"
19 #include "access/sysattr.h"
20 #include "access/xact.h"
21 #include "catalog/heap.h"
22 #include "catalog/pg_type.h"
23 #include "commands/trigger.h"
24 #include "executor/executor.h"
25 #include "executor/spi_priv.h"
26 #include "miscadmin.h"
27 #include "tcop/pquery.h"
28 #include "tcop/utility.h"
29 #include "utils/builtins.h"
30 #include "utils/datum.h"
31 #include "utils/lsyscache.h"
32 #include "utils/memutils.h"
33 #include "utils/rel.h"
34 #include "utils/snapmgr.h"
35 #include "utils/syscache.h"
36 #include "utils/typcache.h"
37
38
39 /*
40 * These global variables are part of the API for various SPI functions
41 * (a horrible API choice, but it's too late now). To reduce the risk of
42 * interference between different SPI callers, we save and restore them
43 * when entering/exiting a SPI nesting level.
44 */
45 uint64 SPI_processed = 0;
46 Oid SPI_lastoid = InvalidOid;
47 SPITupleTable *SPI_tuptable = NULL;
48 int SPI_result = 0;
49
50 static _SPI_connection *_SPI_stack = NULL;
51 static _SPI_connection *_SPI_current = NULL;
52 static int _SPI_stack_depth = 0; /* allocated size of _SPI_stack */
53 static int _SPI_connected = -1; /* current stack index */
54
55 static Portal SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
56 ParamListInfo paramLI, bool read_only);
57
58 static void _SPI_prepare_plan(const char *src, SPIPlanPtr plan);
59
60 static void _SPI_prepare_oneshot_plan(const char *src, SPIPlanPtr plan);
61
62 static int _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
63 Snapshot snapshot, Snapshot crosscheck_snapshot,
64 bool read_only, bool fire_triggers, uint64 tcount);
65
66 static ParamListInfo _SPI_convert_params(int nargs, Oid *argtypes,
67 Datum *Values, const char *Nulls);
68
69 static int _SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount);
70
71 static void _SPI_error_callback(void *arg);
72
73 static void _SPI_cursor_operation(Portal portal,
74 FetchDirection direction, long count,
75 DestReceiver *dest);
76
77 static SPIPlanPtr _SPI_make_plan_non_temp(SPIPlanPtr plan);
78 static SPIPlanPtr _SPI_save_plan(SPIPlanPtr plan);
79
80 static int _SPI_begin_call(bool use_exec);
81 static int _SPI_end_call(bool use_exec);
82 static MemoryContext _SPI_execmem(void);
83 static MemoryContext _SPI_procmem(void);
84 static bool _SPI_checktuples(void);
85
86
87 /* =================== interface functions =================== */
88
/*
 * SPI_connect
 *		Open a new SPI nesting level with default (atomic) options.
 *
 * Returns SPI_OK_CONNECT; any failure is reported via elog(ERROR).
 */
int
SPI_connect(void)
{
	return SPI_connect_ext(0);
}
94
/*
 * SPI_connect_ext
 *		Open a new SPI nesting level.  "options" may include
 *		SPI_OPT_NONATOMIC to request a nonatomic (procedure-style)
 *		context that can survive transaction boundaries.
 *
 * Pushes a new entry onto the SPI stack, saves the caller's API globals,
 * creates the procedure and executor memory contexts, and switches into
 * the procedure context.  Returns SPI_OK_CONNECT.
 */
int
SPI_connect_ext(int options)
{
	int			newdepth;

	/* Enlarge stack if necessary */
	if (_SPI_stack == NULL)
	{
		/* first use: allocate the stack in TopMemoryContext so it persists */
		if (_SPI_connected != -1 || _SPI_stack_depth != 0)
			elog(ERROR, "SPI stack corrupted");
		newdepth = 16;
		_SPI_stack = (_SPI_connection *)
			MemoryContextAlloc(TopMemoryContext,
							   newdepth * sizeof(_SPI_connection));
		_SPI_stack_depth = newdepth;
	}
	else
	{
		if (_SPI_stack_depth <= 0 || _SPI_stack_depth <= _SPI_connected)
			elog(ERROR, "SPI stack corrupted");
		if (_SPI_stack_depth == _SPI_connected + 1)
		{
			/* stack full: double its size */
			newdepth = _SPI_stack_depth * 2;
			_SPI_stack = (_SPI_connection *)
				repalloc(_SPI_stack,
						 newdepth * sizeof(_SPI_connection));
			_SPI_stack_depth = newdepth;
		}
	}

	/* Enter new stack level */
	_SPI_connected++;
	Assert(_SPI_connected >= 0 && _SPI_connected < _SPI_stack_depth);

	_SPI_current = &(_SPI_stack[_SPI_connected]);
	_SPI_current->processed = 0;
	_SPI_current->lastoid = InvalidOid;
	_SPI_current->tuptable = NULL;
	_SPI_current->execSubid = InvalidSubTransactionId;
	slist_init(&_SPI_current->tuptables);
	_SPI_current->procCxt = NULL;	/* in case we fail to create 'em */
	_SPI_current->execCxt = NULL;
	_SPI_current->connectSubid = GetCurrentSubTransactionId();
	_SPI_current->queryEnv = NULL;
	_SPI_current->atomic = (options & SPI_OPT_NONATOMIC ? false : true);
	_SPI_current->internal_xact = false;
	/* save caller's API globals so SPI_finish can restore them */
	_SPI_current->outer_processed = SPI_processed;
	_SPI_current->outer_lastoid = SPI_lastoid;
	_SPI_current->outer_tuptable = SPI_tuptable;
	_SPI_current->outer_result = SPI_result;

	/*
	 * Create memory contexts for this procedure
	 *
	 * In atomic contexts (the normal case), we use TopTransactionContext,
	 * otherwise PortalContext, so that it lives across transaction
	 * boundaries.
	 *
	 * XXX It could be better to use PortalContext as the parent context in
	 * all cases, but we may not be inside a portal (consider deferred-trigger
	 * execution).  Perhaps CurTransactionContext could be an option?  For now
	 * it doesn't matter because we clean up explicitly in AtEOSubXact_SPI().
	 */
	_SPI_current->procCxt = AllocSetContextCreate(_SPI_current->atomic ? TopTransactionContext : PortalContext,
												  "SPI Proc",
												  ALLOCSET_DEFAULT_SIZES);
	_SPI_current->execCxt = AllocSetContextCreate(_SPI_current->atomic ? TopTransactionContext : _SPI_current->procCxt,
												  "SPI Exec",
												  ALLOCSET_DEFAULT_SIZES);
	/* ... and switch to procedure's context */
	_SPI_current->savedcxt = MemoryContextSwitchTo(_SPI_current->procCxt);

	/*
	 * Reset API global variables so that current caller cannot accidentally
	 * depend on state of an outer caller.
	 */
	SPI_processed = 0;
	SPI_lastoid = InvalidOid;
	SPI_tuptable = NULL;
	SPI_result = 0;

	return SPI_OK_CONNECT;
}
178
/*
 * SPI_finish
 *		Close the current SPI nesting level (inverse of SPI_connect).
 *
 * Deletes this level's memory contexts (and thereby its tuple tables),
 * restores the caller's saved API globals, and pops the SPI stack.
 * Returns SPI_OK_FINISH, or a negative error code if not connected.
 */
int
SPI_finish(void)
{
	int			res;

	res = _SPI_begin_call(false);	/* just check we're connected */
	if (res < 0)
		return res;

	/* Restore memory context as it was before procedure call */
	MemoryContextSwitchTo(_SPI_current->savedcxt);

	/* Release memory used in procedure call (including tuptables) */
	MemoryContextDelete(_SPI_current->execCxt);
	_SPI_current->execCxt = NULL;
	MemoryContextDelete(_SPI_current->procCxt);
	_SPI_current->procCxt = NULL;

	/*
	 * Restore outer API variables, especially SPI_tuptable which is probably
	 * pointing at a just-deleted tuptable
	 */
	SPI_processed = _SPI_current->outer_processed;
	SPI_lastoid = _SPI_current->outer_lastoid;
	SPI_tuptable = _SPI_current->outer_tuptable;
	SPI_result = _SPI_current->outer_result;

	/* Exit stack level */
	_SPI_connected--;
	if (_SPI_connected < 0)
		_SPI_current = NULL;
	else
		_SPI_current = &(_SPI_stack[_SPI_connected]);

	return SPI_OK_FINISH;
}
215
/*
 * SPI_start_transaction
 *		Begin a new transaction on behalf of a nonatomic SPI caller.
 *
 * StartTransactionCommand() changes CurrentMemoryContext, so we save and
 * restore the caller's context around it.
 */
void
SPI_start_transaction(void)
{
	MemoryContext oldcontext = CurrentMemoryContext;

	StartTransactionCommand();
	MemoryContextSwitchTo(oldcontext);
}
224
225 void
SPI_commit(void)226 SPI_commit(void)
227 {
228 MemoryContext oldcontext = CurrentMemoryContext;
229
230 if (_SPI_current->atomic)
231 ereport(ERROR,
232 (errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION),
233 errmsg("invalid transaction termination")));
234
235 /*
236 * This restriction is required by PLs implemented on top of SPI. They
237 * use subtransactions to establish exception blocks that are supposed to
238 * be rolled back together if there is an error. Terminating the
239 * top-level transaction in such a block violates that idea. A future PL
240 * implementation might have different ideas about this, in which case
241 * this restriction would have to be refined or the check possibly be
242 * moved out of SPI into the PLs.
243 */
244 if (IsSubTransaction())
245 ereport(ERROR,
246 (errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION),
247 errmsg("cannot commit while a subtransaction is active")));
248
249 /*
250 * Hold any pinned portals that any PLs might be using. We have to do
251 * this before changing transaction state, since this will run
252 * user-defined code that might throw an error.
253 */
254 HoldPinnedPortals();
255
256 /* Start the actual commit */
257 _SPI_current->internal_xact = true;
258
259 /* Release snapshots associated with portals */
260 ForgetPortalSnapshots();
261
262 CommitTransactionCommand();
263 MemoryContextSwitchTo(oldcontext);
264
265 _SPI_current->internal_xact = false;
266 }
267
268 void
SPI_rollback(void)269 SPI_rollback(void)
270 {
271 MemoryContext oldcontext = CurrentMemoryContext;
272
273 if (_SPI_current->atomic)
274 ereport(ERROR,
275 (errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION),
276 errmsg("invalid transaction termination")));
277
278 /* see under SPI_commit() */
279 if (IsSubTransaction())
280 ereport(ERROR,
281 (errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION),
282 errmsg("cannot roll back while a subtransaction is active")));
283
284 /*
285 * Hold any pinned portals that any PLs might be using. We have to do
286 * this before changing transaction state, since this will run
287 * user-defined code that might throw an error, and in any case couldn't
288 * be run in an already-aborted transaction.
289 */
290 HoldPinnedPortals();
291
292 /* Start the actual rollback */
293 _SPI_current->internal_xact = true;
294
295 /* Release snapshots associated with portals */
296 ForgetPortalSnapshots();
297
298 AbortCurrentTransaction();
299 MemoryContextSwitchTo(oldcontext);
300
301 _SPI_current->internal_xact = false;
302 }
303
304 /*
305 * Clean up SPI state. Called on transaction end (of non-SPI-internal
306 * transactions) and when returning to the main loop on error.
307 */
308 void
SPICleanup(void)309 SPICleanup(void)
310 {
311 _SPI_current = NULL;
312 _SPI_connected = -1;
313 /* Reset API global variables, too */
314 SPI_processed = 0;
315 SPI_lastoid = InvalidOid;
316 SPI_tuptable = NULL;
317 SPI_result = 0;
318 }
319
320 /*
321 * Clean up SPI state at transaction commit or abort.
322 */
323 void
AtEOXact_SPI(bool isCommit)324 AtEOXact_SPI(bool isCommit)
325 {
326 /* Do nothing if the transaction end was initiated by SPI. */
327 if (_SPI_current && _SPI_current->internal_xact)
328 return;
329
330 if (isCommit && _SPI_connected != -1)
331 ereport(WARNING,
332 (errcode(ERRCODE_WARNING),
333 errmsg("transaction left non-empty SPI stack"),
334 errhint("Check for missing \"SPI_finish\" calls.")));
335
336 SPICleanup();
337 }
338
339 /*
340 * Clean up SPI state at subtransaction commit or abort.
341 *
342 * During commit, there shouldn't be any unclosed entries remaining from
343 * the current subtransaction; we emit a warning if any are found.
344 */
345 void
AtEOSubXact_SPI(bool isCommit,SubTransactionId mySubid)346 AtEOSubXact_SPI(bool isCommit, SubTransactionId mySubid)
347 {
348 bool found = false;
349
350 while (_SPI_connected >= 0)
351 {
352 _SPI_connection *connection = &(_SPI_stack[_SPI_connected]);
353
354 if (connection->connectSubid != mySubid)
355 break; /* couldn't be any underneath it either */
356
357 if (connection->internal_xact)
358 break;
359
360 found = true;
361
362 /*
363 * Release procedure memory explicitly (see note in SPI_connect)
364 */
365 if (connection->execCxt)
366 {
367 MemoryContextDelete(connection->execCxt);
368 connection->execCxt = NULL;
369 }
370 if (connection->procCxt)
371 {
372 MemoryContextDelete(connection->procCxt);
373 connection->procCxt = NULL;
374 }
375
376 /*
377 * Restore outer global variables and pop the stack entry. Unlike
378 * SPI_finish(), we don't risk switching to memory contexts that might
379 * be already gone.
380 */
381 SPI_processed = connection->outer_processed;
382 SPI_lastoid = connection->outer_lastoid;
383 SPI_tuptable = connection->outer_tuptable;
384 SPI_result = connection->outer_result;
385
386 _SPI_connected--;
387 if (_SPI_connected < 0)
388 _SPI_current = NULL;
389 else
390 _SPI_current = &(_SPI_stack[_SPI_connected]);
391 }
392
393 if (found && isCommit)
394 ereport(WARNING,
395 (errcode(ERRCODE_WARNING),
396 errmsg("subtransaction left non-empty SPI stack"),
397 errhint("Check for missing \"SPI_finish\" calls.")));
398
399 /*
400 * If we are aborting a subtransaction and there is an open SPI context
401 * surrounding the subxact, clean up to prevent memory leakage.
402 */
403 if (_SPI_current && !isCommit)
404 {
405 slist_mutable_iter siter;
406
407 /*
408 * Throw away executor state if current executor operation was started
409 * within current subxact (essentially, force a _SPI_end_call(true)).
410 */
411 if (_SPI_current->execSubid >= mySubid)
412 {
413 _SPI_current->execSubid = InvalidSubTransactionId;
414 MemoryContextResetAndDeleteChildren(_SPI_current->execCxt);
415 }
416
417 /* throw away any tuple tables created within current subxact */
418 slist_foreach_modify(siter, &_SPI_current->tuptables)
419 {
420 SPITupleTable *tuptable;
421
422 tuptable = slist_container(SPITupleTable, next, siter.cur);
423 if (tuptable->subid >= mySubid)
424 {
425 /*
426 * If we used SPI_freetuptable() here, its internal search of
427 * the tuptables list would make this operation O(N^2).
428 * Instead, just free the tuptable manually. This should
429 * match what SPI_freetuptable() does.
430 */
431 slist_delete_current(&siter);
432 if (tuptable == _SPI_current->tuptable)
433 _SPI_current->tuptable = NULL;
434 if (tuptable == SPI_tuptable)
435 SPI_tuptable = NULL;
436 MemoryContextDelete(tuptable->tuptabcxt);
437 }
438 }
439 }
440 }
441
442 /*
443 * Are we executing inside a procedure (that is, a nonatomic SPI context)?
444 */
445 bool
SPI_inside_nonatomic_context(void)446 SPI_inside_nonatomic_context(void)
447 {
448 if (_SPI_current == NULL)
449 return false; /* not in any SPI context at all */
450 if (_SPI_current->atomic)
451 return false; /* it's atomic (ie function not procedure) */
452 return true;
453 }
454
455
/*
 * SPI_execute
 *		Parse, plan, and execute a query string.
 *
 * read_only: if true, only read-only commands are allowed.
 * tcount: max number of tuples to process (0 = no limit).
 *
 * Uses a stack-local one-shot plan; returns an SPI_OK_* or SPI_ERROR_*
 * code.
 */
int
SPI_execute(const char *src, bool read_only, long tcount)
{
	_SPI_plan	plan;
	int			res;

	if (src == NULL || tcount < 0)
		return SPI_ERROR_ARGUMENT;

	res = _SPI_begin_call(true);
	if (res < 0)
		return res;

	memset(&plan, 0, sizeof(_SPI_plan));
	plan.magic = _SPI_PLAN_MAGIC;
	plan.cursor_options = CURSOR_OPT_PARALLEL_OK;

	_SPI_prepare_oneshot_plan(src, &plan);

	res = _SPI_execute_plan(&plan, NULL,
							InvalidSnapshot, InvalidSnapshot,
							read_only, true, tcount);

	_SPI_end_call(true);
	return res;
}
483
/*
 * SPI_exec
 *		Obsolete version of SPI_execute: same, but never read-only.
 */
int
SPI_exec(const char *src, long tcount)
{
	return SPI_execute(src, false, tcount);
}
490
/*
 * SPI_execute_plan
 *		Execute a previously prepared plan.
 *
 * Values/Nulls supply the plan's parameters ('n' in Nulls marks a null);
 * Values must be non-NULL if the plan has parameters.  Returns an
 * SPI_OK_* or SPI_ERROR_* code.
 */
int
SPI_execute_plan(SPIPlanPtr plan, Datum *Values, const char *Nulls,
				 bool read_only, long tcount)
{
	int			res;

	if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC || tcount < 0)
		return SPI_ERROR_ARGUMENT;

	if (plan->nargs > 0 && Values == NULL)
		return SPI_ERROR_PARAM;

	res = _SPI_begin_call(true);
	if (res < 0)
		return res;

	res = _SPI_execute_plan(plan,
							_SPI_convert_params(plan->nargs, plan->argtypes,
												Values, Nulls),
							InvalidSnapshot, InvalidSnapshot,
							read_only, true, tcount);

	_SPI_end_call(true);
	return res;
}
517
/*
 * SPI_execp
 *		Obsolete version of SPI_execute_plan: same, but never read-only.
 */
int
SPI_execp(SPIPlanPtr plan, Datum *Values, const char *Nulls, long tcount)
{
	return SPI_execute_plan(plan, Values, Nulls, false, tcount);
}
524
/*
 * SPI_execute_plan_with_paramlist
 *		Execute a previously prepared plan, taking parameters as a
 *		ParamListInfo rather than parallel Values/Nulls arrays.
 */
int
SPI_execute_plan_with_paramlist(SPIPlanPtr plan, ParamListInfo params,
								bool read_only, long tcount)
{
	int			res;

	if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC || tcount < 0)
		return SPI_ERROR_ARGUMENT;

	res = _SPI_begin_call(true);
	if (res < 0)
		return res;

	res = _SPI_execute_plan(plan, params,
							InvalidSnapshot, InvalidSnapshot,
							read_only, true, tcount);

	_SPI_end_call(true);
	return res;
}
546
547 /*
548 * SPI_execute_snapshot -- identical to SPI_execute_plan, except that we allow
549 * the caller to specify exactly which snapshots to use, which will be
550 * registered here. Also, the caller may specify that AFTER triggers should be
551 * queued as part of the outer query rather than being fired immediately at the
552 * end of the command.
553 *
554 * This is currently not documented in spi.sgml because it is only intended
555 * for use by RI triggers.
556 *
557 * Passing snapshot == InvalidSnapshot will select the normal behavior of
558 * fetching a new snapshot for each query.
559 */
560 int
SPI_execute_snapshot(SPIPlanPtr plan,Datum * Values,const char * Nulls,Snapshot snapshot,Snapshot crosscheck_snapshot,bool read_only,bool fire_triggers,long tcount)561 SPI_execute_snapshot(SPIPlanPtr plan,
562 Datum *Values, const char *Nulls,
563 Snapshot snapshot, Snapshot crosscheck_snapshot,
564 bool read_only, bool fire_triggers, long tcount)
565 {
566 int res;
567
568 if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC || tcount < 0)
569 return SPI_ERROR_ARGUMENT;
570
571 if (plan->nargs > 0 && Values == NULL)
572 return SPI_ERROR_PARAM;
573
574 res = _SPI_begin_call(true);
575 if (res < 0)
576 return res;
577
578 res = _SPI_execute_plan(plan,
579 _SPI_convert_params(plan->nargs, plan->argtypes,
580 Values, Nulls),
581 snapshot, crosscheck_snapshot,
582 read_only, fire_triggers, tcount);
583
584 _SPI_end_call(true);
585 return res;
586 }
587
588 /*
589 * SPI_execute_with_args -- plan and execute a query with supplied arguments
590 *
591 * This is functionally equivalent to SPI_prepare followed by
592 * SPI_execute_plan.
593 */
594 int
SPI_execute_with_args(const char * src,int nargs,Oid * argtypes,Datum * Values,const char * Nulls,bool read_only,long tcount)595 SPI_execute_with_args(const char *src,
596 int nargs, Oid *argtypes,
597 Datum *Values, const char *Nulls,
598 bool read_only, long tcount)
599 {
600 int res;
601 _SPI_plan plan;
602 ParamListInfo paramLI;
603
604 if (src == NULL || nargs < 0 || tcount < 0)
605 return SPI_ERROR_ARGUMENT;
606
607 if (nargs > 0 && (argtypes == NULL || Values == NULL))
608 return SPI_ERROR_PARAM;
609
610 res = _SPI_begin_call(true);
611 if (res < 0)
612 return res;
613
614 memset(&plan, 0, sizeof(_SPI_plan));
615 plan.magic = _SPI_PLAN_MAGIC;
616 plan.cursor_options = CURSOR_OPT_PARALLEL_OK;
617 plan.nargs = nargs;
618 plan.argtypes = argtypes;
619 plan.parserSetup = NULL;
620 plan.parserSetupArg = NULL;
621
622 paramLI = _SPI_convert_params(nargs, argtypes,
623 Values, Nulls);
624
625 _SPI_prepare_oneshot_plan(src, &plan);
626
627 res = _SPI_execute_plan(&plan, paramLI,
628 InvalidSnapshot, InvalidSnapshot,
629 read_only, true, tcount);
630
631 _SPI_end_call(true);
632 return res;
633 }
634
/*
 * SPI_prepare
 *		Prepare a plan for a query string, with default cursor options.
 */
SPIPlanPtr
SPI_prepare(const char *src, int nargs, Oid *argtypes)
{
	return SPI_prepare_cursor(src, nargs, argtypes, 0);
}
640
/*
 * SPI_prepare_cursor
 *		Prepare a plan, allowing explicit CURSOR_OPT_* flags.
 *
 * Returns a plan pointer allocated in the procedure's context, or NULL
 * with SPI_result set on failure.
 */
SPIPlanPtr
SPI_prepare_cursor(const char *src, int nargs, Oid *argtypes,
				   int cursorOptions)
{
	_SPI_plan	plan;
	SPIPlanPtr	result;

	if (src == NULL || nargs < 0 || (nargs > 0 && argtypes == NULL))
	{
		SPI_result = SPI_ERROR_ARGUMENT;
		return NULL;
	}

	SPI_result = _SPI_begin_call(true);
	if (SPI_result < 0)
		return NULL;

	memset(&plan, 0, sizeof(_SPI_plan));
	plan.magic = _SPI_PLAN_MAGIC;
	plan.cursor_options = cursorOptions;
	plan.nargs = nargs;
	plan.argtypes = argtypes;
	plan.parserSetup = NULL;
	plan.parserSetupArg = NULL;

	_SPI_prepare_plan(src, &plan);

	/* copy plan to procedure context */
	result = _SPI_make_plan_non_temp(&plan);

	_SPI_end_call(true);

	return result;
}
675
/*
 * SPI_prepare_params
 *		Prepare a plan whose parameters are resolved via a parser setup
 *		hook (used by PLs that do their own variable resolution) instead
 *		of an explicit argtypes array.
 *
 * Returns a plan pointer in the procedure's context, or NULL with
 * SPI_result set on failure.
 */
SPIPlanPtr
SPI_prepare_params(const char *src,
				   ParserSetupHook parserSetup,
				   void *parserSetupArg,
				   int cursorOptions)
{
	_SPI_plan	plan;
	SPIPlanPtr	result;

	if (src == NULL)
	{
		SPI_result = SPI_ERROR_ARGUMENT;
		return NULL;
	}

	SPI_result = _SPI_begin_call(true);
	if (SPI_result < 0)
		return NULL;

	memset(&plan, 0, sizeof(_SPI_plan));
	plan.magic = _SPI_PLAN_MAGIC;
	plan.cursor_options = cursorOptions;
	plan.nargs = 0;
	plan.argtypes = NULL;
	plan.parserSetup = parserSetup;
	plan.parserSetupArg = parserSetupArg;

	_SPI_prepare_plan(src, &plan);

	/* copy plan to procedure context */
	result = _SPI_make_plan_non_temp(&plan);

	_SPI_end_call(true);

	return result;
}
712
/*
 * SPI_keepplan
 *		Make a prepared plan live beyond the current SPI connection by
 *		moving it into long-lived (cached) storage.
 *
 * Fails (SPI_ERROR_ARGUMENT) for invalid, already-saved, or one-shot
 * plans.  Returns 0 on success.
 */
int
SPI_keepplan(SPIPlanPtr plan)
{
	ListCell   *lc;

	if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC ||
		plan->saved || plan->oneshot)
		return SPI_ERROR_ARGUMENT;

	/*
	 * Mark it saved, reparent it under CacheMemoryContext, and mark all the
	 * component CachedPlanSources as saved.  This sequence cannot fail
	 * partway through, so there's no risk of long-term memory leakage.
	 */
	plan->saved = true;
	MemoryContextSetParent(plan->plancxt, CacheMemoryContext);

	foreach(lc, plan->plancache_list)
	{
		CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc);

		SaveCachedPlan(plansource);
	}

	return 0;
}
739
/*
 * SPI_saveplan
 *		Obsolete interface: copy a plan into long-lived storage and return
 *		the copy.  New code should use SPI_keepplan instead.
 *
 * Returns the copied plan, or NULL with SPI_result set on failure.
 */
SPIPlanPtr
SPI_saveplan(SPIPlanPtr plan)
{
	SPIPlanPtr	newplan;

	if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC)
	{
		SPI_result = SPI_ERROR_ARGUMENT;
		return NULL;
	}

	SPI_result = _SPI_begin_call(false);	/* don't change context */
	if (SPI_result < 0)
		return NULL;

	newplan = _SPI_save_plan(plan);

	SPI_result = _SPI_end_call(false);

	return newplan;
}
761
/*
 * SPI_freeplan
 *		Release a prepared plan: drop its plancache entries and delete
 *		its private memory context.  Returns 0, or SPI_ERROR_ARGUMENT
 *		for an invalid plan pointer.
 */
int
SPI_freeplan(SPIPlanPtr plan)
{
	ListCell   *lc;

	if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC)
		return SPI_ERROR_ARGUMENT;

	/* Release the plancache entries */
	foreach(lc, plan->plancache_list)
	{
		CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc);

		DropCachedPlan(plansource);
	}

	/* Now get rid of the _SPI_plan and subsidiary data in its plancxt */
	MemoryContextDelete(plan->plancxt);

	return 0;
}
783
/*
 * SPI_copytuple
 *		Copy a tuple into the upper executor's (caller's) memory context,
 *		so it survives SPI_finish.  Returns NULL with SPI_result set on
 *		bad argument or if not connected to SPI.
 */
HeapTuple
SPI_copytuple(HeapTuple tuple)
{
	MemoryContext oldcxt;
	HeapTuple	ctuple;

	if (tuple == NULL)
	{
		SPI_result = SPI_ERROR_ARGUMENT;
		return NULL;
	}

	if (_SPI_current == NULL)
	{
		SPI_result = SPI_ERROR_UNCONNECTED;
		return NULL;
	}

	/* allocate the copy in the context we were in before SPI_connect */
	oldcxt = MemoryContextSwitchTo(_SPI_current->savedcxt);

	ctuple = heap_copytuple(tuple);

	MemoryContextSwitchTo(oldcxt);

	return ctuple;
}
810
/*
 * SPI_returntuple
 *		Convert a tuple into a composite Datum's HeapTupleHeader, copied
 *		into the caller's memory context, suitable for returning from a
 *		function that returns a composite type.
 */
HeapTupleHeader
SPI_returntuple(HeapTuple tuple, TupleDesc tupdesc)
{
	MemoryContext oldcxt;
	HeapTupleHeader dtup;

	if (tuple == NULL || tupdesc == NULL)
	{
		SPI_result = SPI_ERROR_ARGUMENT;
		return NULL;
	}

	if (_SPI_current == NULL)
	{
		SPI_result = SPI_ERROR_UNCONNECTED;
		return NULL;
	}

	/* For RECORD results, make sure a typmod has been assigned */
	if (tupdesc->tdtypeid == RECORDOID &&
		tupdesc->tdtypmod < 0)
		assign_record_type_typmod(tupdesc);

	/* build the result in the context we were in before SPI_connect */
	oldcxt = MemoryContextSwitchTo(_SPI_current->savedcxt);

	dtup = DatumGetHeapTupleHeader(heap_copy_tuple_as_datum(tuple, tupdesc));

	MemoryContextSwitchTo(oldcxt);

	return dtup;
}
842
/*
 * SPI_modifytuple
 *		Build a new tuple that is a copy of "tuple" with natts columns
 *		replaced: attnum[i] (1-based) gets Values[i], null iff
 *		Nulls[i] == 'n'.  The result is allocated in the caller's
 *		(pre-SPI_connect) context.
 *
 * On a bad attnum entry, returns NULL with SPI_result set to
 * SPI_ERROR_NOATTRIBUTE; other argument problems yield
 * SPI_ERROR_ARGUMENT / SPI_ERROR_UNCONNECTED.
 */
HeapTuple
SPI_modifytuple(Relation rel, HeapTuple tuple, int natts, int *attnum,
				Datum *Values, const char *Nulls)
{
	MemoryContext oldcxt;
	HeapTuple	mtuple;
	int			numberOfAttributes;
	Datum	   *v;
	bool	   *n;
	int			i;

	if (rel == NULL || tuple == NULL || natts < 0 || attnum == NULL || Values == NULL)
	{
		SPI_result = SPI_ERROR_ARGUMENT;
		return NULL;
	}

	if (_SPI_current == NULL)
	{
		SPI_result = SPI_ERROR_UNCONNECTED;
		return NULL;
	}

	oldcxt = MemoryContextSwitchTo(_SPI_current->savedcxt);

	SPI_result = 0;

	numberOfAttributes = rel->rd_att->natts;
	v = (Datum *) palloc(numberOfAttributes * sizeof(Datum));
	n = (bool *) palloc(numberOfAttributes * sizeof(bool));

	/* fetch old values and nulls */
	heap_deform_tuple(tuple, rel->rd_att, v, n);

	/* replace values and nulls */
	for (i = 0; i < natts; i++)
	{
		if (attnum[i] <= 0 || attnum[i] > numberOfAttributes)
			break;				/* out-of-range column number */
		v[attnum[i] - 1] = Values[i];
		n[attnum[i] - 1] = (Nulls && Nulls[i] == 'n') ? true : false;
	}

	if (i == natts)				/* no errors in *attnum */
	{
		mtuple = heap_form_tuple(rel->rd_att, v, n);

		/*
		 * copy the identification info of the old tuple: t_ctid, t_self, and
		 * OID (if any)
		 */
		mtuple->t_data->t_ctid = tuple->t_data->t_ctid;
		mtuple->t_self = tuple->t_self;
		mtuple->t_tableOid = tuple->t_tableOid;
		if (rel->rd_att->tdhasoid)
			HeapTupleSetOid(mtuple, HeapTupleGetOid(tuple));
	}
	else
	{
		mtuple = NULL;
		SPI_result = SPI_ERROR_NOATTRIBUTE;
	}

	pfree(v);
	pfree(n);

	MemoryContextSwitchTo(oldcxt);

	return mtuple;
}
913
914 int
SPI_fnumber(TupleDesc tupdesc,const char * fname)915 SPI_fnumber(TupleDesc tupdesc, const char *fname)
916 {
917 int res;
918 Form_pg_attribute sysatt;
919
920 for (res = 0; res < tupdesc->natts; res++)
921 {
922 Form_pg_attribute attr = TupleDescAttr(tupdesc, res);
923
924 if (namestrcmp(&attr->attname, fname) == 0 &&
925 !attr->attisdropped)
926 return res + 1;
927 }
928
929 sysatt = SystemAttributeByName(fname, true /* "oid" will be accepted */ );
930 if (sysatt != NULL)
931 return sysatt->attnum;
932
933 /* SPI_ERROR_NOATTRIBUTE is different from all sys column numbers */
934 return SPI_ERROR_NOATTRIBUTE;
935 }
936
/*
 * SPI_fname
 *		Return a palloc'd copy of the name of column "fnumber" (1-based;
 *		negative numbers select system attributes).  Returns NULL with
 *		SPI_result = SPI_ERROR_NOATTRIBUTE for an out-of-range number.
 */
char *
SPI_fname(TupleDesc tupdesc, int fnumber)
{
	Form_pg_attribute att;

	SPI_result = 0;

	if (fnumber > tupdesc->natts || fnumber == 0 ||
		fnumber <= FirstLowInvalidHeapAttributeNumber)
	{
		SPI_result = SPI_ERROR_NOATTRIBUTE;
		return NULL;
	}

	if (fnumber > 0)
		att = TupleDescAttr(tupdesc, fnumber - 1);
	else
		att = SystemAttributeDefinition(fnumber, true);

	return pstrdup(NameStr(att->attname));
}
958
/*
 * SPI_getvalue
 *		Return the text form of column "fnumber" of "tuple", produced by
 *		the column type's output function.  Returns NULL for a NULL value
 *		or (with SPI_result set) an out-of-range column number.
 */
char *
SPI_getvalue(HeapTuple tuple, TupleDesc tupdesc, int fnumber)
{
	Datum		val;
	bool		isnull;
	Oid			typoid,
				foutoid;
	bool		typisvarlena;

	SPI_result = 0;

	if (fnumber > tupdesc->natts || fnumber == 0 ||
		fnumber <= FirstLowInvalidHeapAttributeNumber)
	{
		SPI_result = SPI_ERROR_NOATTRIBUTE;
		return NULL;
	}

	val = heap_getattr(tuple, fnumber, tupdesc, &isnull);
	if (isnull)
		return NULL;

	/* negative fnumber means a system attribute */
	if (fnumber > 0)
		typoid = TupleDescAttr(tupdesc, fnumber - 1)->atttypid;
	else
		typoid = (SystemAttributeDefinition(fnumber, true))->atttypid;

	getTypeOutputInfo(typoid, &foutoid, &typisvarlena);

	return OidOutputFunctionCall(foutoid, val);
}
990
/*
 * SPI_getbinval
 *		Return the binary (Datum) value of column "fnumber" of "tuple",
 *		setting *isnull accordingly.  Out-of-range column numbers yield
 *		a null Datum with SPI_result = SPI_ERROR_NOATTRIBUTE.
 */
Datum
SPI_getbinval(HeapTuple tuple, TupleDesc tupdesc, int fnumber, bool *isnull)
{
	SPI_result = 0;

	if (fnumber > tupdesc->natts || fnumber == 0 ||
		fnumber <= FirstLowInvalidHeapAttributeNumber)
	{
		SPI_result = SPI_ERROR_NOATTRIBUTE;
		*isnull = true;
		return (Datum) NULL;
	}

	return heap_getattr(tuple, fnumber, tupdesc, isnull);
}
1006
/*
 * SPI_gettype
 *		Return a palloc'd copy of the type name of column "fnumber".
 *		Returns NULL with SPI_result set for a bad column number or an
 *		unknown type OID.
 */
char *
SPI_gettype(TupleDesc tupdesc, int fnumber)
{
	Oid			typoid;
	HeapTuple	typeTuple;
	char	   *result;

	SPI_result = 0;

	if (fnumber > tupdesc->natts || fnumber == 0 ||
		fnumber <= FirstLowInvalidHeapAttributeNumber)
	{
		SPI_result = SPI_ERROR_NOATTRIBUTE;
		return NULL;
	}

	/* negative fnumber means a system attribute */
	if (fnumber > 0)
		typoid = TupleDescAttr(tupdesc, fnumber - 1)->atttypid;
	else
		typoid = (SystemAttributeDefinition(fnumber, true))->atttypid;

	typeTuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typoid));

	if (!HeapTupleIsValid(typeTuple))
	{
		SPI_result = SPI_ERROR_TYPUNKNOWN;
		return NULL;
	}

	result = pstrdup(NameStr(((Form_pg_type) GETSTRUCT(typeTuple))->typname));
	ReleaseSysCache(typeTuple);
	return result;
}
1040
1041 /*
1042 * Get the data type OID for a column.
1043 *
1044 * There's nothing similar for typmod and typcollation. The rare consumers
1045 * thereof should inspect the TupleDesc directly.
1046 */
1047 Oid
SPI_gettypeid(TupleDesc tupdesc,int fnumber)1048 SPI_gettypeid(TupleDesc tupdesc, int fnumber)
1049 {
1050 SPI_result = 0;
1051
1052 if (fnumber > tupdesc->natts || fnumber == 0 ||
1053 fnumber <= FirstLowInvalidHeapAttributeNumber)
1054 {
1055 SPI_result = SPI_ERROR_NOATTRIBUTE;
1056 return InvalidOid;
1057 }
1058
1059 if (fnumber > 0)
1060 return TupleDescAttr(tupdesc, fnumber - 1)->atttypid;
1061 else
1062 return (SystemAttributeDefinition(fnumber, true))->atttypid;
1063 }
1064
/*
 * SPI_getrelname
 *		Return a palloc'd copy of the relation's name.
 */
char *
SPI_getrelname(Relation rel)
{
	return pstrdup(RelationGetRelationName(rel));
}
1070
/*
 * SPI_getnspname
 *		Return a palloc'd copy of the name of the relation's namespace.
 */
char *
SPI_getnspname(Relation rel)
{
	return get_namespace_name(RelationGetNamespace(rel));
}
1076
/*
 * SPI_palloc
 *		Allocate memory in the caller's (pre-SPI_connect) context, so it
 *		survives SPI_finish.  Errors out if not connected to SPI.
 */
void *
SPI_palloc(Size size)
{
	if (_SPI_current == NULL)
		elog(ERROR, "SPI_palloc called while not connected to SPI");

	return MemoryContextAlloc(_SPI_current->savedcxt, size);
}
1085
/*
 * SPI_repalloc
 *		Resize a chunk previously obtained via SPI allocation.
 */
void *
SPI_repalloc(void *pointer, Size size)
{
	/* No longer need to worry which context chunk was in... */
	return repalloc(pointer, size);
}
1092
/*
 * SPI_pfree
 *		Free a chunk previously obtained via SPI allocation.
 */
void
SPI_pfree(void *pointer)
{
	/* No longer need to worry which context chunk was in... */
	pfree(pointer);
}
1099
/*
 * SPI_datumTransfer
 *		Copy a Datum (of the given by-value/length properties) into the
 *		caller's (pre-SPI_connect) context, so it survives SPI_finish.
 */
Datum
SPI_datumTransfer(Datum value, bool typByVal, int typLen)
{
	MemoryContext oldcxt;
	Datum		result;

	if (_SPI_current == NULL)
		elog(ERROR, "SPI_datumTransfer called while not connected to SPI");

	oldcxt = MemoryContextSwitchTo(_SPI_current->savedcxt);

	result = datumTransfer(value, typByVal, typLen);

	MemoryContextSwitchTo(oldcxt);

	return result;
}
1117
/*
 * SPI_freetuple
 *		Free a tuple previously allocated through SPI.
 */
void
SPI_freetuple(HeapTuple tuple)
{
	/* No longer need to worry which context tuple was in... */
	heap_freetuple(tuple);
}
1124
/*
 * SPI_freetuptable
 *		Release a tuple table and all memory belonging to it.
 *
 * Only tables registered in the topmost SPI context may be freed; this
 * guards against double deletion.  A NULL argument is a no-op.
 */
void
SPI_freetuptable(SPITupleTable *tuptable)
{
	bool		found = false;

	/* ignore call if NULL pointer */
	if (tuptable == NULL)
		return;

	/*
	 * Search only the topmost SPI context for a matching tuple table.
	 */
	if (_SPI_current != NULL)
	{
		slist_mutable_iter siter;

		/* find tuptable in active list, then remove it */
		slist_foreach_modify(siter, &_SPI_current->tuptables)
		{
			SPITupleTable *tt;

			tt = slist_container(SPITupleTable, next, siter.cur);
			if (tt == tuptable)
			{
				slist_delete_current(&siter);
				found = true;
				break;
			}
		}
	}

	/*
	 * Refuse the deletion if we didn't find it in the topmost SPI context.
	 * This is primarily a guard against double deletion, but might prevent
	 * other errors as well.  Since the worst consequence of not deleting a
	 * tuptable would be a transient memory leak, this is just a WARNING.
	 */
	if (!found)
	{
		elog(WARNING, "attempt to delete invalid SPITupleTable %p", tuptable);
		return;
	}

	/* for safety, reset global variables that might point at tuptable */
	if (tuptable == _SPI_current->tuptable)
		_SPI_current->tuptable = NULL;
	if (tuptable == SPI_tuptable)
		SPI_tuptable = NULL;

	/* release all memory belonging to tuptable */
	MemoryContextDelete(tuptable->tuptabcxt);
}
1177
1178
1179 /*
1180 * SPI_cursor_open()
1181 *
1182 * Open a prepared SPI plan as a portal
1183 */
1184 Portal
SPI_cursor_open(const char * name,SPIPlanPtr plan,Datum * Values,const char * Nulls,bool read_only)1185 SPI_cursor_open(const char *name, SPIPlanPtr plan,
1186 Datum *Values, const char *Nulls,
1187 bool read_only)
1188 {
1189 Portal portal;
1190 ParamListInfo paramLI;
1191
1192 /* build transient ParamListInfo in caller's context */
1193 paramLI = _SPI_convert_params(plan->nargs, plan->argtypes,
1194 Values, Nulls);
1195
1196 portal = SPI_cursor_open_internal(name, plan, paramLI, read_only);
1197
1198 /* done with the transient ParamListInfo */
1199 if (paramLI)
1200 pfree(paramLI);
1201
1202 return portal;
1203 }
1204
1205
1206 /*
1207 * SPI_cursor_open_with_args()
1208 *
1209 * Parse and plan a query and open it as a portal.
1210 */
1211 Portal
SPI_cursor_open_with_args(const char * name,const char * src,int nargs,Oid * argtypes,Datum * Values,const char * Nulls,bool read_only,int cursorOptions)1212 SPI_cursor_open_with_args(const char *name,
1213 const char *src,
1214 int nargs, Oid *argtypes,
1215 Datum *Values, const char *Nulls,
1216 bool read_only, int cursorOptions)
1217 {
1218 Portal result;
1219 _SPI_plan plan;
1220 ParamListInfo paramLI;
1221
1222 if (src == NULL || nargs < 0)
1223 elog(ERROR, "SPI_cursor_open_with_args called with invalid arguments");
1224
1225 if (nargs > 0 && (argtypes == NULL || Values == NULL))
1226 elog(ERROR, "SPI_cursor_open_with_args called with missing parameters");
1227
1228 SPI_result = _SPI_begin_call(true);
1229 if (SPI_result < 0)
1230 elog(ERROR, "SPI_cursor_open_with_args called while not connected");
1231
1232 memset(&plan, 0, sizeof(_SPI_plan));
1233 plan.magic = _SPI_PLAN_MAGIC;
1234 plan.cursor_options = cursorOptions;
1235 plan.nargs = nargs;
1236 plan.argtypes = argtypes;
1237 plan.parserSetup = NULL;
1238 plan.parserSetupArg = NULL;
1239
1240 /* build transient ParamListInfo in executor context */
1241 paramLI = _SPI_convert_params(nargs, argtypes,
1242 Values, Nulls);
1243
1244 _SPI_prepare_plan(src, &plan);
1245
1246 /* We needn't copy the plan; SPI_cursor_open_internal will do so */
1247
1248 result = SPI_cursor_open_internal(name, &plan, paramLI, read_only);
1249
1250 /* And clean up */
1251 _SPI_end_call(true);
1252
1253 return result;
1254 }
1255
1256
1257 /*
1258 * SPI_cursor_open_with_paramlist()
1259 *
1260 * Same as SPI_cursor_open except that parameters (if any) are passed
1261 * as a ParamListInfo, which supports dynamic parameter set determination
1262 */
1263 Portal
SPI_cursor_open_with_paramlist(const char * name,SPIPlanPtr plan,ParamListInfo params,bool read_only)1264 SPI_cursor_open_with_paramlist(const char *name, SPIPlanPtr plan,
1265 ParamListInfo params, bool read_only)
1266 {
1267 return SPI_cursor_open_internal(name, plan, params, read_only);
1268 }
1269
1270
1271 /*
1272 * SPI_cursor_open_internal()
1273 *
1274 * Common code for SPI_cursor_open variants
1275 */
1276 static Portal
SPI_cursor_open_internal(const char * name,SPIPlanPtr plan,ParamListInfo paramLI,bool read_only)1277 SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
1278 ParamListInfo paramLI, bool read_only)
1279 {
1280 CachedPlanSource *plansource;
1281 CachedPlan *cplan;
1282 List *stmt_list;
1283 char *query_string;
1284 Snapshot snapshot;
1285 MemoryContext oldcontext;
1286 Portal portal;
1287 ErrorContextCallback spierrcontext;
1288
1289 /*
1290 * Check that the plan is something the Portal code will special-case as
1291 * returning one tupleset.
1292 */
1293 if (!SPI_is_cursor_plan(plan))
1294 {
1295 /* try to give a good error message */
1296 if (list_length(plan->plancache_list) != 1)
1297 ereport(ERROR,
1298 (errcode(ERRCODE_INVALID_CURSOR_DEFINITION),
1299 errmsg("cannot open multi-query plan as cursor")));
1300 plansource = (CachedPlanSource *) linitial(plan->plancache_list);
1301 ereport(ERROR,
1302 (errcode(ERRCODE_INVALID_CURSOR_DEFINITION),
1303 /* translator: %s is name of a SQL command, eg INSERT */
1304 errmsg("cannot open %s query as cursor",
1305 plansource->commandTag)));
1306 }
1307
1308 Assert(list_length(plan->plancache_list) == 1);
1309 plansource = (CachedPlanSource *) linitial(plan->plancache_list);
1310
1311 /* Push the SPI stack */
1312 if (_SPI_begin_call(true) < 0)
1313 elog(ERROR, "SPI_cursor_open called while not connected");
1314
1315 /* Reset SPI result (note we deliberately don't touch lastoid) */
1316 SPI_processed = 0;
1317 SPI_tuptable = NULL;
1318 _SPI_current->processed = 0;
1319 _SPI_current->tuptable = NULL;
1320
1321 /* Create the portal */
1322 if (name == NULL || name[0] == '\0')
1323 {
1324 /* Use a random nonconflicting name */
1325 portal = CreateNewPortal();
1326 }
1327 else
1328 {
1329 /* In this path, error if portal of same name already exists */
1330 portal = CreatePortal(name, false, false);
1331 }
1332
1333 /* Copy the plan's query string into the portal */
1334 query_string = MemoryContextStrdup(portal->portalContext,
1335 plansource->query_string);
1336
1337 /*
1338 * Setup error traceback support for ereport(), in case GetCachedPlan
1339 * throws an error.
1340 */
1341 spierrcontext.callback = _SPI_error_callback;
1342 spierrcontext.arg = (void *) plansource->query_string;
1343 spierrcontext.previous = error_context_stack;
1344 error_context_stack = &spierrcontext;
1345
1346 /*
1347 * Note: for a saved plan, we mustn't have any failure occur between
1348 * GetCachedPlan and PortalDefineQuery; that would result in leaking our
1349 * plancache refcount.
1350 */
1351
1352 /* Replan if needed, and increment plan refcount for portal */
1353 cplan = GetCachedPlan(plansource, paramLI, false, _SPI_current->queryEnv);
1354 stmt_list = cplan->stmt_list;
1355
1356 if (!plan->saved)
1357 {
1358 /*
1359 * We don't want the portal to depend on an unsaved CachedPlanSource,
1360 * so must copy the plan into the portal's context. An error here
1361 * will result in leaking our refcount on the plan, but it doesn't
1362 * matter because the plan is unsaved and hence transient anyway.
1363 */
1364 oldcontext = MemoryContextSwitchTo(portal->portalContext);
1365 stmt_list = copyObject(stmt_list);
1366 MemoryContextSwitchTo(oldcontext);
1367 ReleaseCachedPlan(cplan, false);
1368 cplan = NULL; /* portal shouldn't depend on cplan */
1369 }
1370
1371 /*
1372 * Set up the portal.
1373 */
1374 PortalDefineQuery(portal,
1375 NULL, /* no statement name */
1376 query_string,
1377 plansource->commandTag,
1378 stmt_list,
1379 cplan);
1380
1381 /*
1382 * Set up options for portal. Default SCROLL type is chosen the same way
1383 * as PerformCursorOpen does it.
1384 */
1385 portal->cursorOptions = plan->cursor_options;
1386 if (!(portal->cursorOptions & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL)))
1387 {
1388 if (list_length(stmt_list) == 1 &&
1389 linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY &&
1390 linitial_node(PlannedStmt, stmt_list)->rowMarks == NIL &&
1391 ExecSupportsBackwardScan(linitial_node(PlannedStmt, stmt_list)->planTree))
1392 portal->cursorOptions |= CURSOR_OPT_SCROLL;
1393 else
1394 portal->cursorOptions |= CURSOR_OPT_NO_SCROLL;
1395 }
1396
1397 /*
1398 * Disallow SCROLL with SELECT FOR UPDATE. This is not redundant with the
1399 * check in transformDeclareCursorStmt because the cursor options might
1400 * not have come through there.
1401 */
1402 if (portal->cursorOptions & CURSOR_OPT_SCROLL)
1403 {
1404 if (list_length(stmt_list) == 1 &&
1405 linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY &&
1406 linitial_node(PlannedStmt, stmt_list)->rowMarks != NIL)
1407 ereport(ERROR,
1408 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1409 errmsg("DECLARE SCROLL CURSOR ... FOR UPDATE/SHARE is not supported"),
1410 errdetail("Scrollable cursors must be READ ONLY.")));
1411 }
1412
1413 /* Make current query environment available to portal at execution time. */
1414 portal->queryEnv = _SPI_current->queryEnv;
1415
1416 /*
1417 * If told to be read-only, or in parallel mode, verify that this query is
1418 * in fact read-only. This can't be done earlier because we need to look
1419 * at the finished, planned queries. (In particular, we don't want to do
1420 * it between GetCachedPlan and PortalDefineQuery, because throwing an
1421 * error between those steps would result in leaking our plancache
1422 * refcount.)
1423 */
1424 if (read_only || IsInParallelMode())
1425 {
1426 ListCell *lc;
1427
1428 foreach(lc, stmt_list)
1429 {
1430 PlannedStmt *pstmt = lfirst_node(PlannedStmt, lc);
1431
1432 if (!CommandIsReadOnly(pstmt))
1433 {
1434 if (read_only)
1435 ereport(ERROR,
1436 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1437 /* translator: %s is a SQL statement name */
1438 errmsg("%s is not allowed in a non-volatile function",
1439 CreateCommandTag((Node *) pstmt))));
1440 else
1441 PreventCommandIfParallelMode(CreateCommandTag((Node *) pstmt));
1442 }
1443 }
1444 }
1445
1446 /* Set up the snapshot to use. */
1447 if (read_only)
1448 snapshot = GetActiveSnapshot();
1449 else
1450 {
1451 CommandCounterIncrement();
1452 snapshot = GetTransactionSnapshot();
1453 }
1454
1455 /*
1456 * If the plan has parameters, copy them into the portal. Note that this
1457 * must be done after revalidating the plan, because in dynamic parameter
1458 * cases the set of parameters could have changed during re-parsing.
1459 */
1460 if (paramLI)
1461 {
1462 oldcontext = MemoryContextSwitchTo(portal->portalContext);
1463 paramLI = copyParamList(paramLI);
1464 MemoryContextSwitchTo(oldcontext);
1465 }
1466
1467 /*
1468 * Start portal execution.
1469 */
1470 PortalStart(portal, paramLI, 0, snapshot);
1471
1472 Assert(portal->strategy != PORTAL_MULTI_QUERY);
1473
1474 /* Pop the error context stack */
1475 error_context_stack = spierrcontext.previous;
1476
1477 /* Pop the SPI stack */
1478 _SPI_end_call(true);
1479
1480 /* Return the created portal */
1481 return portal;
1482 }
1483
1484
1485 /*
1486 * SPI_cursor_find()
1487 *
1488 * Find the portal of an existing open cursor
1489 */
1490 Portal
SPI_cursor_find(const char * name)1491 SPI_cursor_find(const char *name)
1492 {
1493 return GetPortalByName(name);
1494 }
1495
1496
1497 /*
1498 * SPI_cursor_fetch()
1499 *
1500 * Fetch rows in a cursor
1501 */
1502 void
SPI_cursor_fetch(Portal portal,bool forward,long count)1503 SPI_cursor_fetch(Portal portal, bool forward, long count)
1504 {
1505 _SPI_cursor_operation(portal,
1506 forward ? FETCH_FORWARD : FETCH_BACKWARD, count,
1507 CreateDestReceiver(DestSPI));
1508 /* we know that the DestSPI receiver doesn't need a destroy call */
1509 }
1510
1511
1512 /*
1513 * SPI_cursor_move()
1514 *
1515 * Move in a cursor
1516 */
1517 void
SPI_cursor_move(Portal portal,bool forward,long count)1518 SPI_cursor_move(Portal portal, bool forward, long count)
1519 {
1520 _SPI_cursor_operation(portal,
1521 forward ? FETCH_FORWARD : FETCH_BACKWARD, count,
1522 None_Receiver);
1523 }
1524
1525
1526 /*
1527 * SPI_scroll_cursor_fetch()
1528 *
1529 * Fetch rows in a scrollable cursor
1530 */
1531 void
SPI_scroll_cursor_fetch(Portal portal,FetchDirection direction,long count)1532 SPI_scroll_cursor_fetch(Portal portal, FetchDirection direction, long count)
1533 {
1534 _SPI_cursor_operation(portal,
1535 direction, count,
1536 CreateDestReceiver(DestSPI));
1537 /* we know that the DestSPI receiver doesn't need a destroy call */
1538 }
1539
1540
1541 /*
1542 * SPI_scroll_cursor_move()
1543 *
1544 * Move in a scrollable cursor
1545 */
1546 void
SPI_scroll_cursor_move(Portal portal,FetchDirection direction,long count)1547 SPI_scroll_cursor_move(Portal portal, FetchDirection direction, long count)
1548 {
1549 _SPI_cursor_operation(portal, direction, count, None_Receiver);
1550 }
1551
1552
1553 /*
1554 * SPI_cursor_close()
1555 *
1556 * Close a cursor
1557 */
1558 void
SPI_cursor_close(Portal portal)1559 SPI_cursor_close(Portal portal)
1560 {
1561 if (!PortalIsValid(portal))
1562 elog(ERROR, "invalid portal in SPI cursor operation");
1563
1564 PortalDrop(portal, false);
1565 }
1566
1567 /*
1568 * Returns the Oid representing the type id for argument at argIndex. First
1569 * parameter is at index zero.
1570 */
1571 Oid
SPI_getargtypeid(SPIPlanPtr plan,int argIndex)1572 SPI_getargtypeid(SPIPlanPtr plan, int argIndex)
1573 {
1574 if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC ||
1575 argIndex < 0 || argIndex >= plan->nargs)
1576 {
1577 SPI_result = SPI_ERROR_ARGUMENT;
1578 return InvalidOid;
1579 }
1580 return plan->argtypes[argIndex];
1581 }
1582
1583 /*
1584 * Returns the number of arguments for the prepared plan.
1585 */
1586 int
SPI_getargcount(SPIPlanPtr plan)1587 SPI_getargcount(SPIPlanPtr plan)
1588 {
1589 if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC)
1590 {
1591 SPI_result = SPI_ERROR_ARGUMENT;
1592 return -1;
1593 }
1594 return plan->nargs;
1595 }
1596
1597 /*
1598 * Returns true if the plan contains exactly one command
1599 * and that command returns tuples to the caller (eg, SELECT or
1600 * INSERT ... RETURNING, but not SELECT ... INTO). In essence,
1601 * the result indicates if the command can be used with SPI_cursor_open
1602 *
1603 * Parameters
1604 * plan: A plan previously prepared using SPI_prepare
1605 */
1606 bool
SPI_is_cursor_plan(SPIPlanPtr plan)1607 SPI_is_cursor_plan(SPIPlanPtr plan)
1608 {
1609 CachedPlanSource *plansource;
1610
1611 if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC)
1612 {
1613 SPI_result = SPI_ERROR_ARGUMENT;
1614 return false;
1615 }
1616
1617 if (list_length(plan->plancache_list) != 1)
1618 {
1619 SPI_result = 0;
1620 return false; /* not exactly 1 pre-rewrite command */
1621 }
1622 plansource = (CachedPlanSource *) linitial(plan->plancache_list);
1623
1624 /*
1625 * We used to force revalidation of the cached plan here, but that seems
1626 * unnecessary: invalidation could mean a change in the rowtype of the
1627 * tuples returned by a plan, but not whether it returns tuples at all.
1628 */
1629 SPI_result = 0;
1630
1631 /* Does it return tuples? */
1632 if (plansource->resultDesc)
1633 return true;
1634
1635 return false;
1636 }
1637
1638 /*
1639 * SPI_plan_is_valid --- test whether a SPI plan is currently valid
1640 * (that is, not marked as being in need of revalidation).
1641 *
1642 * See notes for CachedPlanIsValid before using this.
1643 */
1644 bool
SPI_plan_is_valid(SPIPlanPtr plan)1645 SPI_plan_is_valid(SPIPlanPtr plan)
1646 {
1647 ListCell *lc;
1648
1649 Assert(plan->magic == _SPI_PLAN_MAGIC);
1650
1651 foreach(lc, plan->plancache_list)
1652 {
1653 CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc);
1654
1655 if (!CachedPlanIsValid(plansource))
1656 return false;
1657 }
1658 return true;
1659 }
1660
1661 /*
1662 * SPI_result_code_string --- convert any SPI return code to a string
1663 *
1664 * This is often useful in error messages. Most callers will probably
1665 * only pass negative (error-case) codes, but for generality we recognize
1666 * the success codes too.
1667 */
1668 const char *
SPI_result_code_string(int code)1669 SPI_result_code_string(int code)
1670 {
1671 static char buf[64];
1672
1673 switch (code)
1674 {
1675 case SPI_ERROR_CONNECT:
1676 return "SPI_ERROR_CONNECT";
1677 case SPI_ERROR_COPY:
1678 return "SPI_ERROR_COPY";
1679 case SPI_ERROR_OPUNKNOWN:
1680 return "SPI_ERROR_OPUNKNOWN";
1681 case SPI_ERROR_UNCONNECTED:
1682 return "SPI_ERROR_UNCONNECTED";
1683 case SPI_ERROR_ARGUMENT:
1684 return "SPI_ERROR_ARGUMENT";
1685 case SPI_ERROR_PARAM:
1686 return "SPI_ERROR_PARAM";
1687 case SPI_ERROR_TRANSACTION:
1688 return "SPI_ERROR_TRANSACTION";
1689 case SPI_ERROR_NOATTRIBUTE:
1690 return "SPI_ERROR_NOATTRIBUTE";
1691 case SPI_ERROR_NOOUTFUNC:
1692 return "SPI_ERROR_NOOUTFUNC";
1693 case SPI_ERROR_TYPUNKNOWN:
1694 return "SPI_ERROR_TYPUNKNOWN";
1695 case SPI_ERROR_REL_DUPLICATE:
1696 return "SPI_ERROR_REL_DUPLICATE";
1697 case SPI_ERROR_REL_NOT_FOUND:
1698 return "SPI_ERROR_REL_NOT_FOUND";
1699 case SPI_OK_CONNECT:
1700 return "SPI_OK_CONNECT";
1701 case SPI_OK_FINISH:
1702 return "SPI_OK_FINISH";
1703 case SPI_OK_FETCH:
1704 return "SPI_OK_FETCH";
1705 case SPI_OK_UTILITY:
1706 return "SPI_OK_UTILITY";
1707 case SPI_OK_SELECT:
1708 return "SPI_OK_SELECT";
1709 case SPI_OK_SELINTO:
1710 return "SPI_OK_SELINTO";
1711 case SPI_OK_INSERT:
1712 return "SPI_OK_INSERT";
1713 case SPI_OK_DELETE:
1714 return "SPI_OK_DELETE";
1715 case SPI_OK_UPDATE:
1716 return "SPI_OK_UPDATE";
1717 case SPI_OK_CURSOR:
1718 return "SPI_OK_CURSOR";
1719 case SPI_OK_INSERT_RETURNING:
1720 return "SPI_OK_INSERT_RETURNING";
1721 case SPI_OK_DELETE_RETURNING:
1722 return "SPI_OK_DELETE_RETURNING";
1723 case SPI_OK_UPDATE_RETURNING:
1724 return "SPI_OK_UPDATE_RETURNING";
1725 case SPI_OK_REWRITTEN:
1726 return "SPI_OK_REWRITTEN";
1727 case SPI_OK_REL_REGISTER:
1728 return "SPI_OK_REL_REGISTER";
1729 case SPI_OK_REL_UNREGISTER:
1730 return "SPI_OK_REL_UNREGISTER";
1731 }
1732 /* Unrecognized code ... return something useful ... */
1733 sprintf(buf, "Unrecognized SPI code %d", code);
1734 return buf;
1735 }
1736
1737 /*
1738 * SPI_plan_get_plan_sources --- get a SPI plan's underlying list of
1739 * CachedPlanSources.
1740 *
1741 * This is exported so that PL/pgSQL can use it (this beats letting PL/pgSQL
1742 * look directly into the SPIPlan for itself). It's not documented in
1743 * spi.sgml because we'd just as soon not have too many places using this.
1744 */
1745 List *
SPI_plan_get_plan_sources(SPIPlanPtr plan)1746 SPI_plan_get_plan_sources(SPIPlanPtr plan)
1747 {
1748 Assert(plan->magic == _SPI_PLAN_MAGIC);
1749 return plan->plancache_list;
1750 }
1751
1752 /*
1753 * SPI_plan_get_cached_plan --- get a SPI plan's generic CachedPlan,
1754 * if the SPI plan contains exactly one CachedPlanSource. If not,
1755 * return NULL. Caller is responsible for doing ReleaseCachedPlan().
1756 *
1757 * This is exported so that PL/pgSQL can use it (this beats letting PL/pgSQL
1758 * look directly into the SPIPlan for itself). It's not documented in
1759 * spi.sgml because we'd just as soon not have too many places using this.
1760 */
1761 CachedPlan *
SPI_plan_get_cached_plan(SPIPlanPtr plan)1762 SPI_plan_get_cached_plan(SPIPlanPtr plan)
1763 {
1764 CachedPlanSource *plansource;
1765 CachedPlan *cplan;
1766 ErrorContextCallback spierrcontext;
1767
1768 Assert(plan->magic == _SPI_PLAN_MAGIC);
1769
1770 /* Can't support one-shot plans here */
1771 if (plan->oneshot)
1772 return NULL;
1773
1774 /* Must have exactly one CachedPlanSource */
1775 if (list_length(plan->plancache_list) != 1)
1776 return NULL;
1777 plansource = (CachedPlanSource *) linitial(plan->plancache_list);
1778
1779 /* Setup error traceback support for ereport() */
1780 spierrcontext.callback = _SPI_error_callback;
1781 spierrcontext.arg = (void *) plansource->query_string;
1782 spierrcontext.previous = error_context_stack;
1783 error_context_stack = &spierrcontext;
1784
1785 /* Get the generic plan for the query */
1786 cplan = GetCachedPlan(plansource, NULL, plan->saved,
1787 _SPI_current->queryEnv);
1788 Assert(cplan == plansource->gplan);
1789
1790 /* Pop the error context stack */
1791 error_context_stack = spierrcontext.previous;
1792
1793 return cplan;
1794 }
1795
1796
1797 /* =================== private functions =================== */
1798
1799 /*
1800 * spi_dest_startup
1801 * Initialize to receive tuples from Executor into SPITupleTable
1802 * of current SPI procedure
1803 */
1804 void
spi_dest_startup(DestReceiver * self,int operation,TupleDesc typeinfo)1805 spi_dest_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
1806 {
1807 SPITupleTable *tuptable;
1808 MemoryContext oldcxt;
1809 MemoryContext tuptabcxt;
1810
1811 if (_SPI_current == NULL)
1812 elog(ERROR, "spi_dest_startup called while not connected to SPI");
1813
1814 if (_SPI_current->tuptable != NULL)
1815 elog(ERROR, "improper call to spi_dest_startup");
1816
1817 /* We create the tuple table context as a child of procCxt */
1818
1819 oldcxt = _SPI_procmem(); /* switch to procedure memory context */
1820
1821 tuptabcxt = AllocSetContextCreate(CurrentMemoryContext,
1822 "SPI TupTable",
1823 ALLOCSET_DEFAULT_SIZES);
1824 MemoryContextSwitchTo(tuptabcxt);
1825
1826 _SPI_current->tuptable = tuptable = (SPITupleTable *)
1827 palloc0(sizeof(SPITupleTable));
1828 tuptable->tuptabcxt = tuptabcxt;
1829 tuptable->subid = GetCurrentSubTransactionId();
1830
1831 /*
1832 * The tuptable is now valid enough to be freed by AtEOSubXact_SPI, so put
1833 * it onto the SPI context's tuptables list. This will ensure it's not
1834 * leaked even in the unlikely event the following few lines fail.
1835 */
1836 slist_push_head(&_SPI_current->tuptables, &tuptable->next);
1837
1838 /* set up initial allocations */
1839 tuptable->alloced = tuptable->free = 128;
1840 tuptable->vals = (HeapTuple *) palloc(tuptable->alloced * sizeof(HeapTuple));
1841 tuptable->tupdesc = CreateTupleDescCopy(typeinfo);
1842
1843 MemoryContextSwitchTo(oldcxt);
1844 }
1845
1846 /*
1847 * spi_printtup
1848 * store tuple retrieved by Executor into SPITupleTable
1849 * of current SPI procedure
1850 */
1851 bool
spi_printtup(TupleTableSlot * slot,DestReceiver * self)1852 spi_printtup(TupleTableSlot *slot, DestReceiver *self)
1853 {
1854 SPITupleTable *tuptable;
1855 MemoryContext oldcxt;
1856
1857 if (_SPI_current == NULL)
1858 elog(ERROR, "spi_printtup called while not connected to SPI");
1859
1860 tuptable = _SPI_current->tuptable;
1861 if (tuptable == NULL)
1862 elog(ERROR, "improper call to spi_printtup");
1863
1864 oldcxt = MemoryContextSwitchTo(tuptable->tuptabcxt);
1865
1866 if (tuptable->free == 0)
1867 {
1868 /* Double the size of the pointer array */
1869 tuptable->free = tuptable->alloced;
1870 tuptable->alloced += tuptable->free;
1871 tuptable->vals = (HeapTuple *) repalloc_huge(tuptable->vals,
1872 tuptable->alloced * sizeof(HeapTuple));
1873 }
1874
1875 tuptable->vals[tuptable->alloced - tuptable->free] =
1876 ExecCopySlotTuple(slot);
1877 (tuptable->free)--;
1878
1879 MemoryContextSwitchTo(oldcxt);
1880
1881 return true;
1882 }
1883
1884 /*
1885 * Static functions
1886 */
1887
1888 /*
1889 * Parse and analyze a querystring.
1890 *
1891 * At entry, plan->argtypes and plan->nargs (or alternatively plan->parserSetup
1892 * and plan->parserSetupArg) must be valid, as must plan->cursor_options.
1893 *
1894 * Results are stored into *plan (specifically, plan->plancache_list).
1895 * Note that the result data is all in CurrentMemoryContext or child contexts
1896 * thereof; in practice this means it is in the SPI executor context, and
1897 * what we are creating is a "temporary" SPIPlan. Cruft generated during
1898 * parsing is also left in CurrentMemoryContext.
1899 */
1900 static void
_SPI_prepare_plan(const char * src,SPIPlanPtr plan)1901 _SPI_prepare_plan(const char *src, SPIPlanPtr plan)
1902 {
1903 List *raw_parsetree_list;
1904 List *plancache_list;
1905 ListCell *list_item;
1906 ErrorContextCallback spierrcontext;
1907
1908 /*
1909 * Setup error traceback support for ereport()
1910 */
1911 spierrcontext.callback = _SPI_error_callback;
1912 spierrcontext.arg = (void *) src;
1913 spierrcontext.previous = error_context_stack;
1914 error_context_stack = &spierrcontext;
1915
1916 /*
1917 * Parse the request string into a list of raw parse trees.
1918 */
1919 raw_parsetree_list = pg_parse_query(src);
1920
1921 /*
1922 * Do parse analysis and rule rewrite for each raw parsetree, storing the
1923 * results into unsaved plancache entries.
1924 */
1925 plancache_list = NIL;
1926
1927 foreach(list_item, raw_parsetree_list)
1928 {
1929 RawStmt *parsetree = lfirst_node(RawStmt, list_item);
1930 List *stmt_list;
1931 CachedPlanSource *plansource;
1932
1933 /*
1934 * Create the CachedPlanSource before we do parse analysis, since it
1935 * needs to see the unmodified raw parse tree.
1936 */
1937 plansource = CreateCachedPlan(parsetree,
1938 src,
1939 CreateCommandTag(parsetree->stmt));
1940
1941 /*
1942 * Parameter datatypes are driven by parserSetup hook if provided,
1943 * otherwise we use the fixed parameter list.
1944 */
1945 if (plan->parserSetup != NULL)
1946 {
1947 Assert(plan->nargs == 0);
1948 stmt_list = pg_analyze_and_rewrite_params(parsetree,
1949 src,
1950 plan->parserSetup,
1951 plan->parserSetupArg,
1952 _SPI_current->queryEnv);
1953 }
1954 else
1955 {
1956 stmt_list = pg_analyze_and_rewrite(parsetree,
1957 src,
1958 plan->argtypes,
1959 plan->nargs,
1960 _SPI_current->queryEnv);
1961 }
1962
1963 /* Finish filling in the CachedPlanSource */
1964 CompleteCachedPlan(plansource,
1965 stmt_list,
1966 NULL,
1967 plan->argtypes,
1968 plan->nargs,
1969 plan->parserSetup,
1970 plan->parserSetupArg,
1971 plan->cursor_options,
1972 false); /* not fixed result */
1973
1974 plancache_list = lappend(plancache_list, plansource);
1975 }
1976
1977 plan->plancache_list = plancache_list;
1978 plan->oneshot = false;
1979
1980 /*
1981 * Pop the error context stack
1982 */
1983 error_context_stack = spierrcontext.previous;
1984 }
1985
1986 /*
1987 * Parse, but don't analyze, a querystring.
1988 *
1989 * This is a stripped-down version of _SPI_prepare_plan that only does the
1990 * initial raw parsing. It creates "one shot" CachedPlanSources
1991 * that still require parse analysis before execution is possible.
1992 *
1993 * The advantage of using the "one shot" form of CachedPlanSource is that
1994 * we eliminate data copying and invalidation overhead. Postponing parse
1995 * analysis also prevents issues if some of the raw parsetrees are DDL
1996 * commands that affect validity of later parsetrees. Both of these
1997 * attributes are good things for SPI_execute() and similar cases.
1998 *
1999 * Results are stored into *plan (specifically, plan->plancache_list).
2000 * Note that the result data is all in CurrentMemoryContext or child contexts
2001 * thereof; in practice this means it is in the SPI executor context, and
2002 * what we are creating is a "temporary" SPIPlan. Cruft generated during
2003 * parsing is also left in CurrentMemoryContext.
2004 */
2005 static void
_SPI_prepare_oneshot_plan(const char * src,SPIPlanPtr plan)2006 _SPI_prepare_oneshot_plan(const char *src, SPIPlanPtr plan)
2007 {
2008 List *raw_parsetree_list;
2009 List *plancache_list;
2010 ListCell *list_item;
2011 ErrorContextCallback spierrcontext;
2012
2013 /*
2014 * Setup error traceback support for ereport()
2015 */
2016 spierrcontext.callback = _SPI_error_callback;
2017 spierrcontext.arg = (void *) src;
2018 spierrcontext.previous = error_context_stack;
2019 error_context_stack = &spierrcontext;
2020
2021 /*
2022 * Parse the request string into a list of raw parse trees.
2023 */
2024 raw_parsetree_list = pg_parse_query(src);
2025
2026 /*
2027 * Construct plancache entries, but don't do parse analysis yet.
2028 */
2029 plancache_list = NIL;
2030
2031 foreach(list_item, raw_parsetree_list)
2032 {
2033 RawStmt *parsetree = lfirst_node(RawStmt, list_item);
2034 CachedPlanSource *plansource;
2035
2036 plansource = CreateOneShotCachedPlan(parsetree,
2037 src,
2038 CreateCommandTag(parsetree->stmt));
2039
2040 plancache_list = lappend(plancache_list, plansource);
2041 }
2042
2043 plan->plancache_list = plancache_list;
2044 plan->oneshot = true;
2045
2046 /*
2047 * Pop the error context stack
2048 */
2049 error_context_stack = spierrcontext.previous;
2050 }
2051
2052 /*
2053 * Execute the given plan with the given parameter values
2054 *
2055 * snapshot: query snapshot to use, or InvalidSnapshot for the normal
2056 * behavior of taking a new snapshot for each query.
2057 * crosscheck_snapshot: for RI use, all others pass InvalidSnapshot
2058 * read_only: true for read-only execution (no CommandCounterIncrement)
2059 * fire_triggers: true to fire AFTER triggers at end of query (normal case);
2060 * false means any AFTER triggers are postponed to end of outer query
2061 * tcount: execution tuple-count limit, or 0 for none
2062 */
2063 static int
_SPI_execute_plan(SPIPlanPtr plan,ParamListInfo paramLI,Snapshot snapshot,Snapshot crosscheck_snapshot,bool read_only,bool fire_triggers,uint64 tcount)2064 _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
2065 Snapshot snapshot, Snapshot crosscheck_snapshot,
2066 bool read_only, bool fire_triggers, uint64 tcount)
2067 {
2068 int my_res = 0;
2069 uint64 my_processed = 0;
2070 Oid my_lastoid = InvalidOid;
2071 SPITupleTable *my_tuptable = NULL;
2072 int res = 0;
2073 bool allow_nonatomic = plan->no_snapshots; /* legacy API name */
2074 bool pushed_active_snap = false;
2075 ErrorContextCallback spierrcontext;
2076 CachedPlan *cplan = NULL;
2077 ListCell *lc1;
2078
2079 /*
2080 * Setup error traceback support for ereport()
2081 */
2082 spierrcontext.callback = _SPI_error_callback;
2083 spierrcontext.arg = NULL; /* we'll fill this below */
2084 spierrcontext.previous = error_context_stack;
2085 error_context_stack = &spierrcontext;
2086
2087 /*
2088 * We support four distinct snapshot management behaviors:
2089 *
2090 * snapshot != InvalidSnapshot, read_only = true: use exactly the given
2091 * snapshot.
2092 *
2093 * snapshot != InvalidSnapshot, read_only = false: use the given snapshot,
2094 * modified by advancing its command ID before each querytree.
2095 *
2096 * snapshot == InvalidSnapshot, read_only = true: use the entry-time
2097 * ActiveSnapshot, if any (if there isn't one, we run with no snapshot).
2098 *
2099 * snapshot == InvalidSnapshot, read_only = false: take a full new
2100 * snapshot for each user command, and advance its command ID before each
2101 * querytree within the command.
2102 *
2103 * In the first two cases, we can just push the snap onto the stack once
2104 * for the whole plan list.
2105 *
2106 * Note that snapshot != InvalidSnapshot implies an atomic execution
2107 * context.
2108 */
2109 if (snapshot != InvalidSnapshot)
2110 {
2111 Assert(!allow_nonatomic);
2112 if (read_only)
2113 {
2114 PushActiveSnapshot(snapshot);
2115 pushed_active_snap = true;
2116 }
2117 else
2118 {
2119 /* Make sure we have a private copy of the snapshot to modify */
2120 PushCopiedSnapshot(snapshot);
2121 pushed_active_snap = true;
2122 }
2123 }
2124
2125 foreach(lc1, plan->plancache_list)
2126 {
2127 CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc1);
2128 List *stmt_list;
2129 ListCell *lc2;
2130
2131 spierrcontext.arg = (void *) plansource->query_string;
2132
2133 /*
2134 * If this is a one-shot plan, we still need to do parse analysis.
2135 */
2136 if (plan->oneshot)
2137 {
2138 RawStmt *parsetree = plansource->raw_parse_tree;
2139 const char *src = plansource->query_string;
2140 List *stmt_list;
2141
2142 /*
2143 * Parameter datatypes are driven by parserSetup hook if provided,
2144 * otherwise we use the fixed parameter list.
2145 */
2146 if (parsetree == NULL)
2147 stmt_list = NIL;
2148 else if (plan->parserSetup != NULL)
2149 {
2150 Assert(plan->nargs == 0);
2151 stmt_list = pg_analyze_and_rewrite_params(parsetree,
2152 src,
2153 plan->parserSetup,
2154 plan->parserSetupArg,
2155 _SPI_current->queryEnv);
2156 }
2157 else
2158 {
2159 stmt_list = pg_analyze_and_rewrite(parsetree,
2160 src,
2161 plan->argtypes,
2162 plan->nargs,
2163 _SPI_current->queryEnv);
2164 }
2165
2166 /* Finish filling in the CachedPlanSource */
2167 CompleteCachedPlan(plansource,
2168 stmt_list,
2169 NULL,
2170 plan->argtypes,
2171 plan->nargs,
2172 plan->parserSetup,
2173 plan->parserSetupArg,
2174 plan->cursor_options,
2175 false); /* not fixed result */
2176 }
2177
2178 /*
2179 * Replan if needed, and increment plan refcount. If it's a saved
2180 * plan, the refcount must be backed by the CurrentResourceOwner.
2181 */
2182 cplan = GetCachedPlan(plansource, paramLI, plan->saved, _SPI_current->queryEnv);
2183 stmt_list = cplan->stmt_list;
2184
2185 /*
2186 * If we weren't given a specific snapshot to use, and the statement
2187 * list requires a snapshot, set that up.
2188 */
2189 if (snapshot == InvalidSnapshot &&
2190 (list_length(stmt_list) > 1 ||
2191 (list_length(stmt_list) == 1 &&
2192 PlannedStmtRequiresSnapshot(linitial_node(PlannedStmt,
2193 stmt_list)))))
2194 {
2195 /*
2196 * First, ensure there's a Portal-level snapshot. This back-fills
2197 * the snapshot stack in case the previous operation was a COMMIT
2198 * or ROLLBACK inside a procedure or DO block. (We can't put back
2199 * the Portal snapshot any sooner, or we'd break cases like doing
2200 * SET or LOCK just after COMMIT.) It's enough to check once per
2201 * statement list, since COMMIT/ROLLBACK/CALL/DO can't appear
2202 * within a multi-statement list.
2203 */
2204 EnsurePortalSnapshotExists();
2205
2206 /*
2207 * In the default non-read-only case, get a new per-statement-list
2208 * snapshot, replacing any that we pushed in a previous cycle.
2209 * Skip it when doing non-atomic execution, though (we rely
2210 * entirely on the Portal snapshot in that case).
2211 */
2212 if (!read_only && !allow_nonatomic)
2213 {
2214 if (pushed_active_snap)
2215 PopActiveSnapshot();
2216 PushActiveSnapshot(GetTransactionSnapshot());
2217 pushed_active_snap = true;
2218 }
2219 }
2220
2221 foreach(lc2, stmt_list)
2222 {
2223 PlannedStmt *stmt = lfirst_node(PlannedStmt, lc2);
2224 bool canSetTag = stmt->canSetTag;
2225 DestReceiver *dest;
2226
2227 _SPI_current->processed = 0;
2228 _SPI_current->lastoid = InvalidOid;
2229 _SPI_current->tuptable = NULL;
2230
2231 /* Check for unsupported cases. */
2232 if (stmt->utilityStmt)
2233 {
2234 if (IsA(stmt->utilityStmt, CopyStmt))
2235 {
2236 CopyStmt *cstmt = (CopyStmt *) stmt->utilityStmt;
2237
2238 if (cstmt->filename == NULL)
2239 {
2240 my_res = SPI_ERROR_COPY;
2241 goto fail;
2242 }
2243 }
2244 else if (IsA(stmt->utilityStmt, TransactionStmt))
2245 {
2246 my_res = SPI_ERROR_TRANSACTION;
2247 goto fail;
2248 }
2249 }
2250
2251 if (read_only && !CommandIsReadOnly(stmt))
2252 ereport(ERROR,
2253 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2254 /* translator: %s is a SQL statement name */
2255 errmsg("%s is not allowed in a non-volatile function",
2256 CreateCommandTag((Node *) stmt))));
2257
2258 if (IsInParallelMode() && !CommandIsReadOnly(stmt))
2259 PreventCommandIfParallelMode(CreateCommandTag((Node *) stmt));
2260
2261 /*
2262 * If not read-only mode, advance the command counter before each
2263 * command and update the snapshot. (But skip it if the snapshot
2264 * isn't under our control.)
2265 */
2266 if (!read_only && pushed_active_snap)
2267 {
2268 CommandCounterIncrement();
2269 UpdateActiveSnapshotCommandId();
2270 }
2271
2272 dest = CreateDestReceiver(canSetTag ? DestSPI : DestNone);
2273
2274 if (stmt->utilityStmt == NULL)
2275 {
2276 QueryDesc *qdesc;
2277 Snapshot snap;
2278
2279 if (ActiveSnapshotSet())
2280 snap = GetActiveSnapshot();
2281 else
2282 snap = InvalidSnapshot;
2283
2284 qdesc = CreateQueryDesc(stmt,
2285 plansource->query_string,
2286 snap, crosscheck_snapshot,
2287 dest,
2288 paramLI, _SPI_current->queryEnv,
2289 0);
2290 res = _SPI_pquery(qdesc, fire_triggers,
2291 canSetTag ? tcount : 0);
2292 FreeQueryDesc(qdesc);
2293 }
2294 else
2295 {
2296 char completionTag[COMPLETION_TAG_BUFSIZE];
2297 ProcessUtilityContext context;
2298
2299 /*
2300 * If the SPI context is atomic, or we were not told to allow
2301 * nonatomic operations, tell ProcessUtility this is an atomic
2302 * execution context.
2303 */
2304 if (_SPI_current->atomic || !allow_nonatomic)
2305 context = PROCESS_UTILITY_QUERY;
2306 else
2307 context = PROCESS_UTILITY_QUERY_NONATOMIC;
2308
2309 ProcessUtility(stmt,
2310 plansource->query_string,
2311 context,
2312 paramLI,
2313 _SPI_current->queryEnv,
2314 dest,
2315 completionTag);
2316
2317 /* Update "processed" if stmt returned tuples */
2318 if (_SPI_current->tuptable)
2319 _SPI_current->processed = _SPI_current->tuptable->alloced -
2320 _SPI_current->tuptable->free;
2321
2322 res = SPI_OK_UTILITY;
2323
2324 /*
2325 * Some utility statements return a row count, even though the
2326 * tuples are not returned to the caller.
2327 */
2328 if (IsA(stmt->utilityStmt, CreateTableAsStmt))
2329 {
2330 CreateTableAsStmt *ctastmt = (CreateTableAsStmt *) stmt->utilityStmt;
2331
2332 if (strncmp(completionTag, "SELECT ", 7) == 0)
2333 _SPI_current->processed =
2334 pg_strtouint64(completionTag + 7, NULL, 10);
2335 else
2336 {
2337 /*
2338 * Must be an IF NOT EXISTS that did nothing, or a
2339 * CREATE ... WITH NO DATA.
2340 */
2341 Assert(ctastmt->if_not_exists ||
2342 ctastmt->into->skipData);
2343 _SPI_current->processed = 0;
2344 }
2345
2346 /*
2347 * For historical reasons, if CREATE TABLE AS was spelled
2348 * as SELECT INTO, return a special return code.
2349 */
2350 if (ctastmt->is_select_into)
2351 res = SPI_OK_SELINTO;
2352 }
2353 else if (IsA(stmt->utilityStmt, CopyStmt))
2354 {
2355 Assert(strncmp(completionTag, "COPY ", 5) == 0);
2356 _SPI_current->processed = pg_strtouint64(completionTag + 5,
2357 NULL, 10);
2358 }
2359 }
2360
2361 /*
2362 * The last canSetTag query sets the status values returned to the
2363 * caller. Be careful to free any tuptables not returned, to
2364 * avoid intratransaction memory leak.
2365 */
2366 if (canSetTag)
2367 {
2368 my_processed = _SPI_current->processed;
2369 my_lastoid = _SPI_current->lastoid;
2370 SPI_freetuptable(my_tuptable);
2371 my_tuptable = _SPI_current->tuptable;
2372 my_res = res;
2373 }
2374 else
2375 {
2376 SPI_freetuptable(_SPI_current->tuptable);
2377 _SPI_current->tuptable = NULL;
2378 }
2379 /* we know that the receiver doesn't need a destroy call */
2380 if (res < 0)
2381 {
2382 my_res = res;
2383 goto fail;
2384 }
2385 }
2386
2387 /* Done with this plan, so release refcount */
2388 ReleaseCachedPlan(cplan, plan->saved);
2389 cplan = NULL;
2390
2391 /*
2392 * If not read-only mode, advance the command counter after the last
2393 * command. This ensures that its effects are visible, in case it was
2394 * DDL that would affect the next CachedPlanSource.
2395 */
2396 if (!read_only)
2397 CommandCounterIncrement();
2398 }
2399
2400 fail:
2401
2402 /* Pop the snapshot off the stack if we pushed one */
2403 if (pushed_active_snap)
2404 PopActiveSnapshot();
2405
2406 /* We no longer need the cached plan refcount, if any */
2407 if (cplan)
2408 ReleaseCachedPlan(cplan, plan->saved);
2409
2410 /*
2411 * Pop the error context stack
2412 */
2413 error_context_stack = spierrcontext.previous;
2414
2415 /* Save results for caller */
2416 SPI_processed = my_processed;
2417 SPI_lastoid = my_lastoid;
2418 SPI_tuptable = my_tuptable;
2419
2420 /* tuptable now is caller's responsibility, not SPI's */
2421 _SPI_current->tuptable = NULL;
2422
2423 /*
2424 * If none of the queries had canSetTag, return SPI_OK_REWRITTEN. Prior to
2425 * 8.4, we used return the last query's result code, but not its auxiliary
2426 * results, but that's confusing.
2427 */
2428 if (my_res == 0)
2429 my_res = SPI_OK_REWRITTEN;
2430
2431 return my_res;
2432 }
2433
2434 /*
2435 * Convert arrays of query parameters to form wanted by planner and executor
2436 */
2437 static ParamListInfo
_SPI_convert_params(int nargs,Oid * argtypes,Datum * Values,const char * Nulls)2438 _SPI_convert_params(int nargs, Oid *argtypes,
2439 Datum *Values, const char *Nulls)
2440 {
2441 ParamListInfo paramLI;
2442
2443 if (nargs > 0)
2444 {
2445 int i;
2446
2447 paramLI = (ParamListInfo) palloc(offsetof(ParamListInfoData, params) +
2448 nargs * sizeof(ParamExternData));
2449 /* we have static list of params, so no hooks needed */
2450 paramLI->paramFetch = NULL;
2451 paramLI->paramFetchArg = NULL;
2452 paramLI->paramCompile = NULL;
2453 paramLI->paramCompileArg = NULL;
2454 paramLI->parserSetup = NULL;
2455 paramLI->parserSetupArg = NULL;
2456 paramLI->numParams = nargs;
2457
2458 for (i = 0; i < nargs; i++)
2459 {
2460 ParamExternData *prm = ¶mLI->params[i];
2461
2462 prm->value = Values[i];
2463 prm->isnull = (Nulls && Nulls[i] == 'n');
2464 prm->pflags = PARAM_FLAG_CONST;
2465 prm->ptype = argtypes[i];
2466 }
2467 }
2468 else
2469 paramLI = NULL;
2470 return paramLI;
2471 }
2472
/*
 * _SPI_pquery
 *
 * Run one planned (optimizable) statement through the executor on behalf
 * of SPI.
 *
 * queryDesc: the query to execute; caller retains ownership and is
 *		responsible for FreeQueryDesc afterwards.
 * fire_triggers: if false, pass EXEC_FLAG_SKIP_TRIGGERS so AFTER triggers
 *		are not fired.
 * tcount: max number of tuples to process, or 0 for no limit.
 *
 * Returns an SPI_OK_* code chosen from the command type, or
 * SPI_ERROR_OPUNKNOWN for an unrecognized operation.  As a side effect,
 * copies the executor's row counters into _SPI_current.
 */
static int
_SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount)
{
	int			operation = queryDesc->operation;
	int			eflags;
	int			res;

	/* Decide the result code to report, based on the command type */
	switch (operation)
	{
		case CMD_SELECT:
			if (queryDesc->dest->mydest != DestSPI)
			{
				/* Don't return SPI_OK_SELECT if we're discarding result */
				res = SPI_OK_UTILITY;
			}
			else
				res = SPI_OK_SELECT;
			break;
		case CMD_INSERT:
			if (queryDesc->plannedstmt->hasReturning)
				res = SPI_OK_INSERT_RETURNING;
			else
				res = SPI_OK_INSERT;
			break;
		case CMD_DELETE:
			if (queryDesc->plannedstmt->hasReturning)
				res = SPI_OK_DELETE_RETURNING;
			else
				res = SPI_OK_DELETE;
			break;
		case CMD_UPDATE:
			if (queryDesc->plannedstmt->hasReturning)
				res = SPI_OK_UPDATE_RETURNING;
			else
				res = SPI_OK_UPDATE;
			break;
		default:
			return SPI_ERROR_OPUNKNOWN;
	}

#ifdef SPI_EXECUTOR_STATS
	if (ShowExecutorStats)
		ResetUsage();
#endif

	/* Select execution options */
	if (fire_triggers)
		eflags = 0;				/* default run-to-completion flags */
	else
		eflags = EXEC_FLAG_SKIP_TRIGGERS;

	ExecutorStart(queryDesc, eflags);

	ExecutorRun(queryDesc, ForwardScanDirection, tcount, true);

	/* Export the executor's row counters to the current SPI stack entry */
	_SPI_current->processed = queryDesc->estate->es_processed;
	_SPI_current->lastoid = queryDesc->estate->es_lastoid;

	/*
	 * If tuples were routed into a SPI tuptable, cross-check that the count
	 * stored there agrees with the processed-rows counter.
	 */
	if ((res == SPI_OK_SELECT || queryDesc->plannedstmt->hasReturning) &&
		queryDesc->dest->mydest == DestSPI)
	{
		if (_SPI_checktuples())
			elog(ERROR, "consistency check on SPI tuple count failed");
	}

	ExecutorFinish(queryDesc);
	ExecutorEnd(queryDesc);
	/* FreeQueryDesc is done by the caller */

#ifdef SPI_EXECUTOR_STATS
	if (ShowExecutorStats)
		ShowUsage("SPI EXECUTOR STATS");
#endif

	return res;
}
2549
2550 /*
2551 * _SPI_error_callback
2552 *
2553 * Add context information when a query invoked via SPI fails
2554 */
2555 static void
_SPI_error_callback(void * arg)2556 _SPI_error_callback(void *arg)
2557 {
2558 const char *query = (const char *) arg;
2559 int syntaxerrposition;
2560
2561 if (query == NULL) /* in case arg wasn't set yet */
2562 return;
2563
2564 /*
2565 * If there is a syntax error position, convert to internal syntax error;
2566 * otherwise treat the query as an item of context stack
2567 */
2568 syntaxerrposition = geterrposition();
2569 if (syntaxerrposition > 0)
2570 {
2571 errposition(0);
2572 internalerrposition(syntaxerrposition);
2573 internalerrquery(query);
2574 }
2575 else
2576 errcontext("SQL statement \"%s\"", query);
2577 }
2578
2579 /*
2580 * _SPI_cursor_operation()
2581 *
2582 * Do a FETCH or MOVE in a cursor
2583 */
static void
_SPI_cursor_operation(Portal portal, FetchDirection direction, long count,
					  DestReceiver *dest)
{
	uint64		nfetched;

	/* Check that the portal is valid */
	if (!PortalIsValid(portal))
		elog(ERROR, "invalid portal in SPI cursor operation");

	/* Push the SPI stack */
	if (_SPI_begin_call(true) < 0)
		elog(ERROR, "SPI cursor operation called while not connected");

	/* Reset the SPI result (note we deliberately don't touch lastoid) */
	SPI_processed = 0;
	SPI_tuptable = NULL;
	_SPI_current->processed = 0;
	_SPI_current->tuptable = NULL;

	/* Run the cursor */
	nfetched = PortalRunFetch(portal,
							  direction,
							  count,
							  dest);

	/*
	 * Think not to combine this store with the preceding function call. If
	 * the portal contains calls to functions that use SPI, then SPI_stack is
	 * likely to move around while the portal runs.  When control returns,
	 * _SPI_current will point to the correct stack entry... but the pointer
	 * may be different than it was beforehand. So we must be sure to re-fetch
	 * the pointer after the function call completes.
	 */
	_SPI_current->processed = nfetched;

	/* If tuples were collected into a tuptable, verify the count matches */
	if (dest->mydest == DestSPI && _SPI_checktuples())
		elog(ERROR, "consistency check on SPI tuple count failed");

	/* Put the result into place for access by caller */
	SPI_processed = _SPI_current->processed;
	SPI_tuptable = _SPI_current->tuptable;

	/* tuptable now is caller's responsibility, not SPI's */
	_SPI_current->tuptable = NULL;

	/* Pop the SPI stack */
	_SPI_end_call(true);
}
2633
2634
2635 static MemoryContext
_SPI_execmem(void)2636 _SPI_execmem(void)
2637 {
2638 return MemoryContextSwitchTo(_SPI_current->execCxt);
2639 }
2640
2641 static MemoryContext
_SPI_procmem(void)2642 _SPI_procmem(void)
2643 {
2644 return MemoryContextSwitchTo(_SPI_current->procCxt);
2645 }
2646
2647 /*
2648 * _SPI_begin_call: begin a SPI operation within a connected procedure
2649 *
2650 * use_exec is true if we intend to make use of the procedure's execCxt
2651 * during this SPI operation. We'll switch into that context, and arrange
2652 * for it to be cleaned up at _SPI_end_call or if an error occurs.
2653 */
2654 static int
_SPI_begin_call(bool use_exec)2655 _SPI_begin_call(bool use_exec)
2656 {
2657 if (_SPI_current == NULL)
2658 return SPI_ERROR_UNCONNECTED;
2659
2660 if (use_exec)
2661 {
2662 /* remember when the Executor operation started */
2663 _SPI_current->execSubid = GetCurrentSubTransactionId();
2664 /* switch to the Executor memory context */
2665 _SPI_execmem();
2666 }
2667
2668 return 0;
2669 }
2670
2671 /*
2672 * _SPI_end_call: end a SPI operation within a connected procedure
2673 *
2674 * use_exec must be the same as in the previous _SPI_begin_call
2675 *
2676 * Note: this currently has no failure return cases, so callers don't check
2677 */
2678 static int
_SPI_end_call(bool use_exec)2679 _SPI_end_call(bool use_exec)
2680 {
2681 if (use_exec)
2682 {
2683 /* switch to the procedure memory context */
2684 _SPI_procmem();
2685 /* mark Executor context no longer in use */
2686 _SPI_current->execSubid = InvalidSubTransactionId;
2687 /* and free Executor memory */
2688 MemoryContextResetAndDeleteChildren(_SPI_current->execCxt);
2689 }
2690
2691 return 0;
2692 }
2693
2694 static bool
_SPI_checktuples(void)2695 _SPI_checktuples(void)
2696 {
2697 uint64 processed = _SPI_current->processed;
2698 SPITupleTable *tuptable = _SPI_current->tuptable;
2699 bool failed = false;
2700
2701 if (tuptable == NULL) /* spi_dest_startup was not called */
2702 failed = true;
2703 else if (processed != (tuptable->alloced - tuptable->free))
2704 failed = true;
2705
2706 return failed;
2707 }
2708
2709 /*
2710 * Convert a "temporary" SPIPlan into an "unsaved" plan.
2711 *
2712 * The passed _SPI_plan struct is on the stack, and all its subsidiary data
2713 * is in or under the current SPI executor context. Copy the plan into the
2714 * SPI procedure context so it will survive _SPI_end_call(). To minimize
2715 * data copying, this destructively modifies the input plan, by taking the
2716 * plancache entries away from it and reparenting them to the new SPIPlan.
2717 */
static SPIPlanPtr
_SPI_make_plan_non_temp(SPIPlanPtr plan)
{
	SPIPlanPtr	newplan;
	MemoryContext parentcxt = _SPI_current->procCxt;
	MemoryContext plancxt;
	MemoryContext oldcxt;
	ListCell   *lc;

	/* Assert the input is a temporary SPIPlan */
	Assert(plan->magic == _SPI_PLAN_MAGIC);
	Assert(plan->plancxt == NULL);
	/* One-shot plans can't be saved */
	Assert(!plan->oneshot);

	/*
	 * Create a memory context for the plan, underneath the procedure context.
	 * We don't expect the plan to be very large.
	 */
	plancxt = AllocSetContextCreate(parentcxt,
									"SPI Plan",
									ALLOCSET_SMALL_SIZES);
	oldcxt = MemoryContextSwitchTo(plancxt);

	/* Copy the SPI_plan struct and subsidiary data into the new context */
	/* (palloc0 leaves plancache_list as NIL and saved/oneshot as false) */
	newplan = (SPIPlanPtr) palloc0(sizeof(_SPI_plan));
	newplan->magic = _SPI_PLAN_MAGIC;
	newplan->plancxt = plancxt;
	newplan->cursor_options = plan->cursor_options;
	newplan->nargs = plan->nargs;
	if (plan->nargs > 0)
	{
		newplan->argtypes = (Oid *) palloc(plan->nargs * sizeof(Oid));
		memcpy(newplan->argtypes, plan->argtypes, plan->nargs * sizeof(Oid));
	}
	else
		newplan->argtypes = NULL;
	newplan->parserSetup = plan->parserSetup;
	newplan->parserSetupArg = plan->parserSetupArg;

	/*
	 * Reparent all the CachedPlanSources into the procedure context.  In
	 * theory this could fail partway through due to the pallocs, but we don't
	 * care too much since both the procedure context and the executor context
	 * would go away on error.
	 */
	foreach(lc, plan->plancache_list)
	{
		CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc);

		CachedPlanSetParentContext(plansource, parentcxt);

		/* Build new list, with list cells in plancxt */
		newplan->plancache_list = lappend(newplan->plancache_list, plansource);
	}

	MemoryContextSwitchTo(oldcxt);

	/* For safety, unlink the CachedPlanSources from the temporary plan */
	plan->plancache_list = NIL;

	return newplan;
}
2781
2782 /*
2783 * Make a "saved" copy of the given plan.
2784 */
static SPIPlanPtr
_SPI_save_plan(SPIPlanPtr plan)
{
	SPIPlanPtr	newplan;
	MemoryContext plancxt;
	MemoryContext oldcxt;
	ListCell   *lc;

	/* One-shot plans can't be saved */
	Assert(!plan->oneshot);

	/*
	 * Create a memory context for the plan.  We don't expect the plan to be
	 * very large, so use smaller-than-default alloc parameters.  It's a
	 * transient context until we finish copying everything.
	 */
	plancxt = AllocSetContextCreate(CurrentMemoryContext,
									"SPI Plan",
									ALLOCSET_SMALL_SIZES);
	oldcxt = MemoryContextSwitchTo(plancxt);

	/* Copy the SPI plan into its own context */
	/* (palloc0 leaves plancache_list as NIL and saved as false, for now) */
	newplan = (SPIPlanPtr) palloc0(sizeof(_SPI_plan));
	newplan->magic = _SPI_PLAN_MAGIC;
	newplan->plancxt = plancxt;
	newplan->cursor_options = plan->cursor_options;
	newplan->nargs = plan->nargs;
	if (plan->nargs > 0)
	{
		newplan->argtypes = (Oid *) palloc(plan->nargs * sizeof(Oid));
		memcpy(newplan->argtypes, plan->argtypes, plan->nargs * sizeof(Oid));
	}
	else
		newplan->argtypes = NULL;
	newplan->parserSetup = plan->parserSetup;
	newplan->parserSetupArg = plan->parserSetupArg;

	/* Copy all the plancache entries */
	foreach(lc, plan->plancache_list)
	{
		CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc);
		CachedPlanSource *newsource;

		newsource = CopyCachedPlan(plansource);
		newplan->plancache_list = lappend(newplan->plancache_list, newsource);
	}

	MemoryContextSwitchTo(oldcxt);

	/*
	 * Mark it saved, reparent it under CacheMemoryContext, and mark all the
	 * component CachedPlanSources as saved.  This sequence cannot fail
	 * partway through, so there's no risk of long-term memory leakage.
	 */
	newplan->saved = true;
	MemoryContextSetParent(newplan->plancxt, CacheMemoryContext);

	foreach(lc, newplan->plancache_list)
	{
		CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc);

		SaveCachedPlan(plansource);
	}

	return newplan;
}
2851
2852 /*
2853 * Internal lookup of ephemeral named relation by name.
2854 */
2855 static EphemeralNamedRelation
_SPI_find_ENR_by_name(const char * name)2856 _SPI_find_ENR_by_name(const char *name)
2857 {
2858 /* internal static function; any error is bug in SPI itself */
2859 Assert(name != NULL);
2860
2861 /* fast exit if no tuplestores have been added */
2862 if (_SPI_current->queryEnv == NULL)
2863 return NULL;
2864
2865 return get_ENR(_SPI_current->queryEnv, name);
2866 }
2867
2868 /*
2869 * Register an ephemeral named relation for use by the planner and executor on
2870 * subsequent calls using this SPI connection.
2871 */
2872 int
SPI_register_relation(EphemeralNamedRelation enr)2873 SPI_register_relation(EphemeralNamedRelation enr)
2874 {
2875 EphemeralNamedRelation match;
2876 int res;
2877
2878 if (enr == NULL || enr->md.name == NULL)
2879 return SPI_ERROR_ARGUMENT;
2880
2881 res = _SPI_begin_call(false); /* keep current memory context */
2882 if (res < 0)
2883 return res;
2884
2885 match = _SPI_find_ENR_by_name(enr->md.name);
2886 if (match)
2887 res = SPI_ERROR_REL_DUPLICATE;
2888 else
2889 {
2890 if (_SPI_current->queryEnv == NULL)
2891 _SPI_current->queryEnv = create_queryEnv();
2892
2893 register_ENR(_SPI_current->queryEnv, enr);
2894 res = SPI_OK_REL_REGISTER;
2895 }
2896
2897 _SPI_end_call(false);
2898
2899 return res;
2900 }
2901
2902 /*
2903 * Unregister an ephemeral named relation by name. This will probably be a
2904 * rarely used function, since SPI_finish will clear it automatically.
2905 */
2906 int
SPI_unregister_relation(const char * name)2907 SPI_unregister_relation(const char *name)
2908 {
2909 EphemeralNamedRelation match;
2910 int res;
2911
2912 if (name == NULL)
2913 return SPI_ERROR_ARGUMENT;
2914
2915 res = _SPI_begin_call(false); /* keep current memory context */
2916 if (res < 0)
2917 return res;
2918
2919 match = _SPI_find_ENR_by_name(name);
2920 if (match)
2921 {
2922 unregister_ENR(_SPI_current->queryEnv, match->md.name);
2923 res = SPI_OK_REL_UNREGISTER;
2924 }
2925 else
2926 res = SPI_ERROR_REL_NOT_FOUND;
2927
2928 _SPI_end_call(false);
2929
2930 return res;
2931 }
2932
2933 /*
2934 * Register the transient relations from 'tdata' using this SPI connection.
2935 * This should be called by PL implementations' trigger handlers after
2936 * connecting, in order to make transition tables visible to any queries run
2937 * in this connection.
2938 */
2939 int
SPI_register_trigger_data(TriggerData * tdata)2940 SPI_register_trigger_data(TriggerData *tdata)
2941 {
2942 if (tdata == NULL)
2943 return SPI_ERROR_ARGUMENT;
2944
2945 if (tdata->tg_newtable)
2946 {
2947 EphemeralNamedRelation enr =
2948 palloc(sizeof(EphemeralNamedRelationData));
2949 int rc;
2950
2951 enr->md.name = tdata->tg_trigger->tgnewtable;
2952 enr->md.reliddesc = tdata->tg_relation->rd_id;
2953 enr->md.tupdesc = NULL;
2954 enr->md.enrtype = ENR_NAMED_TUPLESTORE;
2955 enr->md.enrtuples = tuplestore_tuple_count(tdata->tg_newtable);
2956 enr->reldata = tdata->tg_newtable;
2957 rc = SPI_register_relation(enr);
2958 if (rc != SPI_OK_REL_REGISTER)
2959 return rc;
2960 }
2961
2962 if (tdata->tg_oldtable)
2963 {
2964 EphemeralNamedRelation enr =
2965 palloc(sizeof(EphemeralNamedRelationData));
2966 int rc;
2967
2968 enr->md.name = tdata->tg_trigger->tgoldtable;
2969 enr->md.reliddesc = tdata->tg_relation->rd_id;
2970 enr->md.tupdesc = NULL;
2971 enr->md.enrtype = ENR_NAMED_TUPLESTORE;
2972 enr->md.enrtuples = tuplestore_tuple_count(tdata->tg_oldtable);
2973 enr->reldata = tdata->tg_oldtable;
2974 rc = SPI_register_relation(enr);
2975 if (rc != SPI_OK_REL_REGISTER)
2976 return rc;
2977 }
2978
2979 return SPI_OK_TD_REGISTER;
2980 }
2981