/*-------------------------------------------------------------------------
 *
 * nodeLockRows.c
 *    Routines to handle FOR UPDATE/FOR SHARE row locking
 *
 * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/executor/nodeLockRows.c
 *
 *-------------------------------------------------------------------------
 */
/*
 * INTERFACE ROUTINES
 *    ExecLockRows     - fetch locked rows
 *    ExecInitLockRows - initialize node and subnodes
 *    ExecEndLockRows  - shut down node and subnodes
 */
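
/*
 * For orientation: the planner emits a LockRows node atop the plan for
 * row-locking queries, e.g. (hypothetical table and column names):
 *
 *    SELECT * FROM accounts WHERE balance > 0 FOR UPDATE;
 *
 * Each row produced by the subplan is locked by ExecLockRows before being
 * returned, so only successfully locked rows reach the caller.
 */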

#include "postgres.h"

#include "access/htup_details.h"
#include "access/xact.h"
#include "executor/executor.h"
#include "executor/nodeLockRows.h"
#include "foreign/fdwapi.h"
#include "miscadmin.h"
#include "storage/bufmgr.h"
#include "utils/rel.h"
#include "utils/tqual.h"


/* ----------------------------------------------------------------
 *		ExecLockRows
 * ----------------------------------------------------------------
 */
static TupleTableSlot *			/* return: a tuple or NULL */
ExecLockRows(PlanState *pstate)
{
    LockRowsState *node = castNode(LockRowsState, pstate);
    TupleTableSlot *slot;
    EState *estate;
    PlanState *outerPlan;
    bool epq_needed;
    ListCell *lc;

    CHECK_FOR_INTERRUPTS();

    /*
     * get information from the node
     */
    estate = node->ps.state;
    outerPlan = outerPlanState(node);

    /*
     * Get next tuple from subplan, if any.
     */
lnext:
    slot = ExecProcNode(outerPlan);

    if (TupIsNull(slot))
        return NULL;

    /* We don't need EvalPlanQual unless we get updated tuple version(s) */
    epq_needed = false;

    /*
     * Attempt to lock the source tuple(s).  (Note we only have locking
     * rowmarks in lr_arowMarks.)
     */
    foreach(lc, node->lr_arowMarks)
    {
        ExecAuxRowMark *aerm = (ExecAuxRowMark *) lfirst(lc);
        ExecRowMark *erm = aerm->rowmark;
        HeapTuple *testTuple;
        Datum datum;
        bool isNull;
        HeapTupleData tuple;
        Buffer buffer;
        HeapUpdateFailureData hufd;
        LockTupleMode lockmode;
        HTSU_Result test;
        HeapTuple copyTuple;

        /* clear any leftover test tuple for this rel */
        testTuple = &(node->lr_curtuples[erm->rti - 1]);
        if (*testTuple != NULL)
            heap_freetuple(*testTuple);
        *testTuple = NULL;

        /*
         * If child rel, we must check whether it produced this row: when
         * the target is an inheritance or partitioning parent, each child
         * table has its own rowmark, and the tableoid junk column tells us
         * which child the current row actually came from.
         */
        if (erm->rti != erm->prti)
        {
            Oid tableoid;

            datum = ExecGetJunkAttribute(slot,
                                         aerm->toidAttNo,
                                         &isNull);
            /* shouldn't ever get a null result... */
            if (isNull)
                elog(ERROR, "tableoid is NULL");
            tableoid = DatumGetObjectId(datum);

            Assert(OidIsValid(erm->relid));
            if (tableoid != erm->relid)
            {
                /* this child is inactive right now */
                erm->ermActive = false;
                ItemPointerSetInvalid(&(erm->curCtid));
                continue;
            }
        }
        erm->ermActive = true;

        /* fetch the tuple's ctid */
        datum = ExecGetJunkAttribute(slot,
                                     aerm->ctidAttNo,
                                     &isNull);
        /* shouldn't ever get a null result... */
        if (isNull)
            elog(ERROR, "ctid is NULL");

        /* requests for foreign tables must be passed to their FDW */
        if (erm->relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
        {
            FdwRoutine *fdwroutine;
            bool updated = false;

            fdwroutine = GetFdwRoutineForRelation(erm->relation, false);
            /* this should have been checked already, but let's be safe */
            if (fdwroutine->RefetchForeignRow == NULL)
                ereport(ERROR,
                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                         errmsg("cannot lock rows in foreign table \"%s\"",
                                RelationGetRelationName(erm->relation))));
            copyTuple = fdwroutine->RefetchForeignRow(estate,
                                                      erm,
                                                      datum,
                                                      &updated);
            if (copyTuple == NULL)
            {
                /* couldn't get the lock, so skip this row */
                goto lnext;
            }

            /* save locked tuple for possible EvalPlanQual testing below */
            *testTuple = copyTuple;

            /*
             * if FDW says tuple was updated before getting locked, we need
             * to perform EPQ testing to see if quals are still satisfied
             */
            if (updated)
                epq_needed = true;

            continue;
        }

        /* okay, try to lock the tuple */
        tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
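        /*
         * Map the rowmark type to the corresponding heap-level tuple lock
         * mode.  These arise from the SQL clauses FOR UPDATE, FOR NO KEY
         * UPDATE, FOR SHARE, and FOR KEY SHARE respectively.
         */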
        switch (erm->markType)
        {
            case ROW_MARK_EXCLUSIVE:
                lockmode = LockTupleExclusive;
                break;
            case ROW_MARK_NOKEYEXCLUSIVE:
                lockmode = LockTupleNoKeyExclusive;
                break;
            case ROW_MARK_SHARE:
                lockmode = LockTupleShare;
                break;
            case ROW_MARK_KEYSHARE:
                lockmode = LockTupleKeyShare;
                break;
            default:
                elog(ERROR, "unsupported rowmark type");
                lockmode = LockTupleNoKeyExclusive; /* keep compiler quiet */
                break;
        }

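        /*
         * Lock the fetched tuple version.  The "true" argument is
         * follow_updates: if the row has meanwhile been updated, the lock
         * is propagated along the update chain as well.
         */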
        test = heap_lock_tuple(erm->relation, &tuple,
                               estate->es_output_cid,
                               lockmode, erm->waitPolicy, true,
                               &buffer, &hufd);
        ReleaseBuffer(buffer);
        switch (test)
        {
            case HeapTupleWouldBlock:
                /* couldn't lock tuple in SKIP LOCKED mode */
                goto lnext;

            case HeapTupleSelfUpdated:

                /*
                 * The target tuple was already updated or deleted by the
                 * current command, or by a later command in the current
                 * transaction.  We *must* ignore the tuple in the former
                 * case, so as to avoid the "Halloween problem" of repeated
                 * update attempts.  In the latter case it might be sensible
                 * to fetch the updated tuple instead, but doing so would
                 * require changing heap_update and heap_delete to not
                 * complain about updating "invisible" tuples, which seems
                 * pretty scary (heap_lock_tuple will not complain, but few
                 * callers expect HeapTupleInvisible, and we're not one of
                 * them).  So for now, treat the tuple as deleted and do not
                 * process.
                 */
                goto lnext;

            case HeapTupleMayBeUpdated:
                /* got the lock successfully */
                break;

            case HeapTupleUpdated:
                if (IsolationUsesXactSnapshot())
                    ereport(ERROR,
                            (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                             errmsg("could not serialize access due to concurrent update")));
                if (ItemPointerIndicatesMovedPartitions(&hufd.ctid))
                    ereport(ERROR,
                            (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                             errmsg("tuple to be locked was already moved to another partition due to concurrent update")));

                if (ItemPointerEquals(&hufd.ctid, &tuple.t_self))
                {
                    /* Tuple was deleted, so don't return it */
                    goto lnext;
                }

                /*
                 * Tuple was updated, so fetch and lock the updated version:
                 * EvalPlanQualFetch follows the update chain to the newest
                 * version of the row and attempts to lock it, honoring the
                 * rowmark's wait policy.
                 */
                copyTuple = EvalPlanQualFetch(estate, erm->relation,
                                              lockmode, erm->waitPolicy,
                                              &hufd.ctid, hufd.xmax);

                if (copyTuple == NULL)
                {
                    /*
                     * Tuple was deleted; or it's locked and we're under
                     * SKIP LOCKED policy, so don't return it
                     */
                    goto lnext;
                }
                /* remember the actually locked tuple's TID */
                tuple.t_self = copyTuple->t_self;

                /* Save locked tuple for EvalPlanQual testing below */
                *testTuple = copyTuple;

                /* Remember we need to do EPQ testing */
                epq_needed = true;

                /* Continue loop until we have all target tuples */
                break;

            case HeapTupleInvisible:
                elog(ERROR, "attempted to lock invisible tuple");
                break;

            default:
                elog(ERROR, "unrecognized heap_lock_tuple status: %u",
                     test);
        }

        /* Remember locked tuple's TID for EPQ testing and WHERE CURRENT OF */
        erm->curCtid = tuple.t_self;
    }

    /*
     * If we need to do EvalPlanQual testing, do so.
     */
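    /*
     * In brief: under READ COMMITTED, if any locked row version differs
     * from the version our snapshot originally saw (a concurrent
     * transaction updated it), EvalPlanQual re-evaluates the query's quals
     * against the newest row versions; a row whose new version no longer
     * satisfies the quals is skipped rather than returned.
     */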
    if (epq_needed)
    {
        /* Initialize EPQ machinery */
        EvalPlanQualBegin(&node->lr_epqstate, estate);

        /*
         * Transfer any already-fetched tuples into the EPQ state, and fetch
         * a copy of any rows that were successfully locked without any
         * update having occurred.  (We do this in a separate pass so as to
         * avoid overhead in the common case where there are no concurrent
         * updates.)  Make sure any inactive child rels have NULL test
         * tuples in EPQ.
         */
        foreach(lc, node->lr_arowMarks)
        {
            ExecAuxRowMark *aerm = (ExecAuxRowMark *) lfirst(lc);
            ExecRowMark *erm = aerm->rowmark;
            HeapTupleData tuple;
            Buffer buffer;

            /* skip non-active child tables, but clear their test tuples */
            if (!erm->ermActive)
            {
                Assert(erm->rti != erm->prti);  /* check it's child table */
                EvalPlanQualSetTuple(&node->lr_epqstate, erm->rti, NULL);
                continue;
            }

            /* was tuple updated and fetched above? */
            if (node->lr_curtuples[erm->rti - 1] != NULL)
            {
                /* yes, so set it as the EPQ test tuple for this rel */
                EvalPlanQualSetTuple(&node->lr_epqstate,
                                     erm->rti,
                                     node->lr_curtuples[erm->rti - 1]);
                /* freeing this tuple is now the responsibility of EPQ */
                node->lr_curtuples[erm->rti - 1] = NULL;
                continue;
            }

            /* foreign tables should have been fetched above */
            Assert(erm->relation->rd_rel->relkind != RELKIND_FOREIGN_TABLE);
            Assert(ItemPointerIsValid(&(erm->curCtid)));

            /* okay, fetch the tuple */
            tuple.t_self = erm->curCtid;
            if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
                            false, NULL))
                elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");

            /* successful, copy and store tuple */
            EvalPlanQualSetTuple(&node->lr_epqstate, erm->rti,
                                 heap_copytuple(&tuple));
            ReleaseBuffer(buffer);
        }

        /*
         * Now fetch any non-locked source rows --- the EPQ logic knows how
         * to do that.
         */
        EvalPlanQualSetSlot(&node->lr_epqstate, slot);
        EvalPlanQualFetchRowMarks(&node->lr_epqstate);

        /*
         * And finally we can re-evaluate the tuple.
         */
        slot = EvalPlanQualNext(&node->lr_epqstate);
        if (TupIsNull(slot))
        {
            /* Updated tuple fails qual, so ignore it and go on */
            goto lnext;
        }
    }

    /* Got all locks, so return the current tuple */
    return slot;
}

/* ----------------------------------------------------------------
 *		ExecInitLockRows
 *
 *		This initializes the LockRows node state structures and
 *		the node's subplan.
 * ----------------------------------------------------------------
 */
LockRowsState *
ExecInitLockRows(LockRows *node, EState *estate, int eflags)
{
    LockRowsState *lrstate;
    Plan *outerPlan = outerPlan(node);
    List *epq_arowmarks;
    ListCell *lc;

    /* check for unsupported flags */
    Assert(!(eflags & EXEC_FLAG_MARK));

    /*
     * create state structure
     */
    lrstate = makeNode(LockRowsState);
    lrstate->ps.plan = (Plan *) node;
    lrstate->ps.state = estate;
    lrstate->ps.ExecProcNode = ExecLockRows;

    /*
     * Miscellaneous initialization
     *
     * LockRows nodes never call ExecQual or ExecProject, therefore no
     * ExprContext is needed.
     */

    /*
     * Tuple table initialization (XXX not actually used, but upper nodes
     * access it to get this node's result tupledesc...)
     */
    ExecInitResultTupleSlotTL(estate, &lrstate->ps);

    /*
     * then initialize outer plan
     */
    outerPlanState(lrstate) = ExecInitNode(outerPlan, estate, eflags);

    /*
     * LockRows nodes do no projections, so initialize projection info for
     * this node appropriately
     */
    lrstate->ps.ps_ProjInfo = NULL;

    /*
     * Create workspace in which we can remember per-RTE locked tuples
     */
    lrstate->lr_ntables = list_length(estate->es_range_table);
    lrstate->lr_curtuples = (HeapTuple *)
        palloc0(lrstate->lr_ntables * sizeof(HeapTuple));

    /*
     * Locate the ExecRowMark(s) that this node is responsible for, and
     * construct ExecAuxRowMarks for them.  (InitPlan should already have
     * built the global list of ExecRowMarks.)
     */
    lrstate->lr_arowMarks = NIL;
    epq_arowmarks = NIL;
    foreach(lc, node->rowMarks)
    {
        PlanRowMark *rc = lfirst_node(PlanRowMark, lc);
        ExecRowMark *erm;
        ExecAuxRowMark *aerm;

        /* ignore "parent" rowmarks; they are irrelevant at runtime */
        if (rc->isParent)
            continue;

        /* safety check on size of lr_curtuples array */
        Assert(rc->rti > 0 && rc->rti <= lrstate->lr_ntables);

        /* find ExecRowMark and build ExecAuxRowMark */
        erm = ExecFindRowMark(estate, rc->rti, false);
        aerm = ExecBuildAuxRowMark(erm, outerPlan->targetlist);

        /*
         * Only locking rowmarks go into our own list.  Non-locking marks
         * are passed off to the EvalPlanQual machinery.  This is because we
         * don't want to bother fetching non-locked rows unless we actually
         * have to do an EPQ recheck.
         */
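        /*
         * For example, in a (hypothetical) query such as
         *    SELECT ... FROM a JOIN b ON ... FOR UPDATE OF a;
         * only "a" carries a locking rowmark here; "b" gets a non-locking
         * mark, which the EPQ machinery uses to re-fetch its rows only when
         * a recheck is actually needed.
         */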
        if (RowMarkRequiresRowShareLock(erm->markType))
            lrstate->lr_arowMarks = lappend(lrstate->lr_arowMarks, aerm);
        else
            epq_arowmarks = lappend(epq_arowmarks, aerm);
    }

    /* Now we have the info needed to set up EPQ state */
    EvalPlanQualInit(&lrstate->lr_epqstate, estate,
                     outerPlan, epq_arowmarks, node->epqParam);

    return lrstate;
}

/* ----------------------------------------------------------------
 *		ExecEndLockRows
 *
 *		This shuts down the subplan and frees resources allocated
 *		to this node.
 * ----------------------------------------------------------------
 */
void
ExecEndLockRows(LockRowsState *node)
{
    EvalPlanQualEnd(&node->lr_epqstate);
    ExecEndNode(outerPlanState(node));
}

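/* ----------------------------------------------------------------
 *		ExecReScanLockRows
 *
 *		Rescans the subplan.
 * ----------------------------------------------------------------
 */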
void
ExecReScanLockRows(LockRowsState *node)
{
    /*
     * If chgParam of the subnode is not null, then the plan will be
     * re-scanned by the first ExecProcNode.
     */
    if (node->ps.lefttree->chgParam == NULL)
        ExecReScan(node->ps.lefttree);
}