1 /*-------------------------------------------------------------------------
2 *
3 * nodeLockRows.c
4 * Routines to handle FOR UPDATE/FOR SHARE row locking
5 *
6 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 *
10 * IDENTIFICATION
11 * src/backend/executor/nodeLockRows.c
12 *
13 *-------------------------------------------------------------------------
14 */
15 /*
16 * INTERFACE ROUTINES
17 * ExecLockRows - fetch locked rows
 *	ExecInitLockRows	- initialize node and subnodes
19 * ExecEndLockRows - shutdown node and subnodes
20 */
21
22 #include "postgres.h"
23
24 #include "access/htup_details.h"
25 #include "access/xact.h"
26 #include "executor/executor.h"
27 #include "executor/nodeLockRows.h"
28 #include "foreign/fdwapi.h"
29 #include "miscadmin.h"
30 #include "storage/bufmgr.h"
31 #include "utils/rel.h"
32 #include "utils/tqual.h"
33
34
35 /* ----------------------------------------------------------------
36 * ExecLockRows
37 * ----------------------------------------------------------------
38 */
static TupleTableSlot *			/* return: a tuple or NULL */
ExecLockRows(PlanState *pstate)
{
	LockRowsState *node = castNode(LockRowsState, pstate);
	TupleTableSlot *slot;
	EState	   *estate;
	PlanState  *outerPlan;
	bool		epq_needed;
	ListCell   *lc;

	CHECK_FOR_INTERRUPTS();

	/*
	 * get information from the node
	 */
	estate = node->ps.state;
	outerPlan = outerPlanState(node);

	/*
	 * Get next tuple from subplan, if any.
	 *
	 * We jump back here whenever the current candidate row has to be
	 * discarded: the lock couldn't be obtained under SKIP LOCKED, the row
	 * was deleted under us, or the updated row version failed the
	 * EvalPlanQual recheck.
	 */
lnext:
	slot = ExecProcNode(outerPlan);

	if (TupIsNull(slot))
		return NULL;

	/* We don't need EvalPlanQual unless we get updated tuple version(s) */
	epq_needed = false;

	/*
	 * Attempt to lock the source tuple(s).  (Note we only have locking
	 * rowmarks in lr_arowMarks.)
	 */
	foreach(lc, node->lr_arowMarks)
	{
		ExecAuxRowMark *aerm = (ExecAuxRowMark *) lfirst(lc);
		ExecRowMark *erm = aerm->rowmark;
		HeapTuple  *testTuple;
		Datum		datum;
		bool		isNull;
		HeapTupleData tuple;
		Buffer		buffer;
		HeapUpdateFailureData hufd;
		LockTupleMode lockmode;
		HTSU_Result test;
		HeapTuple	copyTuple;

		/*
		 * Clear any leftover test tuple for this rel.  lr_curtuples is
		 * indexed by range-table index (rti - 1); a non-NULL entry is a
		 * tuple we copied for a previous row and now own, so free it.
		 */
		testTuple = &(node->lr_curtuples[erm->rti - 1]);
		if (*testTuple != NULL)
			heap_freetuple(*testTuple);
		*testTuple = NULL;

		/*
		 * If child rel (rti differs from the parent's prti), must check
		 * whether it produced this row: compare the row's tableoid junk
		 * attribute against this rowmark's relation OID.
		 */
		if (erm->rti != erm->prti)
		{
			Oid			tableoid;

			datum = ExecGetJunkAttribute(slot,
										 aerm->toidAttNo,
										 &isNull);
			/* shouldn't ever get a null result... */
			if (isNull)
				elog(ERROR, "tableoid is NULL");
			tableoid = DatumGetObjectId(datum);

			Assert(OidIsValid(erm->relid));
			if (tableoid != erm->relid)
			{
				/* this child is inactive right now */
				erm->ermActive = false;
				ItemPointerSetInvalid(&(erm->curCtid));
				continue;
			}
		}
		erm->ermActive = true;

		/* fetch the tuple's ctid */
		datum = ExecGetJunkAttribute(slot,
									 aerm->ctidAttNo,
									 &isNull);
		/* shouldn't ever get a null result... */
		if (isNull)
			elog(ERROR, "ctid is NULL");

		/* requests for foreign tables must be passed to their FDW */
		if (erm->relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
		{
			FdwRoutine *fdwroutine;
			bool		updated = false;

			fdwroutine = GetFdwRoutineForRelation(erm->relation, false);
			/* this should have been checked already, but let's be safe */
			if (fdwroutine->RefetchForeignRow == NULL)
				ereport(ERROR,
						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
						 errmsg("cannot lock rows in foreign table \"%s\"",
								RelationGetRelationName(erm->relation))));
			copyTuple = fdwroutine->RefetchForeignRow(estate,
													  erm,
													  datum,
													  &updated);
			if (copyTuple == NULL)
			{
				/* couldn't get the lock, so skip this row */
				goto lnext;
			}

			/* save locked tuple for possible EvalPlanQual testing below */
			*testTuple = copyTuple;

			/*
			 * if FDW says tuple was updated before getting locked, we need to
			 * perform EPQ testing to see if quals are still satisfied
			 */
			if (updated)
				epq_needed = true;

			continue;
		}

		/*
		 * okay, try to lock the tuple.  Map this rowmark's strength onto
		 * the corresponding heap-level tuple lock mode.
		 */
		tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
		switch (erm->markType)
		{
			case ROW_MARK_EXCLUSIVE:
				lockmode = LockTupleExclusive;
				break;
			case ROW_MARK_NOKEYEXCLUSIVE:
				lockmode = LockTupleNoKeyExclusive;
				break;
			case ROW_MARK_SHARE:
				lockmode = LockTupleShare;
				break;
			case ROW_MARK_KEYSHARE:
				lockmode = LockTupleKeyShare;
				break;
			default:
				elog(ERROR, "unsupported rowmark type");
				lockmode = LockTupleNoKeyExclusive; /* keep compiler quiet */
				break;
		}

		/*
		 * NOTE(review): the bare 'true' argument appears to be
		 * follow_updates — confirm against heap_lock_tuple's signature.
		 */
		test = heap_lock_tuple(erm->relation, &tuple,
							   estate->es_output_cid,
							   lockmode, erm->waitPolicy, true,
							   &buffer, &hufd);
		/* heap_lock_tuple returned a pinned buffer; drop the pin now */
		ReleaseBuffer(buffer);
		switch (test)
		{
			case HeapTupleWouldBlock:
				/* couldn't lock tuple in SKIP LOCKED mode */
				goto lnext;

			case HeapTupleSelfUpdated:

				/*
				 * The target tuple was already updated or deleted by the
				 * current command, or by a later command in the current
				 * transaction.  We *must* ignore the tuple in the former
				 * case, so as to avoid the "Halloween problem" of repeated
				 * update attempts.  In the latter case it might be sensible
				 * to fetch the updated tuple instead, but doing so would
				 * require changing heap_update and heap_delete to not
				 * complain about updating "invisible" tuples, which seems
				 * pretty scary (heap_lock_tuple will not complain, but few
				 * callers expect HeapTupleInvisible, and we're not one of
				 * them).  So for now, treat the tuple as deleted and do not
				 * process.
				 */
				goto lnext;

			case HeapTupleMayBeUpdated:
				/* got the lock successfully */
				break;

			case HeapTupleUpdated:
				/* concurrent update: hard error under serializable-style snapshots */
				if (IsolationUsesXactSnapshot())
					ereport(ERROR,
							(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
							 errmsg("could not serialize access due to concurrent update")));
				if (ItemPointerEquals(&hufd.ctid, &tuple.t_self))
				{
					/* Tuple was deleted, so don't return it */
					goto lnext;
				}

				/* updated, so fetch and lock the updated version */
				copyTuple = EvalPlanQualFetch(estate, erm->relation,
											  lockmode, erm->waitPolicy,
											  &hufd.ctid, hufd.xmax);

				if (copyTuple == NULL)
				{
					/*
					 * Tuple was deleted; or it's locked and we're under SKIP
					 * LOCKED policy, so don't return it
					 */
					goto lnext;
				}
				/* remember the actually locked tuple's TID */
				tuple.t_self = copyTuple->t_self;

				/* Save locked tuple for EvalPlanQual testing below */
				*testTuple = copyTuple;

				/* Remember we need to do EPQ testing */
				epq_needed = true;

				/* Continue loop until we have all target tuples */
				break;

			case HeapTupleInvisible:
				elog(ERROR, "attempted to lock invisible tuple");

			default:
				elog(ERROR, "unrecognized heap_lock_tuple status: %u",
					 test);
		}

		/* Remember locked tuple's TID for EPQ testing and WHERE CURRENT OF */
		erm->curCtid = tuple.t_self;
	}

	/*
	 * If we need to do EvalPlanQual testing, do so.  This re-evaluates the
	 * plan's quals against the updated row versions we just locked.
	 */
	if (epq_needed)
	{
		/* Initialize EPQ machinery */
		EvalPlanQualBegin(&node->lr_epqstate, estate);

		/*
		 * Transfer any already-fetched tuples into the EPQ state, and fetch a
		 * copy of any rows that were successfully locked without any update
		 * having occurred.  (We do this in a separate pass so as to avoid
		 * overhead in the common case where there are no concurrent updates.)
		 * Make sure any inactive child rels have NULL test tuples in EPQ.
		 */
		foreach(lc, node->lr_arowMarks)
		{
			ExecAuxRowMark *aerm = (ExecAuxRowMark *) lfirst(lc);
			ExecRowMark *erm = aerm->rowmark;
			HeapTupleData tuple;
			Buffer		buffer;

			/* skip non-active child tables, but clear their test tuples */
			if (!erm->ermActive)
			{
				Assert(erm->rti != erm->prti);	/* check it's child table */
				EvalPlanQualSetTuple(&node->lr_epqstate, erm->rti, NULL);
				continue;
			}

			/* was tuple updated and fetched above? */
			if (node->lr_curtuples[erm->rti - 1] != NULL)
			{
				/* yes, so set it as the EPQ test tuple for this rel */
				EvalPlanQualSetTuple(&node->lr_epqstate,
									 erm->rti,
									 node->lr_curtuples[erm->rti - 1]);
				/* freeing this tuple is now the responsibility of EPQ */
				node->lr_curtuples[erm->rti - 1] = NULL;
				continue;
			}

			/* foreign tables should have been fetched above */
			Assert(erm->relation->rd_rel->relkind != RELKIND_FOREIGN_TABLE);
			Assert(ItemPointerIsValid(&(erm->curCtid)));

			/*
			 * okay, fetch the tuple.  SnapshotAny is safe here because we
			 * hold a tuple lock on this TID, so it can't go away under us.
			 */
			tuple.t_self = erm->curCtid;
			if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
							false, NULL))
				elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");

			/* successful, copy and store tuple */
			EvalPlanQualSetTuple(&node->lr_epqstate, erm->rti,
								 heap_copytuple(&tuple));
			ReleaseBuffer(buffer);
		}

		/*
		 * Now fetch any non-locked source rows --- the EPQ logic knows how to
		 * do that.
		 */
		EvalPlanQualSetSlot(&node->lr_epqstate, slot);
		EvalPlanQualFetchRowMarks(&node->lr_epqstate);

		/*
		 * And finally we can re-evaluate the tuple.
		 */
		slot = EvalPlanQualNext(&node->lr_epqstate);
		if (TupIsNull(slot))
		{
			/* Updated tuple fails qual, so ignore it and go on */
			goto lnext;
		}
	}

	/* Got all locks, so return the current tuple */
	return slot;
}
343
344 /* ----------------------------------------------------------------
345 * ExecInitLockRows
346 *
347 * This initializes the LockRows node state structures and
348 * the node's subplan.
349 * ----------------------------------------------------------------
350 */
351 LockRowsState *
ExecInitLockRows(LockRows * node,EState * estate,int eflags)352 ExecInitLockRows(LockRows *node, EState *estate, int eflags)
353 {
354 LockRowsState *lrstate;
355 Plan *outerPlan = outerPlan(node);
356 List *epq_arowmarks;
357 ListCell *lc;
358
359 /* check for unsupported flags */
360 Assert(!(eflags & EXEC_FLAG_MARK));
361
362 /*
363 * create state structure
364 */
365 lrstate = makeNode(LockRowsState);
366 lrstate->ps.plan = (Plan *) node;
367 lrstate->ps.state = estate;
368 lrstate->ps.ExecProcNode = ExecLockRows;
369
370 /*
371 * Miscellaneous initialization
372 *
373 * LockRows nodes never call ExecQual or ExecProject.
374 */
375
376 /*
377 * Tuple table initialization (XXX not actually used...)
378 */
379 ExecInitResultTupleSlot(estate, &lrstate->ps);
380
381 /*
382 * then initialize outer plan
383 */
384 outerPlanState(lrstate) = ExecInitNode(outerPlan, estate, eflags);
385
386 /*
387 * LockRows nodes do no projections, so initialize projection info for
388 * this node appropriately
389 */
390 ExecAssignResultTypeFromTL(&lrstate->ps);
391 lrstate->ps.ps_ProjInfo = NULL;
392
393 /*
394 * Create workspace in which we can remember per-RTE locked tuples
395 */
396 lrstate->lr_ntables = list_length(estate->es_range_table);
397 lrstate->lr_curtuples = (HeapTuple *)
398 palloc0(lrstate->lr_ntables * sizeof(HeapTuple));
399
400 /*
401 * Locate the ExecRowMark(s) that this node is responsible for, and
402 * construct ExecAuxRowMarks for them. (InitPlan should already have
403 * built the global list of ExecRowMarks.)
404 */
405 lrstate->lr_arowMarks = NIL;
406 epq_arowmarks = NIL;
407 foreach(lc, node->rowMarks)
408 {
409 PlanRowMark *rc = lfirst_node(PlanRowMark, lc);
410 ExecRowMark *erm;
411 ExecAuxRowMark *aerm;
412
413 /* ignore "parent" rowmarks; they are irrelevant at runtime */
414 if (rc->isParent)
415 continue;
416
417 /* safety check on size of lr_curtuples array */
418 Assert(rc->rti > 0 && rc->rti <= lrstate->lr_ntables);
419
420 /* find ExecRowMark and build ExecAuxRowMark */
421 erm = ExecFindRowMark(estate, rc->rti, false);
422 aerm = ExecBuildAuxRowMark(erm, outerPlan->targetlist);
423
424 /*
425 * Only locking rowmarks go into our own list. Non-locking marks are
426 * passed off to the EvalPlanQual machinery. This is because we don't
427 * want to bother fetching non-locked rows unless we actually have to
428 * do an EPQ recheck.
429 */
430 if (RowMarkRequiresRowShareLock(erm->markType))
431 lrstate->lr_arowMarks = lappend(lrstate->lr_arowMarks, aerm);
432 else
433 epq_arowmarks = lappend(epq_arowmarks, aerm);
434 }
435
436 /* Now we have the info needed to set up EPQ state */
437 EvalPlanQualInit(&lrstate->lr_epqstate, estate,
438 outerPlan, epq_arowmarks, node->epqParam);
439
440 return lrstate;
441 }
442
443 /* ----------------------------------------------------------------
444 * ExecEndLockRows
445 *
446 * This shuts down the subplan and frees resources allocated
447 * to this node.
448 * ----------------------------------------------------------------
449 */
450 void
ExecEndLockRows(LockRowsState * node)451 ExecEndLockRows(LockRowsState *node)
452 {
453 EvalPlanQualEnd(&node->lr_epqstate);
454 ExecEndNode(outerPlanState(node));
455 }
456
457
458 void
ExecReScanLockRows(LockRowsState * node)459 ExecReScanLockRows(LockRowsState *node)
460 {
461 /*
462 * if chgParam of subnode is not null then plan will be re-scanned by
463 * first ExecProcNode.
464 */
465 if (node->ps.lefttree->chgParam == NULL)
466 ExecReScan(node->ps.lefttree);
467 }
468