/*-------------------------------------------------------------------------
 *
 * nodeLockRows.c
 *	  Routines to handle FOR UPDATE/FOR SHARE row locking
 *
 * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/executor/nodeLockRows.c
 *
 *-------------------------------------------------------------------------
 */
/*
 * INTERFACE ROUTINES
 *		ExecLockRows		- fetch locked rows
 *		ExecInitLockRows	- initialize node and subnodes
 *		ExecEndLockRows		- shutdown node and subnodes
 */
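/*
 * A LockRows node sits atop the plan for a query that uses a row locking
 * clause, for example (illustrative query, not taken from this file):
 *
 *		SELECT * FROM accounts WHERE balance > 0 FOR UPDATE;
 *
 * The node locks each row emitted by its subplan; if a concurrent update
 * is detected, the query's quals are re-checked against the updated row
 * version via the EvalPlanQual (EPQ) machinery used below.
 */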

#include "postgres.h"

#include "access/htup_details.h"
#include "access/xact.h"
#include "executor/executor.h"
#include "executor/nodeLockRows.h"
#include "foreign/fdwapi.h"
#include "storage/bufmgr.h"
#include "utils/rel.h"
#include "utils/tqual.h"


/* ----------------------------------------------------------------
 *		ExecLockRows
 * ----------------------------------------------------------------
 */
TupleTableSlot *				/* return: a tuple or NULL */
ExecLockRows(LockRowsState *node)
{
	TupleTableSlot *slot;
	EState	   *estate;
	PlanState  *outerPlan;
	bool		epq_needed;
	ListCell   *lc;

	/*
	 * get information from the node
	 */
	estate = node->ps.state;
	outerPlan = outerPlanState(node);

	/*
	 * Get next tuple from subplan, if any.
	 */
lnext:
	slot = ExecProcNode(outerPlan);

	if (TupIsNull(slot))
		return NULL;

	/* We don't need EvalPlanQual unless we get updated tuple version(s) */
	epq_needed = false;

	/*
	 * Attempt to lock the source tuple(s).  (Note we only have locking
	 * rowmarks in lr_arowMarks.)
	 */
	foreach(lc, node->lr_arowMarks)
	{
		ExecAuxRowMark *aerm = (ExecAuxRowMark *) lfirst(lc);
		ExecRowMark *erm = aerm->rowmark;
		HeapTuple  *testTuple;
		Datum		datum;
		bool		isNull;
		HeapTupleData tuple;
		Buffer		buffer;
		HeapUpdateFailureData hufd;
		LockTupleMode lockmode;
		HTSU_Result test;
		HeapTuple	copyTuple;

		/* clear any leftover test tuple for this rel */
		testTuple = &(node->lr_curtuples[erm->rti - 1]);
		if (*testTuple != NULL)
			heap_freetuple(*testTuple);
		*testTuple = NULL;

		/* if child rel, must check whether it produced this row */
		if (erm->rti != erm->prti)
		{
			Oid			tableoid;

			datum = ExecGetJunkAttribute(slot,
										 aerm->toidAttNo,
										 &isNull);
			/* shouldn't ever get a null result... */
			if (isNull)
				elog(ERROR, "tableoid is NULL");
			tableoid = DatumGetObjectId(datum);

			Assert(OidIsValid(erm->relid));
			if (tableoid != erm->relid)
			{
				/* this child is inactive right now */
				erm->ermActive = false;
				ItemPointerSetInvalid(&(erm->curCtid));
				continue;
			}
		}
		erm->ermActive = true;

		/* fetch the tuple's ctid */
		datum = ExecGetJunkAttribute(slot,
									 aerm->ctidAttNo,
									 &isNull);
		/* shouldn't ever get a null result... */
		if (isNull)
			elog(ERROR, "ctid is NULL");

		/* requests for foreign tables must be passed to their FDW */
		if (erm->relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
		{
			FdwRoutine *fdwroutine;
			bool		updated = false;

			fdwroutine = GetFdwRoutineForRelation(erm->relation, false);
			/* this should have been checked already, but let's be safe */
			if (fdwroutine->RefetchForeignRow == NULL)
				ereport(ERROR,
						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
						 errmsg("cannot lock rows in foreign table \"%s\"",
								RelationGetRelationName(erm->relation))));
			copyTuple = fdwroutine->RefetchForeignRow(estate,
													  erm,
													  datum,
													  &updated);
			if (copyTuple == NULL)
			{
				/* couldn't get the lock, so skip this row */
				goto lnext;
			}

			/* save locked tuple for possible EvalPlanQual testing below */
			*testTuple = copyTuple;

			/*
			 * If the FDW says the tuple was updated before getting locked,
			 * we need to perform EPQ testing to see if quals are still
			 * satisfied.
			 */
			if (updated)
				epq_needed = true;

			continue;
		}

		/* okay, try to lock the tuple */
		tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
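		/*
		 * Choose the tuple lock strength.  These row mark types correspond
		 * to the SQL locking clauses FOR UPDATE, FOR NO KEY UPDATE, FOR
		 * SHARE, and FOR KEY SHARE respectively.
		 */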
		switch (erm->markType)
		{
			case ROW_MARK_EXCLUSIVE:
				lockmode = LockTupleExclusive;
				break;
			case ROW_MARK_NOKEYEXCLUSIVE:
				lockmode = LockTupleNoKeyExclusive;
				break;
			case ROW_MARK_SHARE:
				lockmode = LockTupleShare;
				break;
			case ROW_MARK_KEYSHARE:
				lockmode = LockTupleKeyShare;
				break;
			default:
				elog(ERROR, "unsupported rowmark type");
				lockmode = LockTupleNoKeyExclusive;		/* keep compiler quiet */
				break;
		}

		test = heap_lock_tuple(erm->relation, &tuple,
							   estate->es_output_cid,
							   lockmode, erm->waitPolicy, true,
							   &buffer, &hufd);
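		/*
		 * heap_lock_tuple returns with the containing buffer pinned; we have
		 * no further use for the tuple contents here (only the TID already
		 * in tuple.t_self), so drop the pin immediately.
		 */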
		ReleaseBuffer(buffer);
		switch (test)
		{
			case HeapTupleWouldBlock:
				/* couldn't lock tuple in SKIP LOCKED mode */
				goto lnext;

			case HeapTupleSelfUpdated:

				/*
				 * The target tuple was already updated or deleted by the
				 * current command, or by a later command in the current
				 * transaction.  We *must* ignore the tuple in the former
				 * case, so as to avoid the "Halloween problem" of repeated
				 * update attempts.  In the latter case it might be sensible
				 * to fetch the updated tuple instead, but doing so would
				 * require changing heap_update and heap_delete to not
				 * complain about updating "invisible" tuples, which seems
				 * pretty scary (heap_lock_tuple will not complain, but few
				 * callers expect HeapTupleInvisible, and we're not one of
				 * them).  So for now, treat the tuple as deleted and do not
				 * process.
				 */
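				/*
				 * (A classic illustration of the Halloween problem: an
				 * UPDATE that increments a column could otherwise revisit
				 * and re-update its own output rows indefinitely.)
				 */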
				goto lnext;

			case HeapTupleMayBeUpdated:
				/* got the lock successfully */
				break;

			case HeapTupleUpdated:
				if (IsolationUsesXactSnapshot())
					ereport(ERROR,
							(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
							 errmsg("could not serialize access due to concurrent update")));
				if (ItemPointerEquals(&hufd.ctid, &tuple.t_self))
				{
					/* Tuple was deleted, so don't return it */
					goto lnext;
				}

				/* updated, so fetch and lock the updated version */
				copyTuple = EvalPlanQualFetch(estate, erm->relation,
											  lockmode, erm->waitPolicy,
											  &hufd.ctid, hufd.xmax);

				if (copyTuple == NULL)
				{
					/*
					 * Tuple was deleted; or it's locked and we're under a
					 * SKIP LOCKED policy, so don't return it
					 */
					goto lnext;
				}
				/* remember the actually locked tuple's TID */
				tuple.t_self = copyTuple->t_self;

				/* Save locked tuple for EvalPlanQual testing below */
				*testTuple = copyTuple;

				/* Remember we need to do EPQ testing */
				epq_needed = true;

				/* Continue loop until we have all target tuples */
				break;

			case HeapTupleInvisible:
				elog(ERROR, "attempted to lock invisible tuple");

			default:
				elog(ERROR, "unrecognized heap_lock_tuple status: %u",
					 test);
		}

		/* Remember locked tuple's TID for EPQ testing and WHERE CURRENT OF */
		erm->curCtid = tuple.t_self;
	}

	/*
	 * If we need to do EvalPlanQual testing, do so.
	 */
	if (epq_needed)
	{
		/* Initialize EPQ machinery */
		EvalPlanQualBegin(&node->lr_epqstate, estate);

		/*
		 * Transfer any already-fetched tuples into the EPQ state, and fetch a
		 * copy of any rows that were successfully locked without any update
		 * having occurred.  (We do this in a separate pass so as to avoid
		 * overhead in the common case where there are no concurrent updates.)
		 * Make sure any inactive child rels have NULL test tuples in EPQ.
		 */
		foreach(lc, node->lr_arowMarks)
		{
			ExecAuxRowMark *aerm = (ExecAuxRowMark *) lfirst(lc);
			ExecRowMark *erm = aerm->rowmark;
			HeapTupleData tuple;
			Buffer		buffer;

			/* skip non-active child tables, but clear their test tuples */
			if (!erm->ermActive)
			{
				Assert(erm->rti != erm->prti);	/* check it's a child table */
				EvalPlanQualSetTuple(&node->lr_epqstate, erm->rti, NULL);
				continue;
			}

			/* was tuple updated and fetched above? */
			if (node->lr_curtuples[erm->rti - 1] != NULL)
			{
				/* yes, so set it as the EPQ test tuple for this rel */
				EvalPlanQualSetTuple(&node->lr_epqstate,
									 erm->rti,
									 node->lr_curtuples[erm->rti - 1]);
				/* freeing this tuple is now the responsibility of EPQ */
				node->lr_curtuples[erm->rti - 1] = NULL;
				continue;
			}

			/* foreign tables should have been fetched above */
			Assert(erm->relation->rd_rel->relkind != RELKIND_FOREIGN_TABLE);
			Assert(ItemPointerIsValid(&(erm->curCtid)));

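			/*
			 * Fetch with SnapshotAny: we just locked this tuple version
			 * ourselves, so it cannot have been removed in the meantime and
			 * the fetch should always succeed.
			 */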
			/* okay, fetch the tuple */
			tuple.t_self = erm->curCtid;
			if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
							false, NULL))
				elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");

			/* successful, copy and store tuple */
			EvalPlanQualSetTuple(&node->lr_epqstate, erm->rti,
								 heap_copytuple(&tuple));
			ReleaseBuffer(buffer);
		}

		/*
		 * Now fetch any non-locked source rows --- the EPQ logic knows how to
		 * do that.
		 */
		EvalPlanQualSetSlot(&node->lr_epqstate, slot);
		EvalPlanQualFetchRowMarks(&node->lr_epqstate);

		/*
		 * And finally we can re-evaluate the tuple.
		 */
		slot = EvalPlanQualNext(&node->lr_epqstate);
		if (TupIsNull(slot))
		{
			/* Updated tuple fails qual, so ignore it and go on */
			goto lnext;
		}
	}

	/* Got all locks, so return the current tuple */
	return slot;
}

/* ----------------------------------------------------------------
 *		ExecInitLockRows
 *
 *		This initializes the LockRows node state structures and
 *		the node's subplan.
 * ----------------------------------------------------------------
 */
LockRowsState *
ExecInitLockRows(LockRows *node, EState *estate, int eflags)
{
	LockRowsState *lrstate;
	Plan	   *outerPlan = outerPlan(node);
	List	   *epq_arowmarks;
	ListCell   *lc;

	/* check for unsupported flags */
	Assert(!(eflags & EXEC_FLAG_MARK));

	/*
	 * create state structure
	 */
	lrstate = makeNode(LockRowsState);
	lrstate->ps.plan = (Plan *) node;
	lrstate->ps.state = estate;

	/*
	 * Miscellaneous initialization
	 *
	 * LockRows nodes never call ExecQual or ExecProject.
	 */

	/*
	 * Tuple table initialization (XXX not actually used...)
	 */
	ExecInitResultTupleSlot(estate, &lrstate->ps);

	/*
	 * then initialize outer plan
	 */
	outerPlanState(lrstate) = ExecInitNode(outerPlan, estate, eflags);

	/*
	 * LockRows nodes do no projections, so initialize projection info for
	 * this node appropriately
	 */
	ExecAssignResultTypeFromTL(&lrstate->ps);
	lrstate->ps.ps_ProjInfo = NULL;

	/*
	 * Create workspace in which we can remember per-RTE locked tuples
	 */
	lrstate->lr_ntables = list_length(estate->es_range_table);
	lrstate->lr_curtuples = (HeapTuple *)
		palloc0(lrstate->lr_ntables * sizeof(HeapTuple));
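
	/*
	 * The array is indexed by range-table index less one (see the
	 * erm->rti - 1 references in ExecLockRows above), so it needs one entry
	 * per RTE even though only the locked rels' entries are ever used.
	 */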

	/*
	 * Locate the ExecRowMark(s) that this node is responsible for, and
	 * construct ExecAuxRowMarks for them.  (InitPlan should already have
	 * built the global list of ExecRowMarks.)
	 */
	lrstate->lr_arowMarks = NIL;
	epq_arowmarks = NIL;
	foreach(lc, node->rowMarks)
	{
		PlanRowMark *rc = (PlanRowMark *) lfirst(lc);
		ExecRowMark *erm;
		ExecAuxRowMark *aerm;

		Assert(IsA(rc, PlanRowMark));

		/* ignore "parent" rowmarks; they are irrelevant at runtime */
		if (rc->isParent)
			continue;

		/* safety check on size of lr_curtuples array */
		Assert(rc->rti > 0 && rc->rti <= lrstate->lr_ntables);

		/* find ExecRowMark and build ExecAuxRowMark */
		erm = ExecFindRowMark(estate, rc->rti, false);
		aerm = ExecBuildAuxRowMark(erm, outerPlan->targetlist);

		/*
		 * Only locking rowmarks go into our own list.  Non-locking marks are
		 * passed off to the EvalPlanQual machinery.  This is because we don't
		 * want to bother fetching non-locked rows unless we actually have to
		 * do an EPQ recheck.
		 */
		if (RowMarkRequiresRowShareLock(erm->markType))
			lrstate->lr_arowMarks = lappend(lrstate->lr_arowMarks, aerm);
		else
			epq_arowmarks = lappend(epq_arowmarks, aerm);
	}

	/* Now we have the info needed to set up EPQ state */
	EvalPlanQualInit(&lrstate->lr_epqstate, estate,
					 outerPlan, epq_arowmarks, node->epqParam);

	return lrstate;
}

/* ----------------------------------------------------------------
 *		ExecEndLockRows
 *
 *		This shuts down the subplan and frees resources allocated
 *		to this node.
 * ----------------------------------------------------------------
 */
void
ExecEndLockRows(LockRowsState *node)
{
	EvalPlanQualEnd(&node->lr_epqstate);
	ExecEndNode(outerPlanState(node));
}


void
ExecReScanLockRows(LockRowsState *node)
{
	/*
	 * If chgParam of the subnode is not null, then the plan will be
	 * re-scanned by the first ExecProcNode.
	 */
	if (node->ps.lefttree->chgParam == NULL)
		ExecReScan(node->ps.lefttree);
}