1 /*-------------------------------------------------------------------------
2  *
3  * lockfuncs.c
4  *		Functions for SQL access to various lock-manager capabilities.
5  *
6  * Copyright (c) 2002-2018, PostgreSQL Global Development Group
7  *
8  * IDENTIFICATION
9  *		src/backend/utils/adt/lockfuncs.c
10  *
11  *-------------------------------------------------------------------------
12  */
13 #include "postgres.h"
14 
15 #include "access/htup_details.h"
16 #include "access/xact.h"
17 #include "catalog/pg_type.h"
18 #include "funcapi.h"
19 #include "miscadmin.h"
20 #include "storage/predicate_internals.h"
21 #include "utils/array.h"
22 #include "utils/builtins.h"
23 
24 
25 /* This must match enum LockTagType! */
26 const char *const LockTagTypeNames[] = {
27 	"relation",
28 	"extend",
29 	"page",
30 	"tuple",
31 	"transactionid",
32 	"virtualxid",
33 	"speculative token",
34 	"object",
35 	"userlock",
36 	"advisory",
37 	"frozenid"
38 };
39 
40 /* This must match enum PredicateLockTargetType (predicate_internals.h) */
41 static const char *const PredicateLockTagTypeNames[] = {
42 	"relation",
43 	"page",
44 	"tuple"
45 };
46 
47 /* Working status for pg_lock_status */
48 typedef struct
49 {
50 	LockData   *lockData;		/* state data from lmgr */
51 	int			currIdx;		/* current PROCLOCK index */
52 	PredicateLockData *predLockData;	/* state data for pred locks */
53 	int			predLockIdx;	/* current index for pred lock */
54 } PG_Lock_Status;
55 
56 /* Number of columns in pg_locks output */
57 #define NUM_LOCK_STATUS_COLUMNS		15
58 
59 /*
60  * VXIDGetDatum - Construct a text representation of a VXID
61  *
62  * This is currently only used in pg_lock_status, so we put it here.
63  */
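/*
 * Illustrative example (values chosen for illustration only): backend ID 3
 * running local transaction 7234 is rendered as "3/7234".
 */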
64 static Datum
65 VXIDGetDatum(BackendId bid, LocalTransactionId lxid)
66 {
67 	/*
68 	 * The representation is "<bid>/<lxid>", decimal and unsigned decimal
69 	 * respectively.  Note that elog.c also knows how to format a vxid.
70 	 */
71 	char		vxidstr[32];
72 
73 	snprintf(vxidstr, sizeof(vxidstr), "%d/%u", bid, lxid);
74 
75 	return CStringGetTextDatum(vxidstr);
76 }
77 
78 
79 /*
80  * pg_lock_status - produce a view with one row per held or awaited lock mode
81  */
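/*
 * The pg_locks system view is built on this function.  An illustrative
 * query (not part of this file):
 *
 *		SELECT locktype, relation::regclass, mode, granted FROM pg_locks;
 */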
82 Datum
83 pg_lock_status(PG_FUNCTION_ARGS)
84 {
85 	FuncCallContext *funcctx;
86 	PG_Lock_Status *mystatus;
87 	LockData   *lockData;
88 	PredicateLockData *predLockData;
89 
90 	if (SRF_IS_FIRSTCALL())
91 	{
92 		TupleDesc	tupdesc;
93 		MemoryContext oldcontext;
94 
95 		/* create a function context for cross-call persistence */
96 		funcctx = SRF_FIRSTCALL_INIT();
97 
98 		/*
99 		 * switch to memory context appropriate for multiple function calls
100 		 */
101 		oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
102 
103 		/* build tupdesc for result tuples */
104 		/* this had better match function's declaration in pg_proc.h */
105 		tupdesc = CreateTemplateTupleDesc(NUM_LOCK_STATUS_COLUMNS, false);
106 		TupleDescInitEntry(tupdesc, (AttrNumber) 1, "locktype",
107 						   TEXTOID, -1, 0);
108 		TupleDescInitEntry(tupdesc, (AttrNumber) 2, "database",
109 						   OIDOID, -1, 0);
110 		TupleDescInitEntry(tupdesc, (AttrNumber) 3, "relation",
111 						   OIDOID, -1, 0);
112 		TupleDescInitEntry(tupdesc, (AttrNumber) 4, "page",
113 						   INT4OID, -1, 0);
114 		TupleDescInitEntry(tupdesc, (AttrNumber) 5, "tuple",
115 						   INT2OID, -1, 0);
116 		TupleDescInitEntry(tupdesc, (AttrNumber) 6, "virtualxid",
117 						   TEXTOID, -1, 0);
118 		TupleDescInitEntry(tupdesc, (AttrNumber) 7, "transactionid",
119 						   XIDOID, -1, 0);
120 		TupleDescInitEntry(tupdesc, (AttrNumber) 8, "classid",
121 						   OIDOID, -1, 0);
122 		TupleDescInitEntry(tupdesc, (AttrNumber) 9, "objid",
123 						   OIDOID, -1, 0);
124 		TupleDescInitEntry(tupdesc, (AttrNumber) 10, "objsubid",
125 						   INT2OID, -1, 0);
126 		TupleDescInitEntry(tupdesc, (AttrNumber) 11, "virtualtransaction",
127 						   TEXTOID, -1, 0);
128 		TupleDescInitEntry(tupdesc, (AttrNumber) 12, "pid",
129 						   INT4OID, -1, 0);
130 		TupleDescInitEntry(tupdesc, (AttrNumber) 13, "mode",
131 						   TEXTOID, -1, 0);
132 		TupleDescInitEntry(tupdesc, (AttrNumber) 14, "granted",
133 						   BOOLOID, -1, 0);
134 		TupleDescInitEntry(tupdesc, (AttrNumber) 15, "fastpath",
135 						   BOOLOID, -1, 0);
136 
137 		funcctx->tuple_desc = BlessTupleDesc(tupdesc);
138 
139 		/*
140 		 * Collect all the locking information that we will format and send
141 		 * out as a result set.
142 		 */
143 		mystatus = (PG_Lock_Status *) palloc(sizeof(PG_Lock_Status));
144 		funcctx->user_fctx = (void *) mystatus;
145 
146 		mystatus->lockData = GetLockStatusData();
147 		mystatus->currIdx = 0;
148 		mystatus->predLockData = GetPredicateLockStatusData();
149 		mystatus->predLockIdx = 0;
150 
151 		MemoryContextSwitchTo(oldcontext);
152 	}
153 
154 	funcctx = SRF_PERCALL_SETUP();
155 	mystatus = (PG_Lock_Status *) funcctx->user_fctx;
156 	lockData = mystatus->lockData;
157 
158 	while (mystatus->currIdx < lockData->nelements)
159 	{
160 		bool		granted;
161 		LOCKMODE	mode = 0;
162 		const char *locktypename;
163 	char		tnbuf[32];
164 		Datum		values[NUM_LOCK_STATUS_COLUMNS];
165 		bool		nulls[NUM_LOCK_STATUS_COLUMNS];
166 		HeapTuple	tuple;
167 		Datum		result;
168 		LockInstanceData *instance;
169 
170 		instance = &(lockData->locks[mystatus->currIdx]);
171 
172 		/*
173 		 * Look to see if there are any held lock modes in this PROCLOCK. If
174 		 * so, report, and destructively modify lockData so we don't report
175 		 * again.
176 		 */
177 		granted = false;
178 		if (instance->holdMask)
179 		{
180 			for (mode = 0; mode < MAX_LOCKMODES; mode++)
181 			{
182 				if (instance->holdMask & LOCKBIT_ON(mode))
183 				{
184 					granted = true;
185 					instance->holdMask &= LOCKBIT_OFF(mode);
186 					break;
187 				}
188 			}
189 		}
190 
191 		/*
192 		 * If no (more) held modes to report, see if PROC is waiting for a
193 		 * lock on this lock.
194 		 */
195 		if (!granted)
196 		{
197 			if (instance->waitLockMode != NoLock)
198 			{
199 				/* Yes, so report it with proper mode */
200 				mode = instance->waitLockMode;
201 
202 				/*
203 				 * We are now done with this PROCLOCK, so advance pointer to
204 				 * continue with next one on next call.
205 				 */
206 				mystatus->currIdx++;
207 			}
208 			else
209 			{
210 				/*
211 				 * Okay, we've displayed all the locks associated with this
212 				 * PROCLOCK, proceed to the next one.
213 				 */
214 				mystatus->currIdx++;
215 				continue;
216 			}
217 		}
218 
219 		/*
220 		 * Form tuple with appropriate data.
221 		 */
222 		MemSet(values, 0, sizeof(values));
223 		MemSet(nulls, false, sizeof(nulls));
224 
225 		if (instance->locktag.locktag_type <= LOCKTAG_LAST_TYPE)
226 			locktypename = LockTagTypeNames[instance->locktag.locktag_type];
227 		else
228 		{
229 			snprintf(tnbuf, sizeof(tnbuf), "unknown %d",
230 					 (int) instance->locktag.locktag_type);
231 			locktypename = tnbuf;
232 		}
233 		values[0] = CStringGetTextDatum(locktypename);
234 
235 		switch ((LockTagType) instance->locktag.locktag_type)
236 		{
237 			case LOCKTAG_RELATION:
238 			case LOCKTAG_RELATION_EXTEND:
239 				values[1] = ObjectIdGetDatum(instance->locktag.locktag_field1);
240 				values[2] = ObjectIdGetDatum(instance->locktag.locktag_field2);
241 				nulls[3] = true;
242 				nulls[4] = true;
243 				nulls[5] = true;
244 				nulls[6] = true;
245 				nulls[7] = true;
246 				nulls[8] = true;
247 				nulls[9] = true;
248 				break;
249 			case LOCKTAG_DATABASE_FROZEN_IDS:
250 				values[1] = ObjectIdGetDatum(instance->locktag.locktag_field1);
251 				nulls[2] = true;
252 				nulls[3] = true;
253 				nulls[4] = true;
254 				nulls[5] = true;
255 				nulls[6] = true;
256 				nulls[7] = true;
257 				nulls[8] = true;
258 				nulls[9] = true;
259 				break;
260 			case LOCKTAG_PAGE:
261 				values[1] = ObjectIdGetDatum(instance->locktag.locktag_field1);
262 				values[2] = ObjectIdGetDatum(instance->locktag.locktag_field2);
263 				values[3] = UInt32GetDatum(instance->locktag.locktag_field3);
264 				nulls[4] = true;
265 				nulls[5] = true;
266 				nulls[6] = true;
267 				nulls[7] = true;
268 				nulls[8] = true;
269 				nulls[9] = true;
270 				break;
271 			case LOCKTAG_TUPLE:
272 				values[1] = ObjectIdGetDatum(instance->locktag.locktag_field1);
273 				values[2] = ObjectIdGetDatum(instance->locktag.locktag_field2);
274 				values[3] = UInt32GetDatum(instance->locktag.locktag_field3);
275 				values[4] = UInt16GetDatum(instance->locktag.locktag_field4);
276 				nulls[5] = true;
277 				nulls[6] = true;
278 				nulls[7] = true;
279 				nulls[8] = true;
280 				nulls[9] = true;
281 				break;
282 			case LOCKTAG_TRANSACTION:
283 				values[6] =
284 					TransactionIdGetDatum(instance->locktag.locktag_field1);
285 				nulls[1] = true;
286 				nulls[2] = true;
287 				nulls[3] = true;
288 				nulls[4] = true;
289 				nulls[5] = true;
290 				nulls[7] = true;
291 				nulls[8] = true;
292 				nulls[9] = true;
293 				break;
294 			case LOCKTAG_VIRTUALTRANSACTION:
295 				values[5] = VXIDGetDatum(instance->locktag.locktag_field1,
296 										 instance->locktag.locktag_field2);
297 				nulls[1] = true;
298 				nulls[2] = true;
299 				nulls[3] = true;
300 				nulls[4] = true;
301 				nulls[6] = true;
302 				nulls[7] = true;
303 				nulls[8] = true;
304 				nulls[9] = true;
305 				break;
306 			case LOCKTAG_OBJECT:
307 			case LOCKTAG_USERLOCK:
308 			case LOCKTAG_ADVISORY:
309 			default:			/* treat unknown locktags like OBJECT */
310 				values[1] = ObjectIdGetDatum(instance->locktag.locktag_field1);
311 				values[7] = ObjectIdGetDatum(instance->locktag.locktag_field2);
312 				values[8] = ObjectIdGetDatum(instance->locktag.locktag_field3);
313 				values[9] = Int16GetDatum(instance->locktag.locktag_field4);
314 				nulls[2] = true;
315 				nulls[3] = true;
316 				nulls[4] = true;
317 				nulls[5] = true;
318 				nulls[6] = true;
319 				break;
320 		}
321 
322 		values[10] = VXIDGetDatum(instance->backend, instance->lxid);
323 		if (instance->pid != 0)
324 			values[11] = Int32GetDatum(instance->pid);
325 		else
326 			nulls[11] = true;
327 		values[12] = CStringGetTextDatum(GetLockmodeName(instance->locktag.locktag_lockmethodid, mode));
328 		values[13] = BoolGetDatum(granted);
329 		values[14] = BoolGetDatum(instance->fastpath);
330 
331 		tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
332 		result = HeapTupleGetDatum(tuple);
333 		SRF_RETURN_NEXT(funcctx, result);
334 	}
335 
336 	/*
337 	 * Have returned all regular locks. Now start on the SIREAD predicate
338 	 * locks.
339 	 */
340 	predLockData = mystatus->predLockData;
341 	if (mystatus->predLockIdx < predLockData->nelements)
342 	{
343 		PredicateLockTargetType lockType;
344 
345 		PREDICATELOCKTARGETTAG *predTag = &(predLockData->locktags[mystatus->predLockIdx]);
346 		SERIALIZABLEXACT *xact = &(predLockData->xacts[mystatus->predLockIdx]);
347 		Datum		values[NUM_LOCK_STATUS_COLUMNS];
348 		bool		nulls[NUM_LOCK_STATUS_COLUMNS];
349 		HeapTuple	tuple;
350 		Datum		result;
351 
352 		mystatus->predLockIdx++;
353 
354 		/*
355 		 * Form tuple with appropriate data.
356 		 */
357 		MemSet(values, 0, sizeof(values));
358 		MemSet(nulls, false, sizeof(nulls));
359 
360 		/* lock type */
361 		lockType = GET_PREDICATELOCKTARGETTAG_TYPE(*predTag);
362 
363 		values[0] = CStringGetTextDatum(PredicateLockTagTypeNames[lockType]);
364 
365 		/* lock target */
366 		values[1] = GET_PREDICATELOCKTARGETTAG_DB(*predTag);
367 		values[2] = GET_PREDICATELOCKTARGETTAG_RELATION(*predTag);
368 		if (lockType == PREDLOCKTAG_TUPLE)
369 			values[4] = GET_PREDICATELOCKTARGETTAG_OFFSET(*predTag);
370 		else
371 			nulls[4] = true;
372 		if ((lockType == PREDLOCKTAG_TUPLE) ||
373 			(lockType == PREDLOCKTAG_PAGE))
374 			values[3] = GET_PREDICATELOCKTARGETTAG_PAGE(*predTag);
375 		else
376 			nulls[3] = true;
377 
378 		/* these fields are targets for other types of locks */
379 		nulls[5] = true;		/* virtualxid */
380 		nulls[6] = true;		/* transactionid */
381 		nulls[7] = true;		/* classid */
382 		nulls[8] = true;		/* objid */
383 		nulls[9] = true;		/* objsubid */
384 
385 		/* lock holder */
386 		values[10] = VXIDGetDatum(xact->vxid.backendId,
387 								  xact->vxid.localTransactionId);
388 		if (xact->pid != 0)
389 			values[11] = Int32GetDatum(xact->pid);
390 		else
391 			nulls[11] = true;
392 
393 		/*
394 		 * Lock mode. Currently all predicate locks are SIReadLocks, which are
395 		 * always held (never waiting) and have no fast path
396 		 */
397 		values[12] = CStringGetTextDatum("SIReadLock");
398 		values[13] = BoolGetDatum(true);
399 		values[14] = BoolGetDatum(false);
400 
401 		tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
402 		result = HeapTupleGetDatum(tuple);
403 		SRF_RETURN_NEXT(funcctx, result);
404 	}
405 
406 	SRF_RETURN_DONE(funcctx);
407 }
408 
409 
410 /*
411  * pg_blocking_pids - produce an array of the PIDs blocking given PID
412  *
413  * The reported PIDs are those that hold a lock conflicting with blocked_pid's
414  * current request (hard block), or are requesting such a lock and are ahead
415  * of blocked_pid in the lock's wait queue (soft block).
416  *
417  * In parallel-query cases, we report all PIDs blocking any member of the
418  * given PID's lock group, and the reported PIDs are those of the blocking
419  * PIDs' lock group leaders.  This allows callers to compare the result to
420  * lists of clients' pg_backend_pid() results even during a parallel query.
421  *
422  * Parallel query makes it possible for there to be duplicate PIDs in the
423  * result (either because multiple waiters are blocked by same PID, or
424  * because multiple blockers have same group leader PID).  We do not bother
425  * to eliminate such duplicates from the result.
426  *
427  * We need not consider predicate locks here, since those don't block anything.
428  */
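/*
 * Illustrative SQL usage (not part of this file): report blockers for every
 * backend that is currently blocked on a heavyweight lock:
 *
 *		SELECT pid, pg_blocking_pids(pid)
 *		FROM pg_stat_activity
 *		WHERE cardinality(pg_blocking_pids(pid)) > 0;
 */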
429 Datum
430 pg_blocking_pids(PG_FUNCTION_ARGS)
431 {
432 	int			blocked_pid = PG_GETARG_INT32(0);
433 	Datum	   *arrayelems;
434 	int			narrayelems;
435 	BlockedProcsData *lockData; /* state data from lmgr */
436 	int			i,
437 				j;
438 
439 	/* Collect a snapshot of lock manager state */
440 	lockData = GetBlockerStatusData(blocked_pid);
441 
442 	/* We can't need more output entries than there are reported PROCLOCKs */
443 	arrayelems = (Datum *) palloc(lockData->nlocks * sizeof(Datum));
444 	narrayelems = 0;
445 
446 	/* For each blocked proc in the lock group ... */
447 	for (i = 0; i < lockData->nprocs; i++)
448 	{
449 		BlockedProcData *bproc = &lockData->procs[i];
450 		LockInstanceData *instances = &lockData->locks[bproc->first_lock];
451 		int		   *preceding_waiters = &lockData->waiter_pids[bproc->first_waiter];
452 		LockInstanceData *blocked_instance;
453 		LockMethod	lockMethodTable;
454 		int			conflictMask;
455 
456 		/*
457 		 * Locate the blocked proc's own entry in the LockInstanceData array.
458 		 * There should be exactly one matching entry.
459 		 */
460 		blocked_instance = NULL;
461 		for (j = 0; j < bproc->num_locks; j++)
462 		{
463 			LockInstanceData *instance = &(instances[j]);
464 
465 			if (instance->pid == bproc->pid)
466 			{
467 				Assert(blocked_instance == NULL);
468 				blocked_instance = instance;
469 			}
470 		}
471 		Assert(blocked_instance != NULL);
472 
473 		lockMethodTable = GetLockTagsMethodTable(&(blocked_instance->locktag));
474 		conflictMask = lockMethodTable->conflictTab[blocked_instance->waitLockMode];
475 
476 		/* Now scan the PROCLOCK data for conflicting procs */
477 		for (j = 0; j < bproc->num_locks; j++)
478 		{
479 			LockInstanceData *instance = &(instances[j]);
480 
481 			/* A proc never blocks itself, so ignore that entry */
482 			if (instance == blocked_instance)
483 				continue;
484 			/* Members of same lock group never block each other, either */
485 			if (instance->leaderPid == blocked_instance->leaderPid)
486 				continue;
487 
488 			if (conflictMask & instance->holdMask)
489 			{
490 				/* hard block: blocked by lock already held by this entry */
491 			}
492 			else if (instance->waitLockMode != NoLock &&
493 					 (conflictMask & LOCKBIT_ON(instance->waitLockMode)))
494 			{
495 				/* conflict in lock requests; who's in front in wait queue? */
496 				bool		ahead = false;
497 				int			k;
498 
499 				for (k = 0; k < bproc->num_waiters; k++)
500 				{
501 					if (preceding_waiters[k] == instance->pid)
502 					{
503 						/* soft block: this entry is ahead of blocked proc */
504 						ahead = true;
505 						break;
506 					}
507 				}
508 				if (!ahead)
509 					continue;	/* not blocked by this entry */
510 			}
511 			else
512 			{
513 				/* not blocked by this entry */
514 				continue;
515 			}
516 
517 			/* blocked by this entry, so emit a record */
518 			arrayelems[narrayelems++] = Int32GetDatum(instance->leaderPid);
519 		}
520 	}
521 
522 	/* Assert we didn't overrun arrayelems[] */
523 	Assert(narrayelems <= lockData->nlocks);
524 
525 	/* Construct array, using hardwired knowledge about int4 type */
526 	PG_RETURN_ARRAYTYPE_P(construct_array(arrayelems, narrayelems,
527 										  INT4OID,
528 										  sizeof(int32), true, 'i'));
529 }
530 
531 
532 /*
533  * pg_safe_snapshot_blocking_pids - produce an array of the PIDs blocking
534  * given PID from getting a safe snapshot
535  *
536  * XXX this does not consider parallel-query cases; not clear how big a
537  * problem that is in practice
538  */
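/*
 * Illustrative SQL usage (not part of this file), with <pid> standing in
 * for the backend of interest:
 *
 *		SELECT pg_safe_snapshot_blocking_pids(<pid>);
 */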
539 Datum
540 pg_safe_snapshot_blocking_pids(PG_FUNCTION_ARGS)
541 {
542 	int			blocked_pid = PG_GETARG_INT32(0);
543 	int		   *blockers;
544 	int			num_blockers;
545 	Datum	   *blocker_datums;
546 
547 	/* A buffer big enough for any possible blocker list without truncation */
548 	blockers = (int *) palloc(MaxBackends * sizeof(int));
549 
550 	/* Collect a snapshot of processes waited for by GetSafeSnapshot */
551 	num_blockers =
552 		GetSafeSnapshotBlockingPids(blocked_pid, blockers, MaxBackends);
553
554 	/* Convert int array to Datum array */
555 	if (num_blockers > 0)
556 	{
557 		int			i;
558 
559 		blocker_datums = (Datum *) palloc(num_blockers * sizeof(Datum));
560 		for (i = 0; i < num_blockers; ++i)
561 			blocker_datums[i] = Int32GetDatum(blockers[i]);
562 	}
563 	else
564 		blocker_datums = NULL;
565 
566 	/* Construct array, using hardwired knowledge about int4 type */
567 	PG_RETURN_ARRAYTYPE_P(construct_array(blocker_datums, num_blockers,
568 										  INT4OID,
569 										  sizeof(int32), true, 'i'));
570 }
571 
572 
573 /*
574  * pg_isolation_test_session_is_blocked - support function for isolationtester
575  *
576  * Check if specified PID is blocked by any of the PIDs listed in the second
577  * argument.  Currently, this looks for blocking caused by waiting for
578  * heavyweight locks or safe snapshots.  We ignore blockage caused by PIDs
579  * not directly under the isolationtester's control, eg autovacuum.
580  *
581  * This is an undocumented function intended for use by the isolation tester,
582  * and may change in future releases as required for testing purposes.
583  */
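/*
 * Roughly, the isolation tester issues something like (illustrative):
 *
 *		SELECT pg_isolation_test_session_is_blocked(<waiting pid>,
 *													 '{<pid1>,<pid2>,...}');
 */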
584 Datum
585 pg_isolation_test_session_is_blocked(PG_FUNCTION_ARGS)
586 {
587 	int			blocked_pid = PG_GETARG_INT32(0);
588 	ArrayType  *interesting_pids_a = PG_GETARG_ARRAYTYPE_P(1);
589 	ArrayType  *blocking_pids_a;
590 	int32	   *interesting_pids;
591 	int32	   *blocking_pids;
592 	int			num_interesting_pids;
593 	int			num_blocking_pids;
594 	int			dummy;
595 	int			i,
596 				j;
597 
598 	/* Validate the passed-in array */
599 	Assert(ARR_ELEMTYPE(interesting_pids_a) == INT4OID);
600 	if (array_contains_nulls(interesting_pids_a))
601 		elog(ERROR, "array must not contain nulls");
602 	interesting_pids = (int32 *) ARR_DATA_PTR(interesting_pids_a);
603 	num_interesting_pids = ArrayGetNItems(ARR_NDIM(interesting_pids_a),
604 										  ARR_DIMS(interesting_pids_a));
605 
606 	/*
607 	 * Get the PIDs of all sessions blocking the given session's attempt to
608 	 * acquire heavyweight locks.
609 	 */
610 	blocking_pids_a =
611 		DatumGetArrayTypeP(DirectFunctionCall1(pg_blocking_pids, blocked_pid));
612 
613 	Assert(ARR_ELEMTYPE(blocking_pids_a) == INT4OID);
614 	Assert(!array_contains_nulls(blocking_pids_a));
615 	blocking_pids = (int32 *) ARR_DATA_PTR(blocking_pids_a);
616 	num_blocking_pids = ArrayGetNItems(ARR_NDIM(blocking_pids_a),
617 									   ARR_DIMS(blocking_pids_a));
618 
619 	/*
620 	 * Check if any of these are in the list of interesting PIDs, that being
621 	 * the sessions that the isolation tester is running.  We don't use
622 	 * "arrayoverlaps" here, because it would lead to cache lookups and one of
623 	 * our goals is to run quickly under CLOBBER_CACHE_ALWAYS.  We expect
624 	 * blocking_pids to be usually empty and otherwise a very small number in
625 	 * isolation tester cases, so make that the outer loop of a naive search
626 	 * for a match.
627 	 */
628 	for (i = 0; i < num_blocking_pids; i++)
629 		for (j = 0; j < num_interesting_pids; j++)
630 		{
631 			if (blocking_pids[i] == interesting_pids[j])
632 				PG_RETURN_BOOL(true);
633 		}
634 
635 	/*
636 	 * Check if blocked_pid is waiting for a safe snapshot.  We could in
637 	 * theory check the resulting array of blocker PIDs against the
638 	 * interesting PIDs whitelist, but since there is no danger of autovacuum
639 	 * blocking GetSafeSnapshot there seems to be no point in expending cycles
640 	 * on allocating a buffer and searching for overlap; so it's presently
641 	 * sufficient for the isolation tester's purposes to use a single element
642 	 * buffer and check if the number of safe snapshot blockers is non-zero.
643 	 */
644 	if (GetSafeSnapshotBlockingPids(blocked_pid, &dummy, 1) > 0)
645 		PG_RETURN_BOOL(true);
646 
647 	PG_RETURN_BOOL(false);
648 }
649 
650 
651 /*
652  * Functions for manipulating advisory locks
653  *
654  * We make use of the locktag fields as follows:
655  *
656  *	field1: MyDatabaseId ... ensures locks are local to each database
657  *	field2: first of 2 int4 keys, or high-order half of an int8 key
658  *	field3: second of 2 int4 keys, or low-order half of an int8 key
659  *	field4: 1 if using an int8 key, 2 if using 2 int4 keys
660  */
661 #define SET_LOCKTAG_INT64(tag, key64) \
662 	SET_LOCKTAG_ADVISORY(tag, \
663 						 MyDatabaseId, \
664 						 (uint32) ((key64) >> 32), \
665 						 (uint32) (key64), \
666 						 1)
667 #define SET_LOCKTAG_INT32(tag, key1, key2) \
668 	SET_LOCKTAG_ADVISORY(tag, MyDatabaseId, key1, key2, 2)
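/*
 * As a worked example of the mapping above: SET_LOCKTAG_INT64(tag,
 * 0x0000000A00000014) stores field2 = 0x0000000A (high half), field3 =
 * 0x00000014 (low half), and field4 = 1, while SET_LOCKTAG_INT32(tag, 10, 20)
 * stores field2 = 10, field3 = 20, and field4 = 2.  field1 is MyDatabaseId
 * in both cases.
 */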
669 
670 static void
671 PreventAdvisoryLocksInParallelMode(void)
672 {
673 	if (IsInParallelMode())
674 		ereport(ERROR,
675 				(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
676 				 errmsg("cannot use advisory locks during a parallel operation")));
677 }
678 
679 /*
680  * pg_advisory_lock(int8) - acquire exclusive lock on an int8 key
681  */
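/*
 * Illustrative SQL usage (not part of this file):
 *
 *		SELECT pg_advisory_lock(42);
 *
 * blocks until the session-level exclusive lock on key 42 is granted.
 */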
682 Datum
683 pg_advisory_lock_int8(PG_FUNCTION_ARGS)
684 {
685 	int64		key = PG_GETARG_INT64(0);
686 	LOCKTAG		tag;
687 
688 	PreventAdvisoryLocksInParallelMode();
689 	SET_LOCKTAG_INT64(tag, key);
690 
691 	(void) LockAcquire(&tag, ExclusiveLock, true, false);
692 
693 	PG_RETURN_VOID();
694 }
695 
696 /*
697  * pg_advisory_xact_lock(int8) - acquire xact scoped
698  * exclusive lock on an int8 key
699  */
700 Datum
701 pg_advisory_xact_lock_int8(PG_FUNCTION_ARGS)
702 {
703 	int64		key = PG_GETARG_INT64(0);
704 	LOCKTAG		tag;
705 
706 	PreventAdvisoryLocksInParallelMode();
707 	SET_LOCKTAG_INT64(tag, key);
708 
709 	(void) LockAcquire(&tag, ExclusiveLock, false, false);
710 
711 	PG_RETURN_VOID();
712 }
713 
714 /*
715  * pg_advisory_lock_shared(int8) - acquire share lock on an int8 key
716  */
717 Datum
718 pg_advisory_lock_shared_int8(PG_FUNCTION_ARGS)
719 {
720 	int64		key = PG_GETARG_INT64(0);
721 	LOCKTAG		tag;
722
723 	PreventAdvisoryLocksInParallelMode();
724 	SET_LOCKTAG_INT64(tag, key);
725 
726 	(void) LockAcquire(&tag, ShareLock, true, false);
727 
728 	PG_RETURN_VOID();
729 }
730 
731 /*
732  * pg_advisory_xact_lock_shared(int8) - acquire xact scoped
733  * share lock on an int8 key
734  */
735 Datum
736 pg_advisory_xact_lock_shared_int8(PG_FUNCTION_ARGS)
737 {
738 	int64		key = PG_GETARG_INT64(0);
739 	LOCKTAG		tag;
740 
741 	PreventAdvisoryLocksInParallelMode();
742 	SET_LOCKTAG_INT64(tag, key);
743 
744 	(void) LockAcquire(&tag, ShareLock, false, false);
745 
746 	PG_RETURN_VOID();
747 }
748 
749 /*
750  * pg_try_advisory_lock(int8) - acquire exclusive lock on an int8 key, no wait
751  *
752  * Returns true if successful, false if lock not available
753  */
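/*
 * Illustrative SQL usage (not part of this file):
 *
 *		SELECT pg_try_advisory_lock(42);
 *
 * returns false immediately if a conflicting lock on key 42 is held
 * elsewhere.
 */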
754 Datum
755 pg_try_advisory_lock_int8(PG_FUNCTION_ARGS)
756 {
757 	int64		key = PG_GETARG_INT64(0);
758 	LOCKTAG		tag;
759 	LockAcquireResult res;
760 
761 	PreventAdvisoryLocksInParallelMode();
762 	SET_LOCKTAG_INT64(tag, key);
763 
764 	res = LockAcquire(&tag, ExclusiveLock, true, true);
765 
766 	PG_RETURN_BOOL(res != LOCKACQUIRE_NOT_AVAIL);
767 }
768 
769 /*
770  * pg_try_advisory_xact_lock(int8) - acquire xact scoped
771  * exclusive lock on an int8 key, no wait
772  *
773  * Returns true if successful, false if lock not available
774  */
775 Datum
776 pg_try_advisory_xact_lock_int8(PG_FUNCTION_ARGS)
777 {
778 	int64		key = PG_GETARG_INT64(0);
779 	LOCKTAG		tag;
780 	LockAcquireResult res;
781 
782 	PreventAdvisoryLocksInParallelMode();
783 	SET_LOCKTAG_INT64(tag, key);
784
785 	res = LockAcquire(&tag, ExclusiveLock, false, true);
786 
787 	PG_RETURN_BOOL(res != LOCKACQUIRE_NOT_AVAIL);
788 }
789 
790 /*
791  * pg_try_advisory_lock_shared(int8) - acquire share lock on an int8 key, no wait
792  *
793  * Returns true if successful, false if lock not available
794  */
795 Datum
796 pg_try_advisory_lock_shared_int8(PG_FUNCTION_ARGS)
797 {
798 	int64		key = PG_GETARG_INT64(0);
799 	LOCKTAG		tag;
800 	LockAcquireResult res;
801 
802 	PreventAdvisoryLocksInParallelMode();
803 	SET_LOCKTAG_INT64(tag, key);
804 
805 	res = LockAcquire(&tag, ShareLock, true, true);
806 
807 	PG_RETURN_BOOL(res != LOCKACQUIRE_NOT_AVAIL);
808 }
809 
810 /*
811  * pg_try_advisory_xact_lock_shared(int8) - acquire xact scoped
812  * share lock on an int8 key, no wait
813  *
814  * Returns true if successful, false if lock not available
815  */
816 Datum
817 pg_try_advisory_xact_lock_shared_int8(PG_FUNCTION_ARGS)
818 {
819 	int64		key = PG_GETARG_INT64(0);
820 	LOCKTAG		tag;
821 	LockAcquireResult res;
822 
823 	PreventAdvisoryLocksInParallelMode();
824 	SET_LOCKTAG_INT64(tag, key);
825 
826 	res = LockAcquire(&tag, ShareLock, false, true);
827 
828 	PG_RETURN_BOOL(res != LOCKACQUIRE_NOT_AVAIL);
829 }
830 
831 /*
832  * pg_advisory_unlock(int8) - release exclusive lock on an int8 key
833  *
834  * Returns true if successful, false if lock was not held
835 */
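/*
 * Illustrative SQL usage (not part of this file):
 *
 *		SELECT pg_advisory_unlock(42);
 */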
836 Datum
837 pg_advisory_unlock_int8(PG_FUNCTION_ARGS)
838 {
839 	int64		key = PG_GETARG_INT64(0);
840 	LOCKTAG		tag;
841 	bool		res;
842 
843 	PreventAdvisoryLocksInParallelMode();
844 	SET_LOCKTAG_INT64(tag, key);
845 
846 	res = LockRelease(&tag, ExclusiveLock, true);
847 
848 	PG_RETURN_BOOL(res);
849 }
850
851 /*
852  * pg_advisory_unlock_shared(int8) - release share lock on an int8 key
853  *
854  * Returns true if successful, false if lock was not held
855  */
856 Datum
857 pg_advisory_unlock_shared_int8(PG_FUNCTION_ARGS)
858 {
859 	int64		key = PG_GETARG_INT64(0);
860 	LOCKTAG		tag;
861 	bool		res;
862 
863 	PreventAdvisoryLocksInParallelMode();
864 	SET_LOCKTAG_INT64(tag, key);
865 
866 	res = LockRelease(&tag, ShareLock, true);
867 
868 	PG_RETURN_BOOL(res);
869 }
870 
871 /*
872  * pg_advisory_lock(int4, int4) - acquire exclusive lock on 2 int4 keys
873  */
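/*
 * Illustrative SQL usage (not part of this file); applications often use the
 * two-key form to namespace their locks, e.g. (class id, object id):
 *
 *		SELECT pg_advisory_lock(1, 2);
 */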
874 Datum
875 pg_advisory_lock_int4(PG_FUNCTION_ARGS)
876 {
877 	int32		key1 = PG_GETARG_INT32(0);
878 	int32		key2 = PG_GETARG_INT32(1);
879 	LOCKTAG		tag;
880 
881 	PreventAdvisoryLocksInParallelMode();
882 	SET_LOCKTAG_INT32(tag, key1, key2);
883 
884 	(void) LockAcquire(&tag, ExclusiveLock, true, false);
885 
886 	PG_RETURN_VOID();
887 }
888 
889 /*
890  * pg_advisory_xact_lock(int4, int4) - acquire xact scoped
891  * exclusive lock on 2 int4 keys
892  */
893 Datum
894 pg_advisory_xact_lock_int4(PG_FUNCTION_ARGS)
895 {
896 	int32		key1 = PG_GETARG_INT32(0);
897 	int32		key2 = PG_GETARG_INT32(1);
898 	LOCKTAG		tag;
899 
900 	PreventAdvisoryLocksInParallelMode();
901 	SET_LOCKTAG_INT32(tag, key1, key2);
902 
903 	(void) LockAcquire(&tag, ExclusiveLock, false, false);
904 
905 	PG_RETURN_VOID();
906 }
907 
908 /*
909  * pg_advisory_lock_shared(int4, int4) - acquire share lock on 2 int4 keys
910  */
911 Datum
912 pg_advisory_lock_shared_int4(PG_FUNCTION_ARGS)
913 {
914 	int32		key1 = PG_GETARG_INT32(0);
915 	int32		key2 = PG_GETARG_INT32(1);
916 	LOCKTAG		tag;
917 
918 	PreventAdvisoryLocksInParallelMode();
919 	SET_LOCKTAG_INT32(tag, key1, key2);
920 
921 	(void) LockAcquire(&tag, ShareLock, true, false);
922 
923 	PG_RETURN_VOID();
924 }
925 
926 /*
927  * pg_advisory_xact_lock_shared(int4, int4) - acquire xact scoped
928  * share lock on 2 int4 keys
929  */
930 Datum
931 pg_advisory_xact_lock_shared_int4(PG_FUNCTION_ARGS)
932 {
933 	int32		key1 = PG_GETARG_INT32(0);
934 	int32		key2 = PG_GETARG_INT32(1);
935 	LOCKTAG		tag;
936 
937 	PreventAdvisoryLocksInParallelMode();
938 	SET_LOCKTAG_INT32(tag, key1, key2);
939 
940 	(void) LockAcquire(&tag, ShareLock, false, false);
941 
942 	PG_RETURN_VOID();
943 }
944 
945 /*
946  * pg_try_advisory_lock(int4, int4) - acquire exclusive lock on 2 int4 keys, no wait
947  *
948  * Returns true if successful, false if lock not available
949  */
950 Datum
951 pg_try_advisory_lock_int4(PG_FUNCTION_ARGS)
952 {
953 	int32		key1 = PG_GETARG_INT32(0);
954 	int32		key2 = PG_GETARG_INT32(1);
955 	LOCKTAG		tag;
956 	LockAcquireResult res;
957 
958 	PreventAdvisoryLocksInParallelMode();
959 	SET_LOCKTAG_INT32(tag, key1, key2);
960 
961 	res = LockAcquire(&tag, ExclusiveLock, true, true);
962 
963 	PG_RETURN_BOOL(res != LOCKACQUIRE_NOT_AVAIL);
964 }
965 
966 /*
967  * pg_try_advisory_xact_lock(int4, int4) - acquire xact scoped
968  * exclusive lock on 2 int4 keys, no wait
969  *
970  * Returns true if successful, false if lock not available
971  */
972 Datum
973 pg_try_advisory_xact_lock_int4(PG_FUNCTION_ARGS)
974 {
975 	int32		key1 = PG_GETARG_INT32(0);
976 	int32		key2 = PG_GETARG_INT32(1);
977 	LOCKTAG		tag;
978 	LockAcquireResult res;
979 
980 	PreventAdvisoryLocksInParallelMode();
981 	SET_LOCKTAG_INT32(tag, key1, key2);
982 
983 	res = LockAcquire(&tag, ExclusiveLock, false, true);
984 
985 	PG_RETURN_BOOL(res != LOCKACQUIRE_NOT_AVAIL);
986 }
987 
988 /*
989  * pg_try_advisory_lock_shared(int4, int4) - acquire share lock on 2 int4 keys, no wait
990  *
991  * Returns true if successful, false if lock not available
992  */
993 Datum
994 pg_try_advisory_lock_shared_int4(PG_FUNCTION_ARGS)
995 {
996 	int32		key1 = PG_GETARG_INT32(0);
997 	int32		key2 = PG_GETARG_INT32(1);
998 	LOCKTAG		tag;
999 	LockAcquireResult res;
1000 
1001 	PreventAdvisoryLocksInParallelMode();
1002 	SET_LOCKTAG_INT32(tag, key1, key2);
1003 
1004 	res = LockAcquire(&tag, ShareLock, true, true);
1005 
1006 	PG_RETURN_BOOL(res != LOCKACQUIRE_NOT_AVAIL);
1007 }
1008 
1009 /*
1010  * pg_try_advisory_xact_lock_shared(int4, int4) - acquire xact scoped
1011  * share lock on 2 int4 keys, no wait
1012  *
1013  * Returns true if successful, false if lock not available
1014  */
1015 Datum
1016 pg_try_advisory_xact_lock_shared_int4(PG_FUNCTION_ARGS)
1017 {
1018 	int32		key1 = PG_GETARG_INT32(0);
1019 	int32		key2 = PG_GETARG_INT32(1);
1020 	LOCKTAG		tag;
1021 	LockAcquireResult res;
1022 
1023 	PreventAdvisoryLocksInParallelMode();
1024 	SET_LOCKTAG_INT32(tag, key1, key2);
1025 
1026 	res = LockAcquire(&tag, ShareLock, false, true);
1027 
1028 	PG_RETURN_BOOL(res != LOCKACQUIRE_NOT_AVAIL);
1029 }
1030 
1031 /*
1032  * pg_advisory_unlock(int4, int4) - release exclusive lock on 2 int4 keys
1033  *
1034  * Returns true if successful, false if lock was not held
1035 */
1036 Datum
1037 pg_advisory_unlock_int4(PG_FUNCTION_ARGS)
1038 {
1039 	int32		key1 = PG_GETARG_INT32(0);
1040 	int32		key2 = PG_GETARG_INT32(1);
1041 	LOCKTAG		tag;
1042 	bool		res;
1043 
1044 	PreventAdvisoryLocksInParallelMode();
1045 	SET_LOCKTAG_INT32(tag, key1, key2);
1046 
1047 	res = LockRelease(&tag, ExclusiveLock, true);
1048 
1049 	PG_RETURN_BOOL(res);
1050 }
1051 
1052 /*
1053  * pg_advisory_unlock_shared(int4, int4) - release share lock on 2 int4 keys
1054  *
1055  * Returns true if successful, false if lock was not held
1056  */
1057 Datum
1058 pg_advisory_unlock_shared_int4(PG_FUNCTION_ARGS)
1059 {
1060 	int32		key1 = PG_GETARG_INT32(0);
1061 	int32		key2 = PG_GETARG_INT32(1);
1062 	LOCKTAG		tag;
1063 	bool		res;
1064 
1065 	PreventAdvisoryLocksInParallelMode();
1066 	SET_LOCKTAG_INT32(tag, key1, key2);
1067 
1068 	res = LockRelease(&tag, ShareLock, true);
1069
1070 	PG_RETURN_BOOL(res);
1071 }
1072 
1073 /*
1074  * pg_advisory_unlock_all() - release all advisory locks
1075  */
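/*
 * Illustrative SQL usage (not part of this file):
 *
 *		SELECT pg_advisory_unlock_all();
 *
 * Session-level advisory locks are also released automatically at session
 * end.
 */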
1076 Datum
1077 pg_advisory_unlock_all(PG_FUNCTION_ARGS)
1078 {
1079 	LockReleaseSession(USER_LOCKMETHOD);
1080 
1081 	PG_RETURN_VOID();
1082 }
1083