/*------------------------------------------------------------------------
 *
 * regress.c
 *	 Code for various C-language functions defined as part of the
 *	 regression tests.
 *
 * This code is released under the terms of the PostgreSQL License.
 *
 * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/test/regress/regress.c
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include <float.h>
#include <math.h>
#include <signal.h>

#include "access/htup_details.h"
#include "access/transam.h"
#include "access/tuptoaster.h"
#include "access/xact.h"
#include "catalog/pg_type.h"
#include "commands/sequence.h"
#include "commands/trigger.h"
#include "executor/executor.h"
#include "executor/spi.h"
#include "miscadmin.h"
#include "port/atomics.h"
#include "storage/spin.h"
#include "utils/builtins.h"
#include "utils/geo_decls.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/typcache.h"


#define EXPECT_TRUE(expr)	\
	do { \
		if (!(expr)) \
			elog(ERROR, \
				 "%s was unexpectedly false in file \"%s\" line %u", \
				 #expr, __FILE__, __LINE__); \
	} while (0)

#define EXPECT_EQ_U32(result_expr, expected_expr)	\
	do { \
		uint32		result = (result_expr); \
		uint32		expected = (expected_expr); \
		if (result != expected) \
			elog(ERROR, \
				 "%s yielded %u, expected %s in file \"%s\" line %u", \
				 #result_expr, result, #expected_expr, __FILE__, __LINE__); \
	} while (0)

#define EXPECT_EQ_U64(result_expr, expected_expr)	\
	do { \
		uint64		result = (result_expr); \
		uint64		expected = (expected_expr); \
		if (result != expected) \
			elog(ERROR, \
				 "%s yielded " UINT64_FORMAT ", expected %s in file \"%s\" line %u", \
				 #result_expr, result, #expected_expr, __FILE__, __LINE__); \
	} while (0)
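
/*
 * Illustrative only (not exercised by the tests themselves): if some counter
 * currently holds 2, EXPECT_EQ_U32(pg_atomic_read_u32(&var), 3) would abort
 * the calling SQL statement with an ERROR naming the expression, the value it
 * yielded, and the source location, so a failed check points directly at the
 * offending assertion.
 */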

#define LDELIM			'('
#define RDELIM			')'
#define DELIM			','

static void regress_lseg_construct(LSEG *lseg, Point *pt1, Point *pt2);

PG_MODULE_MAGIC;


/* return the point where two paths intersect, or NULL if no intersection. */
PG_FUNCTION_INFO_V1(interpt_pp);

Datum
interpt_pp(PG_FUNCTION_ARGS)
{
	PATH	   *p1 = PG_GETARG_PATH_P(0);
	PATH	   *p2 = PG_GETARG_PATH_P(1);
	int			i,
				j;
	LSEG		seg1,
				seg2;
	bool		found;			/* We've found the intersection */

	found = false;				/* Haven't found it yet */

	for (i = 0; i < p1->npts - 1 && !found; i++)
	{
		regress_lseg_construct(&seg1, &p1->p[i], &p1->p[i + 1]);
		for (j = 0; j < p2->npts - 1 && !found; j++)
		{
			regress_lseg_construct(&seg2, &p2->p[j], &p2->p[j + 1]);
			if (DatumGetBool(DirectFunctionCall2(lseg_intersect,
												 LsegPGetDatum(&seg1),
												 LsegPGetDatum(&seg2))))
				found = true;
		}
	}

	if (!found)
		PG_RETURN_NULL();

	/*
	 * Note: DirectFunctionCall2 will kick out an error if lseg_interpt()
	 * returns NULL, but that should be impossible since we know the two
	 * segments intersect.
	 */
	PG_RETURN_DATUM(DirectFunctionCall2(lseg_interpt,
										LsegPGetDatum(&seg1),
										LsegPGetDatum(&seg2)));
}
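
/*
 * Minimal SQL-level sketch (assuming the usual CREATE FUNCTION declaration of
 * interpt_pp(path, path) RETURNS point in the regression schema):
 *
 *		SELECT interpt_pp('[(0,0),(2,2)]'::path, '[(0,2),(2,0)]'::path);
 *
 * should yield the point (1,1), while two paths that never cross yield NULL.
 */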


/* like lseg_construct, but assume space already allocated */
static void
regress_lseg_construct(LSEG *lseg, Point *pt1, Point *pt2)
{
	lseg->p[0].x = pt1->x;
	lseg->p[0].y = pt1->y;
	lseg->p[1].x = pt2->x;
	lseg->p[1].y = pt2->y;
}

PG_FUNCTION_INFO_V1(overpaid);

Datum
overpaid(PG_FUNCTION_ARGS)
{
	HeapTupleHeader tuple = PG_GETARG_HEAPTUPLEHEADER(0);
	bool		isnull;
	int32		salary;

	salary = DatumGetInt32(GetAttributeByName(tuple, "salary", &isnull));
	if (isnull)
		PG_RETURN_NULL();
	PG_RETURN_BOOL(salary > 699);
}

/* New type "widget"
 * This used to be "circle", but I added circle to builtins,
 *	so needed to make sure the names do not collide. - tgl 97/04/21
 */

typedef struct
{
	Point		center;
	double		radius;
} WIDGET;

PG_FUNCTION_INFO_V1(widget_in);
PG_FUNCTION_INFO_V1(widget_out);

#define NARGS	3

Datum
widget_in(PG_FUNCTION_ARGS)
{
	char	   *str = PG_GETARG_CSTRING(0);
	char	   *p,
			   *coord[NARGS];
	int			i;
	WIDGET	   *result;

	for (i = 0, p = str; *p && i < NARGS && *p != RDELIM; p++)
	{
		if (*p == DELIM || (*p == LDELIM && i == 0))
			coord[i++] = p + 1;
	}

	if (i < NARGS)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
				 errmsg("invalid input syntax for type widget: \"%s\"",
						str)));

	result = (WIDGET *) palloc(sizeof(WIDGET));
	result->center.x = atof(coord[0]);
	result->center.y = atof(coord[1]);
	result->radius = atof(coord[2]);

	PG_RETURN_POINTER(result);
}

Datum
widget_out(PG_FUNCTION_ARGS)
{
	WIDGET	   *widget = (WIDGET *) PG_GETARG_POINTER(0);
	char	   *str = psprintf("(%g,%g,%g)",
							   widget->center.x, widget->center.y, widget->radius);

	PG_RETURN_CSTRING(str);
}

PG_FUNCTION_INFO_V1(pt_in_widget);

Datum
pt_in_widget(PG_FUNCTION_ARGS)
{
	Point	   *point = PG_GETARG_POINT_P(0);
	WIDGET	   *widget = (WIDGET *) PG_GETARG_POINTER(1);

	PG_RETURN_BOOL(point_dt(point, &widget->center) < widget->radius);
}
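
/*
 * Example of the external format handled above (values are illustrative):
 * widget_in accepts "(1.0,2.0,3.0)" as a widget centered at (1,2) with
 * radius 3, and widget_out prints that value back as "(1,2,3)".  The
 * regression schedule is expected to wire these up via CREATE TYPE widget.
 */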

PG_FUNCTION_INFO_V1(reverse_name);

Datum
reverse_name(PG_FUNCTION_ARGS)
{
	char	   *string = PG_GETARG_CSTRING(0);
	int			i;
	int			len;
	char	   *new_string;

	new_string = palloc0(NAMEDATALEN);
	for (i = 0; i < NAMEDATALEN && string[i]; ++i)
		;
	if (i == NAMEDATALEN || !string[i])
		--i;
	len = i;
	for (; i >= 0; --i)
		new_string[len - i] = string[i];
	PG_RETURN_CSTRING(new_string);
}

PG_FUNCTION_INFO_V1(trigger_return_old);

Datum
trigger_return_old(PG_FUNCTION_ARGS)
{
	TriggerData *trigdata = (TriggerData *) fcinfo->context;
	HeapTuple	tuple;

	if (!CALLED_AS_TRIGGER(fcinfo))
		elog(ERROR, "trigger_return_old: not fired by trigger manager");

	tuple = trigdata->tg_trigtuple;

	return PointerGetDatum(tuple);
}

#define TTDUMMY_INFINITY	999999

static SPIPlanPtr splan = NULL;
static bool ttoff = false;

PG_FUNCTION_INFO_V1(ttdummy);

Datum
ttdummy(PG_FUNCTION_ARGS)
{
	TriggerData *trigdata = (TriggerData *) fcinfo->context;
	Trigger    *trigger;		/* to get trigger name */
	char	  **args;			/* arguments */
	int			attnum[2];		/* fnumbers of start/stop columns */
	Datum		oldon,
				oldoff;
	Datum		newon,
				newoff;
	Datum	   *cvals;			/* column values */
	char	   *cnulls;			/* column nulls */
	char	   *relname;		/* triggered relation name */
	Relation	rel;			/* triggered relation */
	HeapTuple	trigtuple;
	HeapTuple	newtuple = NULL;
	HeapTuple	rettuple;
	TupleDesc	tupdesc;		/* tuple description */
	int			natts;			/* # of attributes */
	bool		isnull;			/* to know whether some column is NULL or not */
	int			ret;
	int			i;

	if (!CALLED_AS_TRIGGER(fcinfo))
		elog(ERROR, "ttdummy: not fired by trigger manager");
	if (!TRIGGER_FIRED_FOR_ROW(trigdata->tg_event))
		elog(ERROR, "ttdummy: must be fired for row");
	if (!TRIGGER_FIRED_BEFORE(trigdata->tg_event))
		elog(ERROR, "ttdummy: must be fired before event");
	if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event))
		elog(ERROR, "ttdummy: cannot process INSERT event");
	if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
		newtuple = trigdata->tg_newtuple;

	trigtuple = trigdata->tg_trigtuple;

	rel = trigdata->tg_relation;
	relname = SPI_getrelname(rel);

	/* check if TT is OFF for this relation */
	if (ttoff)					/* OFF - nothing to do */
	{
		pfree(relname);
		return PointerGetDatum((newtuple != NULL) ? newtuple : trigtuple);
	}

	trigger = trigdata->tg_trigger;

	if (trigger->tgnargs != 2)
		elog(ERROR, "ttdummy (%s): invalid (!= 2) number of arguments %d",
			 relname, trigger->tgnargs);

	args = trigger->tgargs;
	tupdesc = rel->rd_att;
	natts = tupdesc->natts;

	for (i = 0; i < 2; i++)
	{
		attnum[i] = SPI_fnumber(tupdesc, args[i]);
		if (attnum[i] <= 0)
			elog(ERROR, "ttdummy (%s): there is no attribute %s",
				 relname, args[i]);
		if (SPI_gettypeid(tupdesc, attnum[i]) != INT4OID)
			elog(ERROR, "ttdummy (%s): attribute %s must be of integer type",
				 relname, args[i]);
	}

	oldon = SPI_getbinval(trigtuple, tupdesc, attnum[0], &isnull);
	if (isnull)
		elog(ERROR, "ttdummy (%s): %s must be NOT NULL", relname, args[0]);

	oldoff = SPI_getbinval(trigtuple, tupdesc, attnum[1], &isnull);
	if (isnull)
		elog(ERROR, "ttdummy (%s): %s must be NOT NULL", relname, args[1]);

	if (newtuple != NULL)		/* UPDATE */
	{
		newon = SPI_getbinval(newtuple, tupdesc, attnum[0], &isnull);
		if (isnull)
			elog(ERROR, "ttdummy (%s): %s must be NOT NULL", relname, args[0]);
		newoff = SPI_getbinval(newtuple, tupdesc, attnum[1], &isnull);
		if (isnull)
			elog(ERROR, "ttdummy (%s): %s must be NOT NULL", relname, args[1]);

		if (oldon != newon || oldoff != newoff)
			ereport(ERROR,
					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					 errmsg("ttdummy (%s): you cannot change %s and/or %s columns (use set_ttdummy)",
							relname, args[0], args[1])));

		if (newoff != TTDUMMY_INFINITY)
		{
			pfree(relname);		/* allocated in upper executor context */
			return PointerGetDatum(NULL);
		}
	}
	else if (oldoff != TTDUMMY_INFINITY)	/* DELETE */
	{
		pfree(relname);
		return PointerGetDatum(NULL);
	}

	newoff = DirectFunctionCall1(nextval, CStringGetTextDatum("ttdummy_seq"));
	/* nextval now returns int64; coerce down to int32 */
	newoff = Int32GetDatum((int32) DatumGetInt64(newoff));

	/* Connect to SPI manager */
	if ((ret = SPI_connect()) < 0)
		elog(ERROR, "ttdummy (%s): SPI_connect returned %d", relname, ret);

	/* Fetch tuple values and nulls */
	cvals = (Datum *) palloc(natts * sizeof(Datum));
	cnulls = (char *) palloc(natts * sizeof(char));
	for (i = 0; i < natts; i++)
	{
		cvals[i] = SPI_getbinval((newtuple != NULL) ? newtuple : trigtuple,
								 tupdesc, i + 1, &isnull);
		cnulls[i] = (isnull) ? 'n' : ' ';
	}

	/* change date column(s) */
	if (newtuple)				/* UPDATE */
	{
		cvals[attnum[0] - 1] = newoff;	/* start_date eq current date */
		cnulls[attnum[0] - 1] = ' ';
		cvals[attnum[1] - 1] = TTDUMMY_INFINITY;	/* stop_date eq INFINITY */
		cnulls[attnum[1] - 1] = ' ';
	}
	else
		/* DELETE */
	{
		cvals[attnum[1] - 1] = newoff;	/* stop_date eq current date */
		cnulls[attnum[1] - 1] = ' ';
	}

	/* if there is no plan ... */
	if (splan == NULL)
	{
		SPIPlanPtr	pplan;
		Oid		   *ctypes;
		char	   *query;

		/* allocate space in preparation */
		ctypes = (Oid *) palloc(natts * sizeof(Oid));
		query = (char *) palloc(100 + 16 * natts);

		/*
		 * Construct query: INSERT INTO _relation_ VALUES ($1, ...)
		 */
		sprintf(query, "INSERT INTO %s VALUES (", relname);
		for (i = 1; i <= natts; i++)
		{
			sprintf(query + strlen(query), "$%d%s",
					i, (i < natts) ? ", " : ")");
			ctypes[i - 1] = SPI_gettypeid(tupdesc, i);
		}

		/* Prepare plan for query */
		pplan = SPI_prepare(query, natts, ctypes);
		if (pplan == NULL)
			elog(ERROR, "ttdummy (%s): SPI_prepare returned %s", relname, SPI_result_code_string(SPI_result));

		if (SPI_keepplan(pplan))
			elog(ERROR, "ttdummy (%s): SPI_keepplan failed", relname);

		splan = pplan;
	}

	ret = SPI_execp(splan, cvals, cnulls, 0);

	if (ret < 0)
		elog(ERROR, "ttdummy (%s): SPI_execp returned %d", relname, ret);

	/* Tuple to return to upper Executor ... */
	if (newtuple)				/* UPDATE */
		rettuple = SPI_modifytuple(rel, trigtuple, 1, &(attnum[1]), &newoff, NULL);
	else						/* DELETE */
		rettuple = trigtuple;

	SPI_finish();				/* don't forget to say goodbye to the SPI manager */

	pfree(relname);

	return PointerGetDatum(rettuple);
}

PG_FUNCTION_INFO_V1(set_ttdummy);

Datum
set_ttdummy(PG_FUNCTION_ARGS)
{
	int32		on = PG_GETARG_INT32(0);

	if (ttoff)					/* OFF currently */
	{
		if (on == 0)
			PG_RETURN_INT32(0);

		/* turn ON */
		ttoff = false;
		PG_RETURN_INT32(0);
	}

	/* ON currently */
	if (on != 0)
		PG_RETURN_INT32(1);

	/* turn OFF */
	ttoff = true;

	PG_RETURN_INT32(1);
}
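
/*
 * Hypothetical usage sketch (the real wiring lives in the trigger regression
 * script): ttdummy is meant to be attached as a BEFORE UPDATE OR DELETE row
 * trigger whose two arguments name the int4 start/stop columns, e.g.
 *
 *		CREATE TRIGGER ttdummy BEFORE DELETE OR UPDATE ON tttest
 *			FOR EACH ROW EXECUTE PROCEDURE ttdummy(price_on, price_off);
 *
 * and SELECT set_ttdummy(0) switches the time-travel behavior off so that
 * rows can be updated or deleted in place.
 */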


/*
 * Type int44 has no real-world use, but the regression tests use it
 * (under the alias "city_budget").  It's a four-element vector of int4's.
 */

/*
 *		int44in			- converts "num, num, ..." to internal form
 *
 *		Note: Fills any missing positions with zeroes.
 */
PG_FUNCTION_INFO_V1(int44in);

Datum
int44in(PG_FUNCTION_ARGS)
{
	char	   *input_string = PG_GETARG_CSTRING(0);
	int32	   *result = (int32 *) palloc(4 * sizeof(int32));
	int			i;

	i = sscanf(input_string,
			   "%d, %d, %d, %d",
			   &result[0],
			   &result[1],
			   &result[2],
			   &result[3]);
	while (i < 4)
		result[i++] = 0;

	PG_RETURN_POINTER(result);
}

/*
 *		int44out		- converts internal form to "num, num, ..."
 */
PG_FUNCTION_INFO_V1(int44out);

Datum
int44out(PG_FUNCTION_ARGS)
{
	int32	   *an_array = (int32 *) PG_GETARG_POINTER(0);
	char	   *result = (char *) palloc(16 * 4);

	snprintf(result, 16 * 4, "%d,%d,%d,%d",
			 an_array[0],
			 an_array[1],
			 an_array[2],
			 an_array[3]);

	PG_RETURN_CSTRING(result);
}
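
/*
 * Example round trip (illustrative): int44in("1, 2, 3") parses three values
 * and zero-fills the missing fourth position, so int44out() renders the
 * stored value as "1,2,3,0".
 */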

PG_FUNCTION_INFO_V1(make_tuple_indirect);
Datum
make_tuple_indirect(PG_FUNCTION_ARGS)
{
	HeapTupleHeader rec = PG_GETARG_HEAPTUPLEHEADER(0);
	HeapTupleData tuple;
	int			ncolumns;
	Datum	   *values;
	bool	   *nulls;

	Oid			tupType;
	int32		tupTypmod;
	TupleDesc	tupdesc;

	HeapTuple	newtup;

	int			i;

	MemoryContext old_context;

	/* Extract type info from the tuple itself */
	tupType = HeapTupleHeaderGetTypeId(rec);
	tupTypmod = HeapTupleHeaderGetTypMod(rec);
	tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
	ncolumns = tupdesc->natts;

	/* Build a temporary HeapTuple control structure */
	tuple.t_len = HeapTupleHeaderGetDatumLength(rec);
	ItemPointerSetInvalid(&(tuple.t_self));
	tuple.t_tableOid = InvalidOid;
	tuple.t_data = rec;

	values = (Datum *) palloc(ncolumns * sizeof(Datum));
	nulls = (bool *) palloc(ncolumns * sizeof(bool));

	heap_deform_tuple(&tuple, tupdesc, values, nulls);

	old_context = MemoryContextSwitchTo(TopTransactionContext);

	for (i = 0; i < ncolumns; i++)
	{
		struct varlena *attr;
		struct varlena *new_attr;
		struct varatt_indirect redirect_pointer;

		/* only work on existing, not-null varlenas */
		if (TupleDescAttr(tupdesc, i)->attisdropped ||
			nulls[i] ||
			TupleDescAttr(tupdesc, i)->attlen != -1)
			continue;

		attr = (struct varlena *) DatumGetPointer(values[i]);

		/* don't recursively indirect */
		if (VARATT_IS_EXTERNAL_INDIRECT(attr))
			continue;

		/* copy datum, so it still lives later */
		if (VARATT_IS_EXTERNAL_ONDISK(attr))
			attr = heap_tuple_fetch_attr(attr);
		else
		{
			struct varlena *oldattr = attr;

			attr = palloc0(VARSIZE_ANY(oldattr));
			memcpy(attr, oldattr, VARSIZE_ANY(oldattr));
		}

		/* build indirection Datum */
		new_attr = (struct varlena *) palloc0(INDIRECT_POINTER_SIZE);
		redirect_pointer.pointer = attr;
		SET_VARTAG_EXTERNAL(new_attr, VARTAG_INDIRECT);
		memcpy(VARDATA_EXTERNAL(new_attr), &redirect_pointer,
			   sizeof(redirect_pointer));

		values[i] = PointerGetDatum(new_attr);
	}

	newtup = heap_form_tuple(tupdesc, values, nulls);
	pfree(values);
	pfree(nulls);
	ReleaseTupleDesc(tupdesc);

	MemoryContextSwitchTo(old_context);

	/*
	 * We intentionally don't use PG_RETURN_HEAPTUPLEHEADER here, because that
	 * would cause the indirect toast pointers to be flattened out of the
	 * tuple immediately, rendering subsequent testing irrelevant.  So just
	 * return the HeapTupleHeader pointer as-is.  This violates the general
	 * rule that composite Datums shouldn't contain toast pointers, but so
	 * long as the regression test scripts don't insert the result of this
	 * function into a container type (record, array, etc) it should be OK.
	 */
	PG_RETURN_POINTER(newtup->t_data);
}
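
/*
 * Illustrative call (assuming a matching SQL declaration over some composite
 * type): SELECT make_tuple_indirect(t) FROM some_table t returns rows whose
 * varlena attributes carry indirect toast pointers, presumably so that later
 * detoasting of those attributes gets exercised.
 */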

PG_FUNCTION_INFO_V1(regress_putenv);

Datum
regress_putenv(PG_FUNCTION_ARGS)
{
	MemoryContext oldcontext;
	char	   *envbuf;

	if (!superuser())
		elog(ERROR, "must be superuser to change environment variables");

	oldcontext = MemoryContextSwitchTo(TopMemoryContext);
	envbuf = text_to_cstring((text *) PG_GETARG_POINTER(0));
	MemoryContextSwitchTo(oldcontext);

	if (putenv(envbuf) != 0)
		elog(ERROR, "could not set environment variable: %m");

	PG_RETURN_VOID();
}

/* Sleep until no process has a given PID. */
PG_FUNCTION_INFO_V1(wait_pid);

Datum
wait_pid(PG_FUNCTION_ARGS)
{
	int			pid = PG_GETARG_INT32(0);

	if (!superuser())
		elog(ERROR, "must be superuser to check PID liveness");

	while (kill(pid, 0) == 0)
	{
		CHECK_FOR_INTERRUPTS();
		pg_usleep(50000);
	}

	if (errno != ESRCH)
		elog(ERROR, "could not check PID %d liveness: %m", pid);

	PG_RETURN_VOID();
}
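
/*
 * Intended usage sketch (assuming a matching CREATE FUNCTION): a regression
 * script records another backend's PID, asks that backend to exit, and then
 * calls wait_pid() with the recorded value to block until no process with
 * that PID remains.
 */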

static void
test_atomic_flag(void)
{
	pg_atomic_flag flag;

	pg_atomic_init_flag(&flag);
	EXPECT_TRUE(pg_atomic_unlocked_test_flag(&flag));
	EXPECT_TRUE(pg_atomic_test_set_flag(&flag));
	EXPECT_TRUE(!pg_atomic_unlocked_test_flag(&flag));
	EXPECT_TRUE(!pg_atomic_test_set_flag(&flag));
	pg_atomic_clear_flag(&flag);
	EXPECT_TRUE(pg_atomic_unlocked_test_flag(&flag));
	EXPECT_TRUE(pg_atomic_test_set_flag(&flag));
	pg_atomic_clear_flag(&flag);
}

static void
test_atomic_uint32(void)
{
	pg_atomic_uint32 var;
	uint32		expected;
	int			i;

	pg_atomic_init_u32(&var, 0);
	EXPECT_EQ_U32(pg_atomic_read_u32(&var), 0);
	pg_atomic_write_u32(&var, 3);
	EXPECT_EQ_U32(pg_atomic_read_u32(&var), 3);
	EXPECT_EQ_U32(pg_atomic_fetch_add_u32(&var, pg_atomic_read_u32(&var) - 2),
				  3);
	EXPECT_EQ_U32(pg_atomic_fetch_sub_u32(&var, 1), 4);
	EXPECT_EQ_U32(pg_atomic_sub_fetch_u32(&var, 3), 0);
	EXPECT_EQ_U32(pg_atomic_add_fetch_u32(&var, 10), 10);
	EXPECT_EQ_U32(pg_atomic_exchange_u32(&var, 5), 10);
	EXPECT_EQ_U32(pg_atomic_exchange_u32(&var, 0), 5);

	/* test around numerical limits */
	EXPECT_EQ_U32(pg_atomic_fetch_add_u32(&var, INT_MAX), 0);
	EXPECT_EQ_U32(pg_atomic_fetch_add_u32(&var, INT_MAX), INT_MAX);
	pg_atomic_fetch_add_u32(&var, 2);	/* wrap to 0 */
	EXPECT_EQ_U32(pg_atomic_fetch_add_u32(&var, PG_INT16_MAX), 0);
	EXPECT_EQ_U32(pg_atomic_fetch_add_u32(&var, PG_INT16_MAX + 1),
				  PG_INT16_MAX);
	EXPECT_EQ_U32(pg_atomic_fetch_add_u32(&var, PG_INT16_MIN),
				  2 * PG_INT16_MAX + 1);
	EXPECT_EQ_U32(pg_atomic_fetch_add_u32(&var, PG_INT16_MIN - 1),
				  PG_INT16_MAX);
	pg_atomic_fetch_add_u32(&var, 1);	/* top up to UINT_MAX */
	EXPECT_EQ_U32(pg_atomic_read_u32(&var), UINT_MAX);
	EXPECT_EQ_U32(pg_atomic_fetch_sub_u32(&var, INT_MAX), UINT_MAX);
	EXPECT_EQ_U32(pg_atomic_read_u32(&var), (uint32) INT_MAX + 1);
	EXPECT_EQ_U32(pg_atomic_sub_fetch_u32(&var, INT_MAX), 1);
	pg_atomic_sub_fetch_u32(&var, 1);

	/* fail exchange because of old expected */
	expected = 10;
	EXPECT_TRUE(!pg_atomic_compare_exchange_u32(&var, &expected, 1));

	/* CAS is allowed to fail due to interrupts, try a couple of times */
	for (i = 0; i < 1000; i++)
	{
		expected = 0;
		if (!pg_atomic_compare_exchange_u32(&var, &expected, 1))
			break;
	}
	if (i == 1000)
		elog(ERROR, "atomic_compare_exchange_u32() never succeeded");
	EXPECT_EQ_U32(pg_atomic_read_u32(&var), 1);
	pg_atomic_write_u32(&var, 0);

	/* try setting flagbits */
	EXPECT_TRUE(!(pg_atomic_fetch_or_u32(&var, 1) & 1));
	EXPECT_TRUE(pg_atomic_fetch_or_u32(&var, 2) & 1);
	EXPECT_EQ_U32(pg_atomic_read_u32(&var), 3);
	/* try clearing flagbits */
	EXPECT_EQ_U32(pg_atomic_fetch_and_u32(&var, ~2) & 3, 3);
	EXPECT_EQ_U32(pg_atomic_fetch_and_u32(&var, ~1), 1);
	/* no bits set anymore */
	EXPECT_EQ_U32(pg_atomic_fetch_and_u32(&var, ~0), 0);
}

static void
test_atomic_uint64(void)
{
	pg_atomic_uint64 var;
	uint64		expected;
	int			i;

	pg_atomic_init_u64(&var, 0);
	EXPECT_EQ_U64(pg_atomic_read_u64(&var), 0);
	pg_atomic_write_u64(&var, 3);
	EXPECT_EQ_U64(pg_atomic_read_u64(&var), 3);
	EXPECT_EQ_U64(pg_atomic_fetch_add_u64(&var, pg_atomic_read_u64(&var) - 2),
				  3);
	EXPECT_EQ_U64(pg_atomic_fetch_sub_u64(&var, 1), 4);
	EXPECT_EQ_U64(pg_atomic_sub_fetch_u64(&var, 3), 0);
	EXPECT_EQ_U64(pg_atomic_add_fetch_u64(&var, 10), 10);
	EXPECT_EQ_U64(pg_atomic_exchange_u64(&var, 5), 10);
	EXPECT_EQ_U64(pg_atomic_exchange_u64(&var, 0), 5);

	/* fail exchange because of old expected */
	expected = 10;
	EXPECT_TRUE(!pg_atomic_compare_exchange_u64(&var, &expected, 1));

	/* CAS is allowed to fail due to interrupts, try a couple of times */
	for (i = 0; i < 100; i++)
	{
		expected = 0;
		if (!pg_atomic_compare_exchange_u64(&var, &expected, 1))
			break;
	}
	if (i == 100)
		elog(ERROR, "atomic_compare_exchange_u64() never succeeded");
	EXPECT_EQ_U64(pg_atomic_read_u64(&var), 1);

	pg_atomic_write_u64(&var, 0);

	/* try setting flagbits */
	EXPECT_TRUE(!(pg_atomic_fetch_or_u64(&var, 1) & 1));
	EXPECT_TRUE(pg_atomic_fetch_or_u64(&var, 2) & 1);
	EXPECT_EQ_U64(pg_atomic_read_u64(&var), 3);
	/* try clearing flagbits */
	EXPECT_EQ_U64((pg_atomic_fetch_and_u64(&var, ~2) & 3), 3);
	EXPECT_EQ_U64(pg_atomic_fetch_and_u64(&var, ~1), 1);
	/* no bits set anymore */
	EXPECT_EQ_U64(pg_atomic_fetch_and_u64(&var, ~0), 0);
}

/*
 * Perform, fairly minimal, testing of the spinlock implementation.
 *
 * It's likely worth expanding these to actually test concurrency etc, but
 * having some regularly run tests is better than none.
 */
static void
test_spinlock(void)
{
	/*
	 * Basic tests for spinlocks, as well as the underlying operations.
	 *
	 * We embed the spinlock in a struct with other members to test that the
	 * spinlock operations don't perform too wide writes.
	 */
	{
		struct test_lock_struct
		{
			char		data_before[4];
			slock_t		lock;
			char		data_after[4];
		} struct_w_lock;

		memcpy(struct_w_lock.data_before, "abcd", 4);
		memcpy(struct_w_lock.data_after, "ef12", 4);

		/* test basic operations via the SpinLock* API */
		SpinLockInit(&struct_w_lock.lock);
		SpinLockAcquire(&struct_w_lock.lock);
		SpinLockRelease(&struct_w_lock.lock);

		/* test basic operations via underlying S_* API */
		S_INIT_LOCK(&struct_w_lock.lock);
		S_LOCK(&struct_w_lock.lock);
		S_UNLOCK(&struct_w_lock.lock);

		/* and that "contended" acquisition works */
		s_lock(&struct_w_lock.lock, "testfile", 17, "testfunc");
		S_UNLOCK(&struct_w_lock.lock);

		/*
		 * Check, using TAS directly, that a single spin cycle doesn't block
		 * when acquiring an already acquired lock.
		 */
#ifdef TAS
		S_LOCK(&struct_w_lock.lock);

		if (!TAS(&struct_w_lock.lock))
			elog(ERROR, "acquired already held spinlock");

#ifdef TAS_SPIN
		if (!TAS_SPIN(&struct_w_lock.lock))
			elog(ERROR, "acquired already held spinlock");
#endif							/* defined(TAS_SPIN) */

		S_UNLOCK(&struct_w_lock.lock);
#endif							/* defined(TAS) */

		/*
		 * Verify that after all of this the non-lock contents are still
		 * correct.
		 */
		if (memcmp(struct_w_lock.data_before, "abcd", 4) != 0)
			elog(ERROR, "padding before spinlock modified");
		if (memcmp(struct_w_lock.data_after, "ef12", 4) != 0)
			elog(ERROR, "padding after spinlock modified");
	}

	/*
	 * Ensure that allocating more than INT32_MAX emulated spinlocks
	 * works. That's interesting because the spinlock emulation uses a 32bit
	 * integer to map spinlocks onto semaphores. There've been bugs...
	 */
#ifndef HAVE_SPINLOCKS
	{
		uint32	i;

		/*
		 * Initialize enough spinlocks to advance counter close to
		 * wraparound. It's too expensive to perform acquire/release for each,
		 * as those may be syscalls when the spinlock emulation is used (and
		 * even just atomic TAS would be expensive).
		 */
		for (i = 0; i < INT32_MAX - 100000; i++)
		{
			slock_t lock;

			SpinLockInit(&lock);
		}

		for (i = 0; i < 200000; i++)
		{
			slock_t lock;

			SpinLockInit(&lock);

			SpinLockAcquire(&lock);
			SpinLockRelease(&lock);
			SpinLockAcquire(&lock);
			SpinLockRelease(&lock);
		}
	}
#endif
}

/*
 * Verify that performing atomic ops inside a spinlock isn't a
 * problem. Realistically that's only going to be a problem when both
 * --disable-spinlocks and --disable-atomics are used, but it's cheap enough
 * to just always test.
 *
 * The test works by initializing more than NUM_SPINLOCK_SEMAPHORES atomics
 * and manipulating them while a spinlock is held; if the emulated spinlock
 * and atomic implementations overlapped, that would conflict.
 *
 * NUM_TEST_ATOMICS doesn't really need to be more than
 * NUM_SPINLOCK_SEMAPHORES, but it seems better to test a bit more
 * extensively.
 */
static void
test_atomic_spin_nest(void)
{
	slock_t lock;
#define NUM_TEST_ATOMICS (NUM_SPINLOCK_SEMAPHORES + NUM_ATOMICS_SEMAPHORES + 27)
	pg_atomic_uint32 atomics32[NUM_TEST_ATOMICS];
	pg_atomic_uint64 atomics64[NUM_TEST_ATOMICS];
	int		i;

	SpinLockInit(&lock);

	for (i = 0; i < NUM_TEST_ATOMICS; i++)
	{
		pg_atomic_init_u32(&atomics32[i], 0);
		pg_atomic_init_u64(&atomics64[i], 0);
	}

	/* just so it's not all zeroes */
	for (i = 0; i < NUM_TEST_ATOMICS; i++)
	{
		EXPECT_EQ_U32(pg_atomic_fetch_add_u32(&atomics32[i], i), 0);
		EXPECT_EQ_U64(pg_atomic_fetch_add_u64(&atomics64[i], i), 0);
	}

	/* test whether we can do atomic op with lock held */
	SpinLockAcquire(&lock);
	for (i = 0; i < NUM_TEST_ATOMICS; i++)
	{
		EXPECT_EQ_U32(pg_atomic_fetch_sub_u32(&atomics32[i], i), i);
		EXPECT_EQ_U32(pg_atomic_read_u32(&atomics32[i]), 0);
		EXPECT_EQ_U64(pg_atomic_fetch_sub_u64(&atomics64[i], i), i);
		EXPECT_EQ_U64(pg_atomic_read_u64(&atomics64[i]), 0);
	}
	SpinLockRelease(&lock);
}
#undef NUM_TEST_ATOMICS

PG_FUNCTION_INFO_V1(test_atomic_ops);
Datum
test_atomic_ops(PG_FUNCTION_ARGS)
{
	test_atomic_flag();

	test_atomic_uint32();

	test_atomic_uint64();

	/*
	 * Arguably this shouldn't be tested as part of this function, but it's
	 * closely enough related that that seems ok for now.
	 */
	test_spinlock();

	test_atomic_spin_nest();

	PG_RETURN_BOOL(true);
}
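
/*
 * SQL-level entry point (declaration assumed to be provided elsewhere by the
 * regression setup): SELECT test_atomic_ops(); returns true when every check
 * above passes, and raises an ERROR at the first failing one.
 */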

PG_FUNCTION_INFO_V1(test_fdw_handler);
Datum
test_fdw_handler(PG_FUNCTION_ARGS)
{
	elog(ERROR, "test_fdw_handler is not implemented");
	PG_RETURN_NULL();
}