/*
 *	pg_upgrade.c
 *
 *	main source file
 *
 *	Copyright (c) 2010-2020, PostgreSQL Global Development Group
 *	src/bin/pg_upgrade/pg_upgrade.c
 */

/*
 *	To simplify the upgrade process, we force certain system values to be
 *	identical between old and new clusters:
 *
 *	We control all assignments of pg_class.oid (and relfilenode) so toast
 *	oids are the same between old and new clusters.  This is important
 *	because toast oids are stored as toast pointers in user tables.
 *
 *	While pg_class.oid and pg_class.relfilenode are initially the same
 *	in a cluster, they can diverge due to CLUSTER, REINDEX, or VACUUM
 *	FULL.  In the new cluster, pg_class.oid and pg_class.relfilenode will
 *	be the same and will match the old pg_class.oid value.  Because of
 *	this, old/new pg_class.relfilenode values will not match if CLUSTER,
 *	REINDEX, or VACUUM FULL have been performed in the old cluster.
 *
 *	We control all assignments of pg_type.oid because these oids are stored
 *	in user composite type values.
 *
 *	We control all assignments of pg_enum.oid because these oids are stored
 *	in user tables as enum values.
 *
 *	We control all assignments of pg_authid.oid for historical reasons (the
 *	oids used to be stored in pg_largeobject_metadata, which is now copied via
 *	SQL commands); that might change at some point in the future.
 */



#include "postgres_fe.h"

#ifdef HAVE_LANGINFO_H
#include <langinfo.h>
#endif

#include "catalog/pg_class_d.h"
#include "common/file_perm.h"
#include "common/logging.h"
#include "common/restricted_token.h"
#include "fe_utils/string_utils.h"
#include "pg_upgrade.h"

static void prepare_new_cluster(void);
static void prepare_new_globals(void);
static void create_new_objects(void);
static void copy_xact_xlog_xid(void);
static void set_frozenxids(bool minmxid_only);
static void setup(char *argv0, bool *live_check);
static void cleanup(void);

ClusterInfo old_cluster,
			new_cluster;
OSInfo		os_info;

char	   *output_files[] = {
	SERVER_LOG_FILE,
#ifdef WIN32
	/* unique file for pg_ctl start */
	SERVER_START_LOG_FILE,
#endif
	UTILITY_LOG_FILE,
	INTERNAL_LOG_FILE,
	NULL
};


int
main(int argc, char **argv)
{
	char	   *analyze_script_file_name = NULL;
	char	   *deletion_script_file_name = NULL;
	bool		live_check = false;

	pg_logging_init(argv[0]);
	set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_upgrade"));

	/* Set default restrictive mask until new cluster permissions are read */
	umask(PG_MODE_MASK_OWNER);

	parseCommandLine(argc, argv);

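	/*
	 * Note: get_restricted_token() (common/restricted_token.c) re-launches
	 * pg_upgrade under a restricted security token on Windows so it does not
	 * run with Administrator privileges; on other platforms it is a no-op.
	 */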
	get_restricted_token();

	adjust_data_dir(&old_cluster);
	adjust_data_dir(&new_cluster);

	setup(argv[0], &live_check);

	output_check_banner(live_check);

	check_cluster_versions();

	get_sock_dir(&old_cluster, live_check);
	get_sock_dir(&new_cluster, false);

	check_cluster_compatibility(live_check);

	/* Set mask based on PGDATA permissions */
	if (!GetDataDirectoryCreatePerm(new_cluster.pgdata))
		pg_fatal("could not read permissions of directory \"%s\": %s\n",
				 new_cluster.pgdata, strerror(errno));

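	/*
	 * Note: besides checking readability, GetDataDirectoryCreatePerm() sets
	 * pg_mode_mask (and the related create-mode globals) from the new
	 * cluster's PGDATA permissions, which is what the umask() call below
	 * relies on.
	 */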
	umask(pg_mode_mask);

	check_and_dump_old_cluster(live_check);


	/* -- NEW -- */
	start_postmaster(&new_cluster, true);

	check_new_cluster();
	report_clusters_compatible();

	pg_log(PG_REPORT,
		   "\n"
		   "Performing Upgrade\n"
		   "------------------\n");

	prepare_new_cluster();

	stop_postmaster(false);

	/*
	 * Destructive Changes to New Cluster
	 */

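	/*
	 * copy_xact_xlog_xid() below overwrites the new cluster's commit log and
	 * transaction counters with the old cluster's values, hence the
	 * "destructive" label above.
	 */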
	copy_xact_xlog_xid();

	/* The new cluster is now using the xids of the old system */

	/* -- NEW -- */
	start_postmaster(&new_cluster, true);

	prepare_new_globals();

	create_new_objects();

	stop_postmaster(false);

	/*
	 * Most failures happen in create_new_objects(), which has completed at
	 * this point.  We do this here because it is just before the transfer
	 * step; in link mode that step hard-links the old and new cluster data
	 * files, after which the old cluster can no longer be safely started
	 * once the new cluster has been started.
	 */
	if (user_opts.transfer_mode == TRANSFER_MODE_LINK)
		disable_old_cluster();

	transfer_all_new_tablespaces(&old_cluster.dbarr, &new_cluster.dbarr,
								 old_cluster.pgdata, new_cluster.pgdata);

	/*
	 * Assuming OIDs are only used in system tables, there is no need to
	 * restore the OID counter because we have not transferred any OIDs from
	 * the old system, but we do it anyway just in case.  We do it late here
	 * because there is no need to have the schema load use new oids.
	 */
	prep_status("Setting next OID for new cluster");
	exec_prog(UTILITY_LOG_FILE, NULL, true, true,
			  "\"%s/pg_resetwal\" -o %u \"%s\"",
			  new_cluster.bindir, old_cluster.controldata.chkpnt_nxtoid,
			  new_cluster.pgdata);
	check_ok();

	prep_status("Sync data directory to disk");
	exec_prog(UTILITY_LOG_FILE, NULL, true, true,
			  "\"%s/initdb\" --sync-only \"%s\"", new_cluster.bindir,
			  new_cluster.pgdata);
	check_ok();

	create_script_for_cluster_analyze(&analyze_script_file_name);
	create_script_for_old_cluster_deletion(&deletion_script_file_name);

	issue_warnings_and_set_wal_level();

	pg_log(PG_REPORT,
		   "\n"
		   "Upgrade Complete\n"
		   "----------------\n");

	output_completion_banner(analyze_script_file_name,
							 deletion_script_file_name);

	pg_free(analyze_script_file_name);
	pg_free(deletion_script_file_name);

	cleanup();

	return 0;
}


static void
setup(char *argv0, bool *live_check)
{
	/*
	 * Make sure the user has a clean environment; otherwise, we may confuse
	 * libpq when we connect to one (or both) of the servers.
	 */
	check_pghost_envvar();

	/*
	 * In case the user hasn't specified the directory for the new binaries
	 * with -B, default to using the path of the currently executed pg_upgrade
	 * binary.
	 */
	if (!new_cluster.bindir)
	{
		char		exec_path[MAXPGPATH];

		if (find_my_exec(argv0, exec_path) < 0)
			pg_fatal("%s: could not find own program executable\n", argv0);
		/* Trim off program name and keep just path */
		*last_dir_separator(exec_path) = '\0';
		canonicalize_path(exec_path);
		new_cluster.bindir = pg_strdup(exec_path);
	}

	verify_directories();

	/* no postmasters should be running, except for a live check */
	if (pid_lock_file_exists(old_cluster.pgdata))
	{
		/*
		 * If we have a postmaster.pid file, try to start the server.  If it
		 * starts, the pid file was stale, so stop the server.  If it doesn't
		 * start, assume the server is running.  If the pid file is left over
		 * from a server crash, this also allows any committed transactions
		 * stored in the WAL to be replayed so they are not lost, because WAL
		 * files are not transferred from old to new servers.  We later check
		 * for a clean shutdown.
		 */
		if (start_postmaster(&old_cluster, false))
			stop_postmaster(false);
		else
		{
			if (!user_opts.check)
				pg_fatal("There seems to be a postmaster servicing the old cluster.\n"
						 "Please shut down that postmaster and try again.\n");
			else
				*live_check = true;
		}
	}

	/* same goes for the new postmaster */
	if (pid_lock_file_exists(new_cluster.pgdata))
	{
		if (start_postmaster(&new_cluster, false))
			stop_postmaster(false);
		else
			pg_fatal("There seems to be a postmaster servicing the new cluster.\n"
					 "Please shut down that postmaster and try again.\n");
	}
}


static void
prepare_new_cluster(void)
{
	/*
	 * It would make more sense to freeze after loading the schema, but that
	 * would cause us to lose the frozenxids restored by the load.  We use
	 * --analyze so autovacuum doesn't update statistics later.
	 */
	prep_status("Analyzing all rows in the new cluster");
	exec_prog(UTILITY_LOG_FILE, NULL, true, true,
			  "\"%s/vacuumdb\" %s --all --analyze %s",
			  new_cluster.bindir, cluster_conn_opts(&new_cluster),
			  log_opts.verbose ? "--verbose" : "");
	check_ok();

	/*
	 * We do freeze after analyze so pg_statistic is also frozen.  template0
	 * is not frozen here, but data rows were frozen by initdb, and we set its
	 * datfrozenxid, relfrozenxids, and relminmxid later to match the new xid
	 * counter.
	 */
	prep_status("Freezing all rows in the new cluster");
	exec_prog(UTILITY_LOG_FILE, NULL, true, true,
			  "\"%s/vacuumdb\" %s --all --freeze %s",
			  new_cluster.bindir, cluster_conn_opts(&new_cluster),
			  log_opts.verbose ? "--verbose" : "");
	check_ok();
}


static void
prepare_new_globals(void)
{
	/*
	 * Before we restore anything, set frozenxids of initdb-created tables.
	 */
	set_frozenxids(false);

	/*
	 * Now restore global objects (roles and tablespaces).
	 */
	prep_status("Restoring global objects in the new cluster");

	exec_prog(UTILITY_LOG_FILE, NULL, true, true,
			  "\"%s/psql\" " EXEC_PSQL_ARGS " %s -f \"%s\"",
			  new_cluster.bindir, cluster_conn_opts(&new_cluster),
			  GLOBALS_DUMP_FILE);
	check_ok();
}


static void
create_new_objects(void)
{
	int			dbnum;

	prep_status("Restoring database schemas in the new cluster\n");

	/*
	 * We cannot process the template1 database concurrently with others,
	 * because when it's transiently dropped, connection attempts would fail.
	 * So handle it in a separate non-parallelized pass.
	 */
	for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
	{
		char		sql_file_name[MAXPGPATH],
					log_file_name[MAXPGPATH];
		DbInfo	   *old_db = &old_cluster.dbarr.dbs[dbnum];
		const char *create_opts;

		/* Process only template1 in this pass */
		if (strcmp(old_db->db_name, "template1") != 0)
			continue;

		pg_log(PG_STATUS, "%s", old_db->db_name);
		snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
		snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);

		/*
		 * template1 database will already exist in the target installation,
		 * so tell pg_restore to drop and recreate it; otherwise we would fail
		 * to propagate its database-level properties.
		 */
		create_opts = "--clean --create";

		exec_prog(log_file_name,
				  NULL,
				  true,
				  true,
				  "\"%s/pg_restore\" %s %s --exit-on-error --verbose "
				  "--dbname postgres \"%s\"",
				  new_cluster.bindir,
				  cluster_conn_opts(&new_cluster),
				  create_opts,
				  sql_file_name);

		break;					/* done once we've processed template1 */
	}

	for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
	{
		char		sql_file_name[MAXPGPATH],
					log_file_name[MAXPGPATH];
		DbInfo	   *old_db = &old_cluster.dbarr.dbs[dbnum];
		const char *create_opts;

		/* Skip template1 in this pass */
		if (strcmp(old_db->db_name, "template1") == 0)
			continue;

		pg_log(PG_STATUS, "%s", old_db->db_name);
		snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
		snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);

		/*
		 * postgres database will already exist in the target installation, so
		 * tell pg_restore to drop and recreate it; otherwise we would fail to
		 * propagate its database-level properties.
		 */
		if (strcmp(old_db->db_name, "postgres") == 0)
			create_opts = "--clean --create";
		else
			create_opts = "--create";

		parallel_exec_prog(log_file_name,
						   NULL,
						   "\"%s/pg_restore\" %s %s --exit-on-error --verbose "
						   "--dbname template1 \"%s\"",
						   new_cluster.bindir,
						   cluster_conn_opts(&new_cluster),
						   create_opts,
						   sql_file_name);
	}

	/* reap all children */
	while (reap_child(true) == true)
		;

	end_progress_output();
	check_ok();

	/*
	 * We don't have minmxids for databases or relations in pre-9.3 clusters,
	 * so set those after we have restored the schema.
	 */
	if (GET_MAJOR_VERSION(old_cluster.major_version) <= 902)
		set_frozenxids(true);

	/* update new_cluster info now that we have objects in the databases */
	get_db_and_rel_infos(&new_cluster);
}

/*
 * Delete the given subdirectory contents from the new cluster
 */
static void
remove_new_subdir(const char *subdir, bool rmtopdir)
{
	char		new_path[MAXPGPATH];

	prep_status("Deleting files from new %s", subdir);

	snprintf(new_path, sizeof(new_path), "%s/%s", new_cluster.pgdata, subdir);
	if (!rmtree(new_path, rmtopdir))
		pg_fatal("could not delete directory \"%s\"\n", new_path);

	check_ok();
}

/*
 * Copy a subdirectory's files from the old cluster to the new cluster
 */
static void
copy_subdir_files(const char *old_subdir, const char *new_subdir)
{
	char		old_path[MAXPGPATH];
	char		new_path[MAXPGPATH];

	remove_new_subdir(new_subdir, true);

	snprintf(old_path, sizeof(old_path), "%s/%s", old_cluster.pgdata, old_subdir);
	snprintf(new_path, sizeof(new_path), "%s/%s", new_cluster.pgdata, new_subdir);

	prep_status("Copying old %s to new server", old_subdir);

	exec_prog(UTILITY_LOG_FILE, NULL, true, true,
#ifndef WIN32
			  "cp -Rf \"%s\" \"%s\"",
#else
	/* flags: everything, no confirm, quiet, overwrite read-only */
			  "xcopy /e /y /q /r \"%s\" \"%s\\\"",
#endif
			  old_path, new_path);

	check_ok();
}

static void
copy_xact_xlog_xid(void)
{
	/*
	 * Copy old commit logs to new data dir.  pg_clog was renamed to pg_xact
	 * in PostgreSQL 10.
	 */
	copy_subdir_files(GET_MAJOR_VERSION(old_cluster.major_version) <= 906 ?
					  "pg_clog" : "pg_xact",
					  GET_MAJOR_VERSION(new_cluster.major_version) <= 906 ?
					  "pg_clog" : "pg_xact");

	prep_status("Setting oldest XID for new cluster");
	exec_prog(UTILITY_LOG_FILE, NULL, true, true,
			  "\"%s/pg_resetwal\" -f -u %u \"%s\"",
			  new_cluster.bindir, old_cluster.controldata.chkpnt_oldstxid,
			  new_cluster.pgdata);
	check_ok();

	/* set the next transaction id and epoch of the new cluster */
	prep_status("Setting next transaction ID and epoch for new cluster");
	exec_prog(UTILITY_LOG_FILE, NULL, true, true,
			  "\"%s/pg_resetwal\" -f -x %u \"%s\"",
			  new_cluster.bindir, old_cluster.controldata.chkpnt_nxtxid,
			  new_cluster.pgdata);
	exec_prog(UTILITY_LOG_FILE, NULL, true, true,
			  "\"%s/pg_resetwal\" -f -e %u \"%s\"",
			  new_cluster.bindir, old_cluster.controldata.chkpnt_nxtepoch,
			  new_cluster.pgdata);
	/* must reset commit timestamp limits also */
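	/* (-c takes "oldest,newest"; both are set to the old cluster's next xid) */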
	exec_prog(UTILITY_LOG_FILE, NULL, true, true,
			  "\"%s/pg_resetwal\" -f -c %u,%u \"%s\"",
			  new_cluster.bindir,
			  old_cluster.controldata.chkpnt_nxtxid,
			  old_cluster.controldata.chkpnt_nxtxid,
			  new_cluster.pgdata);
	check_ok();

	/*
	 * If the old server is before the MULTIXACT_FORMATCHANGE_CAT_VER change
	 * (see pg_upgrade.h) and the new server is after, then we don't copy
	 * pg_multixact files, but we need to reset pg_control so that the new
	 * server doesn't attempt to read multis older than the cutoff value.
	 */
	if (old_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER &&
		new_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER)
	{
		copy_subdir_files("pg_multixact/offsets", "pg_multixact/offsets");
		copy_subdir_files("pg_multixact/members", "pg_multixact/members");

		prep_status("Setting next multixact ID and offset for new cluster");

		/*
		 * we preserve all files and contents, so we must preserve both "next"
		 * counters here and the oldest multi present on the system.
		 */
		exec_prog(UTILITY_LOG_FILE, NULL, true, true,
				  "\"%s/pg_resetwal\" -O %u -m %u,%u \"%s\"",
				  new_cluster.bindir,
				  old_cluster.controldata.chkpnt_nxtmxoff,
				  old_cluster.controldata.chkpnt_nxtmulti,
				  old_cluster.controldata.chkpnt_oldstMulti,
				  new_cluster.pgdata);
		check_ok();
	}
	else if (new_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER)
	{
		/*
		 * Remove offsets/0000 file created by initdb that no longer matches
		 * the new multi-xid value.  "members" starts at zero so no need to
		 * remove it.
		 */
		remove_new_subdir("pg_multixact/offsets", false);

		prep_status("Setting oldest multixact ID in new cluster");

		/*
		 * We don't preserve files in this case, but it's important that the
		 * oldest multi is set to the latest value used by the old system, so
		 * that multixact.c returns the empty set for multis that might be
		 * present on disk.  We set next multi to the value following that; it
		 * might end up wrapped around (i.e. 0) if the old cluster had
		 * next=MaxMultiXactId, but multixact.c can cope with that just fine.
		 */
		exec_prog(UTILITY_LOG_FILE, NULL, true, true,
				  "\"%s/pg_resetwal\" -m %u,%u \"%s\"",
				  new_cluster.bindir,
				  old_cluster.controldata.chkpnt_nxtmulti + 1,
				  old_cluster.controldata.chkpnt_nxtmulti,
				  new_cluster.pgdata);
		check_ok();
	}

	/* now reset the wal archives in the new cluster */
	prep_status("Resetting WAL archives");
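	/*
	 * nextxlogfile is a 24-character WAL segment name (8 hex digits each for
	 * timeline, log, and segment); skipping its first 8 characters drops the
	 * old timeline ID, which is replaced with timeline 1 below.
	 */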
	exec_prog(UTILITY_LOG_FILE, NULL, true, true,
	/* use timeline 1 to match controldata and no WAL history file */
			  "\"%s/pg_resetwal\" -l 00000001%s \"%s\"", new_cluster.bindir,
			  old_cluster.controldata.nextxlogfile + 8,
			  new_cluster.pgdata);
	check_ok();
}


/*
 *	set_frozenxids()
 *
 * This is called on the new cluster before we restore anything, with
 * minmxid_only = false.  Its purpose is to ensure that all initdb-created
 * vacuumable tables have relfrozenxid/relminmxid matching the old cluster's
 * xid/mxid counters.  We also initialize the datfrozenxid/datminmxid of the
 * built-in databases to match.
 *
 * As we create user tables later, their relfrozenxid/relminmxid fields will
 * be restored properly by the binary-upgrade restore script.  Likewise for
 * user-database datfrozenxid/datminmxid.  However, if we're upgrading from a
 * pre-9.3 database, which does not store per-table or per-DB minmxid, then
 * the relminmxid/datminmxid values filled in by the restore script will just
 * be zeroes.
 *
 * Hence, with a pre-9.3 source database, a second call occurs after
 * everything is restored, with minmxid_only = true.  This pass will
 * initialize all tables and databases, both those made by initdb and user
 * objects, with the desired minmxid value.  frozenxid values are left alone.
 */
static void
set_frozenxids(bool minmxid_only)
{
	int			dbnum;
	PGconn	   *conn,
			   *conn_template1;
	PGresult   *dbres;
	int			ntups;
	int			i_datname;
	int			i_datallowconn;

	if (!minmxid_only)
		prep_status("Setting frozenxid and minmxid counters in new cluster");
	else
		prep_status("Setting minmxid counter in new cluster");

	conn_template1 = connectToServer(&new_cluster, "template1");

	if (!minmxid_only)
		/* set pg_database.datfrozenxid */
		PQclear(executeQueryOrDie(conn_template1,
								  "UPDATE pg_catalog.pg_database "
								  "SET	datfrozenxid = '%u'",
								  old_cluster.controldata.chkpnt_nxtxid));

	/* set pg_database.datminmxid */
	PQclear(executeQueryOrDie(conn_template1,
							  "UPDATE pg_catalog.pg_database "
							  "SET	datminmxid = '%u'",
							  old_cluster.controldata.chkpnt_nxtmulti));

	/* get database names */
	dbres = executeQueryOrDie(conn_template1,
							  "SELECT	datname, datallowconn "
							  "FROM	pg_catalog.pg_database");

	i_datname = PQfnumber(dbres, "datname");
	i_datallowconn = PQfnumber(dbres, "datallowconn");

	ntups = PQntuples(dbres);
	for (dbnum = 0; dbnum < ntups; dbnum++)
	{
		char	   *datname = PQgetvalue(dbres, dbnum, i_datname);
		char	   *datallowconn = PQgetvalue(dbres, dbnum, i_datallowconn);

		/*
		 * We must update databases where datallowconn = false, e.g.
		 * template0, because autovacuum increments their datfrozenxids,
		 * relfrozenxids, and relminmxid even if autovacuum is turned off, and
		 * even though all the data rows are already frozen.  To enable this,
		 * we temporarily change datallowconn.
		 */
		if (strcmp(datallowconn, "f") == 0)
			PQclear(executeQueryOrDie(conn_template1,
									  "ALTER DATABASE %s ALLOW_CONNECTIONS = true",
									  quote_identifier(datname)));

		conn = connectToServer(&new_cluster, datname);

		if (!minmxid_only)
			/* set pg_class.relfrozenxid */
			PQclear(executeQueryOrDie(conn,
									  "UPDATE	pg_catalog.pg_class "
									  "SET	relfrozenxid = '%u' "
			/* only heap, materialized view, and TOAST are vacuumed */
									  "WHERE	relkind IN ("
									  CppAsString2(RELKIND_RELATION) ", "
									  CppAsString2(RELKIND_MATVIEW) ", "
									  CppAsString2(RELKIND_TOASTVALUE) ")",
									  old_cluster.controldata.chkpnt_nxtxid));

		/* set pg_class.relminmxid */
		PQclear(executeQueryOrDie(conn,
								  "UPDATE	pg_catalog.pg_class "
								  "SET	relminmxid = '%u' "
		/* only heap, materialized view, and TOAST are vacuumed */
								  "WHERE	relkind IN ("
								  CppAsString2(RELKIND_RELATION) ", "
								  CppAsString2(RELKIND_MATVIEW) ", "
								  CppAsString2(RELKIND_TOASTVALUE) ")",
								  old_cluster.controldata.chkpnt_nxtmulti));
		PQfinish(conn);

		/* Reset datallowconn flag */
		if (strcmp(datallowconn, "f") == 0)
			PQclear(executeQueryOrDie(conn_template1,
									  "ALTER DATABASE %s ALLOW_CONNECTIONS = false",
									  quote_identifier(datname)));
	}

	PQclear(dbres);

	PQfinish(conn_template1);

	check_ok();
}


static void
cleanup(void)
{
	fclose(log_opts.internal);

	/* Remove dump and log files? */
	if (!log_opts.retain)
	{
		int			dbnum;
		char	  **filename;

		for (filename = output_files; *filename != NULL; filename++)
			unlink(*filename);

		/* remove dump files */
		unlink(GLOBALS_DUMP_FILE);

		if (old_cluster.dbarr.dbs)
			for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
			{
				char		sql_file_name[MAXPGPATH],
							log_file_name[MAXPGPATH];
				DbInfo	   *old_db = &old_cluster.dbarr.dbs[dbnum];

				snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
				unlink(sql_file_name);

				snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);
				unlink(log_file_name);
			}
	}
}