/*
 *	pg_upgrade.c
 *
 *	main source file
 *
 *	Copyright (c) 2010-2016, PostgreSQL Global Development Group
 *	src/bin/pg_upgrade/pg_upgrade.c
 */

/*
 *	To simplify the upgrade process, we force certain system values to be
 *	identical between old and new clusters:
 *
 *	We control all assignments of pg_class.oid (and relfilenode) so toast
 *	oids are the same between old and new clusters.  This is important
 *	because toast oids are stored as toast pointers in user tables.
 *
 *	While pg_class.oid and pg_class.relfilenode are initially the same
 *	in a cluster, they can diverge due to CLUSTER, REINDEX, or VACUUM
 *	FULL.  In the new cluster, pg_class.oid and pg_class.relfilenode will
 *	be the same and will match the old pg_class.oid value.  Because of
 *	this, old/new pg_class.relfilenode values will not match if CLUSTER,
 *	REINDEX, or VACUUM FULL have been performed in the old cluster.
 *
 *	We control all assignments of pg_type.oid because these oids are stored
 *	in user composite type values.
 *
 *	We control all assignments of pg_enum.oid because these oids are stored
 *	in user tables as enum values.
 *
 *	We control all assignments of pg_authid.oid because these oids are stored
 *	in pg_largeobject_metadata.
 */



#include "postgres_fe.h"

#include "pg_upgrade.h"
#include "common/restricted_token.h"
#include "fe_utils/string_utils.h"

#ifdef HAVE_LANGINFO_H
#include <langinfo.h>
#endif

static void prepare_new_cluster(void);
static void prepare_new_databases(void);
static void create_new_objects(void);
static void copy_clog_xlog_xid(void);
static void set_frozenxids(bool minmxid_only);
static void setup(char *argv0, bool *live_check);
static void cleanup(void);

ClusterInfo old_cluster,
			new_cluster;
OSInfo		os_info;

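/*
 * Log files written during the upgrade; cleanup() removes them at the end
 * of a successful run unless log file retention was requested
 * (log_opts.retain).
 */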
char	   *output_files[] = {
	SERVER_LOG_FILE,
#ifdef WIN32
	/* unique file for pg_ctl start */
	SERVER_START_LOG_FILE,
#endif
	UTILITY_LOG_FILE,
	INTERNAL_LOG_FILE,
	NULL
};


int
main(int argc, char **argv)
{
	char	   *analyze_script_file_name = NULL;
	char	   *deletion_script_file_name = NULL;
	bool		live_check = false;

	/* Ensure that all files created by pg_upgrade are non-world-readable */
	umask(S_IRWXG | S_IRWXO);

	parseCommandLine(argc, argv);

	get_restricted_token(os_info.progname);

	adjust_data_dir(&old_cluster);
	adjust_data_dir(&new_cluster);

	setup(argv[0], &live_check);

	output_check_banner(live_check);

	check_cluster_versions();

	get_sock_dir(&old_cluster, live_check);
	get_sock_dir(&new_cluster, false);

	check_cluster_compatibility(live_check);

	check_and_dump_old_cluster(live_check);


	/* -- NEW -- */
	start_postmaster(&new_cluster, true);

	check_new_cluster();
	report_clusters_compatible();

	pg_log(PG_REPORT, "\nPerforming Upgrade\n");
	pg_log(PG_REPORT, "------------------\n");

	prepare_new_cluster();

	stop_postmaster(false);

	/*
	 * Destructive Changes to New Cluster
	 */

	copy_clog_xlog_xid();

	/* New now using xids of the old system */

	/* -- NEW -- */
	start_postmaster(&new_cluster, true);

	prepare_new_databases();

	create_new_objects();

	stop_postmaster(false);

	/*
	 * Most failures happen in create_new_objects(), which has completed at
	 * this point.  We do this here because it is just before linking, which
	 * will link the old and new cluster data files, preventing the old
	 * cluster from being safely started once the new cluster is started.
	 */
	if (user_opts.transfer_mode == TRANSFER_MODE_LINK)
		disable_old_cluster();

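	/*
	 * Copy or link the user relation files into the new cluster, per
	 * user_opts.transfer_mode, for every tablespace.
	 */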
	transfer_all_new_tablespaces(&old_cluster.dbarr, &new_cluster.dbarr,
								 old_cluster.pgdata, new_cluster.pgdata);

	/*
	 * Assuming OIDs are only used in system tables, there is no need to
	 * restore the OID counter because we have not transferred any OIDs from
	 * the old system, but we do it anyway just in case.  We do it late here
	 * because there is no need to have the schema load use new oids.
	 */
	prep_status("Setting next OID for new cluster");
	exec_prog(UTILITY_LOG_FILE, NULL, true,
			  "\"%s/pg_resetxlog\" -o %u \"%s\"",
			  new_cluster.bindir, old_cluster.controldata.chkpnt_nxtoid,
			  new_cluster.pgdata);
	check_ok();

	prep_status("Sync data directory to disk");
	exec_prog(UTILITY_LOG_FILE, NULL, true,
			  "\"%s/initdb\" --sync-only \"%s\"", new_cluster.bindir,
			  new_cluster.pgdata);
	check_ok();

	create_script_for_cluster_analyze(&analyze_script_file_name);
	create_script_for_old_cluster_deletion(&deletion_script_file_name);

	issue_warnings_and_set_wal_level();

	pg_log(PG_REPORT, "\nUpgrade Complete\n");
	pg_log(PG_REPORT, "----------------\n");

	output_completion_banner(analyze_script_file_name,
							 deletion_script_file_name);

	pg_free(analyze_script_file_name);
	pg_free(deletion_script_file_name);

	cleanup();

	return 0;
}


static void
setup(char *argv0, bool *live_check)
{
	char		exec_path[MAXPGPATH];	/* full path to my executable */

	/*
	 * Make sure the user has a clean environment; otherwise, we may confuse
	 * libpq when we connect to one (or both) of the servers.
	 */
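	/*
	 * check_pghost_envvar() fails if PGHOST or PGHOSTADDR points at a
	 * non-local server.
	 */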
	check_pghost_envvar();

	verify_directories();

	/* no postmasters should be running, except for a live check */
	if (pid_lock_file_exists(old_cluster.pgdata))
	{
		/*
		 * If we have a postmaster.pid file, try to start the server.  If it
		 * starts, the pid file was stale, so stop the server.  If it doesn't
		 * start, assume the server is running.  If the pid file is left over
		 * from a server crash, this also allows any committed transactions
		 * stored in the WAL to be replayed so they are not lost, because WAL
		 * files are not transferred from old to new servers.  We later check
		 * for a clean shutdown.
		 */
		if (start_postmaster(&old_cluster, false))
			stop_postmaster(false);
		else
		{
			if (!user_opts.check)
				pg_fatal("There seems to be a postmaster servicing the old cluster.\n"
						 "Please shut down that postmaster and try again.\n");
			else
				*live_check = true;
		}
	}

	/* same goes for the new postmaster */
	if (pid_lock_file_exists(new_cluster.pgdata))
	{
		if (start_postmaster(&new_cluster, false))
			stop_postmaster(false);
		else
			pg_fatal("There seems to be a postmaster servicing the new cluster.\n"
					 "Please shut down that postmaster and try again.\n");
	}

	/* get path to pg_upgrade executable */
	if (find_my_exec(argv0, exec_path) < 0)
		pg_fatal("%s: could not find own program executable\n", argv0);

	/* Trim off program name and keep just path */
	*last_dir_separator(exec_path) = '\0';
	canonicalize_path(exec_path);
	os_info.exec_path = pg_strdup(exec_path);
}


static void
prepare_new_cluster(void)
{
	/*
	 * It would make more sense to freeze after loading the schema, but that
	 * would cause us to lose the frozenxids restored by the load.  We use
	 * --analyze so autovacuum doesn't update statistics later.
	 */
	prep_status("Analyzing all rows in the new cluster");
	exec_prog(UTILITY_LOG_FILE, NULL, true,
			  "\"%s/vacuumdb\" %s --all --analyze %s",
			  new_cluster.bindir, cluster_conn_opts(&new_cluster),
			  log_opts.verbose ? "--verbose" : "");
	check_ok();

	/*
	 * We do freeze after analyze so pg_statistic is also frozen. template0 is
	 * not frozen here, but data rows were frozen by initdb, and we set its
	 * datfrozenxid, relfrozenxids, and relminmxid later to match the new xid
	 * counter.
	 */
	prep_status("Freezing all rows on the new cluster");
	exec_prog(UTILITY_LOG_FILE, NULL, true,
			  "\"%s/vacuumdb\" %s --all --freeze %s",
			  new_cluster.bindir, cluster_conn_opts(&new_cluster),
			  log_opts.verbose ? "--verbose" : "");
	check_ok();
}


static void
prepare_new_databases(void)
{
	/*
	 * Before we restore anything, set frozenxids of initdb-created tables.
	 */
	set_frozenxids(false);

	/*
	 * Now restore global objects (roles and tablespaces).
	 */
	prep_status("Restoring global objects in the new cluster");

	/*
	 * We have to create the databases first so we can install support
	 * functions in all the other databases.  Ideally we could create the
	 * support functions in template1 but pg_dumpall creates databases using
	 * the template0 template.
	 */
	exec_prog(UTILITY_LOG_FILE, NULL, true,
			  "\"%s/psql\" " EXEC_PSQL_ARGS " %s -f \"%s\"",
			  new_cluster.bindir, cluster_conn_opts(&new_cluster),
			  GLOBALS_DUMP_FILE);
	check_ok();

	/* we load this to get a current list of databases */
	get_db_and_rel_infos(&new_cluster);
}


static void
create_new_objects(void)
{
	int			dbnum;

	prep_status("Restoring database schemas in the new cluster\n");

	for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
	{
		char		sql_file_name[MAXPGPATH],
					log_file_name[MAXPGPATH];
		DbInfo	   *old_db = &old_cluster.dbarr.dbs[dbnum];
		PQExpBufferData connstr,
					escaped_connstr;

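		/*
		 * The database name needs two levels of quoting: first as a value
		 * inside a libpq connection string, then as a single shell argument,
		 * because the resulting command below is passed through a shell by
		 * parallel_exec_prog().
		 */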
		initPQExpBuffer(&connstr);
		appendPQExpBuffer(&connstr, "dbname=");
		appendConnStrVal(&connstr, old_db->db_name);
		initPQExpBuffer(&escaped_connstr);
		appendShellString(&escaped_connstr, connstr.data);
		termPQExpBuffer(&connstr);

		pg_log(PG_STATUS, "%s", old_db->db_name);
		snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
		snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);

		/*
		 * pg_dump only produces its output at the end, so there is little
		 * parallelism if using the pipe.
		 */
		parallel_exec_prog(log_file_name,
						   NULL,
		 "\"%s/pg_restore\" %s --exit-on-error --verbose --dbname %s \"%s\"",
						   new_cluster.bindir,
						   cluster_conn_opts(&new_cluster),
						   escaped_connstr.data,
						   sql_file_name);

		termPQExpBuffer(&escaped_connstr);
	}

	/* reap all children */
	while (reap_child(true) == true)
		;

	end_progress_output();
	check_ok();

	/*
	 * We don't have minmxids for databases or relations in pre-9.3 clusters,
	 * so set those after we have restored the schema.
	 */
	if (GET_MAJOR_VERSION(old_cluster.major_version) <= 902)
		set_frozenxids(true);

	/* regenerate now that we have objects in the databases */
	get_db_and_rel_infos(&new_cluster);
}

/*
 * Delete the given subdirectory contents from the new cluster
 */
static void
remove_new_subdir(char *subdir, bool rmtopdir)
{
	char		new_path[MAXPGPATH];

	prep_status("Deleting files from new %s", subdir);

	snprintf(new_path, sizeof(new_path), "%s/%s", new_cluster.pgdata, subdir);
	if (!rmtree(new_path, rmtopdir))
		pg_fatal("could not delete directory \"%s\"\n", new_path);

	check_ok();
}

/*
 * Copy the given subdirectory's files from the old cluster to the new cluster
 */
static void
copy_subdir_files(char *subdir)
{
	char		old_path[MAXPGPATH];
	char		new_path[MAXPGPATH];

	remove_new_subdir(subdir, true);

	snprintf(old_path, sizeof(old_path), "%s/%s", old_cluster.pgdata, subdir);
	snprintf(new_path, sizeof(new_path), "%s/%s", new_cluster.pgdata, subdir);

	prep_status("Copying old %s to new server", subdir);

	exec_prog(UTILITY_LOG_FILE, NULL, true,
#ifndef WIN32
			  "cp -Rf \"%s\" \"%s\"",
#else
	/* flags: everything, no confirm, quiet, overwrite read-only */
			  "xcopy /e /y /q /r \"%s\" \"%s\\\"",
#endif
			  old_path, new_path);

	check_ok();
}

static void
copy_clog_xlog_xid(void)
{
	/* copy old commit logs to new data dir */
	copy_subdir_files("pg_clog");

	prep_status("Setting oldest XID for new cluster");
	exec_prog(UTILITY_LOG_FILE, NULL, true,
			  "\"%s/pg_resetxlog\" -f -u %u \"%s\"",
			  new_cluster.bindir, old_cluster.controldata.chkpnt_oldstxid,
			  new_cluster.pgdata);
	check_ok();

	/* set the next transaction id and epoch of the new cluster */
	prep_status("Setting next transaction ID and epoch for new cluster");
	exec_prog(UTILITY_LOG_FILE, NULL, true,
			  "\"%s/pg_resetxlog\" -f -x %u \"%s\"",
			  new_cluster.bindir, old_cluster.controldata.chkpnt_nxtxid,
			  new_cluster.pgdata);
	exec_prog(UTILITY_LOG_FILE, NULL, true,
			  "\"%s/pg_resetxlog\" -f -e %u \"%s\"",
			  new_cluster.bindir, old_cluster.controldata.chkpnt_nxtepoch,
			  new_cluster.pgdata);
	/* must reset commit timestamp limits also */
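	/*
	 * -c takes the oldest and newest transaction IDs that have commit
	 * timestamps; both are set to the old cluster's next XID because commit
	 * timestamp data itself is not carried over from the old cluster.
	 */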
	exec_prog(UTILITY_LOG_FILE, NULL, true,
			  "\"%s/pg_resetxlog\" -f -c %u,%u \"%s\"",
			  new_cluster.bindir,
			  old_cluster.controldata.chkpnt_nxtxid,
			  old_cluster.controldata.chkpnt_nxtxid,
			  new_cluster.pgdata);
	check_ok();

	/*
	 * If the old server is before the MULTIXACT_FORMATCHANGE_CAT_VER change
	 * (see pg_upgrade.h) and the new server is after, then we don't copy
	 * pg_multixact files, but we need to reset pg_control so that the new
	 * server doesn't attempt to read multis older than the cutoff value.
	 */
	if (old_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER &&
		new_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER)
	{
		copy_subdir_files("pg_multixact/offsets");
		copy_subdir_files("pg_multixact/members");

		prep_status("Setting next multixact ID and offset for new cluster");

		/*
		 * We preserve all files and contents, so we must preserve both "next"
		 * counters here and the oldest multi present on the system.
		 */
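		/*
		 * -O sets the next multixact offset; -m takes the next and the
		 * oldest multixact IDs, in that order.
		 */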
		exec_prog(UTILITY_LOG_FILE, NULL, true,
				  "\"%s/pg_resetxlog\" -O %u -m %u,%u \"%s\"",
				  new_cluster.bindir,
				  old_cluster.controldata.chkpnt_nxtmxoff,
				  old_cluster.controldata.chkpnt_nxtmulti,
				  old_cluster.controldata.chkpnt_oldstMulti,
				  new_cluster.pgdata);
		check_ok();
	}
	else if (new_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER)
	{
		/*
		 * Remove offsets/0000 file created by initdb that no longer matches
		 * the new multi-xid value.  "members" starts at zero so no need to
		 * remove it.
		 */
		remove_new_subdir("pg_multixact/offsets", false);

		prep_status("Setting oldest multixact ID on new cluster");

		/*
		 * We don't preserve files in this case, but it's important that the
		 * oldest multi is set to the latest value used by the old system, so
		 * that multixact.c returns the empty set for multis that might be
		 * present on disk.  We set next multi to the value following that; it
		 * might end up wrapped around (i.e. 0) if the old cluster had
		 * next=MaxMultiXactId, but multixact.c can cope with that just fine.
		 */
		exec_prog(UTILITY_LOG_FILE, NULL, true,
				  "\"%s/pg_resetxlog\" -m %u,%u \"%s\"",
				  new_cluster.bindir,
				  old_cluster.controldata.chkpnt_nxtmulti + 1,
				  old_cluster.controldata.chkpnt_nxtmulti,
				  new_cluster.pgdata);
		check_ok();
	}

	/* now reset the wal archives in the new cluster */
	prep_status("Resetting WAL archives");
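	/*
	 * nextxlogfile is a 24-character WAL segment file name whose first
	 * 8 hex digits are the timeline.  Skipping those and prefixing
	 * "00000001" keeps the old segment position but forces timeline 1.
	 */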
	exec_prog(UTILITY_LOG_FILE, NULL, true,
	/* use timeline 1 to match controldata and no WAL history file */
			  "\"%s/pg_resetxlog\" -l 00000001%s \"%s\"", new_cluster.bindir,
			  old_cluster.controldata.nextxlogfile + 8,
			  new_cluster.pgdata);
	check_ok();
}


/*
 *	set_frozenxids()
 *
 * This is called on the new cluster before we restore anything, with
 * minmxid_only = false.  Its purpose is to ensure that all initdb-created
 * vacuumable tables have relfrozenxid/relminmxid matching the old cluster's
 * xid/mxid counters.  We also initialize the datfrozenxid/datminmxid of the
 * built-in databases to match.
 *
 * As we create user tables later, their relfrozenxid/relminmxid fields will
 * be restored properly by the binary-upgrade restore script.  Likewise for
 * user-database datfrozenxid/datminmxid.  However, if we're upgrading from a
 * pre-9.3 database, which does not store per-table or per-DB minmxid, then
 * the relminmxid/datminmxid values filled in by the restore script will just
 * be zeroes.
 *
 * Hence, with a pre-9.3 source database, a second call occurs after
 * everything is restored, with minmxid_only = true.  This pass will
 * initialize all tables and databases, both those made by initdb and user
 * objects, with the desired minmxid value.  frozenxid values are left alone.
 */
static void
set_frozenxids(bool minmxid_only)
{
	int			dbnum;
	PGconn	   *conn,
			   *conn_template1;
	PGresult   *dbres;
	int			ntups;
	int			i_datname;
	int			i_datallowconn;

	if (!minmxid_only)
		prep_status("Setting frozenxid and minmxid counters in new cluster");
	else
		prep_status("Setting minmxid counter in new cluster");

	conn_template1 = connectToServer(&new_cluster, "template1");

	if (!minmxid_only)
		/* set pg_database.datfrozenxid */
		PQclear(executeQueryOrDie(conn_template1,
								  "UPDATE pg_catalog.pg_database "
								  "SET	datfrozenxid = '%u'",
								  old_cluster.controldata.chkpnt_nxtxid));

	/* set pg_database.datminmxid */
	PQclear(executeQueryOrDie(conn_template1,
							  "UPDATE pg_catalog.pg_database "
							  "SET	datminmxid = '%u'",
							  old_cluster.controldata.chkpnt_nxtmulti));

	/* get database names */
	dbres = executeQueryOrDie(conn_template1,
							  "SELECT	datname, datallowconn "
							  "FROM	pg_catalog.pg_database");

	i_datname = PQfnumber(dbres, "datname");
	i_datallowconn = PQfnumber(dbres, "datallowconn");

	ntups = PQntuples(dbres);
	for (dbnum = 0; dbnum < ntups; dbnum++)
	{
		char	   *datname = PQgetvalue(dbres, dbnum, i_datname);
		char	   *datallowconn = PQgetvalue(dbres, dbnum, i_datallowconn);

		/*
		 * We must update databases where datallowconn = false, e.g.
		 * template0, because autovacuum increments their datfrozenxids,
		 * relfrozenxids, and relminmxid even if autovacuum is turned off, and
		 * even though all the data rows are already frozen.  To enable this,
		 * we temporarily change datallowconn.
		 */
		if (strcmp(datallowconn, "f") == 0)
			PQclear(executeQueryOrDie(conn_template1,
								"ALTER DATABASE %s ALLOW_CONNECTIONS = true",
									  quote_identifier(datname)));

		conn = connectToServer(&new_cluster, datname);

		if (!minmxid_only)
			/* set pg_class.relfrozenxid */
			PQclear(executeQueryOrDie(conn,
									  "UPDATE	pg_catalog.pg_class "
									  "SET	relfrozenxid = '%u' "
			/* only heap, materialized view, and TOAST are vacuumed */
									  "WHERE	relkind IN ('r', 'm', 't')",
									  old_cluster.controldata.chkpnt_nxtxid));

		/* set pg_class.relminmxid */
		PQclear(executeQueryOrDie(conn,
								  "UPDATE	pg_catalog.pg_class "
								  "SET	relminmxid = '%u' "
		/* only heap, materialized view, and TOAST are vacuumed */
								  "WHERE	relkind IN ('r', 'm', 't')",
								  old_cluster.controldata.chkpnt_nxtmulti));
		PQfinish(conn);

		/* Reset datallowconn flag */
		if (strcmp(datallowconn, "f") == 0)
			PQclear(executeQueryOrDie(conn_template1,
							   "ALTER DATABASE %s ALLOW_CONNECTIONS = false",
									  quote_identifier(datname)));
	}

	PQclear(dbres);

	PQfinish(conn_template1);

	check_ok();
}


static void
cleanup(void)
{
	fclose(log_opts.internal);

	/* Remove dump and log files? */
	if (!log_opts.retain)
	{
		int			dbnum;
		char	  **filename;

		for (filename = output_files; *filename != NULL; filename++)
			unlink(*filename);

		/* remove dump files */
		unlink(GLOBALS_DUMP_FILE);

		if (old_cluster.dbarr.dbs)
			for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
			{
				char		sql_file_name[MAXPGPATH],
							log_file_name[MAXPGPATH];
				DbInfo	   *old_db = &old_cluster.dbarr.dbs[dbnum];

				snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
				unlink(sql_file_name);

				snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);
				unlink(log_file_name);
			}
	}
}