1 /*
2  * check.c
3  *
4  * server checks and output routines
5  *
6  * Copyright (c) 2010-2024, PostgreSQL Global Development Group
7  * src/bin/pg_upgrade/check.c
8  */
9 
10 #include "postgres_fe.h"
11 
12 #include "catalog/pg_authid_d.h"
13 #include "catalog/pg_class_d.h"
14 #include "catalog/pg_collation.h"
15 #include "fe_utils/string_utils.h"
16 #include "mb/pg_wchar.h"
17 #include "pg_upgrade.h"
18 
19 static void check_new_cluster_is_empty(void);
28 static void check_for_new_tablespace_dir(void);
32 static void check_old_cluster_for_valid_slots(bool live_check);
33 static void check_old_cluster_subscription_state(void);
34 
35 /*
36  * DataTypesUsageChecks - definitions of data type checks for the old cluster
37  * in order to determine if an upgrade can be performed. See the comment on
38  * data_types_usage_checks below for a more detailed description.
39  */
40 typedef struct
41 {
42  /* Status line to print to the user */
43  const char *status;
44  /* Filename to store report to */
45  const char *report_filename;
46  /* Query to extract the oid of the datatype */
47  const char *base_query;
48  /* Text to store to report in case of error */
49  const char *report_text;
50  /* The latest version where the check applies */
51  int threshold_version;
52  /* A function pointer for determining if the check applies */
53  DataTypesUsageVersionCheck version_hook;
54 } DataTypesUsageChecks;
55 
56 /*
57  * Special values for threshold_version for indicating that a check applies to
58  * all versions, or that a custom function needs to be invoked to determine
59  * if the check applies.
60  */
61 #define MANUAL_CHECK 1
62 #define ALL_VERSIONS -1
63 
64 /*--
65  * Data type usage checks. Each check for problematic data type usage is
66  * defined in this array with metadata, SQL query for finding the data type
67  * and functionality for deciding if the check is applicable to the version
68  * of the old cluster. The struct members are described in detail below:
69  *
70  * status A one-line string which can be printed to the user to
71  * inform about progress. Should not end with newline.
72  * report_filename The filename in which the list of problems detected by
73  * the check will be printed.
74  * base_query A query which extracts the Oid of the datatype checked
75  * for.
76  * report_text The text which will be printed to the user to explain
77  * what the check did, and why it failed. The text should
78  * end with a newline, and does not need to refer to the
79  * report_filename as that is automatically appended to
80  * the report with the path to the log folder.
81  * threshold_version The major version of PostgreSQL for which to run the
82  * check. If the old cluster is less than or equal to
83  * the threshold version, the check will be executed.
84  * If the old version is greater than the threshold then
85  * the check is skipped. If the threshold_version is set
86  * to ALL_VERSIONS then it will be run unconditionally;
87  * if set to MANUAL_CHECK then the version_hook function
88  * will be executed in order to determine whether or not
89  * to run.
90  * version_hook A function pointer to a version check function of type
91  * DataTypesUsageVersionCheck which is used to determine
92  * if the check is applicable to the old cluster. If the
93  * version_hook returns true then the check will be run,
94  * else it will be skipped. The function will only be
95  * executed if threshold_version is set to MANUAL_CHECK.
96  */
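/*
 * As an illustrative (hypothetical) example of the threshold semantics: a
 * check for a type whose on-disk format changed in PostgreSQL 13 would set
 * .threshold_version = 1200, so it runs only when the old cluster is version
 * 12 or earlier. Thresholds use pg_upgrade's usual major-version encoding
 * (e.g. 906 for 9.6, 1500 for 15), as in the entries below.
 */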
97 static DataTypesUsageChecks data_types_usage_checks[] =
98 {
99  /*
100  * Look for composite types that were made during initdb *or* belong to
101  * information_schema; that's important in case information_schema was
102  * dropped and reloaded.
103  *
104  * The cutoff OID here should match the source cluster's value of
105  * FirstNormalObjectId. We hardcode it rather than using that C #define
106  * because, if that #define is ever changed, our own version's value is
107  * NOT what to use. Eventually we may need a test on the source cluster's
108  * version to select the correct value.
109  */
110  {
111  .status = gettext_noop("Checking for system-defined composite types in user tables"),
112  .report_filename = "tables_using_composite.txt",
113  .base_query =
114  "SELECT t.oid FROM pg_catalog.pg_type t "
115  "LEFT JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid "
116  " WHERE typtype = 'c' AND (t.oid < 16384 OR nspname = 'information_schema')",
117  .report_text =
118  gettext_noop("Your installation contains system-defined composite types in user tables.\n"
119  "These type OIDs are not stable across PostgreSQL versions,\n"
120  "so this cluster cannot currently be upgraded. You can drop the\n"
121  "problem columns and restart the upgrade.\n"),
122  .threshold_version = ALL_VERSIONS
123  },
124 
125  /*
126  * 9.3 -> 9.4 Fully implement the 'line' data type in 9.4, which
127  * previously returned "not enabled" by default and was only functionally
128  * enabled with a compile-time switch; as of 9.4 "line" has a different
129  * on-disk representation format.
130  */
131  {
132  .status = gettext_noop("Checking for incompatible \"line\" data type"),
133  .report_filename = "tables_using_line.txt",
134  .base_query =
135  "SELECT 'pg_catalog.line'::pg_catalog.regtype AS oid",
136  .report_text =
137  gettext_noop("Your installation contains the \"line\" data type in user tables.\n"
138  "this data type changed its internal and input/output format\n"
139  "between your old and new versions so this\n"
140  "cluster cannot currently be upgraded. You can\n"
141  "drop the problem columns and restart the upgrade.\n"),
142  .threshold_version = 903
143  },
144 
145  /*
146  * pg_upgrade only preserves these system values: pg_class.oid pg_type.oid
147  * pg_enum.oid
148  *
149  * Many of the reg* data types reference system catalog info that is not
150  * preserved, and hence these data types cannot be used in user tables
151  * upgraded by pg_upgrade.
152  */
153  {
154  .status = gettext_noop("Checking for reg* data types in user tables"),
155  .report_filename = "tables_using_reg.txt",
156 
157  /*
158  * Note: older servers will not have all of these reg* types, so we
159  * have to write the query like this rather than depending on casts to
160  * regtype.
161  */
162  .base_query =
163  "SELECT oid FROM pg_catalog.pg_type t "
164  "WHERE t.typnamespace = "
165  " (SELECT oid FROM pg_catalog.pg_namespace "
166  " WHERE nspname = 'pg_catalog') "
167  " AND t.typname IN ( "
168  /* pg_class.oid is preserved, so 'regclass' is OK */
169  " 'regcollation', "
170  " 'regconfig', "
171  " 'regdictionary', "
172  " 'regnamespace', "
173  " 'regoper', "
174  " 'regoperator', "
175  " 'regproc', "
176  " 'regprocedure' "
177  /* pg_authid.oid is preserved, so 'regrole' is OK */
178  /* pg_type.oid is (mostly) preserved, so 'regtype' is OK */
179  " )",
180  .report_text =
181  gettext_noop("Your installation contains one of the reg* data types in user tables.\n"
182  "These data types reference system OIDs that are not preserved by\n"
183  "pg_upgrade, so this cluster cannot currently be upgraded. You can\n"
184  "drop the problem columns and restart the upgrade.\n"),
185  .threshold_version = ALL_VERSIONS
186  },
187 
188  /*
189  * PG 16 increased the size of the 'aclitem' type, which breaks the
190  * on-disk format for existing data.
191  */
192  {
193  .status = gettext_noop("Checking for incompatible \"aclitem\" data type"),
194  .report_filename = "tables_using_aclitem.txt",
195  .base_query =
196  "SELECT 'pg_catalog.aclitem'::pg_catalog.regtype AS oid",
197  .report_text =
198  gettext_noop("Your installation contains the \"aclitem\" data type in user tables.\n"
199  "The internal format of \"aclitem\" changed in PostgreSQL version 16\n"
200  "so this cluster cannot currently be upgraded. You can drop the\n"
201  "problem columns and restart the upgrade.\n"),
202  .threshold_version = 1500
203  },
204 
205  /*
206  * It's no longer allowed to create tables or views with "unknown"-type
207  * columns. We do not complain about views with such columns, because
208  * they should get silently converted to "text" columns during the DDL
209  * dump and reload; it seems unlikely to be worth making users do that by
210  * hand. However, if there's a table with such a column, the DDL reload
211  * will fail, so we should pre-detect that rather than failing
212  * mid-upgrade. Worse, if there's a matview with such a column, the DDL
213  * reload will silently change it to "text" which won't match the on-disk
214  * storage (which is like "cstring"). So we *must* reject that.
215  */
216  {
217  .status = gettext_noop("Checking for invalid \"unknown\" user columns"),
218  .report_filename = "tables_using_unknown.txt",
219  .base_query =
220  "SELECT 'pg_catalog.unknown'::pg_catalog.regtype AS oid",
221  .report_text =
222  gettext_noop("Your installation contains the \"unknown\" data type in user tables.\n"
223  "This data type is no longer allowed in tables, so this cluster\n"
224  "cannot currently be upgraded. You can drop the problem columns\n"
225  "and restart the upgrade.\n"),
226  .threshold_version = 906
227  },
228 
229  /*
230  * PG 12 changed the 'sql_identifier' type storage to be based on name,
231  * not varchar, which breaks on-disk format for existing data. So we need
232  * to prevent upgrade when used in user objects (tables, indexes, ...). In
233  * 12, the sql_identifier data type was switched from varchar to name,
234  * which does affect the storage (name is by-ref, but not varlena). This
235  * means user tables using sql_identifier for columns are broken because
236  * the on-disk format is different.
237  */
238  {
239  .status = gettext_noop("Checking for invalid \"sql_identifier\" user columns"),
240  .report_filename = "tables_using_sql_identifier.txt",
241  .base_query =
242  "SELECT 'information_schema.sql_identifier'::pg_catalog.regtype AS oid",
243  .report_text =
244  gettext_noop("Your installation contains the \"sql_identifier\" data type in user tables.\n"
245  "The on-disk format for this data type has changed, so this\n"
246  "cluster cannot currently be upgraded. You can drop the problem\n"
247  "columns and restart the upgrade.\n"),
248  .threshold_version = 1100
249  },
250 
251  /*
252  * JSONB changed its storage format during 9.4 beta, so check for it.
253  */
254  {
255  .status = gettext_noop("Checking for incompatible \"jsonb\" data type in user tables"),
256  .report_filename = "tables_using_jsonb.txt",
257  .base_query =
258  "SELECT 'pg_catalog.jsonb'::pg_catalog.regtype AS oid",
259  .report_text =
260  gettext_noop("Your installation contains the \"jsonb\" data type in user tables.\n"
261  "The internal format of \"jsonb\" changed during 9.4 beta so this\n"
262  "cluster cannot currently be upgraded. You can drop the problem \n"
263  "columns and restart the upgrade.\n"),
264  .threshold_version = MANUAL_CHECK,
265  .version_hook = jsonb_9_4_check_applicable
266  },
267 
268  /*
269  * PG 12 removed types abstime, reltime, tinterval.
270  */
271  {
272  .status = gettext_noop("Checking for removed \"abstime\" data type in user tables"),
273  .report_filename = "tables_using_abstime.txt",
274  .base_query =
275  "SELECT 'pg_catalog.abstime'::pg_catalog.regtype AS oid",
276  .report_text =
277  gettext_noop("Your installation contains the \"abstime\" data type in user tables.\n"
278  "The \"abstime\" type has been removed in PostgreSQL version 12,\n"
279  "so this cluster cannot currently be upgraded. You can drop the\n"
280  "problem columns, or change them to another data type, and restart\n"
281  "the upgrade.\n"),
282  .threshold_version = 1100
283  },
284  {
285  .status = gettext_noop("Checking for removed \"reltime\" data type in user tables"),
286  .report_filename = "tables_using_reltime.txt",
287  .base_query =
288  "SELECT 'pg_catalog.reltime'::pg_catalog.regtype AS oid",
289  .report_text =
290  gettext_noop("Your installation contains the \"reltime\" data type in user tables.\n"
291  "The \"reltime\" type has been removed in PostgreSQL version 12,\n"
292  "so this cluster cannot currently be upgraded. You can drop the\n"
293  "problem columns, or change them to another data type, and restart\n"
294  "the upgrade.\n"),
295  .threshold_version = 1100
296  },
297  {
298  .status = gettext_noop("Checking for removed \"tinterval\" data type in user tables"),
299  .report_filename = "tables_using_tinterval.txt",
300  .base_query =
301  "SELECT 'pg_catalog.tinterval'::pg_catalog.regtype AS oid",
302  .report_text =
303  gettext_noop("Your installation contains the \"tinterval\" data type in user tables.\n"
304  "The \"tinterval\" type has been removed in PostgreSQL version 12,\n"
305  "so this cluster cannot currently be upgraded. You can drop the\n"
306  "problem columns, or change them to another data type, and restart\n"
307  "the upgrade.\n"),
308  .threshold_version = 1100
309  },
310 
311  /* End of checks marker, must remain last */
312  {
313  NULL, NULL, NULL, NULL, 0, NULL
314  }
315 };
316 
317 /*
318  * check_for_data_types_usage()
319  * Detect whether there are any stored columns depending on given type(s)
320  *
321  * If so, write a report to the given file name and signal a failure to the
322  * user.
323  *
324  * The checks to run are defined in a DataTypesUsageChecks structure where
325  * each check has metadata for explaining errors to the user, a base_query,
326  * a report filename and a function pointer hook for validating if the check
327  * should be executed given the cluster at hand.
328  *
329  * base_query should be a SELECT yielding a single column named "oid",
330  * containing the pg_type OIDs of one or more types that are known to have
331  * inconsistent on-disk representations across server versions.
332  *
333  * We check for the type(s) in tables, matviews, and indexes, but not views;
334  * there's no storage involved in a view.
335  */
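/*
 * For example, the "aclitem" entry above supplies the base_query
 *     SELECT 'pg_catalog.aclitem'::pg_catalog.regtype AS oid
 * which returns exactly one "oid" column; the recursive query built below
 * then expands that OID to domains, arrays, composites, and ranges over it.
 */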
336 static void
337 check_for_data_types_usage(ClusterInfo *cluster, DataTypesUsageChecks *checks)
338 {
339  bool found = false;
340  bool *results;
341  PQExpBufferData report;
342  DataTypesUsageChecks *tmp = checks;
343  int n_data_types_usage_checks = 0;
344 
345  prep_status("Checking for data type usage");
346 
347  /* Gather number of checks to perform */
348  while (tmp->status != NULL)
349  {
350  n_data_types_usage_checks++;
351  tmp++;
352  }
353 
354  /* Prepare an array to store the results of checks in */
355  results = pg_malloc0(sizeof(bool) * n_data_types_usage_checks);
356 
357  /*
358  * Connect to each database in the cluster and run all defined checks
359  * against that database before trying the next one.
360  */
361  for (int dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
362  {
363  DbInfo *active_db = &cluster->dbarr.dbs[dbnum];
364  PGconn *conn = connectToServer(cluster, active_db->db_name);
365 
366  for (int checknum = 0; checknum < n_data_types_usage_checks; checknum++)
367  {
368  PGresult *res;
369  int ntups;
370  int i_nspname;
371  int i_relname;
372  int i_attname;
373  FILE *script = NULL;
374  bool db_used = false;
375  char output_path[MAXPGPATH];
376  DataTypesUsageChecks *cur_check = &checks[checknum];
377 
378  if (cur_check->threshold_version == MANUAL_CHECK)
379  {
380  Assert(cur_check->version_hook);
381 
382  /*
383  * Make sure that the check applies to the current cluster
384  * version and skip if not. If no check hook has been defined
385  * we run the check for all versions.
386  */
387  if (!cur_check->version_hook(cluster))
388  continue;
389  }
390  else if (cur_check->threshold_version != ALL_VERSIONS)
391  {
392  if (GET_MAJOR_VERSION(cluster->major_version) > cur_check->threshold_version)
393  continue;
394  }
395  else
396  Assert(cur_check->threshold_version == ALL_VERSIONS);
397 
398  snprintf(output_path, sizeof(output_path), "%s/%s",
399  log_opts.basedir,
400  cur_check->report_filename);
401 
402  /*
403  * The type(s) of interest might be wrapped in a domain, array,
404  * composite, or range, and these container types can be nested
405  * (to varying extents depending on server version, but that's not
406  * of concern here). To handle all these cases we need a
407  * recursive CTE.
408  */
410  "WITH RECURSIVE oids AS ( "
411  /* start with the type(s) returned by base_query */
412  " %s "
413  " UNION ALL "
414  " SELECT * FROM ( "
415  /* inner WITH because we can only reference the CTE once */
416  " WITH x AS (SELECT oid FROM oids) "
417  /* domains on any type selected so far */
418  " SELECT t.oid FROM pg_catalog.pg_type t, x WHERE typbasetype = x.oid AND typtype = 'd' "
419  " UNION ALL "
420  /* arrays over any type selected so far */
421  " SELECT t.oid FROM pg_catalog.pg_type t, x WHERE typelem = x.oid AND typtype = 'b' "
422  " UNION ALL "
423  /* composite types containing any type selected so far */
424  " SELECT t.oid FROM pg_catalog.pg_type t, pg_catalog.pg_class c, pg_catalog.pg_attribute a, x "
425  " WHERE t.typtype = 'c' AND "
426  " t.oid = c.reltype AND "
427  " c.oid = a.attrelid AND "
428  " NOT a.attisdropped AND "
429  " a.atttypid = x.oid "
430  " UNION ALL "
431  /* ranges containing any type selected so far */
432  " SELECT t.oid FROM pg_catalog.pg_type t, pg_catalog.pg_range r, x "
433  " WHERE t.typtype = 'r' AND r.rngtypid = t.oid AND r.rngsubtype = x.oid"
434  " ) foo "
435  ") "
436  /* now look for stored columns of any such type */
437  "SELECT n.nspname, c.relname, a.attname "
438  "FROM pg_catalog.pg_class c, "
439  " pg_catalog.pg_namespace n, "
440  " pg_catalog.pg_attribute a "
441  "WHERE c.oid = a.attrelid AND "
442  " NOT a.attisdropped AND "
443  " a.atttypid IN (SELECT oid FROM oids) AND "
444  " c.relkind IN ("
445  CppAsString2(RELKIND_RELATION) ", "
446  CppAsString2(RELKIND_MATVIEW) ", "
447  CppAsString2(RELKIND_INDEX) ") AND "
448  " c.relnamespace = n.oid AND "
449  /* exclude possible orphaned temp tables */
450  " n.nspname !~ '^pg_temp_' AND "
451  " n.nspname !~ '^pg_toast_temp_' AND "
452  /* exclude system catalogs, too */
453  " n.nspname NOT IN ('pg_catalog', 'information_schema')",
454  cur_check->base_query);
455 
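/*
 * For instance (illustrative DDL only), with "aclitem" as the base type a
 * column created via
 *     CREATE DOMAIN acldom AS aclitem;
 *     CREATE TABLE t (a acldom[]);
 * is still reported, since the recursive CTE above follows the domain and
 * then the array built on top of it.
 */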
456  ntups = PQntuples(res);
457 
458  /*
459  * The datatype was found, so extract the data and log to the
460  * requested filename. We need to open the file for appending
461  * since the check might have already found the type in another
462  * database earlier in the loop.
463  */
464  if (ntups)
465  {
466  /*
467  * Make sure we have a buffer to save reports to now that we
468  * found a first failing check.
469  */
470  if (!found)
471  initPQExpBuffer(&report);
472  found = true;
473 
474  /*
475  * If this is the first time we see an error for the check in
476  * question then print a status message of the failure.
477  */
478  if (!results[checknum])
479  {
480  pg_log(PG_REPORT, " failed check: %s", _(cur_check->status));
481  appendPQExpBuffer(&report, "\n%s\n%s %s\n",
482  _(cur_check->report_text),
483  _("A list of the problem columns is in the file:"),
484  output_path);
485  }
486  results[checknum] = true;
487 
488  i_nspname = PQfnumber(res, "nspname");
489  i_relname = PQfnumber(res, "relname");
490  i_attname = PQfnumber(res, "attname");
491 
492  for (int rowno = 0; rowno < ntups; rowno++)
493  {
494  if (script == NULL && (script = fopen_priv(output_path, "a")) == NULL)
495  pg_fatal("could not open file \"%s\": %m", output_path);
496 
497  if (!db_used)
498  {
499  fprintf(script, "In database: %s\n", active_db->db_name);
500  db_used = true;
501  }
502  fprintf(script, " %s.%s.%s\n",
503  PQgetvalue(res, rowno, i_nspname),
504  PQgetvalue(res, rowno, i_relname),
505  PQgetvalue(res, rowno, i_attname));
506  }
507 
508  if (script)
509  {
510  fclose(script);
511  script = NULL;
512  }
513  }
514 
515  PQclear(res);
516  }
517 
518  PQfinish(conn);
519  }
520 
521  if (found)
522  pg_fatal("Data type checks failed: %s", report.data);
523 
524  pg_free(results);
525 
526  check_ok();
527 }
528 
529 /*
530  * fix_path_separator
531  * For non-Windows, just return the argument.
532  * For Windows convert any forward slash to a backslash
533  * such as is suitable for arguments to builtin commands
534  * like RMDIR and DEL.
535  */
536 static char *
538 {
539 #ifdef WIN32
540 
541  char *result;
542  char *c;
543 
544  result = pg_strdup(path);
545 
546  for (c = result; *c != '\0'; c++)
547  if (*c == '/')
548  *c = '\\';
549 
550  return result;
551 #else
552 
553  return path;
554 #endif
555 }
556 
557 void
558 output_check_banner(bool live_check)
559 {
560  if (user_opts.check && live_check)
561  {
563  "Performing Consistency Checks on Old Live Server\n"
564  "------------------------------------------------");
565  }
566  else
567  {
569  "Performing Consistency Checks\n"
570  "-----------------------------");
571  }
572 }
573 
574 
575 void
577 {
578  /* -- OLD -- */
579 
580  if (!live_check)
582 
583  /*
584  * Extract a list of databases, tables, and logical replication slots from
585  * the old cluster.
586  */
588 
590 
592 
593 
594  /*
595  * Check for various failure cases
596  */
601 
603  {
604  /*
605  * Logical replication slots can be migrated since PG17. See comments
606  * atop get_old_cluster_logical_slot_infos().
607  */
609 
610  /*
611  * Subscriptions and their dependencies can be migrated since PG17.
612  * See comments atop get_db_subscription_count().
613  */
615  }
616 
618 
619  /*
620  * PG 14 changed the function signature of encoding conversion functions.
621  * Conversions from older versions cannot be upgraded automatically
622  * because the user-defined functions used by the encoding conversions
623  * need to be changed to match the new signature.
624  */
627 
628  /*
629  * Pre-PG 14 allowed user defined postfix operators, which are not
630  * supported anymore. Verify there are none, iff applicable.
631  */
634 
635  /*
636  * PG 14 changed polymorphic functions from anyarray to
637  * anycompatiblearray.
638  */
641 
642  /*
643  * Pre-PG 12 allowed tables to be declared WITH OIDS, which is not
644  * supported anymore. Verify there are none, iff applicable.
645  */
648 
649  /*
650  * Pre-PG 10 allowed tables with 'unknown' type columns and non WAL logged
651  * hash indexes
652  */
654  {
655  if (user_opts.check)
657  }
658 
659  /* 9.5 and below should not have roles starting with pg_ */
662 
663  /*
664  * While not a check option, we do this now because this is the only time
665  * the old server is running.
666  */
667  if (!user_opts.check)
669 
670  if (!live_check)
671  stop_postmaster(false);
672 }
673 
674 
675 void
677 {
679 
681 
683 
684  switch (user_opts.transfer_mode)
685  {
686  case TRANSFER_MODE_CLONE:
688  break;
689  case TRANSFER_MODE_COPY:
690  break;
693  break;
694  case TRANSFER_MODE_LINK:
695  check_hard_link();
696  break;
697  }
698 
700 
702 
704 
706 
708 }
709 
710 
711 void
713 {
714  if (user_opts.check)
715  {
716  pg_log(PG_REPORT, "\n*Clusters are compatible*");
717  /* stops new cluster */
718  stop_postmaster(false);
719 
721  exit(0);
722  }
723 
724  pg_log(PG_REPORT, "\n"
725  "If pg_upgrade fails after this point, you must re-initdb the\n"
726  "new cluster before continuing.");
727 }
728 
729 
730 void
732 {
733  /*
734  * We unconditionally start/stop the new server because pg_resetwal -o set
735  * wal_level to 'minimum'. If the user is upgrading standby servers using
736  * the rsync instructions, they will need pg_upgrade to write its final
737  * WAL record showing wal_level as 'replica'.
738  */
740 
741  /* Reindex hash indexes for old < 10.0 */
744 
746 
747  stop_postmaster(false);
748 }
749 
750 
751 void
752 output_completion_banner(char *deletion_script_file_name)
753 {
754  PQExpBufferData user_specification;
755 
756  initPQExpBuffer(&user_specification);
758  {
759  appendPQExpBufferStr(&user_specification, "-U ");
760  appendShellString(&user_specification, os_info.user);
761  appendPQExpBufferChar(&user_specification, ' ');
762  }
763 
765  "Optimizer statistics are not transferred by pg_upgrade.\n"
766  "Once you start the new server, consider running:\n"
767  " %s/vacuumdb %s--all --analyze-in-stages", new_cluster.bindir, user_specification.data);
768 
769  if (deletion_script_file_name)
771  "Running this script will delete the old cluster's data files:\n"
772  " %s",
773  deletion_script_file_name);
774  else
776  "Could not create a script to delete the old cluster's data files\n"
777  "because user-defined tablespaces or the new cluster's data directory\n"
778  "exist in the old cluster directory. The old cluster's contents must\n"
779  "be deleted manually.");
780 
781  termPQExpBuffer(&user_specification);
782 }
783 
784 
785 void
787 {
788  prep_status("Checking cluster versions");
789 
790  /* cluster versions should already have been obtained */
793 
794  /*
795  * We allow upgrades from/to the same major version for alpha/beta
796  * upgrades
797  */
798 
800  pg_fatal("This utility can only upgrade from PostgreSQL version %s and later.",
801  "9.2");
802 
803  /* Only current PG version is supported as a target */
805  pg_fatal("This utility can only upgrade to PostgreSQL version %s.",
806  PG_MAJORVERSION);
807 
808  /*
809  * We can't allow downgrading because we use the target pg_dump, and
810  * pg_dump cannot operate on newer database versions, only current and
811  * older versions.
812  */
814  pg_fatal("This utility cannot be used to downgrade to older major PostgreSQL versions.");
815 
816  /* Ensure binaries match the designated data directories */
819  pg_fatal("Old cluster data and binary directories are from different major versions.");
822  pg_fatal("New cluster data and binary directories are from different major versions.");
823 
824  check_ok();
825 }
826 
827 
828 void
830 {
831  /* get/check pg_control data of servers */
832  get_control_data(&old_cluster, live_check);
833  get_control_data(&new_cluster, false);
835 
836  if (live_check && old_cluster.port == new_cluster.port)
837  pg_fatal("When checking a live server, "
838  "the old and new port numbers must be different.");
839 }
840 
841 
842 static void
844 {
845  int dbnum;
846 
847  for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++)
848  {
849  int relnum;
850  RelInfoArr *rel_arr = &new_cluster.dbarr.dbs[dbnum].rel_arr;
851 
852  for (relnum = 0; relnum < rel_arr->nrels;
853  relnum++)
854  {
855  /* pg_largeobject and its index should be skipped */
856  if (strcmp(rel_arr->rels[relnum].nspname, "pg_catalog") != 0)
857  pg_fatal("New cluster database \"%s\" is not empty: found relation \"%s.%s\"",
858  new_cluster.dbarr.dbs[dbnum].db_name,
859  rel_arr->rels[relnum].nspname,
860  rel_arr->rels[relnum].relname);
861  }
862  }
863 }
864 
865 /*
866  * A previous run of pg_upgrade might have failed and the new cluster
867  * directory recreated, but the user might have forgotten to remove
868  * the new cluster's tablespace directories. Therefore, check that
869  * new cluster tablespace directories do not already exist. If
870  * they do, it would cause an error while restoring global objects.
871  * This allows the failure to be detected at check time, rather than
872  * during schema restore.
873  */
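/*
 * The path checked below is the old tablespace location with the new
 * cluster's version-specific suffix appended, i.e. roughly
 * <tablespace_path>/PG_<majorversion>_<catalogversion> (whatever
 * new_cluster.tablespace_suffix actually contains).
 */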
874 static void
876 {
877  int tblnum;
878  char new_tablespace_dir[MAXPGPATH];
879 
880  prep_status("Checking for new cluster tablespace directories");
881 
882  for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
883  {
884  struct stat statbuf;
885 
886  snprintf(new_tablespace_dir, MAXPGPATH, "%s%s",
887  os_info.old_tablespaces[tblnum],
889 
890  if (stat(new_tablespace_dir, &statbuf) == 0 || errno != ENOENT)
891  pg_fatal("new cluster tablespace directory already exists: \"%s\"",
892  new_tablespace_dir);
893  }
894 
895  check_ok();
896 }
897 
898 /*
899  * create_script_for_old_cluster_deletion()
900  *
901  * This is particularly useful for tablespace deletion.
902  */
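/*
 * On non-Windows systems the generated file is a small shell script along
 * these lines (paths purely illustrative):
 *     #!/bin/sh
 *     rm -rf '/path/to/old/pgdata'
 *     rm -rf '/path/to/old/tablespace/PG_<ver>_<catversion>'
 * On Windows the equivalent RMDIR commands are emitted instead.
 */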
903 void
904 create_script_for_old_cluster_deletion(char **deletion_script_file_name)
905 {
906  FILE *script = NULL;
907  int tblnum;
908  char old_cluster_pgdata[MAXPGPATH],
909  new_cluster_pgdata[MAXPGPATH];
910 
911  *deletion_script_file_name = psprintf("%sdelete_old_cluster.%s",
913 
914  strlcpy(old_cluster_pgdata, old_cluster.pgdata, MAXPGPATH);
915  canonicalize_path(old_cluster_pgdata);
916 
917  strlcpy(new_cluster_pgdata, new_cluster.pgdata, MAXPGPATH);
918  canonicalize_path(new_cluster_pgdata);
919 
920  /* Some people put the new data directory inside the old one. */
921  if (path_is_prefix_of_path(old_cluster_pgdata, new_cluster_pgdata))
922  {
924  "\nWARNING: new data directory should not be inside the old data directory, i.e. %s", old_cluster_pgdata);
925 
926  /* Unlink file in case it is left over from a previous run. */
927  unlink(*deletion_script_file_name);
928  pg_free(*deletion_script_file_name);
929  *deletion_script_file_name = NULL;
930  return;
931  }
932 
933  /*
934  * Some users (oddly) create tablespaces inside the cluster data
935  * directory. We can't create a proper old cluster delete script in that
936  * case.
937  */
938  for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
939  {
940  char old_tablespace_dir[MAXPGPATH];
941 
942  strlcpy(old_tablespace_dir, os_info.old_tablespaces[tblnum], MAXPGPATH);
943  canonicalize_path(old_tablespace_dir);
944  if (path_is_prefix_of_path(old_cluster_pgdata, old_tablespace_dir))
945  {
946  /* reproduce warning from CREATE TABLESPACE that is in the log */
948  "\nWARNING: user-defined tablespace locations should not be inside the data directory, i.e. %s", old_tablespace_dir);
949 
950  /* Unlink file in case it is left over from a previous run. */
951  unlink(*deletion_script_file_name);
952  pg_free(*deletion_script_file_name);
953  *deletion_script_file_name = NULL;
954  return;
955  }
956  }
957 
958  prep_status("Creating script to delete old cluster");
959 
960  if ((script = fopen_priv(*deletion_script_file_name, "w")) == NULL)
961  pg_fatal("could not open file \"%s\": %m",
962  *deletion_script_file_name);
963 
964 #ifndef WIN32
965  /* add shebang header */
966  fprintf(script, "#!/bin/sh\n\n");
967 #endif
968 
969  /* delete old cluster's default tablespace */
970  fprintf(script, RMDIR_CMD " %c%s%c\n", PATH_QUOTE,
972 
973  /* delete old cluster's alternate tablespaces */
974  for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
975  {
976  /*
977  * Do the old cluster's per-database directories share a directory
978  * with a new version-specific tablespace?
979  */
980  if (strlen(old_cluster.tablespace_suffix) == 0)
981  {
982  /* delete per-database directories */
983  int dbnum;
984 
985  fprintf(script, "\n");
986 
987  for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
988  fprintf(script, RMDIR_CMD " %c%s%c%u%c\n", PATH_QUOTE,
991  PATH_QUOTE);
992  }
993  else
994  {
995  char *suffix_path = pg_strdup(old_cluster.tablespace_suffix);
996 
997  /*
998  * Simply delete the tablespace directory, which might be ".old"
999  * or a version-specific subdirectory.
1000  */
1001  fprintf(script, RMDIR_CMD " %c%s%s%c\n", PATH_QUOTE,
1003  fix_path_separator(suffix_path), PATH_QUOTE);
1004  pfree(suffix_path);
1005  }
1006  }
1007 
1008  fclose(script);
1009 
1010 #ifndef WIN32
1011  if (chmod(*deletion_script_file_name, S_IRWXU) != 0)
1012  pg_fatal("could not add execute permission to file \"%s\": %m",
1013  *deletion_script_file_name);
1014 #endif
1015 
1016  check_ok();
1017 }
1018 
1019 
1020 /*
1021  * check_is_install_user()
1022  *
1023  * Check we are the install user, and that the new cluster
1024  * has no other users.
1025  */
1026 static void
1028 {
1029  PGresult *res;
1030  PGconn *conn = connectToServer(cluster, "template1");
1031 
1032  prep_status("Checking database user is the install user");
1033 
1034  /* Can't use pg_authid because only superusers can view it. */
1036  "SELECT rolsuper, oid "
1037  "FROM pg_catalog.pg_roles "
1038  "WHERE rolname = current_user "
1039  "AND rolname !~ '^pg_'");
1040 
1041  /*
1042  * We only allow the install user in the new cluster (see comment below)
1043  * and we preserve pg_authid.oid, so this must be the install user in the
1044  * old cluster too.
1045  */
1046  if (PQntuples(res) != 1 ||
1047  atooid(PQgetvalue(res, 0, 1)) != BOOTSTRAP_SUPERUSERID)
1048  pg_fatal("database user \"%s\" is not the install user",
1049  os_info.user);
1050 
1051  PQclear(res);
1052 
1054  "SELECT COUNT(*) "
1055  "FROM pg_catalog.pg_roles "
1056  "WHERE rolname !~ '^pg_'");
1057 
1058  if (PQntuples(res) != 1)
1059  pg_fatal("could not determine the number of users");
1060 
1061  /*
1062  * We only allow the install user in the new cluster because other defined
1063  * users might match users defined in the old cluster and generate an
1064  * error during pg_dump restore.
1065  */
1066  if (cluster == &new_cluster && strcmp(PQgetvalue(res, 0, 0), "1") != 0)
1067  pg_fatal("Only the install user can be defined in the new cluster.");
1068 
1069  PQclear(res);
1070 
1071  PQfinish(conn);
1072 
1073  check_ok();
1074 }
1075 
1076 
1077 /*
1078  * check_proper_datallowconn
1079  *
1080  * Ensure that all non-template0 databases allow connections since they
1081  * otherwise won't be restored; and that template0 explicitly doesn't allow
1082  * connections since it would make pg_dumpall --globals restore fail.
1083  */
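/*
 * For example (hypothetical database name), a flagged database can usually
 * be fixed with
 *     ALTER DATABASE mydb WITH ALLOW_CONNECTIONS true;
 * and a misconfigured template0 the other way, with ALLOW_CONNECTIONS false.
 */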
1084 static void
1086 {
1087  int dbnum;
1088  PGconn *conn_template1;
1089  PGresult *dbres;
1090  int ntups;
1091  int i_datname;
1092  int i_datallowconn;
1093  FILE *script = NULL;
1094  char output_path[MAXPGPATH];
1095 
1096  prep_status("Checking database connection settings");
1097 
1098  snprintf(output_path, sizeof(output_path), "%s/%s",
1099  log_opts.basedir,
1100  "databases_with_datallowconn_false.txt");
1101 
1102  conn_template1 = connectToServer(cluster, "template1");
1103 
1104  /* get database names */
1105  dbres = executeQueryOrDie(conn_template1,
1106  "SELECT datname, datallowconn "
1107  "FROM pg_catalog.pg_database");
1108 
1109  i_datname = PQfnumber(dbres, "datname");
1110  i_datallowconn = PQfnumber(dbres, "datallowconn");
1111 
1112  ntups = PQntuples(dbres);
1113  for (dbnum = 0; dbnum < ntups; dbnum++)
1114  {
1115  char *datname = PQgetvalue(dbres, dbnum, i_datname);
1116  char *datallowconn = PQgetvalue(dbres, dbnum, i_datallowconn);
1117 
1118  if (strcmp(datname, "template0") == 0)
1119  {
1120  /* avoid restore failure when pg_dumpall tries to create template0 */
1121  if (strcmp(datallowconn, "t") == 0)
1122  pg_fatal("template0 must not allow connections, "
1123  "i.e. its pg_database.datallowconn must be false");
1124  }
1125  else
1126  {
1127  /*
1128  * prevent datallowconn == false databases from being skipped on
1129  * restore
1130  */
1131  if (strcmp(datallowconn, "f") == 0)
1132  {
1133  if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
1134  pg_fatal("could not open file \"%s\": %m", output_path);
1135 
1136  fprintf(script, "%s\n", datname);
1137  }
1138  }
1139  }
1140 
1141  PQclear(dbres);
1142 
1143  PQfinish(conn_template1);
1144 
1145  if (script)
1146  {
1147  fclose(script);
1148  pg_log(PG_REPORT, "fatal");
1149  pg_fatal("All non-template0 databases must allow connections, i.e. their\n"
1150  "pg_database.datallowconn must be true. Your installation contains\n"
1151  "non-template0 databases with their pg_database.datallowconn set to\n"
1152  "false. Consider allowing connection for all non-template0 databases\n"
1153  "or drop the databases which do not allow connections. A list of\n"
1154  "databases with the problem is in the file:\n"
1155  " %s", output_path);
1156  }
1157  else
1158  check_ok();
1159 }
1160 
1161 
1162 /*
1163  * check_for_prepared_transactions()
1164  *
1165  * Make sure there are no prepared transactions because the storage format
1166  * might have changed.
1167  */
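/*
 * Any leftover prepared transactions can be listed with
 *     SELECT gid FROM pg_catalog.pg_prepared_xacts;
 * and must be resolved with COMMIT PREPARED or ROLLBACK PREPARED before
 * running the upgrade.
 */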
1168 static void
1170 {
1171  PGresult *res;
1172  PGconn *conn = connectToServer(cluster, "template1");
1173 
1174  prep_status("Checking for prepared transactions");
1175 
1177  "SELECT * "
1178  "FROM pg_catalog.pg_prepared_xacts");
1179 
1180  if (PQntuples(res) != 0)
1181  {
1182  if (cluster == &old_cluster)
1183  pg_fatal("The source cluster contains prepared transactions");
1184  else
1185  pg_fatal("The target cluster contains prepared transactions");
1186  }
1187 
1188  PQclear(res);
1189 
1190  PQfinish(conn);
1191 
1192  check_ok();
1193 }
1194 
1195 
1196 /*
1197  * check_for_isn_and_int8_passing_mismatch()
1198  *
1199  * contrib/isn relies on data type int8, and since 8.4 int8 can be passed
1200  * by value. The schema dump records the CREATE TYPE PASSEDBYVALUE setting, so
1201  * it must match for the old and new servers.
1202  */
1203 static void
1205 {
1206  int dbnum;
1207  FILE *script = NULL;
1208  char output_path[MAXPGPATH];
1209 
1210  prep_status("Checking for contrib/isn with bigint-passing mismatch");
1211 
1214  {
1215  /* no mismatch */
1216  check_ok();
1217  return;
1218  }
1219 
1220  snprintf(output_path, sizeof(output_path), "%s/%s",
1221  log_opts.basedir,
1222  "contrib_isn_and_int8_pass_by_value.txt");
1223 
1224  for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
1225  {
1226  PGresult *res;
1227  bool db_used = false;
1228  int ntups;
1229  int rowno;
1230  int i_nspname,
1231  i_proname;
1232  DbInfo *active_db = &cluster->dbarr.dbs[dbnum];
1233  PGconn *conn = connectToServer(cluster, active_db->db_name);
1234 
1235  /* Find any functions coming from contrib/isn */
1237  "SELECT n.nspname, p.proname "
1238  "FROM pg_catalog.pg_proc p, "
1239  " pg_catalog.pg_namespace n "
1240  "WHERE p.pronamespace = n.oid AND "
1241  " p.probin = '$libdir/isn'");
1242 
1243  ntups = PQntuples(res);
1244  i_nspname = PQfnumber(res, "nspname");
1245  i_proname = PQfnumber(res, "proname");
1246  for (rowno = 0; rowno < ntups; rowno++)
1247  {
1248  if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
1249  pg_fatal("could not open file \"%s\": %m", output_path);
1250  if (!db_used)
1251  {
1252  fprintf(script, "In database: %s\n", active_db->db_name);
1253  db_used = true;
1254  }
1255  fprintf(script, " %s.%s\n",
1256  PQgetvalue(res, rowno, i_nspname),
1257  PQgetvalue(res, rowno, i_proname));
1258  }
1259 
1260  PQclear(res);
1261 
1262  PQfinish(conn);
1263  }
1264 
1265  if (script)
1266  {
1267  fclose(script);
1268  pg_log(PG_REPORT, "fatal");
1269  pg_fatal("Your installation contains \"contrib/isn\" functions which rely on the\n"
1270  "bigint data type. Your old and new clusters pass bigint values\n"
1271  "differently so this cluster cannot currently be upgraded. You can\n"
1272  "manually dump databases in the old cluster that use \"contrib/isn\"\n"
1273  "facilities, drop them, perform the upgrade, and then restore them. A\n"
1274  "list of the problem functions is in the file:\n"
1275  " %s", output_path);
1276  }
1277  else
1278  check_ok();
1279 }
1280 
1281 /*
1282  * Verify that no user defined postfix operators exist.
1283  */
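/*
 * For example (hypothetical operator and function names), a postfix operator
 * created as
 *     CREATE OPERATOR !!! (leftarg = bigint, procedure = myfact);
 * would be reported below and could be dropped with
 *     DROP OPERATOR !!! (bigint, NONE);
 */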
1284 static void
1286 {
1287  int dbnum;
1288  FILE *script = NULL;
1289  char output_path[MAXPGPATH];
1290 
1291  prep_status("Checking for user-defined postfix operators");
1292 
1293  snprintf(output_path, sizeof(output_path), "%s/%s",
1294  log_opts.basedir,
1295  "postfix_ops.txt");
1296 
1297  /* Find any user defined postfix operators */
1298  for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
1299  {
1300  PGresult *res;
1301  bool db_used = false;
1302  int ntups;
1303  int rowno;
1304  int i_oproid,
1305  i_oprnsp,
1306  i_oprname,
1307  i_typnsp,
1308  i_typname;
1309  DbInfo *active_db = &cluster->dbarr.dbs[dbnum];
1310  PGconn *conn = connectToServer(cluster, active_db->db_name);
1311 
1312  /*
1313  * The query below hardcodes FirstNormalObjectId as 16384 rather than
1314  * interpolating that C #define into the query because, if that
1315  * #define is ever changed, the cutoff we want to use is the value
1316  * used by pre-version 14 servers, not that of some future version.
1317  */
1319  "SELECT o.oid AS oproid, "
1320  " n.nspname AS oprnsp, "
1321  " o.oprname, "
1322  " tn.nspname AS typnsp, "
1323  " t.typname "
1324  "FROM pg_catalog.pg_operator o, "
1325  " pg_catalog.pg_namespace n, "
1326  " pg_catalog.pg_type t, "
1327  " pg_catalog.pg_namespace tn "
1328  "WHERE o.oprnamespace = n.oid AND "
1329  " o.oprleft = t.oid AND "
1330  " t.typnamespace = tn.oid AND "
1331  " o.oprright = 0 AND "
1332  " o.oid >= 16384");
1333  ntups = PQntuples(res);
1334  i_oproid = PQfnumber(res, "oproid");
1335  i_oprnsp = PQfnumber(res, "oprnsp");
1336  i_oprname = PQfnumber(res, "oprname");
1337  i_typnsp = PQfnumber(res, "typnsp");
1338  i_typname = PQfnumber(res, "typname");
1339  for (rowno = 0; rowno < ntups; rowno++)
1340  {
1341  if (script == NULL &&
1342  (script = fopen_priv(output_path, "w")) == NULL)
1343  pg_fatal("could not open file \"%s\": %m", output_path);
1344  if (!db_used)
1345  {
1346  fprintf(script, "In database: %s\n", active_db->db_name);
1347  db_used = true;
1348  }
1349  fprintf(script, " (oid=%s) %s.%s (%s.%s, NONE)\n",
1350  PQgetvalue(res, rowno, i_oproid),
1351  PQgetvalue(res, rowno, i_oprnsp),
1352  PQgetvalue(res, rowno, i_oprname),
1353  PQgetvalue(res, rowno, i_typnsp),
1354  PQgetvalue(res, rowno, i_typname));
1355  }
1356 
1357  PQclear(res);
1358 
1359  PQfinish(conn);
1360  }
1361 
1362  if (script)
1363  {
1364  fclose(script);
1365  pg_log(PG_REPORT, "fatal");
1366  pg_fatal("Your installation contains user-defined postfix operators, which are not\n"
1367  "supported anymore. Consider dropping the postfix operators and replacing\n"
1368  "them with prefix operators or function calls.\n"
1369  "A list of user-defined postfix operators is in the file:\n"
1370  " %s", output_path);
1371  }
1372  else
1373  check_ok();
1374 }
1375 
1376 /*
1377  * check_for_incompatible_polymorphics()
1378  *
1379  * Make sure nothing is using old polymorphic functions with
1380  * anyarray/anyelement rather than the new anycompatible variants.
1381  */
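/*
 * A typical problem case (hypothetical aggregate name) is an aggregate
 * declared before the upgrade as
 *     CREATE AGGREGATE array_accum(anyelement)
 *         (sfunc = array_append, stype = anyarray, initcond = '{}');
 * which must be dropped and re-created against the anycompatible variants
 * after the upgrade.
 */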
1382 static void
1384 {
1385  PGresult *res;
1386  FILE *script = NULL;
1387  char output_path[MAXPGPATH];
1388  PQExpBufferData old_polymorphics;
1389 
1390  prep_status("Checking for incompatible polymorphic functions");
1391 
1392  snprintf(output_path, sizeof(output_path), "%s/%s",
1393  log_opts.basedir,
1394  "incompatible_polymorphics.txt");
1395 
1396  /* The set of problematic functions varies a bit in different versions */
1397  initPQExpBuffer(&old_polymorphics);
1398 
1399  appendPQExpBufferStr(&old_polymorphics,
1400  "'array_append(anyarray,anyelement)'"
1401  ", 'array_cat(anyarray,anyarray)'"
1402  ", 'array_prepend(anyelement,anyarray)'");
1403 
1404  if (GET_MAJOR_VERSION(cluster->major_version) >= 903)
1405  appendPQExpBufferStr(&old_polymorphics,
1406  ", 'array_remove(anyarray,anyelement)'"
1407  ", 'array_replace(anyarray,anyelement,anyelement)'");
1408 
1409  if (GET_MAJOR_VERSION(cluster->major_version) >= 905)
1410  appendPQExpBufferStr(&old_polymorphics,
1411  ", 'array_position(anyarray,anyelement)'"
1412  ", 'array_position(anyarray,anyelement,integer)'"
1413  ", 'array_positions(anyarray,anyelement)'"
1414  ", 'width_bucket(anyelement,anyarray)'");
1415 
1416  for (int dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
1417  {
1418  bool db_used = false;
1419  DbInfo *active_db = &cluster->dbarr.dbs[dbnum];
1420  PGconn *conn = connectToServer(cluster, active_db->db_name);
1421  int ntups;
1422  int i_objkind,
1423  i_objname;
1424 
1425  /*
1426  * The query below hardcodes FirstNormalObjectId as 16384 rather than
1427  * interpolating that C #define into the query because, if that
1428  * #define is ever changed, the cutoff we want to use is the value
1429  * used by pre-version 14 servers, not that of some future version.
1430  */
1432  /* Aggregate transition functions */
1433  "SELECT 'aggregate' AS objkind, p.oid::regprocedure::text AS objname "
1434  "FROM pg_proc AS p "
1435  "JOIN pg_aggregate AS a ON a.aggfnoid=p.oid "
1436  "JOIN pg_proc AS transfn ON transfn.oid=a.aggtransfn "
1437  "WHERE p.oid >= 16384 "
1438  "AND a.aggtransfn = ANY(ARRAY[%s]::regprocedure[]) "
1439  "AND a.aggtranstype = ANY(ARRAY['anyarray', 'anyelement']::regtype[]) "
1440 
1441  /* Aggregate final functions */
1442  "UNION ALL "
1443  "SELECT 'aggregate' AS objkind, p.oid::regprocedure::text AS objname "
1444  "FROM pg_proc AS p "
1445  "JOIN pg_aggregate AS a ON a.aggfnoid=p.oid "
1446  "JOIN pg_proc AS finalfn ON finalfn.oid=a.aggfinalfn "
1447  "WHERE p.oid >= 16384 "
1448  "AND a.aggfinalfn = ANY(ARRAY[%s]::regprocedure[]) "
1449  "AND a.aggtranstype = ANY(ARRAY['anyarray', 'anyelement']::regtype[]) "
1450 
1451  /* Operators */
1452  "UNION ALL "
1453  "SELECT 'operator' AS objkind, op.oid::regoperator::text AS objname "
1454  "FROM pg_operator AS op "
1455  "WHERE op.oid >= 16384 "
1456  "AND oprcode = ANY(ARRAY[%s]::regprocedure[]) "
1457  "AND oprleft = ANY(ARRAY['anyarray', 'anyelement']::regtype[]);",
1458  old_polymorphics.data,
1459  old_polymorphics.data,
1460  old_polymorphics.data);
1461 
1462  ntups = PQntuples(res);
1463 
1464  i_objkind = PQfnumber(res, "objkind");
1465  i_objname = PQfnumber(res, "objname");
1466 
1467  for (int rowno = 0; rowno < ntups; rowno++)
1468  {
1469  if (script == NULL &&
1470  (script = fopen_priv(output_path, "w")) == NULL)
1471  pg_fatal("could not open file \"%s\": %m", output_path);
1472  if (!db_used)
1473  {
1474  fprintf(script, "In database: %s\n", active_db->db_name);
1475  db_used = true;
1476  }
1477 
1478  fprintf(script, " %s: %s\n",
1479  PQgetvalue(res, rowno, i_objkind),
1480  PQgetvalue(res, rowno, i_objname));
1481  }
1482 
1483  PQclear(res);
1484  PQfinish(conn);
1485  }
1486 
1487  if (script)
1488  {
1489  fclose(script);
1490  pg_log(PG_REPORT, "fatal");
1491  pg_fatal("Your installation contains user-defined objects that refer to internal\n"
1492  "polymorphic functions with arguments of type \"anyarray\" or \"anyelement\".\n"
1493  "These user-defined objects must be dropped before upgrading and restored\n"
1494  "afterwards, changing them to refer to the new corresponding functions with\n"
1495  "arguments of type \"anycompatiblearray\" and \"anycompatible\".\n"
1496  "A list of the problematic objects is in the file:\n"
1497  " %s", output_path);
1498  }
1499  else
1500  check_ok();
1501 
1502  termPQExpBuffer(&old_polymorphics);
1503 }
1504 
1505 /*
1506  * Verify that no tables are declared WITH OIDS.
1507  */
1508 static void
1510 {
1511  int dbnum;
1512  FILE *script = NULL;
1513  char output_path[MAXPGPATH];
1514 
1515  prep_status("Checking for tables WITH OIDS");
1516 
1517  snprintf(output_path, sizeof(output_path), "%s/%s",
1518  log_opts.basedir,
1519  "tables_with_oids.txt");
1520 
1521  /* Find any tables declared WITH OIDS */
1522  for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
1523  {
1524  PGresult *res;
1525  bool db_used = false;
1526  int ntups;
1527  int rowno;
1528  int i_nspname,
1529  i_relname;
1530  DbInfo *active_db = &cluster->dbarr.dbs[dbnum];
1531  PGconn *conn = connectToServer(cluster, active_db->db_name);
1532 
1534  "SELECT n.nspname, c.relname "
1535  "FROM pg_catalog.pg_class c, "
1536  " pg_catalog.pg_namespace n "
1537  "WHERE c.relnamespace = n.oid AND "
1538  " c.relhasoids AND"
1539  " n.nspname NOT IN ('pg_catalog')");
1540 
1541  ntups = PQntuples(res);
1542  i_nspname = PQfnumber(res, "nspname");
1543  i_relname = PQfnumber(res, "relname");
1544  for (rowno = 0; rowno < ntups; rowno++)
1545  {
1546  if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
1547  pg_fatal("could not open file \"%s\": %m", output_path);
1548  if (!db_used)
1549  {
1550  fprintf(script, "In database: %s\n", active_db->db_name);
1551  db_used = true;
1552  }
1553  fprintf(script, " %s.%s\n",
1554  PQgetvalue(res, rowno, i_nspname),
1555  PQgetvalue(res, rowno, i_relname));
1556  }
1557 
1558  PQclear(res);
1559 
1560  PQfinish(conn);
1561  }
1562 
1563  if (script)
1564  {
1565  fclose(script);
1566  pg_log(PG_REPORT, "fatal");
1567  pg_fatal("Your installation contains tables declared WITH OIDS, which is not\n"
1568  "supported anymore. Consider removing the oid column using\n"
1569  " ALTER TABLE ... SET WITHOUT OIDS;\n"
1570  "A list of tables with the problem is in the file:\n"
1571  " %s", output_path);
1572  }
1573  else
1574  check_ok();
1575 }
1576 
1577 
1578 /*
1579  * check_for_pg_role_prefix()
1580  *
1581  * Versions older than 9.6 should not have any pg_* roles
1582  */
1583 static void
1585 {
1586  PGresult *res;
1587  PGconn *conn = connectToServer(cluster, "template1");
1588  int ntups;
1589  int i_roloid;
1590  int i_rolname;
1591  FILE *script = NULL;
1592  char output_path[MAXPGPATH];
1593 
1594  prep_status("Checking for roles starting with \"pg_\"");
1595 
1596  snprintf(output_path, sizeof(output_path), "%s/%s",
1597  log_opts.basedir,
1598  "pg_role_prefix.txt");
1599 
1601  "SELECT oid AS roloid, rolname "
1602  "FROM pg_catalog.pg_roles "
1603  "WHERE rolname ~ '^pg_'");
1604 
1605  ntups = PQntuples(res);
1606  i_roloid = PQfnumber(res, "roloid");
1607  i_rolname = PQfnumber(res, "rolname");
1608  for (int rowno = 0; rowno < ntups; rowno++)
1609  {
1610  if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
1611  pg_fatal("could not open file \"%s\": %m", output_path);
1612  fprintf(script, "%s (oid=%s)\n",
1613  PQgetvalue(res, rowno, i_rolname),
1614  PQgetvalue(res, rowno, i_roloid));
1615  }
1616 
1617  PQclear(res);
1618 
1619  PQfinish(conn);
1620 
1621  if (script)
1622  {
1623  fclose(script);
1624  pg_log(PG_REPORT, "fatal");
1625  pg_fatal("Your installation contains roles starting with \"pg_\".\n"
1626  "\"pg_\" is a reserved prefix for system roles. The cluster\n"
1627  "cannot be upgraded until these roles are renamed.\n"
1628  "A list of roles starting with \"pg_\" is in the file:\n"
1629  " %s", output_path);
1630  }
1631  else
1632  check_ok();
1633 }
1634 
1635 /*
1636  * Verify that no user-defined encoding conversions exist.
1637  */
1638 static void
1640 {
1641  int dbnum;
1642  FILE *script = NULL;
1643  char output_path[MAXPGPATH];
1644 
1645  prep_status("Checking for user-defined encoding conversions");
1646 
1647  snprintf(output_path, sizeof(output_path), "%s/%s",
1648  log_opts.basedir,
1649  "encoding_conversions.txt");
1650 
1651  /* Find any user defined encoding conversions */
1652  for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
1653  {
1654  PGresult *res;
1655  bool db_used = false;
1656  int ntups;
1657  int rowno;
1658  int i_conoid,
1659  i_conname,
1660  i_nspname;
1661  DbInfo *active_db = &cluster->dbarr.dbs[dbnum];
1662  PGconn *conn = connectToServer(cluster, active_db->db_name);
1663 
1664  /*
1665  * The query below hardcodes FirstNormalObjectId as 16384 rather than
1666  * interpolating that C #define into the query because, if that
1667  * #define is ever changed, the cutoff we want to use is the value
1668  * used by pre-version 14 servers, not that of some future version.
1669  */
1671  "SELECT c.oid as conoid, c.conname, n.nspname "
1672  "FROM pg_catalog.pg_conversion c, "
1673  " pg_catalog.pg_namespace n "
1674  "WHERE c.connamespace = n.oid AND "
1675  " c.oid >= 16384");
1676  ntups = PQntuples(res);
1677  i_conoid = PQfnumber(res, "conoid");
1678  i_conname = PQfnumber(res, "conname");
1679  i_nspname = PQfnumber(res, "nspname");
1680  for (rowno = 0; rowno < ntups; rowno++)
1681  {
1682  if (script == NULL &&
1683  (script = fopen_priv(output_path, "w")) == NULL)
1684  pg_fatal("could not open file \"%s\": %m", output_path);
1685  if (!db_used)
1686  {
1687  fprintf(script, "In database: %s\n", active_db->db_name);
1688  db_used = true;
1689  }
1690  fprintf(script, " (oid=%s) %s.%s\n",
1691  PQgetvalue(res, rowno, i_conoid),
1692  PQgetvalue(res, rowno, i_nspname),
1693  PQgetvalue(res, rowno, i_conname));
1694  }
1695 
1696  PQclear(res);
1697 
1698  PQfinish(conn);
1699  }
1700 
1701  if (script)
1702  {
1703  fclose(script);
1704  pg_log(PG_REPORT, "fatal");
1705  pg_fatal("Your installation contains user-defined encoding conversions.\n"
1706  "The conversion function parameters changed in PostgreSQL version 14\n"
1707  "so this cluster cannot currently be upgraded. You can remove the\n"
1708  "encoding conversions in the old cluster and restart the upgrade.\n"
1709  "A list of user-defined encoding conversions is in the file:\n"
1710  " %s", output_path);
1711  }
1712  else
1713  check_ok();
1714 }
1715 
1716 /*
1717  * check_new_cluster_logical_replication_slots()
1718  *
1719  * Verify that there are no logical replication slots on the new cluster and
1720  * that the parameter settings necessary for creating slots are sufficient.
1721  */
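/*
 * In practice this means the new cluster must be started with settings at
 * least like the following (values illustrative):
 *     wal_level = logical
 *     max_replication_slots = <number of logical slots on the old cluster>
 */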
1722 static void
1724 {
1725  PGresult *res;
1726  PGconn *conn;
1727  int nslots_on_old;
1728  int nslots_on_new;
1729  int max_replication_slots;
1730  char *wal_level;
1731 
1732  /* Logical slots can be migrated since PG17. */
1734  return;
1735 
1736  nslots_on_old = count_old_cluster_logical_slots();
1737 
1738  /* Quick return if there are no logical slots to be migrated. */
1739  if (nslots_on_old == 0)
1740  return;
1741 
1742  conn = connectToServer(&new_cluster, "template1");
1743 
1744  prep_status("Checking for new cluster logical replication slots");
1745 
1746  res = executeQueryOrDie(conn, "SELECT count(*) "
1747  "FROM pg_catalog.pg_replication_slots "
1748  "WHERE slot_type = 'logical' AND "
1749  "temporary IS FALSE;");
1750 
1751  if (PQntuples(res) != 1)
1752  pg_fatal("could not count the number of logical replication slots");
1753 
1754  nslots_on_new = atoi(PQgetvalue(res, 0, 0));
1755 
1756  if (nslots_on_new)
1757  pg_fatal("Expected 0 logical replication slots but found %d.",
1758  nslots_on_new);
1759 
1760  PQclear(res);
1761 
1762  res = executeQueryOrDie(conn, "SELECT setting FROM pg_settings "
1763  "WHERE name IN ('wal_level', 'max_replication_slots') "
1764  "ORDER BY name DESC;");
1765 
1766  if (PQntuples(res) != 2)
1767  pg_fatal("could not determine parameter settings on new cluster");
1768 
1769  wal_level = PQgetvalue(res, 0, 0);
1770 
1771  if (strcmp(wal_level, "logical") != 0)
1772  pg_fatal("wal_level must be \"logical\", but is set to \"%s\"",
1773  wal_level);
1774 
1775  max_replication_slots = atoi(PQgetvalue(res, 1, 0));
1776 
1777  if (nslots_on_old > max_replication_slots)
1778  pg_fatal("max_replication_slots (%d) must be greater than or equal to the number of "
1779  "logical replication slots (%d) on the old cluster",
1780  max_replication_slots, nslots_on_old);
1781 
1782  PQclear(res);
1783  PQfinish(conn);
1784 
1785  check_ok();
1786 }
1787 
1788 /*
1789  * check_new_cluster_subscription_configuration()
1790  *
1791  * Verify that the max_replication_slots configuration specified is enough for
1792  * creating the subscriptions. This is required to create the replication
1793  * origin for each subscription.
1794  */
1795 static void
1797 {
1798  PGresult *res;
1799  PGconn *conn;
1800  int nsubs_on_old;
1801  int max_replication_slots;
1802 
1803  /* Subscriptions and their dependencies can be migrated since PG17. */
1805  return;
1806 
1807  nsubs_on_old = count_old_cluster_subscriptions();
1808 
1809  /* Quick return if there are no subscriptions to be migrated. */
1810  if (nsubs_on_old == 0)
1811  return;
1812 
1813  prep_status("Checking for new cluster configuration for subscriptions");
1814 
1815  conn = connectToServer(&new_cluster, "template1");
1816 
1817  res = executeQueryOrDie(conn, "SELECT setting FROM pg_settings "
1818  "WHERE name = 'max_replication_slots';");
1819 
1820  if (PQntuples(res) != 1)
1821  pg_fatal("could not determine parameter settings on new cluster");
1822 
1823  max_replication_slots = atoi(PQgetvalue(res, 0, 0));
1824  if (nsubs_on_old > max_replication_slots)
1825  pg_fatal("max_replication_slots (%d) must be greater than or equal to the number of "
1826  "subscriptions (%d) on the old cluster",
1827  max_replication_slots, nsubs_on_old);
1828 
1829  PQclear(res);
1830  PQfinish(conn);
1831 
1832  check_ok();
1833 }
1834 
1835 /*
1836  * check_old_cluster_for_valid_slots()
1837  *
1838  * Verify that all the logical slots are valid and have consumed all the WAL
1839  * before shutdown.
1840  */
1841 static void
1843 {
1844  char output_path[MAXPGPATH];
1845  FILE *script = NULL;
1846 
1847  prep_status("Checking for valid logical replication slots");
1848 
1849  snprintf(output_path, sizeof(output_path), "%s/%s",
1850  log_opts.basedir,
1851  "invalid_logical_slots.txt");
1852 
1853  for (int dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
1854  {
1855  LogicalSlotInfoArr *slot_arr = &old_cluster.dbarr.dbs[dbnum].slot_arr;
1856 
1857  for (int slotnum = 0; slotnum < slot_arr->nslots; slotnum++)
1858  {
1859  LogicalSlotInfo *slot = &slot_arr->slots[slotnum];
1860 
1861  /* Is the slot usable? */
1862  if (slot->invalid)
1863  {
1864  if (script == NULL &&
1865  (script = fopen_priv(output_path, "w")) == NULL)
1866  pg_fatal("could not open file \"%s\": %m", output_path);
1867 
1868  fprintf(script, "The slot \"%s\" is invalid\n",
1869  slot->slotname);
1870 
1871  continue;
1872  }
1873 
1874  /*
1875  * Do additional check to ensure that all logical replication
1876  * slots have consumed all the WAL before shutdown.
1877  *
1878  * Note: This can be satisfied only when the old cluster has been
1879  * shut down, so we skip this for live checks.
1880  */
1881  if (!live_check && !slot->caught_up)
1882  {
1883  if (script == NULL &&
1884  (script = fopen_priv(output_path, "w")) == NULL)
1885  pg_fatal("could not open file \"%s\": %m", output_path);
1886 
1887  fprintf(script,
1888  "The slot \"%s\" has not consumed the WAL yet\n",
1889  slot->slotname);
1890  }
1891  }
1892  }
1893 
1894  if (script)
1895  {
1896  fclose(script);
1897 
1898  pg_log(PG_REPORT, "fatal");
1899  pg_fatal("Your installation contains logical replication slots that can't be upgraded.\n"
1900  "You can remove invalid slots and/or consume the pending WAL for other slots,\n"
1901  "and then restart the upgrade.\n"
1902  "A list of the problematic slots is in the file:\n"
1903  " %s", output_path);
1904  }
1905 
1906  check_ok();
1907 }
1908 
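The slot information consulted above is gathered by pg_upgrade when it starts the old server; the query below is only a rough interactive approximation (the actual caught-up test relies on state captured at shutdown and on a server function available only in binary-upgrade mode), but it can help spot obviously problematic slots ahead of time.

-- Invalidated slots show wal_status = 'lost'; a confirmed_flush_lsn well
-- behind the current insert position suggests WAL that has not been consumed.
SELECT slot_name, wal_status, confirmed_flush_lsn,
       pg_current_wal_insert_lsn() AS current_lsn
  FROM pg_catalog.pg_replication_slots
 WHERE slot_type = 'logical' AND temporary IS FALSE;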
1909 /*
1910  * check_old_cluster_subscription_state()
1911  *
1912  * Verify that the replication origin corresponding to each of the
1913  * subscriptions is present and each of the subscribed tables is in
1914  * 'i' (initialize) or 'r' (ready) state.
1915  */
1916 static void
1917 check_old_cluster_subscription_state(void)
1918 {
1919  FILE *script = NULL;
1920  char output_path[MAXPGPATH];
1921  int ntup;
1922 
1923  prep_status("Checking for subscription state");
1924 
1925  snprintf(output_path, sizeof(output_path), "%s/%s",
1926  log_opts.basedir,
1927  "subs_invalid.txt");
1928  for (int dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
1929  {
1930  PGresult *res;
1931  DbInfo *active_db = &old_cluster.dbarr.dbs[dbnum];
1932  PGconn *conn = connectToServer(&old_cluster, active_db->db_name);
1933 
1934  /* We need to check for pg_replication_origin only once. */
1935  if (dbnum == 0)
1936  {
1937  /*
1938  * Check that all the subscriptions have their respective
1939  * replication origin.
1940  */
1941  res = executeQueryOrDie(conn,
1942  "SELECT d.datname, s.subname "
1943  "FROM pg_catalog.pg_subscription s "
1944  "LEFT OUTER JOIN pg_catalog.pg_replication_origin o "
1945  " ON o.roname = 'pg_' || s.oid "
1946  "INNER JOIN pg_catalog.pg_database d "
1947  " ON d.oid = s.subdbid "
1948  "WHERE o.roname IS NULL;");
1949 
1950  ntup = PQntuples(res);
1951  for (int i = 0; i < ntup; i++)
1952  {
1953  if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
1954  pg_fatal("could not open file \"%s\": %m", output_path);
1955  fprintf(script, "The replication origin is missing for database:\"%s\" subscription:\"%s\"\n",
1956  PQgetvalue(res, i, 0),
1957  PQgetvalue(res, i, 1));
1958  }
1959  PQclear(res);
1960  }
1961 
1962  /*
1963  * We don't allow upgrade if there is a risk of dangling slot or
1964  * origin corresponding to initial sync after upgrade.
1965  *
1966  * A slot/origin not created yet refers to the 'i' (initialize) state,
1967  * while 'r' (ready) state refers to a slot/origin created previously
1968  * but already dropped. These states are supported for pg_upgrade. The
1969  * other states listed below are not supported:
1970  *
1971  * a) SUBREL_STATE_DATASYNC: A relation upgraded while in this state
1972  * would retain a replication slot, which could not be dropped by the
1973  * sync worker spawned after the upgrade because the subscription ID
1974  * used for the slot name won't match anymore.
1975  *
1976  * b) SUBREL_STATE_SYNCDONE: A relation upgraded while in this state
1977  * would retain the replication origin when there is a failure in
1978  * tablesync worker immediately after dropping the replication slot in
1979  * the publisher.
1980  *
1981  * c) SUBREL_STATE_FINISHEDCOPY: A tablesync worker spawned to work on
1982  * a relation upgraded while in this state would expect an origin ID
1983  * with the OID of the subscription used before the upgrade, causing
1984  * it to fail.
1985  *
1986  * d) SUBREL_STATE_SYNCWAIT, SUBREL_STATE_CATCHUP and
1987  * SUBREL_STATE_UNKNOWN: These states are not stored in the catalog,
1988  * so we need not allow these states.
1989  */
1990  res = executeQueryOrDie(conn,
1991  "SELECT r.srsubstate, s.subname, n.nspname, c.relname "
1992  "FROM pg_catalog.pg_subscription_rel r "
1993  "LEFT JOIN pg_catalog.pg_subscription s"
1994  " ON r.srsubid = s.oid "
1995  "LEFT JOIN pg_catalog.pg_class c"
1996  " ON r.srrelid = c.oid "
1997  "LEFT JOIN pg_catalog.pg_namespace n"
1998  " ON c.relnamespace = n.oid "
1999  "WHERE r.srsubstate NOT IN ('i', 'r') "
2000  "ORDER BY s.subname");
2001 
2002  ntup = PQntuples(res);
2003  for (int i = 0; i < ntup; i++)
2004  {
2005  if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
2006  pg_fatal("could not open file \"%s\": %m", output_path);
2007 
2008  fprintf(script, "The table sync state \"%s\" is not allowed for database:\"%s\" subscription:\"%s\" schema:\"%s\" relation:\"%s\"\n",
2009  PQgetvalue(res, i, 0),
2010  active_db->db_name,
2011  PQgetvalue(res, i, 1),
2012  PQgetvalue(res, i, 2),
2013  PQgetvalue(res, i, 3));
2014  }
2015 
2016  PQclear(res);
2017  PQfinish(conn);
2018  }
2019 
2020  if (script)
2021  {
2022  fclose(script);
2023  pg_log(PG_REPORT, "fatal");
2024  pg_fatal("Your installation contains subscriptions without origin or having relations not in i (initialize) or r (ready) state.\n"
2025  "You can allow the initial sync to finish for all relations and then restart the upgrade.\n"
2026  "A list of the problematic subscriptions is in the file:\n"
2027  " %s", output_path);
2028  }
2029  else
2030  check_ok();
2031 }
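Either query above can also be run interactively in each database of the old cluster to preview what would be reported. For example, the relation-state query is, in sketch form:

-- Relations whose table sync state would block the upgrade.
SELECT r.srsubstate, s.subname, n.nspname, c.relname
  FROM pg_catalog.pg_subscription_rel r
  LEFT JOIN pg_catalog.pg_subscription s ON r.srsubid = s.oid
  LEFT JOIN pg_catalog.pg_class c ON r.srrelid = c.oid
  LEFT JOIN pg_catalog.pg_namespace n ON c.relnamespace = n.oid
 WHERE r.srsubstate NOT IN ('i', 'r')
 ORDER BY s.subname;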
Definition: xlog.c:131