/*
 * version.c
 *
 * Postgres-version-specific routines
 *
 * Copyright (c) 2010-2021, PostgreSQL Global Development Group
 * src/bin/pg_upgrade/version.c
 */

#include "postgres_fe.h"

#include "catalog/pg_class_d.h"
#include "fe_utils/string_utils.h"
#include "pg_upgrade.h"

/*
 * new_9_0_populate_pg_largeobject_metadata()
 * new >= 9.0, old <= 8.4
 * 9.0 has a new pg_largeobject permission table
 */
void
new_9_0_populate_pg_largeobject_metadata(ClusterInfo *cluster, bool check_mode)
{
	int			dbnum;
	FILE	   *script = NULL;
	bool		found = false;
	char		output_path[MAXPGPATH];

	prep_status("Checking for large objects");

	snprintf(output_path, sizeof(output_path), "pg_largeobject.sql");

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		int			i_count;
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);

		/* find if there are any large objects */
		res = executeQueryOrDie(conn,
								"SELECT count(*) "
								"FROM pg_catalog.pg_largeobject ");

		i_count = PQfnumber(res, "count");
		if (atoi(PQgetvalue(res, 0, i_count)) != 0)
		{
			found = true;
			if (!check_mode)
			{
				PQExpBufferData connectbuf;

				if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
					pg_fatal("could not open file \"%s\": %s\n", output_path,
							 strerror(errno));

				initPQExpBuffer(&connectbuf);
				appendPsqlMetaConnect(&connectbuf, active_db->db_name);
				fputs(connectbuf.data, script);
				termPQExpBuffer(&connectbuf);

				fprintf(script,
						"SELECT pg_catalog.lo_create(t.loid)\n"
						"FROM (SELECT DISTINCT loid FROM pg_catalog.pg_largeobject) AS t;\n");
			}
		}

		PQclear(res);
		PQfinish(conn);
	}

	if (script)
		fclose(script);

	if (found)
	{
		report_status(PG_WARNING, "warning");
		if (check_mode)
			pg_log(PG_WARNING, "\n"
				   "Your installation contains large objects. The new database has an\n"
				   "additional large object permission table. After upgrading, you will be\n"
				   "given a command to populate the pg_largeobject_metadata table with\n"
				   "default permissions.\n\n");
		else
			pg_log(PG_WARNING, "\n"
				   "Your installation contains large objects. The new database has an\n"
				   "additional large object permission table, so default permissions must be\n"
				   "defined for all large objects. The file\n"
				   " %s\n"
				   "when executed by psql by the database superuser will set the default\n"
				   "permissions.\n\n",
				   output_path);
	}
	else
		check_ok();
}
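
/*
 * Illustrative sketch (not part of the original source): with large objects
 * in a database named, say, "mydb", the generated pg_largeobject.sql would
 * look roughly like
 *
 *		\connect mydb
 *		SELECT pg_catalog.lo_create(t.loid)
 *		FROM (SELECT DISTINCT loid FROM pg_catalog.pg_largeobject) AS t;
 *
 * i.e. one \connect line per affected database, produced by
 * appendPsqlMetaConnect() above, followed by the lo_create() query written
 * by the fprintf() call. The database name here is hypothetical.
 */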


/*
 * check_for_data_types_usage()
 * Detect whether there are any stored columns depending on given type(s)
 *
 * If so, write a report to the given file name, and return true.
 *
 * base_query should be a SELECT yielding a single column named "oid",
 * containing the pg_type OIDs of one or more types that are known to have
 * inconsistent on-disk representations across server versions.
 *
 * We check for the type(s) in tables, matviews, and indexes, but not views;
 * there's no storage involved in a view.
 */
bool
check_for_data_types_usage(ClusterInfo *cluster,
						   const char *base_query,
						   const char *output_path)
{
	bool		found = false;
	FILE	   *script = NULL;
	int			dbnum;

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);
		PQExpBufferData querybuf;
		PGresult   *res;
		bool		db_used = false;
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_relname,
					i_attname;

		/*
		 * The type(s) of interest might be wrapped in a domain, array,
		 * composite, or range, and these container types can be nested (to
		 * varying extents depending on server version, but that's not of
		 * concern here). To handle all these cases we need a recursive CTE.
		 */
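		/*
		 * For instance (illustration only, names are hypothetical): a column
		 * whose declared type is a domain "mydom" over line[] still depends
		 * on pg_catalog.line, and is reported because the CTE below keeps
		 * adding the OIDs of domains, arrays, composites, and ranges built
		 * on anything already selected.
		 */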
		initPQExpBuffer(&querybuf);
		appendPQExpBuffer(&querybuf,
						  "WITH RECURSIVE oids AS ( "
		/* start with the type(s) returned by base_query */
						  " %s "
						  " UNION ALL "
						  " SELECT * FROM ( "
		/* inner WITH because we can only reference the CTE once */
						  " WITH x AS (SELECT oid FROM oids) "
		/* domains on any type selected so far */
						  " SELECT t.oid FROM pg_catalog.pg_type t, x WHERE typbasetype = x.oid AND typtype = 'd' "
						  " UNION ALL "
		/* arrays over any type selected so far */
						  " SELECT t.oid FROM pg_catalog.pg_type t, x WHERE typelem = x.oid AND typtype = 'b' "
						  " UNION ALL "
		/* composite types containing any type selected so far */
						  " SELECT t.oid FROM pg_catalog.pg_type t, pg_catalog.pg_class c, pg_catalog.pg_attribute a, x "
						  " WHERE t.typtype = 'c' AND "
						  " t.oid = c.reltype AND "
						  " c.oid = a.attrelid AND "
						  " NOT a.attisdropped AND "
						  " a.atttypid = x.oid ",
						  base_query);

		/* Ranges were introduced in 9.2 */
		if (GET_MAJOR_VERSION(cluster->major_version) >= 902)
			appendPQExpBufferStr(&querybuf,
								 " UNION ALL "
			/* ranges containing any type selected so far */
								 " SELECT t.oid FROM pg_catalog.pg_type t, pg_catalog.pg_range r, x "
								 " WHERE t.typtype = 'r' AND r.rngtypid = t.oid AND r.rngsubtype = x.oid");

		appendPQExpBufferStr(&querybuf,
							 " ) foo "
							 ") "
		/* now look for stored columns of any such type */
							 "SELECT n.nspname, c.relname, a.attname "
							 "FROM pg_catalog.pg_class c, "
							 " pg_catalog.pg_namespace n, "
							 " pg_catalog.pg_attribute a "
							 "WHERE c.oid = a.attrelid AND "
							 " NOT a.attisdropped AND "
							 " a.atttypid IN (SELECT oid FROM oids) AND "
							 " c.relkind IN ("
							 CppAsString2(RELKIND_RELATION) ", "
							 CppAsString2(RELKIND_MATVIEW) ", "
							 CppAsString2(RELKIND_INDEX) ") AND "
							 " c.relnamespace = n.oid AND "
		/* exclude possible orphaned temp tables */
							 " n.nspname !~ '^pg_temp_' AND "
							 " n.nspname !~ '^pg_toast_temp_' AND "
		/* exclude system catalogs, too */
							 " n.nspname NOT IN ('pg_catalog', 'information_schema')");

		res = executeQueryOrDie(conn, "%s", querybuf.data);

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_relname = PQfnumber(res, "relname");
		i_attname = PQfnumber(res, "attname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			found = true;
			if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
				pg_fatal("could not open file \"%s\": %s\n", output_path,
						 strerror(errno));
			if (!db_used)
			{
				fprintf(script, "In database: %s\n", active_db->db_name);
				db_used = true;
			}
			fprintf(script, " %s.%s.%s\n",
					PQgetvalue(res, rowno, i_nspname),
					PQgetvalue(res, rowno, i_relname),
					PQgetvalue(res, rowno, i_attname));
		}

		PQclear(res);

		termPQExpBuffer(&querybuf);

		PQfinish(conn);
	}

	if (script)
		fclose(script);

	return found;
}
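
/*
 * Illustrative note (not part of the original source): the report written to
 * output_path groups offending columns per database, roughly
 *
 *		In database: mydb
 *		 public.mytable.mycolumn
 *
 * matching the fprintf() calls above; the database, table, and column names
 * shown here are hypothetical.
 */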

/*
 * check_for_data_type_usage()
 * Detect whether there are any stored columns depending on the given type
 *
 * If so, write a report to the given file name, and return true.
 *
 * type_name should be a fully qualified type name. This is just a
 * trivial wrapper around check_for_data_types_usage() to convert a
 * type name into a base query.
 */
bool
check_for_data_type_usage(ClusterInfo *cluster,
						  const char *type_name,
						  const char *output_path)
{
	bool		found;
	char	   *base_query;

	base_query = psprintf("SELECT '%s'::pg_catalog.regtype AS oid",
						  type_name);

	found = check_for_data_types_usage(cluster, base_query, output_path);

	free(base_query);

	return found;
}
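
/*
 * Usage sketch (not part of the original source): the version checks below
 * call this wrapper with a fully qualified type name, for example
 *
 *		check_for_data_type_usage(cluster, "pg_catalog.line", output_path)
 *
 * which builds the base_query
 *
 *		SELECT 'pg_catalog.line'::pg_catalog.regtype AS oid
 *
 * and hands it to check_for_data_types_usage().
 */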


/*
 * old_9_3_check_for_line_data_type_usage()
 * 9.3 -> 9.4
 * Fully implement the 'line' data type in 9.4, which previously returned
 * "not enabled" by default and was only functionally enabled with a
 * compile-time switch; as of 9.4 "line" has a different on-disk
 * representation format.
 */
void
old_9_3_check_for_line_data_type_usage(ClusterInfo *cluster)
{
	char		output_path[MAXPGPATH];

	prep_status("Checking for incompatible \"line\" data type");

	snprintf(output_path, sizeof(output_path), "tables_using_line.txt");

	if (check_for_data_type_usage(cluster, "pg_catalog.line", output_path))
	{
		pg_log(PG_REPORT, "fatal\n");
		pg_fatal("Your installation contains the \"line\" data type in user tables.\n"
				 "This data type changed its internal and input/output format\n"
				 "between your old and new versions so this\n"
				 "cluster cannot currently be upgraded. You can\n"
				 "drop the problem columns and restart the upgrade.\n"
				 "A list of the problem columns is in the file:\n"
				 " %s\n\n", output_path);
	}
	else
		check_ok();
}


/*
 * old_9_6_check_for_unknown_data_type_usage()
 * 9.6 -> 10
 * It's no longer allowed to create tables or views with "unknown"-type
 * columns. We do not complain about views with such columns, because
 * they should get silently converted to "text" columns during the DDL
 * dump and reload; it seems unlikely to be worth making users do that
 * by hand. However, if there's a table with such a column, the DDL
 * reload will fail, so we should pre-detect that rather than failing
 * mid-upgrade. Worse, if there's a matview with such a column, the
 * DDL reload will silently change it to "text" which won't match the
 * on-disk storage (which is like "cstring"). So we *must* reject that.
 */
void
old_9_6_check_for_unknown_data_type_usage(ClusterInfo *cluster)
{
	char		output_path[MAXPGPATH];

	prep_status("Checking for invalid \"unknown\" user columns");

	snprintf(output_path, sizeof(output_path), "tables_using_unknown.txt");

	if (check_for_data_type_usage(cluster, "pg_catalog.unknown", output_path))
	{
		pg_log(PG_REPORT, "fatal\n");
		pg_fatal("Your installation contains the \"unknown\" data type in user tables.\n"
				 "This data type is no longer allowed in tables, so this\n"
				 "cluster cannot currently be upgraded. You can\n"
				 "drop the problem columns and restart the upgrade.\n"
				 "A list of the problem columns is in the file:\n"
				 " %s\n\n", output_path);
	}
	else
		check_ok();
}

/*
 * old_9_6_invalidate_hash_indexes()
 * 9.6 -> 10
 * Hash index binary format has changed from 9.6->10.0
 */
void
old_9_6_invalidate_hash_indexes(ClusterInfo *cluster, bool check_mode)
{
	int			dbnum;
	FILE	   *script = NULL;
	bool		found = false;
	char	   *output_path = "reindex_hash.sql";

	prep_status("Checking for hash indexes");

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		bool		db_used = false;
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_relname;
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);

		/* find hash indexes */
		res = executeQueryOrDie(conn,
								"SELECT n.nspname, c.relname "
								"FROM pg_catalog.pg_class c, "
								" pg_catalog.pg_index i, "
								" pg_catalog.pg_am a, "
								" pg_catalog.pg_namespace n "
								"WHERE i.indexrelid = c.oid AND "
								" c.relam = a.oid AND "
								" c.relnamespace = n.oid AND "
								" a.amname = 'hash'"
			);

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_relname = PQfnumber(res, "relname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			found = true;
			if (!check_mode)
			{
				if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
					pg_fatal("could not open file \"%s\": %s\n", output_path,
							 strerror(errno));
				if (!db_used)
				{
					PQExpBufferData connectbuf;

					initPQExpBuffer(&connectbuf);
					appendPsqlMetaConnect(&connectbuf, active_db->db_name);
					fputs(connectbuf.data, script);
					termPQExpBuffer(&connectbuf);
					db_used = true;
				}
				fprintf(script, "REINDEX INDEX %s.%s;\n",
						quote_identifier(PQgetvalue(res, rowno, i_nspname)),
						quote_identifier(PQgetvalue(res, rowno, i_relname)));
			}
		}

		PQclear(res);

		if (!check_mode && db_used)
		{
			/* mark hash indexes as invalid */
			PQclear(executeQueryOrDie(conn,
									  "UPDATE pg_catalog.pg_index i "
									  "SET indisvalid = false "
									  "FROM pg_catalog.pg_class c, "
									  " pg_catalog.pg_am a, "
									  " pg_catalog.pg_namespace n "
									  "WHERE i.indexrelid = c.oid AND "
									  " c.relam = a.oid AND "
									  " c.relnamespace = n.oid AND "
									  " a.amname = 'hash'"));
		}

		PQfinish(conn);
	}

	if (script)
		fclose(script);

	if (found)
	{
		report_status(PG_WARNING, "warning");
		if (check_mode)
			pg_log(PG_WARNING, "\n"
				   "Your installation contains hash indexes. These indexes have different\n"
				   "internal formats between your old and new clusters, so they must be\n"
				   "reindexed with the REINDEX command. After upgrading, you will be given\n"
				   "REINDEX instructions.\n\n");
		else
			pg_log(PG_WARNING, "\n"
				   "Your installation contains hash indexes. These indexes have different\n"
				   "internal formats between your old and new clusters, so they must be\n"
				   "reindexed with the REINDEX command. The file\n"
				   " %s\n"
				   "when executed by psql by the database superuser will recreate all invalid\n"
				   "indexes; until then, none of these indexes will be used.\n\n",
				   output_path);
	}
	else
		check_ok();
}
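
/*
 * Illustrative sketch (not part of the original source): for a hash index
 * "myhashidx" in schema "public" of database "mydb", the generated
 * reindex_hash.sql would look roughly like
 *
 *		\connect mydb
 *		REINDEX INDEX public.myhashidx;
 *
 * The superuser could run it with, e.g., "psql -f reindex_hash.sql"
 * (connection options depend on the installation); all names above are
 * hypothetical.
 */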

/*
 * old_11_check_for_sql_identifier_data_type_usage()
 * 11 -> 12
 * In 12, the sql_identifier data type was switched from name to varchar,
 * which does affect the storage (name is by-ref, but not varlena). This
 * means user tables using sql_identifier for columns are broken because
 * the on-disk format is different.
 */
void
old_11_check_for_sql_identifier_data_type_usage(ClusterInfo *cluster)
{
	char		output_path[MAXPGPATH];

	prep_status("Checking for invalid \"sql_identifier\" user columns");

	snprintf(output_path, sizeof(output_path), "tables_using_sql_identifier.txt");

	if (check_for_data_type_usage(cluster, "information_schema.sql_identifier",
								  output_path))
	{
		pg_log(PG_REPORT, "fatal\n");
		pg_fatal("Your installation contains the \"sql_identifier\" data type in user tables.\n"
				 "The on-disk format for this data type has changed, so this\n"
				 "cluster cannot currently be upgraded. You can\n"
				 "drop the problem columns and restart the upgrade.\n"
				 "A list of the problem columns is in the file:\n"
				 " %s\n\n", output_path);
	}
	else
		check_ok();
}


/*
 * report_extension_updates()
 * Report extensions that should be updated.
 */
void
report_extension_updates(ClusterInfo *cluster)
{
	int			dbnum;
	FILE	   *script = NULL;
	bool		found = false;
	char	   *output_path = "update_extensions.sql";

	prep_status("Checking for extension updates");

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		bool		db_used = false;
		int			ntups;
		int			rowno;
		int			i_name;
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);

		/* find extensions needing updates */
		res = executeQueryOrDie(conn,
								"SELECT name "
								"FROM pg_available_extensions "
								"WHERE installed_version != default_version"
			);

		ntups = PQntuples(res);
		i_name = PQfnumber(res, "name");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			found = true;

			if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
				pg_fatal("could not open file \"%s\": %s\n", output_path,
						 strerror(errno));
			if (!db_used)
			{
				PQExpBufferData connectbuf;

				initPQExpBuffer(&connectbuf);
				appendPsqlMetaConnect(&connectbuf, active_db->db_name);
				fputs(connectbuf.data, script);
				termPQExpBuffer(&connectbuf);
				db_used = true;
			}
			fprintf(script, "ALTER EXTENSION %s UPDATE;\n",
					quote_identifier(PQgetvalue(res, rowno, i_name)));
		}

		PQclear(res);

		PQfinish(conn);
	}

	if (script)
		fclose(script);

	if (found)
	{
		report_status(PG_REPORT, "notice");
		pg_log(PG_REPORT, "\n"
			   "Your installation contains extensions that should be updated\n"
			   "with the ALTER EXTENSION command. The file\n"
			   " %s\n"
			   "when executed by psql by the database superuser will update\n"
			   "these extensions.\n\n",
			   output_path);
	}
	else
		check_ok();
}
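
/*
 * Illustrative sketch (not part of the original source): if database "mydb"
 * had an extension "myext" whose installed_version lags its default_version,
 * the generated update_extensions.sql would look roughly like
 *
 *		\connect mydb
 *		ALTER EXTENSION myext UPDATE;
 *
 * The database and extension names are hypothetical.
 */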