PostgreSQL Source Code  git master
pg_dump.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * pg_dump.c
4  * pg_dump is a utility for dumping out a postgres database
5  * into a script file.
6  *
7  * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994, Regents of the University of California
9  *
10  * pg_dump will read the system catalogs in a database and dump out a
11  * script that reproduces the schema in terms of SQL that is understood
12  * by PostgreSQL
13  *
14  * Note that pg_dump runs in a transaction-snapshot mode transaction,
15  * so it sees a consistent snapshot of the database including system
16  * catalogs. However, it relies in part on various specialized backend
17  * functions like pg_get_indexdef(), and those things tend to look at
18  * the currently committed state. So it is possible to get 'cache
19  * lookup failed' error if someone performs DDL changes while a dump is
20  * happening. The window for this sort of thing is from the acquisition
21  * of the transaction snapshot to getSchemaData() (when pg_dump acquires
22  * AccessShareLock on every table it intends to dump). It isn't very large,
23  * but it can happen.
24  *
25  * http://archives.postgresql.org/pgsql-bugs/2010-02/msg00187.php
26  *
27  * IDENTIFICATION
28  * src/bin/pg_dump/pg_dump.c
29  *
30  *-------------------------------------------------------------------------
31  */
32 #include "postgres_fe.h"
33 
34 #include <unistd.h>
35 #include <ctype.h>
36 #include <limits.h>
37 #ifdef HAVE_TERMIOS_H
38 #include <termios.h>
39 #endif
40 
41 #include "access/attnum.h"
42 #include "access/sysattr.h"
43 #include "access/transam.h"
44 #include "catalog/pg_aggregate_d.h"
45 #include "catalog/pg_am_d.h"
46 #include "catalog/pg_attribute_d.h"
47 #include "catalog/pg_cast_d.h"
48 #include "catalog/pg_class_d.h"
49 #include "catalog/pg_default_acl_d.h"
50 #include "catalog/pg_largeobject_d.h"
51 #include "catalog/pg_largeobject_metadata_d.h"
52 #include "catalog/pg_proc_d.h"
53 #include "catalog/pg_trigger_d.h"
54 #include "catalog/pg_type_d.h"
55 #include "dumputils.h"
56 #include "fe_utils/connect.h"
57 #include "fe_utils/string_utils.h"
58 #include "getopt_long.h"
59 #include "libpq/libpq-fs.h"
60 #include "parallel.h"
61 #include "pg_backup_db.h"
62 #include "pg_backup_utils.h"
63 #include "pg_dump.h"
64 #include "storage/block.h"
65 
/*
 * CommentItem: one collected COMMENT entry.  The commented object is
 * identified by (classoid, objoid, objsubid); see the collectComments()
 * and findComments() prototypes below for the routines that fill and
 * search arrays of these.
 */
66 typedef struct
67 {
68  const char *descr; /* comment for an object */
69  Oid classoid; /* object class (catalog OID) */
70  Oid objoid; /* object OID */
71  int objsubid; /* subobject (table column #) */
72 } CommentItem;
73 
/*
 * SecLabelItem: one collected SECURITY LABEL entry.  The labeled object is
 * identified by (classoid, objoid, objsubid), together with the provider
 * that owns the label and the label text itself; see collectSecLabels()
 * and findSecLabels() prototypes below.
 */
74 typedef struct
75 {
76  const char *provider; /* label provider of this security label */
77  const char *label; /* security label for an object */
78  Oid classoid; /* object class (catalog OID) */
79  Oid objoid; /* object OID */
80  int objsubid; /* subobject (table column #) */
81 } SecLabelItem;
82 
/*
 * OidOptions: option values consumed by getFormattedTypeName() (see its
 * prototype below).
 *
 * NOTE(review): the enumerator list (original lines 85-87) is missing from
 * this extraction -- restore it from the upstream source before compiling.
 */
83 typedef enum OidOptions
84 {
88 } OidOptions;
89 
90 /* global decls */
91 static bool dosync = true; /* Issue fsync() to make dump durable on disk. */
92 
93 /* subquery used to convert user ID (eg, datdba) to user name */
94 static const char *username_subquery;
95 
96 /*
97  * For 8.0 and earlier servers, pulled from pg_database, for 8.1+ we use
98  * FirstNormalObjectId - 1.
99  */
100 static Oid g_last_builtin_oid; /* value of the last builtin oid */
101 
102 /* The specified names/patterns should match at least one entity */
103 static int strict_names = 0;
104 
105 /*
106  * Object inclusion/exclusion lists
107  *
108  * The string lists record the patterns given by command-line switches,
109  * which we then convert to lists of OIDs of matching objects.
110  */
/*
 * NOTE(review): several declarations are missing from this extraction
 * (original lines 111, 113, 116, 118, 120, 122-123).  Presumably these are
 * the SimpleStringList pattern lists that main() appends to
 * (schema_include_patterns, schema_exclude_patterns, table_include_patterns,
 * table_exclude_patterns, tabledata_exclude_patterns,
 * foreign_servers_include_patterns) and the foreign-server OID list --
 * confirm against the upstream source before compiling.
 */
112 static SimpleOidList schema_include_oids = {NULL, NULL};
114 static SimpleOidList schema_exclude_oids = {NULL, NULL};
115 
117 static SimpleOidList table_include_oids = {NULL, NULL};
119 static SimpleOidList table_exclude_oids = {NULL, NULL};
121 static SimpleOidList tabledata_exclude_oids = {NULL, NULL};
124 
125 static const CatalogId nilCatalogId = {0, 0};
126 
127 /* override for standard extra_float_digits setting */
128 static bool have_extra_float_digits = false;
/*
 * NOTE(review): original line 129 is missing from this extraction --
 * presumably the declaration of extra_float_digits itself, which main()
 * assigns; confirm upstream.
 */
130 
131 /*
132  * The default number of rows per INSERT when
133  * --inserts is specified without --rows-per-insert
134  */
135 #define DUMP_DEFAULT_ROWS_PER_INSERT 1
136 
137 /*
138  * Macro for producing quoted, schema-qualified name of a dumpable object.
139  */
140 #define fmtQualifiedDumpable(obj) \
141  fmtQualifiedId((obj)->dobj.namespace->dobj.name, \
142  (obj)->dobj.name)
143 
144 static void help(const char *progname);
145 static void setup_connection(Archive *AH,
146  const char *dumpencoding, const char *dumpsnapshot,
147  char *use_role);
149 static void expand_schema_name_patterns(Archive *fout,
150  SimpleStringList *patterns,
151  SimpleOidList *oids,
152  bool strict_names);
154  SimpleStringList *patterns,
155  SimpleOidList *oids);
156 static void expand_table_name_patterns(Archive *fout,
157  SimpleStringList *patterns,
158  SimpleOidList *oids,
159  bool strict_names);
160 static NamespaceInfo *findNamespace(Archive *fout, Oid nsoid);
161 static void dumpTableData(Archive *fout, TableDataInfo *tdinfo);
162 static void refreshMatViewData(Archive *fout, TableDataInfo *tdinfo);
163 static void guessConstraintInheritance(TableInfo *tblinfo, int numTables);
164 static void dumpComment(Archive *fout, const char *type, const char *name,
165  const char *namespace, const char *owner,
166  CatalogId catalogId, int subid, DumpId dumpId);
167 static int findComments(Archive *fout, Oid classoid, Oid objoid,
168  CommentItem **items);
169 static int collectComments(Archive *fout, CommentItem **items);
170 static void dumpSecLabel(Archive *fout, const char *type, const char *name,
171  const char *namespace, const char *owner,
172  CatalogId catalogId, int subid, DumpId dumpId);
173 static int findSecLabels(Archive *fout, Oid classoid, Oid objoid,
174  SecLabelItem **items);
175 static int collectSecLabels(Archive *fout, SecLabelItem **items);
176 static void dumpDumpableObject(Archive *fout, DumpableObject *dobj);
177 static void dumpNamespace(Archive *fout, NamespaceInfo *nspinfo);
178 static void dumpExtension(Archive *fout, ExtensionInfo *extinfo);
179 static void dumpType(Archive *fout, TypeInfo *tyinfo);
180 static void dumpBaseType(Archive *fout, TypeInfo *tyinfo);
181 static void dumpEnumType(Archive *fout, TypeInfo *tyinfo);
182 static void dumpRangeType(Archive *fout, TypeInfo *tyinfo);
183 static void dumpUndefinedType(Archive *fout, TypeInfo *tyinfo);
184 static void dumpDomain(Archive *fout, TypeInfo *tyinfo);
185 static void dumpCompositeType(Archive *fout, TypeInfo *tyinfo);
186 static void dumpCompositeTypeColComments(Archive *fout, TypeInfo *tyinfo);
187 static void dumpShellType(Archive *fout, ShellTypeInfo *stinfo);
188 static void dumpProcLang(Archive *fout, ProcLangInfo *plang);
189 static void dumpFunc(Archive *fout, FuncInfo *finfo);
190 static void dumpCast(Archive *fout, CastInfo *cast);
191 static void dumpTransform(Archive *fout, TransformInfo *transform);
192 static void dumpOpr(Archive *fout, OprInfo *oprinfo);
193 static void dumpAccessMethod(Archive *fout, AccessMethodInfo *oprinfo);
194 static void dumpOpclass(Archive *fout, OpclassInfo *opcinfo);
195 static void dumpOpfamily(Archive *fout, OpfamilyInfo *opfinfo);
196 static void dumpCollation(Archive *fout, CollInfo *collinfo);
197 static void dumpConversion(Archive *fout, ConvInfo *convinfo);
198 static void dumpRule(Archive *fout, RuleInfo *rinfo);
199 static void dumpAgg(Archive *fout, AggInfo *agginfo);
200 static void dumpTrigger(Archive *fout, TriggerInfo *tginfo);
201 static void dumpEventTrigger(Archive *fout, EventTriggerInfo *evtinfo);
202 static void dumpTable(Archive *fout, TableInfo *tbinfo);
203 static void dumpTableSchema(Archive *fout, TableInfo *tbinfo);
204 static void dumpAttrDef(Archive *fout, AttrDefInfo *adinfo);
205 static void dumpSequence(Archive *fout, TableInfo *tbinfo);
206 static void dumpSequenceData(Archive *fout, TableDataInfo *tdinfo);
207 static void dumpIndex(Archive *fout, IndxInfo *indxinfo);
208 static void dumpIndexAttach(Archive *fout, IndexAttachInfo *attachinfo);
209 static void dumpStatisticsExt(Archive *fout, StatsExtInfo *statsextinfo);
210 static void dumpConstraint(Archive *fout, ConstraintInfo *coninfo);
211 static void dumpTableConstraintComment(Archive *fout, ConstraintInfo *coninfo);
212 static void dumpTSParser(Archive *fout, TSParserInfo *prsinfo);
213 static void dumpTSDictionary(Archive *fout, TSDictInfo *dictinfo);
214 static void dumpTSTemplate(Archive *fout, TSTemplateInfo *tmplinfo);
215 static void dumpTSConfig(Archive *fout, TSConfigInfo *cfginfo);
216 static void dumpForeignDataWrapper(Archive *fout, FdwInfo *fdwinfo);
217 static void dumpForeignServer(Archive *fout, ForeignServerInfo *srvinfo);
218 static void dumpUserMappings(Archive *fout,
219  const char *servername, const char *namespace,
220  const char *owner, CatalogId catalogId, DumpId dumpId);
221 static void dumpDefaultACL(Archive *fout, DefaultACLInfo *daclinfo);
222 
223 static void dumpACL(Archive *fout, CatalogId objCatId, DumpId objDumpId,
224  const char *type, const char *name, const char *subname,
225  const char *nspname, const char *owner,
226  const char *acls, const char *racls,
227  const char *initacls, const char *initracls);
228 
229 static void getDependencies(Archive *fout);
230 static void BuildArchiveDependencies(Archive *fout);
232  DumpId **dependencies, int *nDeps, int *allocDeps);
233 
235 static void addBoundaryDependencies(DumpableObject **dobjs, int numObjs,
236  DumpableObject *boundaryObjs);
237 
238 static void getDomainConstraints(Archive *fout, TypeInfo *tyinfo);
239 static void getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, char relkind);
240 static void makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo);
241 static void buildMatViewRefreshDependencies(Archive *fout);
242 static void getTableDataFKConstraints(void);
243 static char *format_function_arguments(FuncInfo *finfo, char *funcargs,
244  bool is_agg);
245 static char *format_function_arguments_old(Archive *fout,
246  FuncInfo *finfo, int nallargs,
247  char **allargtypes,
248  char **argmodes,
249  char **argnames);
250 static char *format_function_signature(Archive *fout,
251  FuncInfo *finfo, bool honor_quotes);
252 static char *convertRegProcReference(Archive *fout,
253  const char *proc);
254 static char *getFormattedOperatorName(Archive *fout, const char *oproid);
255 static char *convertTSFunction(Archive *fout, Oid funcOid);
256 static Oid findLastBuiltinOid_V71(Archive *fout);
257 static char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts);
258 static void getBlobs(Archive *fout);
259 static void dumpBlob(Archive *fout, BlobInfo *binfo);
260 static int dumpBlobs(Archive *fout, void *arg);
261 static void dumpPolicy(Archive *fout, PolicyInfo *polinfo);
262 static void dumpPublication(Archive *fout, PublicationInfo *pubinfo);
263 static void dumpPublicationTable(Archive *fout, PublicationRelInfo *pubrinfo);
264 static void dumpSubscription(Archive *fout, SubscriptionInfo *subinfo);
265 static void dumpDatabase(Archive *AH);
266 static void dumpDatabaseConfig(Archive *AH, PQExpBuffer outbuf,
267  const char *dbname, Oid dboid);
268 static void dumpEncoding(Archive *AH);
269 static void dumpStdStrings(Archive *AH);
270 static void dumpSearchPath(Archive *AH);
272  PQExpBuffer upgrade_buffer,
273  Oid pg_type_oid,
274  bool force_array_type);
276  PQExpBuffer upgrade_buffer, Oid pg_rel_oid);
277 static void binary_upgrade_set_pg_class_oids(Archive *fout,
278  PQExpBuffer upgrade_buffer,
279  Oid pg_class_oid, bool is_index);
280 static void binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
281  DumpableObject *dobj,
282  const char *objtype,
283  const char *objname,
284  const char *objnamespace);
285 static const char *getAttrName(int attrnum, TableInfo *tblInfo);
286 static const char *fmtCopyColumnList(const TableInfo *ti, PQExpBuffer buffer);
287 static bool nonemptyReloptions(const char *reloptions);
288 static void appendReloptionsArrayAH(PQExpBuffer buffer, const char *reloptions,
289  const char *prefix, Archive *fout);
290 static char *get_synchronized_snapshot(Archive *fout);
291 static void setupDumpWorker(Archive *AHX);
292 static TableInfo *getRootTableInfo(TableInfo *tbinfo);
293 
294 
/*
 * main() for pg_dump.
 *
 * Parses the command line into a DumpOptions struct, opens the output
 * archive, connects to the source database, collects DumpableObject
 * structs for everything to be dumped, sorts them into a dependency-safe
 * order, creates archive TOC entries for them, and finally writes the
 * output (directly for plain-text format, or inside CloseArchive() for
 * the archive formats).
 *
 * NOTE(review): this listing was extracted from a rendered page and the
 * embedded original line numbers show gaps -- roughly two dozen source
 * lines are missing.  Each gap that affects readability is flagged with a
 * NOTE(review) comment below; restore the missing lines from the upstream
 * source before compiling.
 */
295 int
296 main(int argc, char **argv)
297 {
298  int c;
299  const char *filename = NULL;
300  const char *format = "p";
301  TableInfo *tblinfo;
302  int numTables;
303  DumpableObject **dobjs;
304  int numObjs;
305  DumpableObject *boundaryObjs;
306  int i;
307  int optindex;
308  char *endptr;
309  RestoreOptions *ropt;
310  Archive *fout; /* the script file */
311  bool g_verbose = false;
312  const char *dumpencoding = NULL;
313  const char *dumpsnapshot = NULL;
314  char *use_role = NULL;
315  long rowsPerInsert;
316  int numWorkers = 1;
317  trivalue prompt_password = TRI_DEFAULT;
318  int compressLevel = -1;
319  int plainText = 0;
320  ArchiveFormat archiveFormat = archUnknown;
321  ArchiveMode archiveMode;
322 
323  static DumpOptions dopt;
324 
325  static struct option long_options[] = {
326  {"data-only", no_argument, NULL, 'a'},
327  {"blobs", no_argument, NULL, 'b'},
328  {"no-blobs", no_argument, NULL, 'B'},
329  {"clean", no_argument, NULL, 'c'},
330  {"create", no_argument, NULL, 'C'},
331  {"dbname", required_argument, NULL, 'd'},
332  {"file", required_argument, NULL, 'f'},
333  {"format", required_argument, NULL, 'F'},
334  {"host", required_argument, NULL, 'h'},
 /* NOTE(review): "1" here is the literal value of required_argument */
335  {"jobs", 1, NULL, 'j'},
336  {"no-reconnect", no_argument, NULL, 'R'},
337  {"no-owner", no_argument, NULL, 'O'},
338  {"port", required_argument, NULL, 'p'},
339  {"schema", required_argument, NULL, 'n'},
340  {"exclude-schema", required_argument, NULL, 'N'},
341  {"schema-only", no_argument, NULL, 's'},
342  {"superuser", required_argument, NULL, 'S'},
343  {"table", required_argument, NULL, 't'},
344  {"exclude-table", required_argument, NULL, 'T'},
345  {"no-password", no_argument, NULL, 'w'},
346  {"password", no_argument, NULL, 'W'},
347  {"username", required_argument, NULL, 'U'},
348  {"verbose", no_argument, NULL, 'v'},
349  {"no-privileges", no_argument, NULL, 'x'},
350  {"no-acl", no_argument, NULL, 'x'},
351  {"compress", required_argument, NULL, 'Z'},
352  {"encoding", required_argument, NULL, 'E'},
353  {"help", no_argument, NULL, '?'},
354  {"version", no_argument, NULL, 'V'},
355 
356  /*
357  * the following options don't have an equivalent short option letter
358  */
359  {"attribute-inserts", no_argument, &dopt.column_inserts, 1},
360  {"binary-upgrade", no_argument, &dopt.binary_upgrade, 1},
361  {"column-inserts", no_argument, &dopt.column_inserts, 1},
362  {"disable-dollar-quoting", no_argument, &dopt.disable_dollar_quoting, 1},
363  {"disable-triggers", no_argument, &dopt.disable_triggers, 1},
364  {"enable-row-security", no_argument, &dopt.enable_row_security, 1},
365  {"exclude-table-data", required_argument, NULL, 4},
366  {"extra-float-digits", required_argument, NULL, 8},
367  {"if-exists", no_argument, &dopt.if_exists, 1},
368  {"inserts", no_argument, NULL, 9},
369  {"lock-wait-timeout", required_argument, NULL, 2},
370  {"no-tablespaces", no_argument, &dopt.outputNoTablespaces, 1},
371  {"quote-all-identifiers", no_argument, &quote_all_identifiers, 1},
372  {"load-via-partition-root", no_argument, &dopt.load_via_partition_root, 1},
373  {"role", required_argument, NULL, 3},
374  {"section", required_argument, NULL, 5},
375  {"serializable-deferrable", no_argument, &dopt.serializable_deferrable, 1},
376  {"snapshot", required_argument, NULL, 6},
377  {"strict-names", no_argument, &strict_names, 1},
378  {"use-set-session-authorization", no_argument, &dopt.use_setsessauth, 1},
379  {"no-comments", no_argument, &dopt.no_comments, 1},
380  {"no-publications", no_argument, &dopt.no_publications, 1},
381  {"no-security-labels", no_argument, &dopt.no_security_labels, 1},
382  {"no-synchronized-snapshots", no_argument, &dopt.no_synchronized_snapshots, 1},
383  {"no-unlogged-table-data", no_argument, &dopt.no_unlogged_table_data, 1},
384  {"no-subscriptions", no_argument, &dopt.no_subscriptions, 1},
385  {"no-sync", no_argument, NULL, 7},
386  {"on-conflict-do-nothing", no_argument, &dopt.do_nothing, 1},
387  {"rows-per-insert", required_argument, NULL, 10},
388  {"include-foreign-data", required_argument, NULL, 11},
389 
390  {NULL, 0, NULL, 0}
391  };
392 
393  pg_logging_init(argv[0]);
395  set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_dump"));
396 
397  /*
398  * Initialize what we need for parallel execution, especially for thread
399  * support on Windows.
400  */
 /*
  * NOTE(review): original line 401 is elided here -- presumably the
  * parallel-init call that the comment above describes; confirm upstream.
  */
402 
403  progname = get_progname(argv[0]);
404 
405  if (argc > 1)
406  {
407  if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
408  {
409  help(progname);
410  exit_nicely(0);
411  }
412  if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
413  {
414  puts("pg_dump (PostgreSQL) " PG_VERSION);
415  exit_nicely(0);
416  }
417  }
418 
419  InitDumpOptions(&dopt);
420 
421  while ((c = getopt_long(argc, argv, "abBcCd:E:f:F:h:j:n:N:Op:RsS:t:T:U:vwWxZ:",
422  long_options, &optindex)) != -1)
423  {
424  switch (c)
425  {
426  case 'a': /* Dump data only */
427  dopt.dataOnly = true;
428  break;
429 
430  case 'b': /* Dump blobs */
431  dopt.outputBlobs = true;
432  break;
433 
434  case 'B': /* Don't dump blobs */
435  dopt.dontOutputBlobs = true;
436  break;
437 
438  case 'c': /* clean (i.e., drop) schema prior to create */
439  dopt.outputClean = 1;
440  break;
441 
442  case 'C': /* Create DB */
443  dopt.outputCreateDB = 1;
444  break;
445 
446  case 'd': /* database name */
447  dopt.dbname = pg_strdup(optarg);
448  break;
449 
450  case 'E': /* Dump encoding */
451  dumpencoding = pg_strdup(optarg);
452  break;
453 
454  case 'f':
455  filename = pg_strdup(optarg);
456  break;
457 
458  case 'F':
459  format = pg_strdup(optarg);
460  break;
461 
462  case 'h': /* server host */
463  dopt.pghost = pg_strdup(optarg);
464  break;
465 
466  case 'j': /* number of dump jobs */
467  numWorkers = atoi(optarg);
468  break;
469 
470  case 'n': /* include schema(s) */
471  simple_string_list_append(&schema_include_patterns, optarg);
472  dopt.include_everything = false;
473  break;
474 
475  case 'N': /* exclude schema(s) */
476  simple_string_list_append(&schema_exclude_patterns, optarg);
477  break;
478 
479  case 'O': /* Don't reconnect to match owner */
480  dopt.outputNoOwner = 1;
481  break;
482 
483  case 'p': /* server port */
484  dopt.pgport = pg_strdup(optarg);
485  break;
486 
487  case 'R':
488  /* no-op, still accepted for backwards compatibility */
489  break;
490 
491  case 's': /* dump schema only */
492  dopt.schemaOnly = true;
493  break;
494 
495  case 'S': /* Username for superuser in plain text output */
 /*
  * NOTE(review): original line 496 is elided -- presumably stores
  * optarg as the superuser name (ropt->superuser is later copied
  * from dopt.outputSuperuser); confirm upstream.
  */
497  break;
498 
499  case 't': /* include table(s) */
500  simple_string_list_append(&table_include_patterns, optarg);
501  dopt.include_everything = false;
502  break;
503 
504  case 'T': /* exclude table(s) */
505  simple_string_list_append(&table_exclude_patterns, optarg);
506  break;
507 
508  case 'U':
509  dopt.username = pg_strdup(optarg);
510  break;
511 
512  case 'v': /* verbose */
513  g_verbose = true;
 /* NOTE(review): original line 514 is elided here; confirm upstream. */
515  break;
516 
517  case 'w':
518  prompt_password = TRI_NO;
519  break;
520 
521  case 'W':
522  prompt_password = TRI_YES;
523  break;
524 
525  case 'x': /* skip ACL dump */
526  dopt.aclsSkip = true;
527  break;
528 
529  case 'Z': /* Compression Level */
530  compressLevel = atoi(optarg);
531  if (compressLevel < 0 || compressLevel > 9)
532  {
533  pg_log_error("compression level must be in range 0..9");
534  exit_nicely(1);
535  }
536  break;
537 
538  case 0:
539  /* This covers the long options. */
540  break;
541 
542  case 2: /* lock-wait-timeout */
 /*
  * NOTE(review): original line 543 is elided -- presumably stores
  * optarg into dopt.lockWaitTimeout (copied into ropt later);
  * confirm upstream.
  */
544  break;
545 
546  case 3: /* SET ROLE */
547  use_role = pg_strdup(optarg);
548  break;
549 
550  case 4: /* exclude table(s) data */
551  simple_string_list_append(&tabledata_exclude_patterns, optarg);
552  break;
553 
554  case 5: /* section */
 /*
  * NOTE(review): original line 555 is elided -- presumably parses
  * the section name into dopt.dumpSections; confirm upstream.
  */
556  break;
557 
558  case 6: /* snapshot */
559  dumpsnapshot = pg_strdup(optarg);
560  break;
561 
562  case 7: /* no-sync */
563  dosync = false;
564  break;
565 
566  case 8:
 /*
  * NOTE(review): original line 567 is elided -- presumably sets
  * have_extra_float_digits = true; confirm upstream.
  */
568  extra_float_digits = atoi(optarg);
569  if (extra_float_digits < -15 || extra_float_digits > 3)
570  {
571  pg_log_error("extra_float_digits must be in range -15..3");
572  exit_nicely(1);
573  }
574  break;
575 
576  case 9: /* inserts */
577 
578  /*
579  * dump_inserts also stores --rows-per-insert, careful not to
580  * overwrite that.
581  */
582  if (dopt.dump_inserts == 0)
 /*
  * NOTE(review): original line 583 (the body of this if) is
  * elided -- presumably assigns DUMP_DEFAULT_ROWS_PER_INSERT to
  * dopt.dump_inserts, per the comment above; confirm upstream.
  */
584  break;
585 
586  case 10: /* rows per insert */
587  errno = 0;
588  rowsPerInsert = strtol(optarg, &endptr, 10);
589 
590  if (endptr == optarg || *endptr != '\0' ||
591  rowsPerInsert <= 0 || rowsPerInsert > INT_MAX ||
592  errno == ERANGE)
593  {
594  pg_log_error("rows-per-insert must be in range %d..%d",
595  1, INT_MAX);
596  exit_nicely(1);
597  }
598  dopt.dump_inserts = (int) rowsPerInsert;
599  break;
600 
601  case 11: /* include foreign data */
602  simple_string_list_append(&foreign_servers_include_patterns,
603  optarg);
604  break;
605 
606  default:
607  fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
608  exit_nicely(1);
609  }
610  }
611 
612  /*
613  * Non-option argument specifies database name as long as it wasn't
614  * already specified with -d / --dbname
615  */
616  if (optind < argc && dopt.dbname == NULL)
617  dopt.dbname = argv[optind++];
618 
619  /* Complain if any arguments remain */
620  if (optind < argc)
621  {
622  pg_log_error("too many command-line arguments (first is \"%s\")",
623  argv[optind]);
624  fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
625  progname);
626  exit_nicely(1);
627  }
628 
629  /* --column-inserts implies --inserts */
630  if (dopt.column_inserts && dopt.dump_inserts == 0)
 /*
  * NOTE(review): original line 631 (the body of this if) is elided --
  * presumably assigns DUMP_DEFAULT_ROWS_PER_INSERT to
  * dopt.dump_inserts; confirm upstream.
  */
632 
633  /*
634  * Binary upgrade mode implies dumping sequence data even in schema-only
635  * mode. This is not exposed as a separate option, but kept separate
636  * internally for clarity.
637  */
638  if (dopt.binary_upgrade)
639  dopt.sequence_data = 1;
640 
641  if (dopt.dataOnly && dopt.schemaOnly)
642  {
643  pg_log_error("options -s/--schema-only and -a/--data-only cannot be used together");
644  exit_nicely(1);
645  }
646 
647  if (dopt.schemaOnly && foreign_servers_include_patterns.head != NULL)
648  fatal("options -s/--schema-only and --include-foreign-data cannot be used together");
649 
650  if (numWorkers > 1 && foreign_servers_include_patterns.head != NULL)
651  fatal("option --include-foreign-data is not supported with parallel backup");
652 
653  if (dopt.dataOnly && dopt.outputClean)
654  {
655  pg_log_error("options -c/--clean and -a/--data-only cannot be used together");
656  exit_nicely(1);
657  }
658 
659  if (dopt.if_exists && !dopt.outputClean)
660  fatal("option --if-exists requires option -c/--clean");
661 
662  /*
663  * --inserts are already implied above if --column-inserts or
664  * --rows-per-insert were specified.
665  */
666  if (dopt.do_nothing && dopt.dump_inserts == 0)
667  fatal("option --on-conflict-do-nothing requires option --inserts, --rows-per-insert, or --column-inserts");
668 
669  /* Identify archive format to emit */
670  archiveFormat = parseArchiveFormat(format, &archiveMode);
671 
672  /* archiveFormat specific setup */
673  if (archiveFormat == archNull)
674  plainText = 1;
675 
676  /* Custom and directory formats are compressed by default, others not */
677  if (compressLevel == -1)
678  {
679 #ifdef HAVE_LIBZ
680  if (archiveFormat == archCustom || archiveFormat == archDirectory)
681  compressLevel = Z_DEFAULT_COMPRESSION;
682  else
683 #endif
684  compressLevel = 0;
685  }
686 
687 #ifndef HAVE_LIBZ
688  if (compressLevel != 0)
689  pg_log_warning("requested compression not available in this installation -- archive will be uncompressed");
690  compressLevel = 0;
691 #endif
692 
693  /*
694  * If emitting an archive format, we always want to emit a DATABASE item,
695  * in case --create is specified at pg_restore time.
696  */
697  if (!plainText)
698  dopt.outputCreateDB = 1;
699 
700  /*
701  * On Windows we can only have at most MAXIMUM_WAIT_OBJECTS (= 64 usually)
702  * parallel jobs because that's the maximum limit for the
703  * WaitForMultipleObjects() call.
704  */
705  if (numWorkers <= 0
706 #ifdef WIN32
707  || numWorkers > MAXIMUM_WAIT_OBJECTS
708 #endif
709  )
710  fatal("invalid number of parallel jobs");
711 
712  /* Parallel backup only in the directory archive format so far */
713  if (archiveFormat != archDirectory && numWorkers > 1)
714  fatal("parallel backup only supported by the directory format");
715 
716  /* Open the output file */
717  fout = CreateArchive(filename, archiveFormat, compressLevel, dosync,
718  archiveMode, setupDumpWorker);
719 
720  /* Make dump options accessible right away */
721  SetArchiveOptions(fout, &dopt, NULL);
722 
723  /* Register the cleanup hook */
724  on_exit_close_archive(fout);
725 
726  /* Let the archiver know how noisy to be */
727  fout->verbose = g_verbose;
728 
729 
730  /*
731  * We allow the server to be back to 8.0, and up to any minor release of
732  * our own major version. (See also version check in pg_dumpall.c.)
733  */
734  fout->minRemoteVersion = 80000;
735  fout->maxRemoteVersion = (PG_VERSION_NUM / 100) * 100 + 99;
736 
737  fout->numWorkers = numWorkers;
738 
739  /*
740  * Open the database using the Archiver, so it knows about it. Errors mean
741  * death.
742  */
743  ConnectDatabase(fout, dopt.dbname, dopt.pghost, dopt.pgport, dopt.username, prompt_password);
744  setup_connection(fout, dumpencoding, dumpsnapshot, use_role);
745 
746  /*
747  * Disable security label support if server version < v9.1.x (prevents
748  * access to nonexistent pg_seclabel catalog)
749  */
750  if (fout->remoteVersion < 90100)
751  dopt.no_security_labels = 1;
752 
753  /*
754  * On hot standbys, never try to dump unlogged table data, since it will
755  * just throw an error.
756  */
757  if (fout->isStandby)
758  dopt.no_unlogged_table_data = true;
759 
760  /* Select the appropriate subquery to convert user IDs to names */
761  if (fout->remoteVersion >= 80100)
762  username_subquery = "SELECT rolname FROM pg_catalog.pg_roles WHERE oid =";
763  else
764  username_subquery = "SELECT usename FROM pg_catalog.pg_user WHERE usesysid =";
765 
766  /* check the version for the synchronized snapshots feature */
767  if (numWorkers > 1 && fout->remoteVersion < 90200
768  && !dopt.no_synchronized_snapshots)
769  fatal("Synchronized snapshots are not supported by this server version.\n"
770  "Run with --no-synchronized-snapshots instead if you do not need\n"
771  "synchronized snapshots.");
772 
773  /* check the version when a snapshot is explicitly specified by user */
774  if (dumpsnapshot && fout->remoteVersion < 90200)
775  fatal("Exported snapshots are not supported by this server version.");
776 
777  /*
778  * Find the last built-in OID, if needed (prior to 8.1)
779  *
780  * With 8.1 and above, we can just use FirstNormalObjectId - 1.
781  */
782  if (fout->remoteVersion < 80100)
 /*
  * NOTE(review): original lines 783 and 785 (the two branch bodies
  * assigning g_last_builtin_oid -- presumably findLastBuiltinOid_V71()
  * vs. FirstNormalObjectId - 1, per the comment above) are elided;
  * confirm upstream.
  */
784  else
786 
787  pg_log_info("last built-in OID is %u", g_last_builtin_oid);
788 
789  /* Expand schema selection patterns into OID lists */
790  if (schema_include_patterns.head != NULL)
791  {
792  expand_schema_name_patterns(fout, &schema_include_patterns,
793  &schema_include_oids,
794  strict_names);
795  if (schema_include_oids.head == NULL)
796  fatal("no matching schemas were found");
797  }
798  expand_schema_name_patterns(fout, &schema_exclude_patterns,
799  &schema_exclude_oids,
800  false);
801  /* non-matching exclusion patterns aren't an error */
802 
803  /* Expand table selection patterns into OID lists */
804  if (table_include_patterns.head != NULL)
805  {
806  expand_table_name_patterns(fout, &table_include_patterns,
807  &table_include_oids,
808  strict_names);
809  if (table_include_oids.head == NULL)
810  fatal("no matching tables were found");
811  }
812  expand_table_name_patterns(fout, &table_exclude_patterns,
813  &table_exclude_oids,
814  false);
815 
816  expand_table_name_patterns(fout, &tabledata_exclude_patterns,
817  &tabledata_exclude_oids,
818  false);
819 
820  expand_foreign_server_name_patterns(fout, &foreign_servers_include_patterns,
821  &foreign_servers_include_oids);
822 
823  /* non-matching exclusion patterns aren't an error */
824 
825  /*
826  * Dumping blobs is the default for dumps where an inclusion switch is not
827  * used (an "include everything" dump). -B can be used to exclude blobs
828  * from those dumps. -b can be used to include blobs even when an
829  * inclusion switch is used.
830  *
831  * -s means "schema only" and blobs are data, not schema, so we never
832  * include blobs when -s is used.
833  */
834  if (dopt.include_everything && !dopt.schemaOnly && !dopt.dontOutputBlobs)
835  dopt.outputBlobs = true;
836 
837  /*
838  * Now scan the database and create DumpableObject structs for all the
839  * objects we intend to dump.
840  */
841  tblinfo = getSchemaData(fout, &numTables);
842 
843  if (fout->remoteVersion < 80400)
844  guessConstraintInheritance(tblinfo, numTables);
845 
846  if (!dopt.schemaOnly)
847  {
848  getTableData(&dopt, tblinfo, numTables, 0);
 /*
  * NOTE(review): original lines 849 and 851 are elided, including the
  * body of the following if -- presumably the FK-constraint ordering
  * call (see getTableDataFKConstraints() prototype above); confirm
  * upstream.
  */
850  if (dopt.dataOnly)
852  }
853 
854  if (dopt.schemaOnly && dopt.sequence_data)
855  getTableData(&dopt, tblinfo, numTables, RELKIND_SEQUENCE);
856 
857  /*
858  * In binary-upgrade mode, we do not have to worry about the actual blob
859  * data or the associated metadata that resides in the pg_largeobject and
860  * pg_largeobject_metadata tables, respectively.
861  *
862  * However, we do need to collect blob information as there may be
863  * comments or other information on blobs that we do need to dump out.
864  */
865  if (dopt.outputBlobs || dopt.binary_upgrade)
866  getBlobs(fout);
867 
868  /*
869  * Collect dependency data to assist in ordering the objects.
870  */
871  getDependencies(fout);
872 
873  /* Lastly, create dummy objects to represent the section boundaries */
874  boundaryObjs = createBoundaryObjects();
875 
876  /* Get pointers to all the known DumpableObjects */
877  getDumpableObjects(&dobjs, &numObjs);
878 
879  /*
880  * Add dummy dependencies to enforce the dump section ordering.
881  */
882  addBoundaryDependencies(dobjs, numObjs, boundaryObjs);
883 
884  /*
885  * Sort the objects into a safe dump order (no forward references).
886  *
887  * We rely on dependency information to help us determine a safe order, so
888  * the initial sort is mostly for cosmetic purposes: we sort by name to
889  * ensure that logically identical schemas will dump identically.
890  */
891  sortDumpableObjectsByTypeName(dobjs, numObjs);
892 
893  sortDumpableObjects(dobjs, numObjs,
894  boundaryObjs[0].dumpId, boundaryObjs[1].dumpId);
895 
896  /*
897  * Create archive TOC entries for all the objects to be dumped, in a safe
898  * order.
899  */
900 
901  /* First the special ENCODING, STDSTRINGS, and SEARCHPATH entries. */
902  dumpEncoding(fout);
903  dumpStdStrings(fout);
904  dumpSearchPath(fout);
905 
906  /* The database items are always next, unless we don't want them at all */
907  if (dopt.outputCreateDB)
908  dumpDatabase(fout);
909 
910  /* Now the rearrangeable objects. */
911  for (i = 0; i < numObjs; i++)
912  dumpDumpableObject(fout, dobjs[i]);
913 
914  /*
915  * Set up options info to ensure we dump what we want.
916  */
917  ropt = NewRestoreOptions();
918  ropt->filename = filename;
919 
920  /* if you change this list, see dumpOptionsFromRestoreOptions */
921  ropt->dropSchema = dopt.outputClean;
922  ropt->dataOnly = dopt.dataOnly;
923  ropt->schemaOnly = dopt.schemaOnly;
924  ropt->if_exists = dopt.if_exists;
925  ropt->column_inserts = dopt.column_inserts;
926  ropt->dumpSections = dopt.dumpSections;
927  ropt->aclsSkip = dopt.aclsSkip;
928  ropt->superuser = dopt.outputSuperuser;
929  ropt->createDB = dopt.outputCreateDB;
930  ropt->noOwner = dopt.outputNoOwner;
931  ropt->noTablespace = dopt.outputNoTablespaces;
932  ropt->disable_triggers = dopt.disable_triggers;
933  ropt->use_setsessauth = dopt.use_setsessauth;
 /* NOTE(review): original line 934 (one ropt assignment) is elided. */
935  ropt->dump_inserts = dopt.dump_inserts;
936  ropt->no_comments = dopt.no_comments;
937  ropt->no_publications = dopt.no_publications;
 /* NOTE(review): original line 938 (one ropt assignment) is elided. */
939  ropt->no_subscriptions = dopt.no_subscriptions;
940  ropt->lockWaitTimeout = dopt.lockWaitTimeout;
 /* NOTE(review): original lines 941-942 are elided here. */
943  ropt->sequence_data = dopt.sequence_data;
944  ropt->binary_upgrade = dopt.binary_upgrade;
945 
946  if (compressLevel == -1)
947  ropt->compression = 0;
948  else
949  ropt->compression = compressLevel;
950 
951  ropt->suppressDumpWarnings = true; /* We've already shown them */
952 
953  SetArchiveOptions(fout, &dopt, ropt);
954 
955  /* Mark which entries should be output */
 /*
  * NOTE(review): original line 956 is elided -- the call that actually
  * marks the TOC entries, per the comment above; confirm upstream.
  */
957 
958  /*
959  * The archive's TOC entries are now marked as to which ones will actually
960  * be output, so we can set up their dependency lists properly. This isn't
961  * necessary for plain-text output, though.
962  */
963  if (!plainText)
 /*
  * NOTE(review): original line 964 (the body of this if) is elided --
  * presumably the dependency-list build (see BuildArchiveDependencies()
  * prototype above); confirm upstream.
  */
965 
966  /*
967  * And finally we can do the actual output.
968  *
969  * Note: for non-plain-text output formats, the output file is written
970  * inside CloseArchive(). This is, um, bizarre; but not worth changing
971  * right now.
972  */
973  if (plainText)
974  RestoreArchive(fout);
975 
976  CloseArchive(fout);
977 
978  exit_nicely(0);
979 }
980 
981 
/*
 * help
 *	  Print the pg_dump command-line usage message to stdout.
 *
 * progname is the name the program was invoked under; it is interpolated
 * into the usage examples.  Every string literal is wrapped in _() so it
 * can be replaced through the NLS message catalogs at runtime; do not
 * edit these strings without updating the translations.
 */
static void
help(const char *progname)
{
	printf(_("%s dumps a database as a text file or to other formats.\n\n"), progname);
	printf(_("Usage:\n"));
	printf(_(" %s [OPTION]... [DBNAME]\n"), progname);

	/* General options: output destination, format, parallelism, version/help */
	printf(_("\nGeneral options:\n"));
	printf(_(" -f, --file=FILENAME output file or directory name\n"));
	printf(_(" -F, --format=c|d|t|p output file format (custom, directory, tar,\n"
	" plain text (default))\n"));
	printf(_(" -j, --jobs=NUM use this many parallel jobs to dump\n"));
	printf(_(" -v, --verbose verbose mode\n"));
	printf(_(" -V, --version output version information, then exit\n"));
	printf(_(" -Z, --compress=0-9 compression level for compressed formats\n"));
	printf(_(" --lock-wait-timeout=TIMEOUT fail after waiting TIMEOUT for a table lock\n"));
	printf(_(" --no-sync do not wait for changes to be written safely to disk\n"));
	printf(_(" -?, --help show this help, then exit\n"));

	/* Content-selection options: what objects/data end up in the dump */
	printf(_("\nOptions controlling the output content:\n"));
	printf(_(" -a, --data-only dump only the data, not the schema\n"));
	printf(_(" -b, --blobs include large objects in dump\n"));
	printf(_(" -B, --no-blobs exclude large objects in dump\n"));
	printf(_(" -c, --clean clean (drop) database objects before recreating\n"));
	printf(_(" -C, --create include commands to create database in dump\n"));
	printf(_(" -E, --encoding=ENCODING dump the data in encoding ENCODING\n"));
	printf(_(" -n, --schema=PATTERN dump the specified schema(s) only\n"));
	printf(_(" -N, --exclude-schema=PATTERN do NOT dump the specified schema(s)\n"));
	printf(_(" -O, --no-owner skip restoration of object ownership in\n"
	" plain-text format\n"));
	printf(_(" -s, --schema-only dump only the schema, no data\n"));
	printf(_(" -S, --superuser=NAME superuser user name to use in plain-text format\n"));
	printf(_(" -t, --table=PATTERN dump the specified table(s) only\n"));
	printf(_(" -T, --exclude-table=PATTERN do NOT dump the specified table(s)\n"));
	printf(_(" -x, --no-privileges do not dump privileges (grant/revoke)\n"));
	printf(_(" --binary-upgrade for use by upgrade utilities only\n"));
	printf(_(" --column-inserts dump data as INSERT commands with column names\n"));
	printf(_(" --disable-dollar-quoting disable dollar quoting, use SQL standard quoting\n"));
	printf(_(" --disable-triggers disable triggers during data-only restore\n"));
	printf(_(" --enable-row-security enable row security (dump only content user has\n"
	" access to)\n"));
	printf(_(" --exclude-table-data=PATTERN do NOT dump data for the specified table(s)\n"));
	printf(_(" --extra-float-digits=NUM override default setting for extra_float_digits\n"));
	printf(_(" --if-exists use IF EXISTS when dropping objects\n"));
	printf(_(" --include-foreign-data=PATTERN\n"
	" include data of foreign tables on foreign\n"
	" servers matching PATTERN\n"));
	printf(_(" --inserts dump data as INSERT commands, rather than COPY\n"));
	printf(_(" --load-via-partition-root load partitions via the root table\n"));
	printf(_(" --no-comments do not dump comments\n"));
	printf(_(" --no-publications do not dump publications\n"));
	printf(_(" --no-security-labels do not dump security label assignments\n"));
	printf(_(" --no-subscriptions do not dump subscriptions\n"));
	printf(_(" --no-synchronized-snapshots do not use synchronized snapshots in parallel jobs\n"));
	printf(_(" --no-tablespaces do not dump tablespace assignments\n"));
	printf(_(" --no-unlogged-table-data do not dump unlogged table data\n"));
	printf(_(" --on-conflict-do-nothing add ON CONFLICT DO NOTHING to INSERT commands\n"));
	printf(_(" --quote-all-identifiers quote all identifiers, even if not key words\n"));
	printf(_(" --rows-per-insert=NROWS number of rows per INSERT; implies --inserts\n"));
	printf(_(" --section=SECTION dump named section (pre-data, data, or post-data)\n"));
	printf(_(" --serializable-deferrable wait until the dump can run without anomalies\n"));
	printf(_(" --snapshot=SNAPSHOT use given snapshot for the dump\n"));
	printf(_(" --strict-names require table and/or schema include patterns to\n"
	" match at least one entity each\n"));
	printf(_(" --use-set-session-authorization\n"
	" use SET SESSION AUTHORIZATION commands instead of\n"
	" ALTER OWNER commands to set ownership\n"));

	/* Connection options: how to reach the server being dumped */
	printf(_("\nConnection options:\n"));
	printf(_(" -d, --dbname=DBNAME database to dump\n"));
	printf(_(" -h, --host=HOSTNAME database server host or socket directory\n"));
	printf(_(" -p, --port=PORT database server port number\n"));
	printf(_(" -U, --username=NAME connect as specified database user\n"));
	printf(_(" -w, --no-password never prompt for password\n"));
	printf(_(" -W, --password force password prompt (should happen automatically)\n"));
	printf(_(" --role=ROLENAME do SET ROLE before dump\n"));

	printf(_("\nIf no database name is supplied, then the PGDATABASE environment\n"
	"variable value is used.\n\n"));
	printf(_("Report bugs to <%s>.\n"), PACKAGE_BUGREPORT);
	printf(_("%s home page: <%s>\n"), PACKAGE_NAME, PACKAGE_URL);
}
1064 
1065 static void
1066 setup_connection(Archive *AH, const char *dumpencoding,
1067  const char *dumpsnapshot, char *use_role)
1068 {
1069  DumpOptions *dopt = AH->dopt;
1070  PGconn *conn = GetConnection(AH);
1071  const char *std_strings;
1072 
1074 
1075  /*
1076  * Set the client encoding if requested.
1077  */
1078  if (dumpencoding)
1079  {
1080  if (PQsetClientEncoding(conn, dumpencoding) < 0)
1081  fatal("invalid client encoding \"%s\" specified",
1082  dumpencoding);
1083  }
1084 
1085  /*
1086  * Get the active encoding and the standard_conforming_strings setting, so
1087  * we know how to escape strings.
1088  */
1089  AH->encoding = PQclientEncoding(conn);
1090 
1091  std_strings = PQparameterStatus(conn, "standard_conforming_strings");
1092  AH->std_strings = (std_strings && strcmp(std_strings, "on") == 0);
1093 
1094  /*
1095  * Set the role if requested. In a parallel dump worker, we'll be passed
1096  * use_role == NULL, but AH->use_role is already set (if user specified it
1097  * originally) and we should use that.
1098  */
1099  if (!use_role && AH->use_role)
1100  use_role = AH->use_role;
1101 
1102  /* Set the role if requested */
1103  if (use_role && AH->remoteVersion >= 80100)
1104  {
1105  PQExpBuffer query = createPQExpBuffer();
1106 
1107  appendPQExpBuffer(query, "SET ROLE %s", fmtId(use_role));
1108  ExecuteSqlStatement(AH, query->data);
1109  destroyPQExpBuffer(query);
1110 
1111  /* save it for possible later use by parallel workers */
1112  if (!AH->use_role)
1113  AH->use_role = pg_strdup(use_role);
1114  }
1115 
1116  /* Set the datestyle to ISO to ensure the dump's portability */
1117  ExecuteSqlStatement(AH, "SET DATESTYLE = ISO");
1118 
1119  /* Likewise, avoid using sql_standard intervalstyle */
1120  if (AH->remoteVersion >= 80400)
1121  ExecuteSqlStatement(AH, "SET INTERVALSTYLE = POSTGRES");
1122 
1123  /*
1124  * Use an explicitly specified extra_float_digits if it has been provided.
1125  * Otherwise, set extra_float_digits so that we can dump float data
1126  * exactly (given correctly implemented float I/O code, anyway).
1127  */
1129  {
1131 
1132  appendPQExpBuffer(q, "SET extra_float_digits TO %d",
1134  ExecuteSqlStatement(AH, q->data);
1135  destroyPQExpBuffer(q);
1136  }
1137  else if (AH->remoteVersion >= 90000)
1138  ExecuteSqlStatement(AH, "SET extra_float_digits TO 3");
1139  else
1140  ExecuteSqlStatement(AH, "SET extra_float_digits TO 2");
1141 
1142  /*
1143  * If synchronized scanning is supported, disable it, to prevent
1144  * unpredictable changes in row ordering across a dump and reload.
1145  */
1146  if (AH->remoteVersion >= 80300)
1147  ExecuteSqlStatement(AH, "SET synchronize_seqscans TO off");
1148 
1149  /*
1150  * Disable timeouts if supported.
1151  */
1152  ExecuteSqlStatement(AH, "SET statement_timeout = 0");
1153  if (AH->remoteVersion >= 90300)
1154  ExecuteSqlStatement(AH, "SET lock_timeout = 0");
1155  if (AH->remoteVersion >= 90600)
1156  ExecuteSqlStatement(AH, "SET idle_in_transaction_session_timeout = 0");
1157 
1158  /*
1159  * Quote all identifiers, if requested.
1160  */
1161  if (quote_all_identifiers && AH->remoteVersion >= 90100)
1162  ExecuteSqlStatement(AH, "SET quote_all_identifiers = true");
1163 
1164  /*
1165  * Adjust row-security mode, if supported.
1166  */
1167  if (AH->remoteVersion >= 90500)
1168  {
1169  if (dopt->enable_row_security)
1170  ExecuteSqlStatement(AH, "SET row_security = on");
1171  else
1172  ExecuteSqlStatement(AH, "SET row_security = off");
1173  }
1174 
1175  /*
1176  * Start transaction-snapshot mode transaction to dump consistent data.
1177  */
1178  ExecuteSqlStatement(AH, "BEGIN");
1179  if (AH->remoteVersion >= 90100)
1180  {
1181  /*
1182  * To support the combination of serializable_deferrable with the jobs
1183  * option we use REPEATABLE READ for the worker connections that are
1184  * passed a snapshot. As long as the snapshot is acquired in a
1185  * SERIALIZABLE, READ ONLY, DEFERRABLE transaction, its use within a
1186  * REPEATABLE READ transaction provides the appropriate integrity
1187  * guarantees. This is a kluge, but safe for back-patching.
1188  */
1189  if (dopt->serializable_deferrable && AH->sync_snapshot_id == NULL)
1191  "SET TRANSACTION ISOLATION LEVEL "
1192  "SERIALIZABLE, READ ONLY, DEFERRABLE");
1193  else
1195  "SET TRANSACTION ISOLATION LEVEL "
1196  "REPEATABLE READ, READ ONLY");
1197  }
1198  else
1199  {
1201  "SET TRANSACTION ISOLATION LEVEL "
1202  "SERIALIZABLE, READ ONLY");
1203  }
1204 
1205  /*
1206  * If user specified a snapshot to use, select that. In a parallel dump
1207  * worker, we'll be passed dumpsnapshot == NULL, but AH->sync_snapshot_id
1208  * is already set (if the server can handle it) and we should use that.
1209  */
1210  if (dumpsnapshot)
1211  AH->sync_snapshot_id = pg_strdup(dumpsnapshot);
1212 
1213  if (AH->sync_snapshot_id)
1214  {
1215  PQExpBuffer query = createPQExpBuffer();
1216 
1217  appendPQExpBufferStr(query, "SET TRANSACTION SNAPSHOT ");
1218  appendStringLiteralConn(query, AH->sync_snapshot_id, conn);
1219  ExecuteSqlStatement(AH, query->data);
1220  destroyPQExpBuffer(query);
1221  }
1222  else if (AH->numWorkers > 1 &&
1223  AH->remoteVersion >= 90200 &&
1225  {
1226  if (AH->isStandby && AH->remoteVersion < 100000)
1227  fatal("Synchronized snapshots on standby servers are not supported by this server version.\n"
1228  "Run with --no-synchronized-snapshots instead if you do not need\n"
1229  "synchronized snapshots.");
1230 
1231 
1233  }
1234 }
1235 
1236 /* Set up connection for a parallel worker process */
1237 static void
1239 {
1240  /*
1241  * We want to re-select all the same values the master connection is
1242  * using. We'll have inherited directly-usable values in
1243  * AH->sync_snapshot_id and AH->use_role, but we need to translate the
1244  * inherited encoding value back to a string to pass to setup_connection.
1245  */
1246  setup_connection(AH,
1248  NULL,
1249  NULL);
1250 }
1251 
1252 static char *
1254 {
1255  char *query = "SELECT pg_catalog.pg_export_snapshot()";
1256  char *result;
1257  PGresult *res;
1258 
1259  res = ExecuteSqlQueryForSingleRow(fout, query);
1260  result = pg_strdup(PQgetvalue(res, 0, 0));
1261  PQclear(res);
1262 
1263  return result;
1264 }
1265 
1266 static ArchiveFormat
1268 {
1269  ArchiveFormat archiveFormat;
1270 
1271  *mode = archModeWrite;
1272 
1273  if (pg_strcasecmp(format, "a") == 0 || pg_strcasecmp(format, "append") == 0)
1274  {
1275  /* This is used by pg_dumpall, and is not documented */
1276  archiveFormat = archNull;
1277  *mode = archModeAppend;
1278  }
1279  else if (pg_strcasecmp(format, "c") == 0)
1280  archiveFormat = archCustom;
1281  else if (pg_strcasecmp(format, "custom") == 0)
1282  archiveFormat = archCustom;
1283  else if (pg_strcasecmp(format, "d") == 0)
1284  archiveFormat = archDirectory;
1285  else if (pg_strcasecmp(format, "directory") == 0)
1286  archiveFormat = archDirectory;
1287  else if (pg_strcasecmp(format, "p") == 0)
1288  archiveFormat = archNull;
1289  else if (pg_strcasecmp(format, "plain") == 0)
1290  archiveFormat = archNull;
1291  else if (pg_strcasecmp(format, "t") == 0)
1292  archiveFormat = archTar;
1293  else if (pg_strcasecmp(format, "tar") == 0)
1294  archiveFormat = archTar;
1295  else
1296  fatal("invalid output format \"%s\" specified", format);
1297  return archiveFormat;
1298 }
1299 
1300 /*
1301  * Find the OIDs of all schemas matching the given list of patterns,
1302  * and append them to the given OID list.
1303  */
1304 static void
1306  SimpleStringList *patterns,
1307  SimpleOidList *oids,
1308  bool strict_names)
1309 {
1310  PQExpBuffer query;
1311  PGresult *res;
1312  SimpleStringListCell *cell;
1313  int i;
1314 
1315  if (patterns->head == NULL)
1316  return; /* nothing to do */
1317 
1318  query = createPQExpBuffer();
1319 
1320  /*
1321  * The loop below runs multiple SELECTs might sometimes result in
1322  * duplicate entries in the OID list, but we don't care.
1323  */
1324 
1325  for (cell = patterns->head; cell; cell = cell->next)
1326  {
1327  appendPQExpBufferStr(query,
1328  "SELECT oid FROM pg_catalog.pg_namespace n\n");
1329  processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1330  false, NULL, "n.nspname", NULL, NULL);
1331 
1332  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1333  if (strict_names && PQntuples(res) == 0)
1334  fatal("no matching schemas were found for pattern \"%s\"", cell->val);
1335 
1336  for (i = 0; i < PQntuples(res); i++)
1337  {
1338  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1339  }
1340 
1341  PQclear(res);
1342  resetPQExpBuffer(query);
1343  }
1344 
1345  destroyPQExpBuffer(query);
1346 }
1347 
1348 /*
1349  * Find the OIDs of all foreign servers matching the given list of patterns,
1350  * and append them to the given OID list.
1351  */
1352 static void
1354  SimpleStringList *patterns,
1355  SimpleOidList *oids)
1356 {
1357  PQExpBuffer query;
1358  PGresult *res;
1359  SimpleStringListCell *cell;
1360  int i;
1361 
1362  if (patterns->head == NULL)
1363  return; /* nothing to do */
1364 
1365  query = createPQExpBuffer();
1366 
1367  /*
1368  * The loop below runs multiple SELECTs might sometimes result in
1369  * duplicate entries in the OID list, but we don't care.
1370  */
1371 
1372  for (cell = patterns->head; cell; cell = cell->next)
1373  {
1374  appendPQExpBuffer(query,
1375  "SELECT oid FROM pg_catalog.pg_foreign_server s\n");
1376  processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1377  false, NULL, "s.srvname", NULL, NULL);
1378 
1379  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1380  if (PQntuples(res) == 0)
1381  fatal("no matching foreign servers were found for pattern \"%s\"", cell->val);
1382 
1383  for (i = 0; i < PQntuples(res); i++)
1384  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1385 
1386  PQclear(res);
1387  resetPQExpBuffer(query);
1388  }
1389 
1390  destroyPQExpBuffer(query);
1391 }
1392 
1393 /*
1394  * Find the OIDs of all tables matching the given list of patterns,
1395  * and append them to the given OID list. See also expand_dbname_patterns()
1396  * in pg_dumpall.c
1397  */
1398 static void
1400  SimpleStringList *patterns, SimpleOidList *oids,
1401  bool strict_names)
1402 {
1403  PQExpBuffer query;
1404  PGresult *res;
1405  SimpleStringListCell *cell;
1406  int i;
1407 
1408  if (patterns->head == NULL)
1409  return; /* nothing to do */
1410 
1411  query = createPQExpBuffer();
1412 
1413  /*
1414  * this might sometimes result in duplicate entries in the OID list, but
1415  * we don't care.
1416  */
1417 
1418  for (cell = patterns->head; cell; cell = cell->next)
1419  {
1420  /*
1421  * Query must remain ABSOLUTELY devoid of unqualified names. This
1422  * would be unnecessary given a pg_table_is_visible() variant taking a
1423  * search_path argument.
1424  */
1425  appendPQExpBuffer(query,
1426  "SELECT c.oid"
1427  "\nFROM pg_catalog.pg_class c"
1428  "\n LEFT JOIN pg_catalog.pg_namespace n"
1429  "\n ON n.oid OPERATOR(pg_catalog.=) c.relnamespace"
1430  "\nWHERE c.relkind OPERATOR(pg_catalog.=) ANY"
1431  "\n (array['%c', '%c', '%c', '%c', '%c', '%c'])\n",
1432  RELKIND_RELATION, RELKIND_SEQUENCE, RELKIND_VIEW,
1433  RELKIND_MATVIEW, RELKIND_FOREIGN_TABLE,
1434  RELKIND_PARTITIONED_TABLE);
1435  processSQLNamePattern(GetConnection(fout), query, cell->val, true,
1436  false, "n.nspname", "c.relname", NULL,
1437  "pg_catalog.pg_table_is_visible(c.oid)");
1438 
1439  ExecuteSqlStatement(fout, "RESET search_path");
1440  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1443  if (strict_names && PQntuples(res) == 0)
1444  fatal("no matching tables were found for pattern \"%s\"", cell->val);
1445 
1446  for (i = 0; i < PQntuples(res); i++)
1447  {
1448  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1449  }
1450 
1451  PQclear(res);
1452  resetPQExpBuffer(query);
1453  }
1454 
1455  destroyPQExpBuffer(query);
1456 }
1457 
1458 /*
1459  * checkExtensionMembership
1460  * Determine whether object is an extension member, and if so,
1461  * record an appropriate dependency and set the object's dump flag.
1462  *
1463  * It's important to call this for each object that could be an extension
1464  * member. Generally, we integrate this with determining the object's
1465  * to-be-dumped-ness, since extension membership overrides other rules for that.
1466  *
1467  * Returns true if object is an extension member, else false.
1468  */
1469 static bool
1471 {
1472  ExtensionInfo *ext = findOwningExtension(dobj->catId);
1473 
1474  if (ext == NULL)
1475  return false;
1476 
1477  dobj->ext_member = true;
1478 
1479  /* Record dependency so that getDependencies needn't deal with that */
1480  addObjectDependency(dobj, ext->dobj.dumpId);
1481 
1482  /*
1483  * In 9.6 and above, mark the member object to have any non-initial ACL,
1484  * policies, and security labels dumped.
1485  *
1486  * Note that any initial ACLs (see pg_init_privs) will be removed when we
1487  * extract the information about the object. We don't provide support for
1488  * initial policies and security labels and it seems unlikely for those to
1489  * ever exist, but we may have to revisit this later.
1490  *
1491  * Prior to 9.6, we do not include any extension member components.
1492  *
1493  * In binary upgrades, we still dump all components of the members
1494  * individually, since the idea is to exactly reproduce the database
1495  * contents rather than replace the extension contents with something
1496  * different.
1497  */
1498  if (fout->dopt->binary_upgrade)
1499  dobj->dump = ext->dobj.dump;
1500  else
1501  {
1502  if (fout->remoteVersion < 90600)
1503  dobj->dump = DUMP_COMPONENT_NONE;
1504  else
1505  dobj->dump = ext->dobj.dump_contains & (DUMP_COMPONENT_ACL |
1508  }
1509 
1510  return true;
1511 }
1512 
1513 /*
1514  * selectDumpableNamespace: policy-setting subroutine
1515  * Mark a namespace as to be dumped or not
1516  */
1517 static void
1519 {
1520  /*
1521  * If specific tables are being dumped, do not dump any complete
1522  * namespaces. If specific namespaces are being dumped, dump just those
1523  * namespaces. Otherwise, dump all non-system namespaces.
1524  */
1525  if (table_include_oids.head != NULL)
1526  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1527  else if (schema_include_oids.head != NULL)
1528  nsinfo->dobj.dump_contains = nsinfo->dobj.dump =
1529  simple_oid_list_member(&schema_include_oids,
1530  nsinfo->dobj.catId.oid) ?
1532  else if (fout->remoteVersion >= 90600 &&
1533  strcmp(nsinfo->dobj.name, "pg_catalog") == 0)
1534  {
1535  /*
1536  * In 9.6 and above, we dump out any ACLs defined in pg_catalog, if
1537  * they are interesting (and not the original ACLs which were set at
1538  * initdb time, see pg_init_privs).
1539  */
1540  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
1541  }
1542  else if (strncmp(nsinfo->dobj.name, "pg_", 3) == 0 ||
1543  strcmp(nsinfo->dobj.name, "information_schema") == 0)
1544  {
1545  /* Other system schemas don't get dumped */
1546  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1547  }
1548  else if (strcmp(nsinfo->dobj.name, "public") == 0)
1549  {
1550  /*
1551  * The public schema is a strange beast that sits in a sort of
1552  * no-mans-land between being a system object and a user object. We
1553  * don't want to dump creation or comment commands for it, because
1554  * that complicates matters for non-superuser use of pg_dump. But we
1555  * should dump any ACL changes that have occurred for it, and of
1556  * course we should dump contained objects.
1557  */
1558  nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
1560  }
1561  else
1562  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ALL;
1563 
1564  /*
1565  * In any case, a namespace can be excluded by an exclusion switch
1566  */
1567  if (nsinfo->dobj.dump_contains &&
1568  simple_oid_list_member(&schema_exclude_oids,
1569  nsinfo->dobj.catId.oid))
1570  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1571 
1572  /*
1573  * If the schema belongs to an extension, allow extension membership to
1574  * override the dump decision for the schema itself. However, this does
1575  * not change dump_contains, so this won't change what we do with objects
1576  * within the schema. (If they belong to the extension, they'll get
1577  * suppressed by it, otherwise not.)
1578  */
1579  (void) checkExtensionMembership(&nsinfo->dobj, fout);
1580 }
1581 
1582 /*
1583  * selectDumpableTable: policy-setting subroutine
1584  * Mark a table as to be dumped or not
1585  */
1586 static void
1588 {
1589  if (checkExtensionMembership(&tbinfo->dobj, fout))
1590  return; /* extension membership overrides all else */
1591 
1592  /*
1593  * If specific tables are being dumped, dump just those tables; else, dump
1594  * according to the parent namespace's dump flag.
1595  */
1596  if (table_include_oids.head != NULL)
1597  tbinfo->dobj.dump = simple_oid_list_member(&table_include_oids,
1598  tbinfo->dobj.catId.oid) ?
1600  else
1601  tbinfo->dobj.dump = tbinfo->dobj.namespace->dobj.dump_contains;
1602 
1603  /*
1604  * In any case, a table can be excluded by an exclusion switch
1605  */
1606  if (tbinfo->dobj.dump &&
1607  simple_oid_list_member(&table_exclude_oids,
1608  tbinfo->dobj.catId.oid))
1609  tbinfo->dobj.dump = DUMP_COMPONENT_NONE;
1610 }
1611 
1612 /*
1613  * selectDumpableType: policy-setting subroutine
1614  * Mark a type as to be dumped or not
1615  *
1616  * If it's a table's rowtype or an autogenerated array type, we also apply a
1617  * special type code to facilitate sorting into the desired order. (We don't
1618  * want to consider those to be ordinary types because that would bring tables
1619  * up into the datatype part of the dump order.) We still set the object's
1620  * dump flag; that's not going to cause the dummy type to be dumped, but we
1621  * need it so that casts involving such types will be dumped correctly -- see
1622  * dumpCast. This means the flag should be set the same as for the underlying
1623  * object (the table or base type).
1624  */
1625 static void
1627 {
1628  /* skip complex types, except for standalone composite types */
1629  if (OidIsValid(tyinfo->typrelid) &&
1630  tyinfo->typrelkind != RELKIND_COMPOSITE_TYPE)
1631  {
1632  TableInfo *tytable = findTableByOid(tyinfo->typrelid);
1633 
1634  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1635  if (tytable != NULL)
1636  tyinfo->dobj.dump = tytable->dobj.dump;
1637  else
1638  tyinfo->dobj.dump = DUMP_COMPONENT_NONE;
1639  return;
1640  }
1641 
1642  /* skip auto-generated array types */
1643  if (tyinfo->isArray)
1644  {
1645  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1646 
1647  /*
1648  * Fall through to set the dump flag; we assume that the subsequent
1649  * rules will do the same thing as they would for the array's base
1650  * type. (We cannot reliably look up the base type here, since
1651  * getTypes may not have processed it yet.)
1652  */
1653  }
1654 
1655  if (checkExtensionMembership(&tyinfo->dobj, fout))
1656  return; /* extension membership overrides all else */
1657 
1658  /* Dump based on if the contents of the namespace are being dumped */
1659  tyinfo->dobj.dump = tyinfo->dobj.namespace->dobj.dump_contains;
1660 }
1661 
1662 /*
1663  * selectDumpableDefaultACL: policy-setting subroutine
1664  * Mark a default ACL as to be dumped or not
1665  *
1666  * For per-schema default ACLs, dump if the schema is to be dumped.
1667  * Otherwise dump if we are dumping "everything". Note that dataOnly
1668  * and aclsSkip are checked separately.
1669  */
1670 static void
1672 {
1673  /* Default ACLs can't be extension members */
1674 
1675  if (dinfo->dobj.namespace)
1676  /* default ACLs are considered part of the namespace */
1677  dinfo->dobj.dump = dinfo->dobj.namespace->dobj.dump_contains;
1678  else
1679  dinfo->dobj.dump = dopt->include_everything ?
1681 }
1682 
1683 /*
1684  * selectDumpableCast: policy-setting subroutine
1685  * Mark a cast as to be dumped or not
1686  *
1687  * Casts do not belong to any particular namespace (since they haven't got
1688  * names), nor do they have identifiable owners. To distinguish user-defined
1689  * casts from built-in ones, we must resort to checking whether the cast's
1690  * OID is in the range reserved for initdb.
1691  */
1692 static void
1694 {
1695  if (checkExtensionMembership(&cast->dobj, fout))
1696  return; /* extension membership overrides all else */
1697 
1698  /*
1699  * This would be DUMP_COMPONENT_ACL for from-initdb casts, but they do not
1700  * support ACLs currently.
1701  */
1702  if (cast->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1703  cast->dobj.dump = DUMP_COMPONENT_NONE;
1704  else
1705  cast->dobj.dump = fout->dopt->include_everything ?
1707 }
1708 
1709 /*
1710  * selectDumpableProcLang: policy-setting subroutine
1711  * Mark a procedural language as to be dumped or not
1712  *
1713  * Procedural languages do not belong to any particular namespace. To
1714  * identify built-in languages, we must resort to checking whether the
1715  * language's OID is in the range reserved for initdb.
1716  */
1717 static void
1719 {
1720  if (checkExtensionMembership(&plang->dobj, fout))
1721  return; /* extension membership overrides all else */
1722 
1723  /*
1724  * Only include procedural languages when we are dumping everything.
1725  *
1726  * For from-initdb procedural languages, only include ACLs, as we do for
1727  * the pg_catalog namespace. We need this because procedural languages do
1728  * not live in any namespace.
1729  */
1730  if (!fout->dopt->include_everything)
1731  plang->dobj.dump = DUMP_COMPONENT_NONE;
1732  else
1733  {
1734  if (plang->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1735  plang->dobj.dump = fout->remoteVersion < 90600 ?
1737  else
1738  plang->dobj.dump = DUMP_COMPONENT_ALL;
1739  }
1740 }
1741 
1742 /*
1743  * selectDumpableAccessMethod: policy-setting subroutine
1744  * Mark an access method as to be dumped or not
1745  *
1746  * Access methods do not belong to any particular namespace. To identify
1747  * built-in access methods, we must resort to checking whether the
1748  * method's OID is in the range reserved for initdb.
1749  */
1750 static void
1752 {
1753  if (checkExtensionMembership(&method->dobj, fout))
1754  return; /* extension membership overrides all else */
1755 
1756  /*
1757  * This would be DUMP_COMPONENT_ACL for from-initdb access methods, but
1758  * they do not support ACLs currently.
1759  */
1760  if (method->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1761  method->dobj.dump = DUMP_COMPONENT_NONE;
1762  else
1763  method->dobj.dump = fout->dopt->include_everything ?
1765 }
1766 
1767 /*
1768  * selectDumpableExtension: policy-setting subroutine
1769  * Mark an extension as to be dumped or not
1770  *
1771  * Built-in extensions should be skipped except for checking ACLs, since we
1772  * assume those will already be installed in the target database. We identify
1773  * such extensions by their having OIDs in the range reserved for initdb.
1774  * We dump all user-added extensions by default, or none of them if
1775  * include_everything is false (i.e., a --schema or --table switch was given).
1776  */
1777 static void
1779 {
1780  /*
1781  * Use DUMP_COMPONENT_ACL for built-in extensions, to allow users to
1782  * change permissions on their member objects, if they wish to, and have
1783  * those changes preserved.
1784  */
1785  if (extinfo->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1786  extinfo->dobj.dump = extinfo->dobj.dump_contains = DUMP_COMPONENT_ACL;
1787  else
1788  extinfo->dobj.dump = extinfo->dobj.dump_contains =
1791 }
1792 
1793 /*
1794  * selectDumpablePublicationTable: policy-setting subroutine
1795  * Mark a publication table as to be dumped or not
1796  *
1797  * Publication tables have schemas, but those are ignored in decision making,
1798  * because publications are only dumped when we are dumping everything.
1799  */
1800 static void
1802 {
1803  if (checkExtensionMembership(dobj, fout))
1804  return; /* extension membership overrides all else */
1805 
1806  dobj->dump = fout->dopt->include_everything ?
1808 }
1809 
/*
 * selectDumpableObject: policy-setting subroutine
 *		Mark a generic dumpable object as to be dumped or not
 *
 * Use this only for object types without a special-case routine above.
 */
static void
/* NOTE(review): the declarator line (source line 1817) is missing from this
 * extract; the body references "dobj" and "fout". */
{
	if (checkExtensionMembership(dobj, fout))
		return;					/* extension membership overrides all else */

	/*
	 * Default policy is to dump if parent namespace is dumpable, or for
	 * non-namespace-associated items, dump if we're dumping "everything".
	 */
	if (dobj->namespace)
		dobj->dump = dobj->namespace->dobj.dump_contains;
	else
		/* NOTE(review): the continuation of this conditional expression
		 * (source line 1830) is missing from this extract. */
		dobj->dump = fout->dopt->include_everything ?
}
1832 
1833 /*
1834  * Dump a table's contents for loading using the COPY command
1835  * - this routine is called by the Archiver when it wants the table
1836  * to be dumped.
1837  */
/*
 * Dump a table's contents for loading using the COPY command
 * - this routine is called by the Archiver when it wants the table
 * to be dumped.
 *
 * dcontext is really a TableDataInfo*.  Always returns 1; on any server-side
 * failure this exits via exit_nicely() instead of returning.
 */
static int
dumpTableData_copy(Archive *fout, void *dcontext)
{
	TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
	TableInfo  *tbinfo = tdinfo->tdtable;
	const char *classname = tbinfo->dobj.name;
	/* NOTE(review): the declaration of the query buffer "q" (source line
	 * 1844, presumably a createPQExpBuffer() call) is missing from this
	 * extract; "q" is used throughout below. */

	/*
	 * Note: can't use getThreadLocalPQExpBuffer() here, we're calling fmtId
	 * which uses it already.
	 */
	PQExpBuffer clistBuf = createPQExpBuffer();
	PGconn	   *conn = GetConnection(fout);
	PGresult   *res;
	int			ret;
	char	   *copybuf;
	const char *column_list;

	pg_log_info("dumping contents of table \"%s.%s\"",
				tbinfo->dobj.namespace->dobj.name, classname);

	/*
	 * Specify the column list explicitly so that we have no possibility of
	 * retrieving data in the wrong column order. (The default column
	 * ordering of COPY will not be what we want in certain corner cases
	 * involving ADD COLUMN and inheritance.)
	 */
	column_list = fmtCopyColumnList(tbinfo, clistBuf);

	/*
	 * Use COPY (SELECT ...) TO when dumping a foreign table's data, and when
	 * a filter condition was specified. For other cases a simple COPY
	 * suffices.
	 */
	if (tdinfo->filtercond || tbinfo->relkind == RELKIND_FOREIGN_TABLE)
	{
		/* Note: this syntax is only supported in 8.2 and up */
		appendPQExpBufferStr(q, "COPY (SELECT ");
		/* klugery to get rid of parens in column list */
		if (strlen(column_list) > 2)
		{
			/* drop the leading '(' and overwrite the trailing ')' */
			appendPQExpBufferStr(q, column_list + 1);
			q->data[q->len - 1] = ' ';
		}
		else
			appendPQExpBufferStr(q, "* ");

		appendPQExpBuffer(q, "FROM %s %s) TO stdout;",
						  fmtQualifiedDumpable(tbinfo),
						  tdinfo->filtercond ? tdinfo->filtercond : "");
	}
	else
	{
		appendPQExpBuffer(q, "COPY %s %s TO stdout;",
						  fmtQualifiedDumpable(tbinfo),
						  column_list);
	}
	res = ExecuteSqlQuery(fout, q->data, PGRES_COPY_OUT);
	PQclear(res);
	destroyPQExpBuffer(clistBuf);

	/* Pump COPY OUT data rows from the server into the archive. */
	for (;;)
	{
		ret = PQgetCopyData(conn, &copybuf, 0);

		if (ret < 0)
			break;				/* done or error */

		if (copybuf)
		{
			WriteData(fout, copybuf, ret);
			PQfreemem(copybuf);
		}

		/* ----------
		 * THROTTLE:
		 *
		 * There was considerable discussion in late July, 2000 regarding
		 * slowing down pg_dump when backing up large tables. Users with both
		 * slow & fast (multi-processor) machines experienced performance
		 * degradation when doing a backup.
		 *
		 * Initial attempts based on sleeping for a number of ms for each ms
		 * of work were deemed too complex, then a simple 'sleep in each loop'
		 * implementation was suggested. The latter failed because the loop
		 * was too tight. Finally, the following was implemented:
		 *
		 * If throttle is non-zero, then
		 *		See how long since the last sleep.
		 *		Work out how long to sleep (based on ratio).
		 *		If sleep is more than 100ms, then
		 *			sleep
		 *			reset timer
		 *		EndIf
		 * EndIf
		 *
		 * where the throttle value was the number of ms to sleep per ms of
		 * work. The calculation was done in each loop.
		 *
		 * Most of the hard work is done in the backend, and this solution
		 * still did not work particularly well: on slow machines, the ratio
		 * was 50:1, and on medium paced machines, 1:1, and on fast
		 * multi-processor machines, it had little or no effect, for reasons
		 * that were unclear.
		 *
		 * Further discussion ensued, and the proposal was dropped.
		 *
		 * For those people who want this feature, it can be implemented using
		 * gettimeofday in each loop, calculating the time since last sleep,
		 * multiplying that by the sleep ratio, then if the result is more
		 * than a preset 'minimum sleep time' (say 100ms), call the 'select'
		 * function to sleep for a subsecond period ie.
		 *
		 * select(0, NULL, NULL, NULL, &tvi);
		 *
		 * This will return after the interval specified in the structure tvi.
		 * Finally, call gettimeofday again to save the 'last sleep time'.
		 * ----------
		 */
	}
	/* Emit the COPY end-of-data marker into the archive. */
	archprintf(fout, "\\.\n\n\n");

	/* ret == -2 means PQgetCopyData reported an error (-1 is normal end). */
	if (ret == -2)
	{
		/* copy data transfer failed */
		pg_log_error("Dumping the contents of table \"%s\" failed: PQgetCopyData() failed.", classname);
		pg_log_error("Error message from server: %s", PQerrorMessage(conn));
		pg_log_error("The command was: %s", q->data);
		exit_nicely(1);
	}

	/* Check command status and return to normal libpq state */
	res = PQgetResult(conn);
	if (PQresultStatus(res) != PGRES_COMMAND_OK)
	{
		pg_log_error("Dumping the contents of table \"%s\" failed: PQgetResult() failed.", classname);
		pg_log_error("Error message from server: %s", PQerrorMessage(conn));
		pg_log_error("The command was: %s", q->data);
		exit_nicely(1);
	}
	PQclear(res);

	/* Do this to ensure we've pumped libpq back to idle state */
	if (PQgetResult(conn) != NULL)
		pg_log_warning("unexpected extra results during COPY of table \"%s\"",
					   classname);

	destroyPQExpBuffer(q);
	return 1;
}
1989 
/*
 * Dump table data using INSERT commands.
 *
 * Caution: when we restore from an archive file direct to database, the
 * INSERT commands emitted by this function have to be parsed by
 * pg_backup_db.c's ExecuteSimpleCommands(), which will not handle comments,
 * E'' strings, or dollar-quoted strings. So don't emit anything like that.
 *
 * dcontext is really a TableDataInfo*.  Always returns 1.
 */
static int
dumpTableData_insert(Archive *fout, void *dcontext)
{
	TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
	TableInfo  *tbinfo = tdinfo->tdtable;
	DumpOptions *dopt = fout->dopt;
	/* NOTE(review): the declaration of the query buffer "q" (source line
	 * 2004, presumably a createPQExpBuffer() call) is missing from this
	 * extract; "q" is used below. */
	PQExpBuffer insertStmt = NULL;	/* INSERT prefix, built on first batch */
	PGresult   *res;
	int			nfields;
	int			rows_per_statement = dopt->dump_inserts;
	int			rows_this_statement = 0;

	/* Read the data via a cursor so we never hold the whole table in memory. */
	appendPQExpBuffer(q, "DECLARE _pg_dump_cursor CURSOR FOR "
					  "SELECT * FROM ONLY %s",
					  fmtQualifiedDumpable(tbinfo));
	if (tdinfo->filtercond)
		appendPQExpBuffer(q, " %s", tdinfo->filtercond);

	ExecuteSqlStatement(fout, q->data);

	while (1)
	{
		res = ExecuteSqlQuery(fout, "FETCH 100 FROM _pg_dump_cursor",
							  PGRES_TUPLES_OK);
		nfields = PQnfields(res);

		/*
		 * First time through, we build as much of the INSERT statement as
		 * possible in "insertStmt", which we can then just print for each
		 * statement. If the table happens to have zero columns then this will
		 * be a complete statement, otherwise it will end in "VALUES" and be
		 * ready to have the row's column values printed.
		 */
		if (insertStmt == NULL)
		{
			TableInfo  *targettab;

			insertStmt = createPQExpBuffer();

			/*
			 * When load-via-partition-root is set, get the root table name
			 * for the partition table, so that we can reload data through the
			 * root table.
			 */
			if (dopt->load_via_partition_root && tbinfo->ispartition)
				targettab = getRootTableInfo(tbinfo);
			else
				targettab = tbinfo;

			appendPQExpBuffer(insertStmt, "INSERT INTO %s ",
							  fmtQualifiedDumpable(targettab));

			/* corner case for zero-column table */
			if (nfields == 0)
			{
				appendPQExpBufferStr(insertStmt, "DEFAULT VALUES;\n");
			}
			else
			{
				/* append the list of column names if required */
				if (dopt->column_inserts)
				{
					appendPQExpBufferChar(insertStmt, '(');
					for (int field = 0; field < nfields; field++)
					{
						if (field > 0)
							appendPQExpBufferStr(insertStmt, ", ");
						appendPQExpBufferStr(insertStmt,
											 fmtId(PQfname(res, field)));
					}
					appendPQExpBufferStr(insertStmt, ") ");
				}

				if (tbinfo->needs_override)
					appendPQExpBufferStr(insertStmt, "OVERRIDING SYSTEM VALUE ");

				appendPQExpBufferStr(insertStmt, "VALUES");
			}
		}

		for (int tuple = 0; tuple < PQntuples(res); tuple++)
		{
			/* Write the INSERT if not in the middle of a multi-row INSERT. */
			if (rows_this_statement == 0)
				archputs(insertStmt->data, fout);

			/*
			 * If it is zero-column table then we've already written the
			 * complete statement, which will mean we've disobeyed
			 * --rows-per-insert when it's set greater than 1. We do support
			 * a way to make this multi-row with: SELECT UNION ALL SELECT
			 * UNION ALL ... but that's non-standard so we should avoid it
			 * given that using INSERTs is mostly only ever needed for
			 * cross-database exports.
			 */
			if (nfields == 0)
				continue;

			/* Emit a row heading */
			if (rows_per_statement == 1)
				archputs(" (", fout);
			else if (rows_this_statement > 0)
				archputs(",\n\t(", fout);
			else
				archputs("\n\t(", fout);

			for (int field = 0; field < nfields; field++)
			{
				if (field > 0)
					archputs(", ", fout);
				/* For generated columns, emit DEFAULT rather than the value. */
				if (tbinfo->attgenerated[field])
				{
					archputs("DEFAULT", fout);
					continue;
				}
				if (PQgetisnull(res, tuple, field))
				{
					archputs("NULL", fout);
					continue;
				}

				/* XXX This code is partially duplicated in ruleutils.c */
				switch (PQftype(res, field))
				{
					case INT2OID:
					case INT4OID:
					case INT8OID:
					case OIDOID:
					case FLOAT4OID:
					case FLOAT8OID:
					case NUMERICOID:
						{
							/*
							 * These types are printed without quotes unless
							 * they contain values that aren't accepted by the
							 * scanner unquoted (e.g., 'NaN'). Note that
							 * strtod() and friends might accept NaN, so we
							 * can't use that to test.
							 *
							 * In reality we only need to defend against
							 * infinity and NaN, so we need not get too crazy
							 * about pattern matching here.
							 */
							const char *s = PQgetvalue(res, tuple, field);

							if (strspn(s, "0123456789 +-eE.") == strlen(s))
								archputs(s, fout);
							else
								archprintf(fout, "'%s'", s);
						}
						break;

					case BITOID:
					case VARBITOID:
						archprintf(fout, "B'%s'",
								   PQgetvalue(res, tuple, field));
						break;

					case BOOLOID:
						if (strcmp(PQgetvalue(res, tuple, field), "t") == 0)
							archputs("true", fout);
						else
							archputs("false", fout);
						break;

					default:
						/* All other types are printed as string literals. */
						resetPQExpBuffer(q);
						/* NOTE(review): the call that quotes the value into
						 * "q" (source line 2167, presumably an
						 * appendStringLiteralAH(q, ...) call) is missing from
						 * this extract; the lines below are its trailing
						 * arguments. */
							PQgetvalue(res, tuple, field),
							fout);
						archputs(q->data, fout);
						break;
				}
			}

			/* Terminate the row ... */
			archputs(")", fout);

			/* ... and the statement, if the target no. of rows is reached */
			if (++rows_this_statement >= rows_per_statement)
			{
				if (dopt->do_nothing)
					archputs(" ON CONFLICT DO NOTHING;\n", fout);
				else
					archputs(";\n", fout);
				/* Reset the row counter */
				rows_this_statement = 0;
			}
		}

		/* An empty fetch means the cursor is exhausted. */
		if (PQntuples(res) <= 0)
		{
			PQclear(res);
			break;
		}
		PQclear(res);
	}

	/* Terminate any statements that didn't make the row count. */
	if (rows_this_statement > 0)
	{
		if (dopt->do_nothing)
			archputs(" ON CONFLICT DO NOTHING;\n", fout);
		else
			archputs(";\n", fout);
	}

	archputs("\n\n", fout);

	ExecuteSqlStatement(fout, "CLOSE _pg_dump_cursor");

	destroyPQExpBuffer(q);
	if (insertStmt != NULL)
		destroyPQExpBuffer(insertStmt);

	return 1;
}
2217 
/*
 * getRootTableInfo:
 *		get the root TableInfo for the given partition table.
 */
static TableInfo *
/* NOTE(review): the declarator line (source line 2223) is missing from this
 * extract; the body references "tbinfo", so presumably it takes the
 * partition's TableInfo. */
{
	TableInfo  *parentTbinfo;

	/* Only sensible for a partition, which has exactly one parent. */
	Assert(tbinfo->ispartition);
	Assert(tbinfo->numParents == 1);

	/* Walk up the single-parent chain until a non-partition is reached. */
	parentTbinfo = tbinfo->parents[0];
	while (parentTbinfo->ispartition)
	{
		Assert(parentTbinfo->numParents == 1);
		parentTbinfo = parentTbinfo->parents[0];
	}

	return parentTbinfo;
}
2239 
/*
 * dumpTableData -
 *	  dump the contents of a single table
 *
 * Actually, this just makes an ArchiveEntry for the table contents.
 */
static void
/* NOTE(review): the declarator line (source line 2247) is missing from this
 * extract; the body references "fout" and "tdinfo". */
{
	DumpOptions *dopt = fout->dopt;
	TableInfo  *tbinfo = tdinfo->tdtable;
	PQExpBuffer copyBuf = createPQExpBuffer();
	PQExpBuffer clistBuf = createPQExpBuffer();
	DataDumperPtr dumpFn;
	char	   *copyStmt;
	const char *copyFrom;

	if (!dopt->dump_inserts)
	{
		/* Dump/restore using COPY */
		dumpFn = dumpTableData_copy;

		/*
		 * When load-via-partition-root is set, get the root table name for
		 * the partition table, so that we can reload data through the root
		 * table.
		 */
		if (dopt->load_via_partition_root && tbinfo->ispartition)
		{
			TableInfo  *parentTbinfo;

			parentTbinfo = getRootTableInfo(tbinfo);
			copyFrom = fmtQualifiedDumpable(parentTbinfo);
		}
		else
			copyFrom = fmtQualifiedDumpable(tbinfo);

		/* must use 2 steps here 'cause fmtId is nonreentrant */
		appendPQExpBuffer(copyBuf, "COPY %s ",
						  copyFrom);
		appendPQExpBuffer(copyBuf, "%s FROM stdin;\n",
						  fmtCopyColumnList(tbinfo, clistBuf));
		copyStmt = copyBuf->data;
	}
	else
	{
		/* Restore using INSERT */
		dumpFn = dumpTableData_insert;
		copyStmt = NULL;		/* no COPY statement when using INSERTs */
	}

	/*
	 * Note: although the TableDataInfo is a full DumpableObject, we treat its
	 * dependency on its table as "special" and pass it to ArchiveEntry now.
	 * See comments for BuildArchiveDependencies.
	 */
	if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
	{
		TocEntry   *te;

		te = ArchiveEntry(fout, tdinfo->dobj.catId, tdinfo->dobj.dumpId,
						  ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
									   .namespace = tbinfo->dobj.namespace->dobj.name,
									   .owner = tbinfo->rolname,
									   .description = "TABLE DATA",
									   .section = SECTION_DATA,
									   .copyStmt = copyStmt,
									   .deps = &(tbinfo->dobj.dumpId),
									   .nDeps = 1,
									   .dumpFn = dumpFn,
									   .dumpArg = tdinfo));

		/*
		 * Set the TocEntry's dataLength in case we are doing a parallel dump
		 * and want to order dump jobs by table size. We choose to measure
		 * dataLength in table pages during dump, so no scaling is needed.
		 * However, relpages is declared as "integer" in pg_class, and hence
		 * also in TableInfo, but it's really BlockNumber a/k/a unsigned int.
		 * Cast so that we get the right interpretation of table sizes
		 * exceeding INT_MAX pages.
		 */
		te->dataLength = (BlockNumber) tbinfo->relpages;
	}

	destroyPQExpBuffer(copyBuf);
	destroyPQExpBuffer(clistBuf);
}
2327 
/*
 * refreshMatViewData -
 *	  load or refresh the contents of a single materialized view
 *
 * Actually, this just makes an ArchiveEntry for the REFRESH MATERIALIZED VIEW
 * statement.
 */
static void
/* NOTE(review): the declarator line (source line 2336) is missing from this
 * extract; the body references "fout" and "tdinfo". */
{
	TableInfo  *tbinfo = tdinfo->tdtable;
	PQExpBuffer q;

	/* If the materialized view is not flagged as populated, skip this. */
	if (!tbinfo->relispopulated)
		return;

	q = createPQExpBuffer();

	appendPQExpBuffer(q, "REFRESH MATERIALIZED VIEW %s;\n",
					  fmtQualifiedDumpable(tbinfo));

	/* Goes in the post-data section, after all objects it reads exist. */
	if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
		ArchiveEntry(fout,
					 tdinfo->dobj.catId,	/* catalog ID */
					 tdinfo->dobj.dumpId,	/* dump ID */
					 ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
								  .namespace = tbinfo->dobj.namespace->dobj.name,
								  .owner = tbinfo->rolname,
								  .description = "MATERIALIZED VIEW DATA",
								  .section = SECTION_POST_DATA,
								  .createStmt = q->data,
								  .deps = tdinfo->dobj.dependencies,
								  .nDeps = tdinfo->dobj.nDeps));

	destroyPQExpBuffer(q);
}
2365 
2366 /*
2367  * getTableData -
2368  * set up dumpable objects representing the contents of tables
2369  */
2370 static void
2371 getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, char relkind)
2372 {
2373  int i;
2374 
2375  for (i = 0; i < numTables; i++)
2376  {
2377  if (tblinfo[i].dobj.dump & DUMP_COMPONENT_DATA &&
2378  (!relkind || tblinfo[i].relkind == relkind))
2379  makeTableDataInfo(dopt, &(tblinfo[i]));
2380  }
2381 }
2382 
/*
 * Make a dumpable object for the data of this specific table
 *
 * Note: we make a TableDataInfo if and only if we are going to dump the
 * table data; the "dump" flag in such objects isn't used.
 */
static void
/* NOTE(review): the declarator line (source line 2390) is missing from this
 * extract; the body references "dopt" and "tbinfo". */
{
	TableDataInfo *tdinfo;

	/*
	 * Nothing to do if we already decided to dump the table. This will
	 * happen for "config" tables.
	 */
	if (tbinfo->dataObj != NULL)
		return;

	/* Skip VIEWs (no data to dump) */
	if (tbinfo->relkind == RELKIND_VIEW)
		return;
	/* Skip FOREIGN TABLEs (no data to dump) unless requested explicitly */
	if (tbinfo->relkind == RELKIND_FOREIGN_TABLE &&
		(foreign_servers_include_oids.head == NULL ||
		 !simple_oid_list_member(&foreign_servers_include_oids,
								 tbinfo->foreign_server)))
		return;
	/* Skip partitioned tables (data in partitions) */
	if (tbinfo->relkind == RELKIND_PARTITIONED_TABLE)
		return;

	/* Don't dump data in unlogged tables, if so requested */
	if (tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED &&
		dopt->no_unlogged_table_data)
		return;

	/* Check that the data is not explicitly excluded */
	if (simple_oid_list_member(&tabledata_exclude_oids,
							   tbinfo->dobj.catId.oid))
		return;

	/* OK, let's dump it */
	tdinfo = (TableDataInfo *) pg_malloc(sizeof(TableDataInfo));

	/* The object type determines how the data will be restored. */
	if (tbinfo->relkind == RELKIND_MATVIEW)
		tdinfo->dobj.objType = DO_REFRESH_MATVIEW;
	else if (tbinfo->relkind == RELKIND_SEQUENCE)
		tdinfo->dobj.objType = DO_SEQUENCE_SET;
	else
		tdinfo->dobj.objType = DO_TABLE_DATA;

	/*
	 * Note: use tableoid 0 so that this object won't be mistaken for
	 * something that pg_depend entries apply to.
	 */
	tdinfo->dobj.catId.tableoid = 0;
	tdinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
	AssignDumpId(&tdinfo->dobj);
	tdinfo->dobj.name = tbinfo->dobj.name;
	tdinfo->dobj.namespace = tbinfo->dobj.namespace;
	tdinfo->tdtable = tbinfo;
	tdinfo->filtercond = NULL;	/* might get set later */
	/* the data object must be restored after the table itself */
	addObjectDependency(&tdinfo->dobj, tbinfo->dobj.dumpId);

	/* link the data object back from its table */
	tbinfo->dataObj = tdinfo;
}
2449 
/*
 * The refresh for a materialized view must be dependent on the refresh for
 * any materialized view that this one is dependent on.
 *
 * This must be called after all the objects are created, but before they are
 * sorted.
 */
static void
/* NOTE(review): the declarator line (source line 2458) is missing from this
 * extract; the body references "fout", so presumably it takes the Archive. */
{
	PQExpBuffer query;
	PGresult   *res;
	int			ntups,
				i;
	int			i_classid,
				i_objid,
				i_refobjid;

	/* No Mat Views before 9.3. */
	if (fout->remoteVersion < 90300)
		return;

	query = createPQExpBuffer();

	/*
	 * Recursively follow each matview's rewrite rule through pg_depend to
	 * the relations it reads, continuing through intermediate plain views,
	 * and keep only the referenced relations that are themselves matviews.
	 */
	appendPQExpBufferStr(query, "WITH RECURSIVE w AS "
						 "( "
						 "SELECT d1.objid, d2.refobjid, c2.relkind AS refrelkind "
						 "FROM pg_depend d1 "
						 "JOIN pg_class c1 ON c1.oid = d1.objid "
						 "AND c1.relkind = " CppAsString2(RELKIND_MATVIEW)
						 " JOIN pg_rewrite r1 ON r1.ev_class = d1.objid "
						 "JOIN pg_depend d2 ON d2.classid = 'pg_rewrite'::regclass "
						 "AND d2.objid = r1.oid "
						 "AND d2.refobjid <> d1.objid "
						 "JOIN pg_class c2 ON c2.oid = d2.refobjid "
						 "AND c2.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
						 CppAsString2(RELKIND_VIEW) ") "
						 "WHERE d1.classid = 'pg_class'::regclass "
						 "UNION "
						 "SELECT w.objid, d3.refobjid, c3.relkind "
						 "FROM w "
						 "JOIN pg_rewrite r3 ON r3.ev_class = w.refobjid "
						 "JOIN pg_depend d3 ON d3.classid = 'pg_rewrite'::regclass "
						 "AND d3.objid = r3.oid "
						 "AND d3.refobjid <> w.refobjid "
						 "JOIN pg_class c3 ON c3.oid = d3.refobjid "
						 "AND c3.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
						 CppAsString2(RELKIND_VIEW) ") "
						 ") "
						 "SELECT 'pg_class'::regclass::oid AS classid, objid, refobjid "
						 "FROM w "
						 "WHERE refrelkind = " CppAsString2(RELKIND_MATVIEW));

	res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);

	ntups = PQntuples(res);

	i_classid = PQfnumber(res, "classid");
	i_objid = PQfnumber(res, "objid");
	i_refobjid = PQfnumber(res, "refobjid");

	/* For each (matview, referenced matview) pair, add a dump dependency. */
	for (i = 0; i < ntups; i++)
	{
		CatalogId	objId;
		CatalogId	refobjId;
		DumpableObject *dobj;
		DumpableObject *refdobj;
		TableInfo  *tbinfo;
		TableInfo  *reftbinfo;

		objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
		objId.oid = atooid(PQgetvalue(res, i, i_objid));
		refobjId.tableoid = objId.tableoid;
		refobjId.oid = atooid(PQgetvalue(res, i, i_refobjid));

		/* Either relation may have been excluded from the dump entirely. */
		dobj = findObjectByCatalogId(objId);
		if (dobj == NULL)
			continue;

		Assert(dobj->objType == DO_TABLE);
		tbinfo = (TableInfo *) dobj;
		Assert(tbinfo->relkind == RELKIND_MATVIEW);
		dobj = (DumpableObject *) tbinfo->dataObj;
		if (dobj == NULL)
			continue;
		Assert(dobj->objType == DO_REFRESH_MATVIEW);

		refdobj = findObjectByCatalogId(refobjId);
		if (refdobj == NULL)
			continue;

		Assert(refdobj->objType == DO_TABLE);
		reftbinfo = (TableInfo *) refdobj;
		Assert(reftbinfo->relkind == RELKIND_MATVIEW);
		refdobj = (DumpableObject *) reftbinfo->dataObj;
		if (refdobj == NULL)
			continue;
		Assert(refdobj->objType == DO_REFRESH_MATVIEW);

		/* Refresh of this matview must follow refresh of the one it reads. */
		addObjectDependency(dobj, refdobj->dumpId);

		/* An unpopulated input makes this matview's refresh a no-op too. */
		if (!reftbinfo->relispopulated)
			tbinfo->relispopulated = false;
	}

	PQclear(res);

	destroyPQExpBuffer(query);
}
2559 
/*
 * getTableDataFKConstraints -
 *	  add dump-order dependencies reflecting foreign key constraints
 *
 * This code is executed only in a data-only dump --- in schema+data dumps
 * we handle foreign key issues by not creating the FK constraints until
 * after the data is loaded. In a data-only dump, however, we want to
 * order the table data objects in such a way that a table's referenced
 * tables are restored first. (In the presence of circular references or
 * self-references this may be impossible; we'll detect and complain about
 * that during the dependency sorting step.)
 */
static void
/* NOTE(review): the declarator line (source line 2573) is missing from this
 * extract. */
{
	DumpableObject **dobjs;
	int			numObjs;
	int			i;

	/* Search through all the dumpable objects for FK constraints */
	getDumpableObjects(&dobjs, &numObjs);
	for (i = 0; i < numObjs; i++)
	{
		if (dobjs[i]->objType == DO_FK_CONSTRAINT)
		{
			ConstraintInfo *cinfo = (ConstraintInfo *) dobjs[i];
			TableInfo  *ftable;

			/* Not interesting unless both tables are to be dumped */
			if (cinfo->contable == NULL ||
				cinfo->contable->dataObj == NULL)
				continue;
			ftable = findTableByOid(cinfo->confrelid);
			if (ftable == NULL ||
				ftable->dataObj == NULL)
				continue;

			/*
			 * Okay, make referencing table's TABLE_DATA object depend on the
			 * referenced table's TABLE_DATA object.
			 */
			/* NOTE(review): the first line of this call (source line 2601,
			 * presumably addObjectDependency(&cinfo->contable->dataObj->dobj,)
			 * is missing from this extract; the line below is its trailing
			 * argument. */
				ftable->dataObj->dobj.dumpId);
		}
	}
	free(dobjs);
}
2607 
2608 
/*
 * guessConstraintInheritance:
 *	In pre-8.4 databases, we can't tell for certain which constraints
 *	are inherited. We assume a CHECK constraint is inherited if its name
 *	matches the name of any constraint in the parent. Originally this code
 *	tried to compare the expression texts, but that can fail for various
 *	reasons --- for example, if the parent and child tables are in different
 *	schemas, reverse-listing of function calls may produce different text
 *	(schema-qualified or not) depending on search path.
 *
 * In 8.4 and up we can rely on the conislocal field to decide which
 * constraints must be dumped; much safer.
 *
 * This function assumes all conislocal flags were initialized to true.
 * It clears the flag on anything that seems to be inherited.
 */
static void
/* NOTE(review): the declarator line (source line 2626) is missing from this
 * extract; the body references "tblinfo" and "numTables". */
{
	int			i,
				j,
				k;

	for (i = 0; i < numTables; i++)
	{
		TableInfo  *tbinfo = &(tblinfo[i]);
		int			numParents;
		TableInfo **parents;
		TableInfo  *parent;

		/* Sequences and views never have parents */
		if (tbinfo->relkind == RELKIND_SEQUENCE ||
			tbinfo->relkind == RELKIND_VIEW)
			continue;

		/* Don't bother computing anything for non-target tables, either */
		if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
			continue;

		numParents = tbinfo->numParents;
		parents = tbinfo->parents;

		if (numParents == 0)
			continue;			/* nothing to see here, move along */

		/* scan for inherited CHECK constraints */
		for (j = 0; j < tbinfo->ncheck; j++)
		{
			ConstraintInfo *constr;

			constr = &(tbinfo->checkexprs[j]);

			/* look for a same-named constraint in any parent */
			for (k = 0; k < numParents; k++)
			{
				int			l;

				parent = parents[k];
				for (l = 0; l < parent->ncheck; l++)
				{
					ConstraintInfo *pconstr = &(parent->checkexprs[l]);

					if (strcmp(pconstr->dobj.name, constr->dobj.name) == 0)
					{
						/* name match: treat as inherited, don't dump it */
						constr->conislocal = false;
						break;
					}
				}
				if (!constr->conislocal)
					break;		/* already decided; skip remaining parents */
			}
		}
	}
}
2682 
2683 
2684 /*
2685  * dumpDatabase:
2686  * dump the database definition
2687  */
2688 static void
2690 {
2691  DumpOptions *dopt = fout->dopt;
2692  PQExpBuffer dbQry = createPQExpBuffer();
2693  PQExpBuffer delQry = createPQExpBuffer();
2694  PQExpBuffer creaQry = createPQExpBuffer();
2695  PQExpBuffer labelq = createPQExpBuffer();
2696  PGconn *conn = GetConnection(fout);
2697  PGresult *res;
2698  int i_tableoid,
2699  i_oid,
2700  i_datname,
2701  i_dba,
2702  i_encoding,
2703  i_collate,
2704  i_ctype,
2705  i_frozenxid,
2706  i_minmxid,
2707  i_datacl,
2708  i_rdatacl,
2709  i_datistemplate,
2710  i_datconnlimit,
2711  i_tablespace;
2712  CatalogId dbCatId;
2713  DumpId dbDumpId;
2714  const char *datname,
2715  *dba,
2716  *encoding,
2717  *collate,
2718  *ctype,
2719  *datacl,
2720  *rdatacl,
2721  *datistemplate,
2722  *datconnlimit,
2723  *tablespace;
2724  uint32 frozenxid,
2725  minmxid;
2726  char *qdatname;
2727 
2728  pg_log_info("saving database definition");
2729 
2730  /*
2731  * Fetch the database-level properties for this database.
2732  *
2733  * The order in which privileges are in the ACL string (the order they
2734  * have been GRANT'd in, which the backend maintains) must be preserved to
2735  * ensure that GRANTs WITH GRANT OPTION and subsequent GRANTs based on
2736  * those are dumped in the correct order. Note that initial privileges
2737  * (pg_init_privs) are not supported on databases, so this logic cannot
2738  * make use of buildACLQueries().
2739  */
2740  if (fout->remoteVersion >= 90600)
2741  {
2742  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2743  "(%s datdba) AS dba, "
2744  "pg_encoding_to_char(encoding) AS encoding, "
2745  "datcollate, datctype, datfrozenxid, datminmxid, "
2746  "(SELECT array_agg(acl ORDER BY row_n) FROM "
2747  " (SELECT acl, row_n FROM "
2748  " unnest(coalesce(datacl,acldefault('d',datdba))) "
2749  " WITH ORDINALITY AS perm(acl,row_n) "
2750  " WHERE NOT EXISTS ( "
2751  " SELECT 1 "
2752  " FROM unnest(acldefault('d',datdba)) "
2753  " AS init(init_acl) "
2754  " WHERE acl = init_acl)) AS datacls) "
2755  " AS datacl, "
2756  "(SELECT array_agg(acl ORDER BY row_n) FROM "
2757  " (SELECT acl, row_n FROM "
2758  " unnest(acldefault('d',datdba)) "
2759  " WITH ORDINALITY AS initp(acl,row_n) "
2760  " WHERE NOT EXISTS ( "
2761  " SELECT 1 "
2762  " FROM unnest(coalesce(datacl,acldefault('d',datdba))) "
2763  " AS permp(orig_acl) "
2764  " WHERE acl = orig_acl)) AS rdatacls) "
2765  " AS rdatacl, "
2766  "datistemplate, datconnlimit, "
2767  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2768  "shobj_description(oid, 'pg_database') AS description "
2769 
2770  "FROM pg_database "
2771  "WHERE datname = current_database()",
2773  }
2774  else if (fout->remoteVersion >= 90300)
2775  {
2776  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2777  "(%s datdba) AS dba, "
2778  "pg_encoding_to_char(encoding) AS encoding, "
2779  "datcollate, datctype, datfrozenxid, datminmxid, "
2780  "datacl, '' as rdatacl, datistemplate, datconnlimit, "
2781  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2782  "shobj_description(oid, 'pg_database') AS description "
2783 
2784  "FROM pg_database "
2785  "WHERE datname = current_database()",
2787  }
2788  else if (fout->remoteVersion >= 80400)
2789  {
2790  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2791  "(%s datdba) AS dba, "
2792  "pg_encoding_to_char(encoding) AS encoding, "
2793  "datcollate, datctype, datfrozenxid, 0 AS datminmxid, "
2794  "datacl, '' as rdatacl, datistemplate, datconnlimit, "
2795  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2796  "shobj_description(oid, 'pg_database') AS description "
2797 
2798  "FROM pg_database "
2799  "WHERE datname = current_database()",
2801  }
2802  else if (fout->remoteVersion >= 80200)
2803  {
2804  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2805  "(%s datdba) AS dba, "
2806  "pg_encoding_to_char(encoding) AS encoding, "
2807  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2808  "datacl, '' as rdatacl, datistemplate, datconnlimit, "
2809  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2810  "shobj_description(oid, 'pg_database') AS description "
2811 
2812  "FROM pg_database "
2813  "WHERE datname = current_database()",
2815  }
2816  else
2817  {
2818  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2819  "(%s datdba) AS dba, "
2820  "pg_encoding_to_char(encoding) AS encoding, "
2821  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2822  "datacl, '' as rdatacl, datistemplate, "
2823  "-1 as datconnlimit, "
2824  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace "
2825  "FROM pg_database "
2826  "WHERE datname = current_database()",
2828  }
2829 
2830  res = ExecuteSqlQueryForSingleRow(fout, dbQry->data);
2831 
2832  i_tableoid = PQfnumber(res, "tableoid");
2833  i_oid = PQfnumber(res, "oid");
2834  i_datname = PQfnumber(res, "datname");
2835  i_dba = PQfnumber(res, "dba");
2836  i_encoding = PQfnumber(res, "encoding");
2837  i_collate = PQfnumber(res, "datcollate");
2838  i_ctype = PQfnumber(res, "datctype");
2839  i_frozenxid = PQfnumber(res, "datfrozenxid");
2840  i_minmxid = PQfnumber(res, "datminmxid");
2841  i_datacl = PQfnumber(res, "datacl");
2842  i_rdatacl = PQfnumber(res, "rdatacl");
2843  i_datistemplate = PQfnumber(res, "datistemplate");
2844  i_datconnlimit = PQfnumber(res, "datconnlimit");
2845  i_tablespace = PQfnumber(res, "tablespace");
2846 
2847  dbCatId.tableoid = atooid(PQgetvalue(res, 0, i_tableoid));
2848  dbCatId.oid = atooid(PQgetvalue(res, 0, i_oid));
2849  datname = PQgetvalue(res, 0, i_datname);
2850  dba = PQgetvalue(res, 0, i_dba);
2851  encoding = PQgetvalue(res, 0, i_encoding);
2852  collate = PQgetvalue(res, 0, i_collate);
2853  ctype = PQgetvalue(res, 0, i_ctype);
2854  frozenxid = atooid(PQgetvalue(res, 0, i_frozenxid));
2855  minmxid = atooid(PQgetvalue(res, 0, i_minmxid));
2856  datacl = PQgetvalue(res, 0, i_datacl);
2857  rdatacl = PQgetvalue(res, 0, i_rdatacl);
2858  datistemplate = PQgetvalue(res, 0, i_datistemplate);
2859  datconnlimit = PQgetvalue(res, 0, i_datconnlimit);
2860  tablespace = PQgetvalue(res, 0, i_tablespace);
2861 
2862  qdatname = pg_strdup(fmtId(datname));
2863 
2864  /*
2865  * Prepare the CREATE DATABASE command. We must specify encoding, locale,
2866  * and tablespace since those can't be altered later. Other DB properties
2867  * are left to the DATABASE PROPERTIES entry, so that they can be applied
2868  * after reconnecting to the target DB.
2869  */
2870  appendPQExpBuffer(creaQry, "CREATE DATABASE %s WITH TEMPLATE = template0",
2871  qdatname);
2872  if (strlen(encoding) > 0)
2873  {
2874  appendPQExpBufferStr(creaQry, " ENCODING = ");
2875  appendStringLiteralAH(creaQry, encoding, fout);
2876  }
2877  if (strlen(collate) > 0 && strcmp(collate, ctype) == 0)
2878  {
2879  appendPQExpBufferStr(creaQry, " LOCALE = ");
2880  appendStringLiteralAH(creaQry, collate, fout);
2881  }
2882  else
2883  {
2884  if (strlen(collate) > 0)
2885  {
2886  appendPQExpBufferStr(creaQry, " LC_COLLATE = ");
2887  appendStringLiteralAH(creaQry, collate, fout);
2888  }
2889  if (strlen(ctype) > 0)
2890  {
2891  appendPQExpBufferStr(creaQry, " LC_CTYPE = ");
2892  appendStringLiteralAH(creaQry, ctype, fout);
2893  }
2894  }
2895 
2896  /*
2897  * Note: looking at dopt->outputNoTablespaces here is completely the wrong
2898  * thing; the decision whether to specify a tablespace should be left till
2899  * pg_restore, so that pg_restore --no-tablespaces applies. Ideally we'd
2900  * label the DATABASE entry with the tablespace and let the normal
2901  * tablespace selection logic work ... but CREATE DATABASE doesn't pay
2902  * attention to default_tablespace, so that won't work.
2903  */
2904  if (strlen(tablespace) > 0 && strcmp(tablespace, "pg_default") != 0 &&
2905  !dopt->outputNoTablespaces)
2906  appendPQExpBuffer(creaQry, " TABLESPACE = %s",
2907  fmtId(tablespace));
2908  appendPQExpBufferStr(creaQry, ";\n");
2909 
2910  appendPQExpBuffer(delQry, "DROP DATABASE %s;\n",
2911  qdatname);
2912 
2913  dbDumpId = createDumpId();
2914 
2915  ArchiveEntry(fout,
2916  dbCatId, /* catalog ID */
2917  dbDumpId, /* dump ID */
2918  ARCHIVE_OPTS(.tag = datname,
2919  .owner = dba,
2920  .description = "DATABASE",
2921  .section = SECTION_PRE_DATA,
2922  .createStmt = creaQry->data,
2923  .dropStmt = delQry->data));
2924 
2925  /* Compute correct tag for archive entry */
2926  appendPQExpBuffer(labelq, "DATABASE %s", qdatname);
2927 
2928  /* Dump DB comment if any */
2929  if (fout->remoteVersion >= 80200)
2930  {
2931  /*
2932  * 8.2 and up keep comments on shared objects in a shared table, so we
2933  * cannot use the dumpComment() code used for other database objects.
2934  * Be careful that the ArchiveEntry parameters match that function.
2935  */
2936  char *comment = PQgetvalue(res, 0, PQfnumber(res, "description"));
2937 
2938  if (comment && *comment && !dopt->no_comments)
2939  {
2940  resetPQExpBuffer(dbQry);
2941 
2942  /*
2943  * Generates warning when loaded into a differently-named
2944  * database.
2945  */
2946  appendPQExpBuffer(dbQry, "COMMENT ON DATABASE %s IS ", qdatname);
2947  appendStringLiteralAH(dbQry, comment, fout);
2948  appendPQExpBufferStr(dbQry, ";\n");
2949 
2950  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2951  ARCHIVE_OPTS(.tag = labelq->data,
2952  .owner = dba,
2953  .description = "COMMENT",
2954  .section = SECTION_NONE,
2955  .createStmt = dbQry->data,
2956  .deps = &dbDumpId,
2957  .nDeps = 1));
2958  }
2959  }
2960  else
2961  {
2962  dumpComment(fout, "DATABASE", qdatname, NULL, dba,
2963  dbCatId, 0, dbDumpId);
2964  }
2965 
2966  /* Dump DB security label, if enabled */
2967  if (!dopt->no_security_labels && fout->remoteVersion >= 90200)
2968  {
2969  PGresult *shres;
2970  PQExpBuffer seclabelQry;
2971 
2972  seclabelQry = createPQExpBuffer();
2973 
2974  buildShSecLabelQuery(conn, "pg_database", dbCatId.oid, seclabelQry);
2975  shres = ExecuteSqlQuery(fout, seclabelQry->data, PGRES_TUPLES_OK);
2976  resetPQExpBuffer(seclabelQry);
2977  emitShSecLabels(conn, shres, seclabelQry, "DATABASE", datname);
2978  if (seclabelQry->len > 0)
2979  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2980  ARCHIVE_OPTS(.tag = labelq->data,
2981  .owner = dba,
2982  .description = "SECURITY LABEL",
2983  .section = SECTION_NONE,
2984  .createStmt = seclabelQry->data,
2985  .deps = &dbDumpId,
2986  .nDeps = 1));
2987  destroyPQExpBuffer(seclabelQry);
2988  PQclear(shres);
2989  }
2990 
2991  /*
2992  * Dump ACL if any. Note that we do not support initial privileges
2993  * (pg_init_privs) on databases.
2994  */
2995  dumpACL(fout, dbCatId, dbDumpId, "DATABASE",
2996  qdatname, NULL, NULL,
2997  dba, datacl, rdatacl, "", "");
2998 
2999  /*
3000  * Now construct a DATABASE PROPERTIES archive entry to restore any
3001  * non-default database-level properties. (The reason this must be
3002  * separate is that we cannot put any additional commands into the TOC
3003  * entry that has CREATE DATABASE. pg_restore would execute such a group
3004  * in an implicit transaction block, and the backend won't allow CREATE
3005  * DATABASE in that context.)
3006  */
3007  resetPQExpBuffer(creaQry);
3008  resetPQExpBuffer(delQry);
3009 
3010  if (strlen(datconnlimit) > 0 && strcmp(datconnlimit, "-1") != 0)
3011  appendPQExpBuffer(creaQry, "ALTER DATABASE %s CONNECTION LIMIT = %s;\n",
3012  qdatname, datconnlimit);
3013 
3014  if (strcmp(datistemplate, "t") == 0)
3015  {
3016  appendPQExpBuffer(creaQry, "ALTER DATABASE %s IS_TEMPLATE = true;\n",
3017  qdatname);
3018 
3019  /*
3020  * The backend won't accept DROP DATABASE on a template database. We
3021  * can deal with that by removing the template marking before the DROP
3022  * gets issued. We'd prefer to use ALTER DATABASE IF EXISTS here, but
3023  * since no such command is currently supported, fake it with a direct
3024  * UPDATE on pg_database.
3025  */
3026  appendPQExpBufferStr(delQry, "UPDATE pg_catalog.pg_database "
3027  "SET datistemplate = false WHERE datname = ");
3028  appendStringLiteralAH(delQry, datname, fout);
3029  appendPQExpBufferStr(delQry, ";\n");
3030  }
3031 
3032  /* Add database-specific SET options */
3033  dumpDatabaseConfig(fout, creaQry, datname, dbCatId.oid);
3034 
3035  /*
3036  * We stick this binary-upgrade query into the DATABASE PROPERTIES archive
3037  * entry, too, for lack of a better place.
3038  */
3039  if (dopt->binary_upgrade)
3040  {
3041  appendPQExpBufferStr(creaQry, "\n-- For binary upgrade, set datfrozenxid and datminmxid.\n");
3042  appendPQExpBuffer(creaQry, "UPDATE pg_catalog.pg_database\n"
3043  "SET datfrozenxid = '%u', datminmxid = '%u'\n"
3044  "WHERE datname = ",
3045  frozenxid, minmxid);
3046  appendStringLiteralAH(creaQry, datname, fout);
3047  appendPQExpBufferStr(creaQry, ";\n");
3048  }
3049 
3050  if (creaQry->len > 0)
3051  ArchiveEntry(fout, nilCatalogId, createDumpId(),
3052  ARCHIVE_OPTS(.tag = datname,
3053  .owner = dba,
3054  .description = "DATABASE PROPERTIES",
3055  .section = SECTION_PRE_DATA,
3056  .createStmt = creaQry->data,
3057  .dropStmt = delQry->data,
3058  .deps = &dbDumpId));
3059 
3060  /*
3061  * pg_largeobject comes from the old system intact, so set its
3062  * relfrozenxids and relminmxids.
3063  */
3064  if (dopt->binary_upgrade)
3065  {
3066  PGresult *lo_res;
3067  PQExpBuffer loFrozenQry = createPQExpBuffer();
3068  PQExpBuffer loOutQry = createPQExpBuffer();
3069  int i_relfrozenxid,
3070  i_relminmxid;
3071 
3072  /*
3073  * pg_largeobject
3074  */
3075  if (fout->remoteVersion >= 90300)
3076  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid\n"
3077  "FROM pg_catalog.pg_class\n"
3078  "WHERE oid = %u;\n",
3079  LargeObjectRelationId);
3080  else
3081  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid\n"
3082  "FROM pg_catalog.pg_class\n"
3083  "WHERE oid = %u;\n",
3084  LargeObjectRelationId);
3085 
3086  lo_res = ExecuteSqlQueryForSingleRow(fout, loFrozenQry->data);
3087 
3088  i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
3089  i_relminmxid = PQfnumber(lo_res, "relminmxid");
3090 
3091  appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, set pg_largeobject relfrozenxid and relminmxid\n");
3092  appendPQExpBuffer(loOutQry, "UPDATE pg_catalog.pg_class\n"
3093  "SET relfrozenxid = '%u', relminmxid = '%u'\n"
3094  "WHERE oid = %u;\n",
3095  atooid(PQgetvalue(lo_res, 0, i_relfrozenxid)),
3096  atooid(PQgetvalue(lo_res, 0, i_relminmxid)),
3097  LargeObjectRelationId);
3098  ArchiveEntry(fout, nilCatalogId, createDumpId(),
3099  ARCHIVE_OPTS(.tag = "pg_largeobject",
3100  .description = "pg_largeobject",
3101  .section = SECTION_PRE_DATA,
3102  .createStmt = loOutQry->data));
3103 
3104  PQclear(lo_res);
3105 
3106  destroyPQExpBuffer(loFrozenQry);
3107  destroyPQExpBuffer(loOutQry);
3108  }
3109 
3110  PQclear(res);
3111 
3112  free(qdatname);
3113  destroyPQExpBuffer(dbQry);
3114  destroyPQExpBuffer(delQry);
3115  destroyPQExpBuffer(creaQry);
3116  destroyPQExpBuffer(labelq);
3117 }
3118 
3119 /*
3120  * Collect any database-specific or role-and-database-specific SET options
3121  * for this database, and append them to outbuf.
3122  */
3123 static void
3125  const char *dbname, Oid dboid)
3126 {
3127  PGconn *conn = GetConnection(AH);
3129  PGresult *res;
3130  int count = 1;
3131 
3132  /*
3133  * First collect database-specific options. Pre-8.4 server versions lack
3134  * unnest(), so we do this the hard way by querying once per subscript.
3135  */
3136  for (;;)
3137  {
3138  if (AH->remoteVersion >= 90000)
3139  printfPQExpBuffer(buf, "SELECT setconfig[%d] FROM pg_db_role_setting "
3140  "WHERE setrole = 0 AND setdatabase = '%u'::oid",
3141  count, dboid);
3142  else
3143  printfPQExpBuffer(buf, "SELECT datconfig[%d] FROM pg_database WHERE oid = '%u'::oid", count, dboid);
3144 
3145  res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);
3146 
3147  if (PQntuples(res) == 1 &&
3148  !PQgetisnull(res, 0, 0))
3149  {
3150  makeAlterConfigCommand(conn, PQgetvalue(res, 0, 0),
3151  "DATABASE", dbname, NULL, NULL,
3152  outbuf);
3153  PQclear(res);
3154  count++;
3155  }
3156  else
3157  {
3158  PQclear(res);
3159  break;
3160  }
3161  }
3162 
3163  /* Now look for role-and-database-specific options */
3164  if (AH->remoteVersion >= 90000)
3165  {
3166  /* Here we can assume we have unnest() */
3167  printfPQExpBuffer(buf, "SELECT rolname, unnest(setconfig) "
3168  "FROM pg_db_role_setting s, pg_roles r "
3169  "WHERE setrole = r.oid AND setdatabase = '%u'::oid",
3170  dboid);
3171 
3172  res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);
3173 
3174  if (PQntuples(res) > 0)
3175  {
3176  int i;
3177 
3178  for (i = 0; i < PQntuples(res); i++)
3179  makeAlterConfigCommand(conn, PQgetvalue(res, i, 1),
3180  "ROLE", PQgetvalue(res, i, 0),
3181  "DATABASE", dbname,
3182  outbuf);
3183  }
3184 
3185  PQclear(res);
3186  }
3187 
3188  destroyPQExpBuffer(buf);
3189 }
3190 
3191 /*
3192  * dumpEncoding: put the correct encoding into the archive
3193  */
3194 static void
3196 {
3197  const char *encname = pg_encoding_to_char(AH->encoding);
3199 
3200  pg_log_info("saving encoding = %s", encname);
3201 
3202  appendPQExpBufferStr(qry, "SET client_encoding = ");
3203  appendStringLiteralAH(qry, encname, AH);
3204  appendPQExpBufferStr(qry, ";\n");
3205 
3206  ArchiveEntry(AH, nilCatalogId, createDumpId(),
3207  ARCHIVE_OPTS(.tag = "ENCODING",
3208  .description = "ENCODING",
3209  .section = SECTION_PRE_DATA,
3210  .createStmt = qry->data));
3211 
3212  destroyPQExpBuffer(qry);
3213 }
3214 
3215 
3216 /*
3217  * dumpStdStrings: put the correct escape string behavior into the archive
3218  */
3219 static void
3221 {
3222  const char *stdstrings = AH->std_strings ? "on" : "off";
3224 
3225  pg_log_info("saving standard_conforming_strings = %s",
3226  stdstrings);
3227 
3228  appendPQExpBuffer(qry, "SET standard_conforming_strings = '%s';\n",
3229  stdstrings);
3230 
3231  ArchiveEntry(AH, nilCatalogId, createDumpId(),
3232  ARCHIVE_OPTS(.tag = "STDSTRINGS",
3233  .description = "STDSTRINGS",
3234  .section = SECTION_PRE_DATA,
3235  .createStmt = qry->data));
3236 
3237  destroyPQExpBuffer(qry);
3238 }
3239 
3240 /*
3241  * dumpSearchPath: record the active search_path in the archive
3242  */
3243 static void
3245 {
3247  PQExpBuffer path = createPQExpBuffer();
3248  PGresult *res;
3249  char **schemanames = NULL;
3250  int nschemanames = 0;
3251  int i;
3252 
3253  /*
3254  * We use the result of current_schemas(), not the search_path GUC,
3255  * because that might contain wildcards such as "$user", which won't
3256  * necessarily have the same value during restore. Also, this way avoids
3257  * listing schemas that may appear in search_path but not actually exist,
3258  * which seems like a prudent exclusion.
3259  */
3260  res = ExecuteSqlQueryForSingleRow(AH,
3261  "SELECT pg_catalog.current_schemas(false)");
3262 
3263  if (!parsePGArray(PQgetvalue(res, 0, 0), &schemanames, &nschemanames))
3264  fatal("could not parse result of current_schemas()");
3265 
3266  /*
3267  * We use set_config(), not a simple "SET search_path" command, because
3268  * the latter has less-clean behavior if the search path is empty. While
3269  * that's likely to get fixed at some point, it seems like a good idea to
3270  * be as backwards-compatible as possible in what we put into archives.
3271  */
3272  for (i = 0; i < nschemanames; i++)
3273  {
3274  if (i > 0)
3275  appendPQExpBufferStr(path, ", ");
3276  appendPQExpBufferStr(path, fmtId(schemanames[i]));
3277  }
3278 
3279  appendPQExpBufferStr(qry, "SELECT pg_catalog.set_config('search_path', ");
3280  appendStringLiteralAH(qry, path->data, AH);
3281  appendPQExpBufferStr(qry, ", false);\n");
3282 
3283  pg_log_info("saving search_path = %s", path->data);
3284 
3285  ArchiveEntry(AH, nilCatalogId, createDumpId(),
3286  ARCHIVE_OPTS(.tag = "SEARCHPATH",
3287  .description = "SEARCHPATH",
3288  .section = SECTION_PRE_DATA,
3289  .createStmt = qry->data));
3290 
3291  /* Also save it in AH->searchpath, in case we're doing plain text dump */
3292  AH->searchpath = pg_strdup(qry->data);
3293 
3294  if (schemanames)
3295  free(schemanames);
3296  PQclear(res);
3297  destroyPQExpBuffer(qry);
3298  destroyPQExpBuffer(path);
3299 }
3300 
3301 
/*
 * getBlobs:
 *	Collect schema-level data about large objects
 *
 * Builds one BlobInfo (DO_BLOB) dumpable object per large object, recording
 * owner and ACL data where the server version provides it, plus a single
 * placeholder DO_BLOB_DATA object standing in for all blob data.
 */
static void
getBlobs(Archive *fout)
{
	DumpOptions *dopt = fout->dopt;
	PQExpBuffer blobQry = createPQExpBuffer();
	BlobInfo   *binfo;
	DumpableObject *bdata;
	PGresult   *res;
	int			ntups;
	int			i;
	int			i_oid;
	int			i_lomowner;
	int			i_lomacl;
	int			i_rlomacl;
	int			i_initlomacl;
	int			i_initrlomacl;

	pg_log_info("reading large objects");

	/* Fetch BLOB OIDs, and owner/ACL data if >= 9.0 */
	if (fout->remoteVersion >= 90600)
	{
		/* 9.6+: also account for initial privileges via pg_init_privs */
		PQExpBuffer acl_subquery = createPQExpBuffer();
		PQExpBuffer racl_subquery = createPQExpBuffer();
		PQExpBuffer init_acl_subquery = createPQExpBuffer();
		PQExpBuffer init_racl_subquery = createPQExpBuffer();

		buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
						init_racl_subquery, "l.lomacl", "l.lomowner", "'L'",
						dopt->binary_upgrade);

		appendPQExpBuffer(blobQry,
						  "SELECT l.oid, (%s l.lomowner) AS rolname, "
						  "%s AS lomacl, "
						  "%s AS rlomacl, "
						  "%s AS initlomacl, "
						  "%s AS initrlomacl "
						  "FROM pg_largeobject_metadata l "
						  "LEFT JOIN pg_init_privs pip ON "
						  "(l.oid = pip.objoid "
						  "AND pip.classoid = 'pg_largeobject'::regclass "
						  "AND pip.objsubid = 0) ",
						  username_subquery,
						  acl_subquery->data,
						  racl_subquery->data,
						  init_acl_subquery->data,
						  init_racl_subquery->data);

		destroyPQExpBuffer(acl_subquery);
		destroyPQExpBuffer(racl_subquery);
		destroyPQExpBuffer(init_acl_subquery);
		destroyPQExpBuffer(init_racl_subquery);
	}
	else if (fout->remoteVersion >= 90000)
		/* 9.0-9.5: owner/ACL exist, but there is no pg_init_privs */
		appendPQExpBuffer(blobQry,
						  "SELECT oid, (%s lomowner) AS rolname, lomacl, "
						  "NULL AS rlomacl, NULL AS initlomacl, "
						  "NULL AS initrlomacl "
						  " FROM pg_largeobject_metadata",
						  username_subquery);
	else
		/* Pre-9.0: no pg_largeobject_metadata; no owner or ACL available */
		appendPQExpBufferStr(blobQry,
							 "SELECT DISTINCT loid AS oid, "
							 "NULL::name AS rolname, NULL::oid AS lomacl, "
							 "NULL::oid AS rlomacl, NULL::oid AS initlomacl, "
							 "NULL::oid AS initrlomacl "
							 " FROM pg_largeobject");

	res = ExecuteSqlQuery(fout, blobQry->data, PGRES_TUPLES_OK);

	i_oid = PQfnumber(res, "oid");
	i_lomowner = PQfnumber(res, "rolname");
	i_lomacl = PQfnumber(res, "lomacl");
	i_rlomacl = PQfnumber(res, "rlomacl");
	i_initlomacl = PQfnumber(res, "initlomacl");
	i_initrlomacl = PQfnumber(res, "initrlomacl");

	ntups = PQntuples(res);

	/*
	 * Each large object has its own BLOB archive entry.
	 */
	binfo = (BlobInfo *) pg_malloc(ntups * sizeof(BlobInfo));

	for (i = 0; i < ntups; i++)
	{
		binfo[i].dobj.objType = DO_BLOB;
		binfo[i].dobj.catId.tableoid = LargeObjectRelationId;
		binfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
		AssignDumpId(&binfo[i].dobj);

		/* A blob's "name" is just its OID rendered as a string */
		binfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oid));
		binfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_lomowner));
		binfo[i].blobacl = pg_strdup(PQgetvalue(res, i, i_lomacl));
		binfo[i].rblobacl = pg_strdup(PQgetvalue(res, i, i_rlomacl));
		binfo[i].initblobacl = pg_strdup(PQgetvalue(res, i, i_initlomacl));
		binfo[i].initrblobacl = pg_strdup(PQgetvalue(res, i, i_initrlomacl));

		/* If all ACL columns are NULL, there is no ACL component to dump */
		if (PQgetisnull(res, i, i_lomacl) &&
			PQgetisnull(res, i, i_rlomacl) &&
			PQgetisnull(res, i, i_initlomacl) &&
			PQgetisnull(res, i, i_initrlomacl))
			binfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;

		/*
		 * In binary-upgrade mode for blobs, we do *not* dump out the blob
		 * data, as it will be copied by pg_upgrade, which simply copies the
		 * pg_largeobject table. We *do* however dump out anything but the
		 * data, as pg_upgrade copies just pg_largeobject, but not
		 * pg_largeobject_metadata, after the dump is restored.
		 */
		if (dopt->binary_upgrade)
			binfo[i].dobj.dump &= ~DUMP_COMPONENT_DATA;
	}

	/*
	 * If we have any large objects, a "BLOBS" archive entry is needed. This
	 * is just a placeholder for sorting; it carries no data now.
	 */
	if (ntups > 0)
	{
		bdata = (DumpableObject *) pg_malloc(sizeof(DumpableObject));
		bdata->objType = DO_BLOB_DATA;
		bdata->catId = nilCatalogId;
		AssignDumpId(bdata);
		bdata->name = pg_strdup("BLOBS");
	}

	PQclear(res);
	destroyPQExpBuffer(blobQry);
}
3437 
3438 /*
3439  * dumpBlob
3440  *
3441  * dump the definition (metadata) of the given large object
3442  */
3443 static void
3444 dumpBlob(Archive *fout, BlobInfo *binfo)
3445 {
3446  PQExpBuffer cquery = createPQExpBuffer();
3447  PQExpBuffer dquery = createPQExpBuffer();
3448 
3449  appendPQExpBuffer(cquery,
3450  "SELECT pg_catalog.lo_create('%s');\n",
3451  binfo->dobj.name);
3452 
3453  appendPQExpBuffer(dquery,
3454  "SELECT pg_catalog.lo_unlink('%s');\n",
3455  binfo->dobj.name);
3456 
3457  if (binfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
3458  ArchiveEntry(fout, binfo->dobj.catId, binfo->dobj.dumpId,
3459  ARCHIVE_OPTS(.tag = binfo->dobj.name,
3460  .owner = binfo->rolname,
3461  .description = "BLOB",
3462  .section = SECTION_PRE_DATA,
3463  .createStmt = cquery->data,
3464  .dropStmt = dquery->data));
3465 
3466  /* Dump comment if any */
3467  if (binfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3468  dumpComment(fout, "LARGE OBJECT", binfo->dobj.name,
3469  NULL, binfo->rolname,
3470  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3471 
3472  /* Dump security label if any */
3473  if (binfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3474  dumpSecLabel(fout, "LARGE OBJECT", binfo->dobj.name,
3475  NULL, binfo->rolname,
3476  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3477 
3478  /* Dump ACL if any */
3479  if (binfo->blobacl && (binfo->dobj.dump & DUMP_COMPONENT_ACL))
3480  dumpACL(fout, binfo->dobj.catId, binfo->dobj.dumpId, "LARGE OBJECT",
3481  binfo->dobj.name, NULL,
3482  NULL, binfo->rolname, binfo->blobacl, binfo->rblobacl,
3483  binfo->initblobacl, binfo->initrblobacl);
3484 
3485  destroyPQExpBuffer(cquery);
3486  destroyPQExpBuffer(dquery);
3487 }
3488 
3489 /*
3490  * dumpBlobs:
3491  * dump the data contents of all large objects
3492  */
3493 static int
3494 dumpBlobs(Archive *fout, void *arg)
3495 {
3496  const char *blobQry;
3497  const char *blobFetchQry;
3498  PGconn *conn = GetConnection(fout);
3499  PGresult *res;
3500  char buf[LOBBUFSIZE];
3501  int ntups;
3502  int i;
3503  int cnt;
3504 
3505  pg_log_info("saving large objects");
3506 
3507  /*
3508  * Currently, we re-fetch all BLOB OIDs using a cursor. Consider scanning
3509  * the already-in-memory dumpable objects instead...
3510  */
3511  if (fout->remoteVersion >= 90000)
3512  blobQry =
3513  "DECLARE bloboid CURSOR FOR "
3514  "SELECT oid FROM pg_largeobject_metadata ORDER BY 1";
3515  else
3516  blobQry =
3517  "DECLARE bloboid CURSOR FOR "
3518  "SELECT DISTINCT loid FROM pg_largeobject ORDER BY 1";
3519 
3520  ExecuteSqlStatement(fout, blobQry);
3521 
3522  /* Command to fetch from cursor */
3523  blobFetchQry = "FETCH 1000 IN bloboid";
3524 
3525  do
3526  {
3527  /* Do a fetch */
3528  res = ExecuteSqlQuery(fout, blobFetchQry, PGRES_TUPLES_OK);
3529 
3530  /* Process the tuples, if any */
3531  ntups = PQntuples(res);
3532  for (i = 0; i < ntups; i++)
3533  {
3534  Oid blobOid;
3535  int loFd;
3536 
3537  blobOid = atooid(PQgetvalue(res, i, 0));
3538  /* Open the BLOB */
3539  loFd = lo_open(conn, blobOid, INV_READ);
3540  if (loFd == -1)
3541  fatal("could not open large object %u: %s",
3542  blobOid, PQerrorMessage(conn));
3543 
3544  StartBlob(fout, blobOid);
3545 
3546  /* Now read it in chunks, sending data to archive */
3547  do
3548  {
3549  cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
3550  if (cnt < 0)
3551  fatal("error reading large object %u: %s",
3552  blobOid, PQerrorMessage(conn));
3553 
3554  WriteData(fout, buf, cnt);
3555  } while (cnt > 0);
3556 
3557  lo_close(conn, loFd);
3558 
3559  EndBlob(fout, blobOid);
3560  }
3561 
3562  PQclear(res);
3563  } while (ntups > 0);
3564 
3565  return 1;
3566 }
3567 
/*
 * getPolicies
 *	  get information about policies on a dumpable table.
 *
 * For each table being dumped, collects its pg_policy rows into PolicyInfo
 * dumpable objects.  A table with row security enabled additionally gets a
 * PolicyInfo with null polname, representing the RLS-enabled state itself.
 * No-op on servers older than 9.5, which lack row-level security.
 */
void
getPolicies(Archive *fout, TableInfo tblinfo[], int numTables)
{
	PQExpBuffer query;
	PGresult   *res;
	PolicyInfo *polinfo;
	int			i_oid;
	int			i_tableoid;
	int			i_polname;
	int			i_polcmd;
	int			i_polpermissive;
	int			i_polroles;
	int			i_polqual;
	int			i_polwithcheck;
	int			i,
				j,
				ntups;

	/* Row-level security first appeared in 9.5 */
	if (fout->remoteVersion < 90500)
		return;

	query = createPQExpBuffer();

	for (i = 0; i < numTables; i++)
	{
		TableInfo  *tbinfo = &tblinfo[i];

		/* Ignore row security on tables not to be dumped */
		if (!(tbinfo->dobj.dump & DUMP_COMPONENT_POLICY))
			continue;

		pg_log_info("reading row security enabled for table \"%s.%s\"",
					tbinfo->dobj.namespace->dobj.name,
					tbinfo->dobj.name);

		/*
		 * Get row security enabled information for the table. We represent
		 * RLS being enabled on a table by creating a PolicyInfo object with
		 * null polname.
		 */
		if (tbinfo->rowsec)
		{
			/*
			 * Note: use tableoid 0 so that this object won't be mistaken for
			 * something that pg_depend entries apply to.
			 */
			polinfo = pg_malloc(sizeof(PolicyInfo));
			polinfo->dobj.objType = DO_POLICY;
			polinfo->dobj.catId.tableoid = 0;
			polinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
			AssignDumpId(&polinfo->dobj);
			polinfo->dobj.namespace = tbinfo->dobj.namespace;
			polinfo->dobj.name = pg_strdup(tbinfo->dobj.name);
			polinfo->poltable = tbinfo;
			polinfo->polname = NULL;
			polinfo->polcmd = '\0';
			polinfo->polpermissive = 0;
			polinfo->polroles = NULL;
			polinfo->polqual = NULL;
			polinfo->polwithcheck = NULL;
		}

		pg_log_info("reading policies for table \"%s.%s\"",
					tbinfo->dobj.namespace->dobj.name,
					tbinfo->dobj.name);

		resetPQExpBuffer(query);

		/*
		 * Get the policies for the table.  pg_policy.polpermissive was added
		 * in v10; older servers have only permissive policies, so fake 't'.
		 */
		if (fout->remoteVersion >= 100000)
			appendPQExpBuffer(query,
							  "SELECT oid, tableoid, pol.polname, pol.polcmd, pol.polpermissive, "
							  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
							  "   pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
							  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
							  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
							  "FROM pg_catalog.pg_policy pol "
							  "WHERE polrelid = '%u'",
							  tbinfo->dobj.catId.oid);
		else
			appendPQExpBuffer(query,
							  "SELECT oid, tableoid, pol.polname, pol.polcmd, 't' as polpermissive, "
							  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
							  "   pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
							  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
							  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
							  "FROM pg_catalog.pg_policy pol "
							  "WHERE polrelid = '%u'",
							  tbinfo->dobj.catId.oid);
		res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);

		ntups = PQntuples(res);

		if (ntups == 0)
		{
			/*
			 * No explicit policies to handle (only the default-deny policy,
			 * which is handled as part of the table definition).  Clean up
			 * and return.
			 */
			PQclear(res);
			continue;
		}

		i_oid = PQfnumber(res, "oid");
		i_tableoid = PQfnumber(res, "tableoid");
		i_polname = PQfnumber(res, "polname");
		i_polcmd = PQfnumber(res, "polcmd");
		i_polpermissive = PQfnumber(res, "polpermissive");
		i_polroles = PQfnumber(res, "polroles");
		i_polqual = PQfnumber(res, "polqual");
		i_polwithcheck = PQfnumber(res, "polwithcheck");

		polinfo = pg_malloc(ntups * sizeof(PolicyInfo));

		for (j = 0; j < ntups; j++)
		{
			polinfo[j].dobj.objType = DO_POLICY;
			polinfo[j].dobj.catId.tableoid =
				atooid(PQgetvalue(res, j, i_tableoid));
			polinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
			AssignDumpId(&polinfo[j].dobj);
			polinfo[j].dobj.namespace = tbinfo->dobj.namespace;
			polinfo[j].poltable = tbinfo;
			polinfo[j].polname = pg_strdup(PQgetvalue(res, j, i_polname));
			polinfo[j].dobj.name = pg_strdup(polinfo[j].polname);

			/* polcmd is a single character: '*', 'r', 'a', 'w', or 'd' */
			polinfo[j].polcmd = *(PQgetvalue(res, j, i_polcmd));
			polinfo[j].polpermissive = *(PQgetvalue(res, j, i_polpermissive)) == 't';

			/* NULL polroles means the policy applies to PUBLIC */
			if (PQgetisnull(res, j, i_polroles))
				polinfo[j].polroles = NULL;
			else
				polinfo[j].polroles = pg_strdup(PQgetvalue(res, j, i_polroles));

			if (PQgetisnull(res, j, i_polqual))
				polinfo[j].polqual = NULL;
			else
				polinfo[j].polqual = pg_strdup(PQgetvalue(res, j, i_polqual));

			if (PQgetisnull(res, j, i_polwithcheck))
				polinfo[j].polwithcheck = NULL;
			else
				polinfo[j].polwithcheck
					= pg_strdup(PQgetvalue(res, j, i_polwithcheck));
		}
		PQclear(res);
	}
	destroyPQExpBuffer(query);
}
3722 
3723 /*
3724  * dumpPolicy
3725  * dump the definition of the given policy
3726  */
3727 static void
3729 {
3730  DumpOptions *dopt = fout->dopt;
3731  TableInfo *tbinfo = polinfo->poltable;
3732  PQExpBuffer query;
3733  PQExpBuffer delqry;
3734  PQExpBuffer polprefix;
3735  char *qtabname;
3736  const char *cmd;
3737  char *tag;
3738 
3739  if (dopt->dataOnly)
3740  return;
3741 
3742  /*
3743  * If polname is NULL, then this record is just indicating that ROW LEVEL
3744  * SECURITY is enabled for the table. Dump as ALTER TABLE <table> ENABLE
3745  * ROW LEVEL SECURITY.
3746  */
3747  if (polinfo->polname == NULL)
3748  {
3749  query = createPQExpBuffer();
3750 
3751  appendPQExpBuffer(query, "ALTER TABLE %s ENABLE ROW LEVEL SECURITY;",
3752  fmtQualifiedDumpable(tbinfo));
3753 
3754  /*
3755  * We must emit the ROW SECURITY object's dependency on its table
3756  * explicitly, because it will not match anything in pg_depend (unlike
3757  * the case for other PolicyInfo objects).
3758  */
3759  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3760  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3761  ARCHIVE_OPTS(.tag = polinfo->dobj.name,
3762  .namespace = polinfo->dobj.namespace->dobj.name,
3763  .owner = tbinfo->rolname,
3764  .description = "ROW SECURITY",
3765  .section = SECTION_POST_DATA,
3766  .createStmt = query->data,
3767  .deps = &(tbinfo->dobj.dumpId),
3768  .nDeps = 1));
3769 
3770  destroyPQExpBuffer(query);
3771  return;
3772  }
3773 
3774  if (polinfo->polcmd == '*')
3775  cmd = "";
3776  else if (polinfo->polcmd == 'r')
3777  cmd = " FOR SELECT";
3778  else if (polinfo->polcmd == 'a')
3779  cmd = " FOR INSERT";
3780  else if (polinfo->polcmd == 'w')
3781  cmd = " FOR UPDATE";
3782  else if (polinfo->polcmd == 'd')
3783  cmd = " FOR DELETE";
3784  else
3785  {
3786  pg_log_error("unexpected policy command type: %c",
3787  polinfo->polcmd);
3788  exit_nicely(1);
3789  }
3790 
3791  query = createPQExpBuffer();
3792  delqry = createPQExpBuffer();
3793  polprefix = createPQExpBuffer();
3794 
3795  qtabname = pg_strdup(fmtId(tbinfo->dobj.name));
3796 
3797  appendPQExpBuffer(query, "CREATE POLICY %s", fmtId(polinfo->polname));
3798 
3799  appendPQExpBuffer(query, " ON %s%s%s", fmtQualifiedDumpable(tbinfo),
3800  !polinfo->polpermissive ? " AS RESTRICTIVE" : "", cmd);
3801 
3802  if (polinfo->polroles != NULL)
3803  appendPQExpBuffer(query, " TO %s", polinfo->polroles);
3804 
3805  if (polinfo->polqual != NULL)
3806  appendPQExpBuffer(query, " USING (%s)", polinfo->polqual);
3807 
3808  if (polinfo->polwithcheck != NULL)
3809  appendPQExpBuffer(query, " WITH CHECK (%s)", polinfo->polwithcheck);
3810 
3811  appendPQExpBufferStr(query, ";\n");
3812 
3813  appendPQExpBuffer(delqry, "DROP POLICY %s", fmtId(polinfo->polname));
3814  appendPQExpBuffer(delqry, " ON %s;\n", fmtQualifiedDumpable(tbinfo));
3815 
3816  appendPQExpBuffer(polprefix, "POLICY %s ON",
3817  fmtId(polinfo->polname));
3818 
3819  tag = psprintf("%s %s", tbinfo->dobj.name, polinfo->dobj.name);
3820 
3821  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3822  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3823  ARCHIVE_OPTS(.tag = tag,
3824  .namespace = polinfo->dobj.namespace->dobj.name,
3825  .owner = tbinfo->rolname,
3826  .description = "POLICY",
3827  .section = SECTION_POST_DATA,
3828  .createStmt = query->data,
3829  .dropStmt = delqry->data));
3830 
3831  if (polinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3832  dumpComment(fout, polprefix->data, qtabname,
3833  tbinfo->dobj.namespace->dobj.name, tbinfo->rolname,
3834  polinfo->dobj.catId, 0, polinfo->dobj.dumpId);
3835 
3836  free(tag);
3837  destroyPQExpBuffer(query);
3838  destroyPQExpBuffer(delqry);
3839  destroyPQExpBuffer(polprefix);
3840  free(qtabname);
3841 }
3842 
3843 /*
3844  * getPublications
3845  * get information about publications
3846  */
3847 void
3849 {
3850  DumpOptions *dopt = fout->dopt;
3851  PQExpBuffer query;
3852  PGresult *res;
3853  PublicationInfo *pubinfo;
3854  int i_tableoid;
3855  int i_oid;
3856  int i_pubname;
3857  int i_rolname;
3858  int i_puballtables;
3859  int i_pubinsert;
3860  int i_pubupdate;
3861  int i_pubdelete;
3862  int i_pubtruncate;
3863  int i_pubviaroot;
3864  int i,
3865  ntups;
3866 
3867  if (dopt->no_publications || fout->remoteVersion < 100000)
3868  return;
3869 
3870  query = createPQExpBuffer();
3871 
3872  resetPQExpBuffer(query);
3873 
3874  /* Get the publications. */
3875  if (fout->remoteVersion >= 130000)
3876  appendPQExpBuffer(query,
3877  "SELECT p.tableoid, p.oid, p.pubname, "
3878  "(%s p.pubowner) AS rolname, "
3879  "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, p.pubtruncate, p.pubviaroot "
3880  "FROM pg_publication p",
3882  else if (fout->remoteVersion >= 110000)
3883  appendPQExpBuffer(query,
3884  "SELECT p.tableoid, p.oid, p.pubname, "
3885  "(%s p.pubowner) AS rolname, "
3886  "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, p.pubtruncate, false AS pubviaroot "
3887  "FROM pg_publication p",
3889  else
3890  appendPQExpBuffer(query,
3891  "SELECT p.tableoid, p.oid, p.pubname, "
3892  "(%s p.pubowner) AS rolname, "
3893  "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, false AS pubtruncate, false AS pubviaroot "
3894  "FROM pg_publication p",
3896 
3897  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3898 
3899  ntups = PQntuples(res);
3900 
3901  i_tableoid = PQfnumber(res, "tableoid");
3902  i_oid = PQfnumber(res, "oid");
3903  i_pubname = PQfnumber(res, "pubname");
3904  i_rolname = PQfnumber(res, "rolname");
3905  i_puballtables = PQfnumber(res, "puballtables");
3906  i_pubinsert = PQfnumber(res, "pubinsert");
3907  i_pubupdate = PQfnumber(res, "pubupdate");
3908  i_pubdelete = PQfnumber(res, "pubdelete");
3909  i_pubtruncate = PQfnumber(res, "pubtruncate");
3910  i_pubviaroot = PQfnumber(res, "pubviaroot");
3911 
3912  pubinfo = pg_malloc(ntups * sizeof(PublicationInfo));
3913 
3914  for (i = 0; i < ntups; i++)
3915  {
3916  pubinfo[i].dobj.objType = DO_PUBLICATION;
3917  pubinfo[i].dobj.catId.tableoid =
3918  atooid(PQgetvalue(res, i, i_tableoid));
3919  pubinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
3920  AssignDumpId(&pubinfo[i].dobj);
3921  pubinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_pubname));
3922  pubinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
3923  pubinfo[i].puballtables =
3924  (strcmp(PQgetvalue(res, i, i_puballtables), "t") == 0);
3925  pubinfo[i].pubinsert =
3926  (strcmp(PQgetvalue(res, i, i_pubinsert), "t") == 0);
3927  pubinfo[i].pubupdate =
3928  (strcmp(PQgetvalue(res, i, i_pubupdate), "t") == 0);
3929  pubinfo[i].pubdelete =
3930  (strcmp(PQgetvalue(res, i, i_pubdelete), "t") == 0);
3931  pubinfo[i].pubtruncate =
3932  (strcmp(PQgetvalue(res, i, i_pubtruncate), "t") == 0);
3933  pubinfo[i].pubviaroot =
3934  (strcmp(PQgetvalue(res, i, i_pubviaroot), "t") == 0);
3935 
3936  if (strlen(pubinfo[i].rolname) == 0)
3937  pg_log_warning("owner of publication \"%s\" appears to be invalid",
3938  pubinfo[i].dobj.name);
3939 
3940  /* Decide whether we want to dump it */
3941  selectDumpableObject(&(pubinfo[i].dobj), fout);
3942  }
3943  PQclear(res);
3944 
3945  destroyPQExpBuffer(query);
3946 }
3947 
3948 /*
3949  * dumpPublication
3950  * dump the definition of the given publication
3951  */
3952 static void
3954 {
3955  PQExpBuffer delq;
3956  PQExpBuffer query;
3957  char *qpubname;
3958  bool first = true;
3959 
3960  if (!(pubinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3961  return;
3962 
3963  delq = createPQExpBuffer();
3964  query = createPQExpBuffer();
3965 
3966  qpubname = pg_strdup(fmtId(pubinfo->dobj.name));
3967 
3968  appendPQExpBuffer(delq, "DROP PUBLICATION %s;\n",
3969  qpubname);
3970 
3971  appendPQExpBuffer(query, "CREATE PUBLICATION %s",
3972  qpubname);
3973 
3974  if (pubinfo->puballtables)
3975  appendPQExpBufferStr(query, " FOR ALL TABLES");
3976 
3977  appendPQExpBufferStr(query, " WITH (publish = '");
3978  if (pubinfo->pubinsert)
3979  {
3980  appendPQExpBufferStr(query, "insert");
3981  first = false;
3982  }
3983 
3984  if (pubinfo->pubupdate)
3985  {
3986  if (!first)
3987  appendPQExpBufferStr(query, ", ");
3988 
3989  appendPQExpBufferStr(query, "update");
3990  first = false;
3991  }
3992 
3993  if (pubinfo->pubdelete)
3994  {
3995  if (!first)
3996  appendPQExpBufferStr(query, ", ");
3997 
3998  appendPQExpBufferStr(query, "delete");
3999  first = false;
4000  }
4001 
4002  if (pubinfo->pubtruncate)
4003  {
4004  if (!first)
4005  appendPQExpBufferStr(query, ", ");
4006 
4007  appendPQExpBufferStr(query, "truncate");
4008  first = false;
4009  }
4010 
4011  appendPQExpBufferStr(query, "'");
4012 
4013  if (pubinfo->pubviaroot)
4014  appendPQExpBufferStr(query, ", publish_via_partition_root = true");
4015 
4016  appendPQExpBufferStr(query, ");\n");
4017 
4018  ArchiveEntry(fout, pubinfo->dobj.catId, pubinfo->dobj.dumpId,
4019  ARCHIVE_OPTS(.tag = pubinfo->dobj.name,
4020  .owner = pubinfo->rolname,
4021  .description = "PUBLICATION",
4022  .section = SECTION_POST_DATA,
4023  .createStmt = query->data,
4024  .dropStmt = delq->data));
4025 
4026  if (pubinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
4027  dumpComment(fout, "PUBLICATION", qpubname,
4028  NULL, pubinfo->rolname,
4029  pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
4030 
4031  if (pubinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
4032  dumpSecLabel(fout, "PUBLICATION", qpubname,
4033  NULL, pubinfo->rolname,
4034  pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
4035 
4036  destroyPQExpBuffer(delq);
4037  destroyPQExpBuffer(query);
4038  free(qpubname);
4039 }
4040 
4041 /*
4042  * getPublicationTables
4043  * get information about publication membership for dumpable tables.
4044  */
4045 void
4047 {
4048  PQExpBuffer query;
4049  PGresult *res;
4050  PublicationRelInfo *pubrinfo;
4051  DumpOptions *dopt = fout->dopt;
4052  int i_tableoid;
4053  int i_oid;
4054  int i_pubname;
4055  int i,
4056  j,
4057  ntups;
4058 
4059  if (dopt->no_publications || fout->remoteVersion < 100000)
4060  return;
4061 
4062  query = createPQExpBuffer();
4063 
4064  for (i = 0; i < numTables; i++)
4065  {
4066  TableInfo *tbinfo = &tblinfo[i];
4067 
4068  /*
4069  * Only regular and partitioned tables can be added to publications.
4070  */
4071  if (tbinfo->relkind != RELKIND_RELATION &&
4072  tbinfo->relkind != RELKIND_PARTITIONED_TABLE)
4073  continue;
4074 
4075  /*
4076  * Ignore publication membership of tables whose definitions are not
4077  * to be dumped.
4078  */
4079  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
4080  continue;
4081 
4082  pg_log_info("reading publication membership for table \"%s.%s\"",
4083  tbinfo->dobj.namespace->dobj.name,
4084  tbinfo->dobj.name);
4085 
4086  resetPQExpBuffer(query);
4087 
4088  /* Get the publication membership for the table. */
4089  appendPQExpBuffer(query,
4090  "SELECT pr.tableoid, pr.oid, p.pubname "
4091  "FROM pg_publication_rel pr, pg_publication p "
4092  "WHERE pr.prrelid = '%u'"
4093  " AND p.oid = pr.prpubid",
4094  tbinfo->dobj.catId.oid);
4095  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4096 
4097  ntups = PQntuples(res);
4098 
4099  if (ntups == 0)
4100  {
4101  /*
4102  * Table is not member of any publications. Clean up and return.
4103  */
4104  PQclear(res);
4105  continue;
4106  }
4107 
4108  i_tableoid = PQfnumber(res, "tableoid");
4109  i_oid = PQfnumber(res, "oid");
4110  i_pubname = PQfnumber(res, "pubname");
4111 
4112  pubrinfo = pg_malloc(ntups * sizeof(PublicationRelInfo));
4113 
4114  for (j = 0; j < ntups; j++)
4115  {
4116  pubrinfo[j].dobj.objType = DO_PUBLICATION_REL;
4117  pubrinfo[j].dobj.catId.tableoid =
4118  atooid(PQgetvalue(res, j, i_tableoid));
4119  pubrinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
4120  AssignDumpId(&pubrinfo[j].dobj);
4121  pubrinfo[j].dobj.namespace = tbinfo->dobj.namespace;
4122  pubrinfo[j].dobj.name = tbinfo->dobj.name;
4123  pubrinfo[j].pubname = pg_strdup(PQgetvalue(res, j, i_pubname));
4124  pubrinfo[j].pubtable = tbinfo;
4125 
4126  /* Decide whether we want to dump it */
4127  selectDumpablePublicationTable(&(pubrinfo[j].dobj), fout);
4128  }
4129  PQclear(res);
4130  }
4131  destroyPQExpBuffer(query);
4132 }
4133 
4134 /*
4135  * dumpPublicationTable
4136  * dump the definition of the given publication table mapping
4137  */
4138 static void
4140 {
4141  TableInfo *tbinfo = pubrinfo->pubtable;
4142  PQExpBuffer query;
4143  char *tag;
4144 
4145  if (!(pubrinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
4146  return;
4147 
4148  tag = psprintf("%s %s", pubrinfo->pubname, tbinfo->dobj.name);
4149 
4150  query = createPQExpBuffer();
4151 
4152  appendPQExpBuffer(query, "ALTER PUBLICATION %s ADD TABLE ONLY",
4153  fmtId(pubrinfo->pubname));
4154  appendPQExpBuffer(query, " %s;\n",
4155  fmtQualifiedDumpable(tbinfo));
4156 
4157  /*
4158  * There is no point in creating drop query as the drop is done by table
4159  * drop.
4160  */
4161  ArchiveEntry(fout, pubrinfo->dobj.catId, pubrinfo->dobj.dumpId,
4162  ARCHIVE_OPTS(.tag = tag,
4163  .namespace = tbinfo->dobj.namespace->dobj.name,
4164  .description = "PUBLICATION TABLE",
4165  .section = SECTION_POST_DATA,
4166  .createStmt = query->data));
4167 
4168  free(tag);
4169  destroyPQExpBuffer(query);
4170 }
4171 
4172 /*
4173  * Is the currently connected user a superuser?
4174  */
4175 static bool
4177 {
4178  ArchiveHandle *AH = (ArchiveHandle *) fout;
4179  const char *val;
4180 
4181  val = PQparameterStatus(AH->connection, "is_superuser");
4182 
4183  if (val && strcmp(val, "on") == 0)
4184  return true;
4185 
4186  return false;
4187 }
4188 
4189 /*
4190  * getSubscriptions
4191  * get information about subscriptions
4192  */
4193 void
4195 {
4196  DumpOptions *dopt = fout->dopt;
4197  PQExpBuffer query;
4198  PGresult *res;
4199  SubscriptionInfo *subinfo;
4200  int i_tableoid;
4201  int i_oid;
4202  int i_subname;
4203  int i_rolname;
4204  int i_subconninfo;
4205  int i_subslotname;
4206  int i_subsynccommit;
4207  int i_subpublications;
4208  int i,
4209  ntups;
4210 
4211  if (dopt->no_subscriptions || fout->remoteVersion < 100000)
4212  return;
4213 
4214  if (!is_superuser(fout))
4215  {
4216  int n;
4217 
4218  res = ExecuteSqlQuery(fout,
4219  "SELECT count(*) FROM pg_subscription "
4220  "WHERE subdbid = (SELECT oid FROM pg_database"
4221  " WHERE datname = current_database())",
4222  PGRES_TUPLES_OK);
4223  n = atoi(PQgetvalue(res, 0, 0));
4224  if (n > 0)
4225  pg_log_warning("subscriptions not dumped because current user is not a superuser");
4226  PQclear(res);
4227  return;
4228  }
4229 
4230  query = createPQExpBuffer();
4231 
4232  resetPQExpBuffer(query);
4233 
4234  /* Get the subscriptions in current database. */
4235  appendPQExpBuffer(query,
4236  "SELECT s.tableoid, s.oid, s.subname,"
4237  "(%s s.subowner) AS rolname, "
4238  " s.subconninfo, s.subslotname, s.subsynccommit, "
4239  " s.subpublications "
4240  "FROM pg_subscription s "
4241  "WHERE s.subdbid = (SELECT oid FROM pg_database"
4242  " WHERE datname = current_database())",
4244  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4245 
4246  ntups = PQntuples(res);
4247 
4248  i_tableoid = PQfnumber(res, "tableoid");
4249  i_oid = PQfnumber(res, "oid");
4250  i_subname = PQfnumber(res, "subname");
4251  i_rolname = PQfnumber(res, "rolname");
4252  i_subconninfo = PQfnumber(res, "subconninfo");
4253  i_subslotname = PQfnumber(res, "subslotname");
4254  i_subsynccommit = PQfnumber(res, "subsynccommit");
4255  i_subpublications = PQfnumber(res, "subpublications");
4256 
4257  subinfo = pg_malloc(ntups * sizeof(SubscriptionInfo));
4258 
4259  for (i = 0; i < ntups; i++)
4260  {
4261  subinfo[i].dobj.objType = DO_SUBSCRIPTION;
4262  subinfo[i].dobj.catId.tableoid =
4263  atooid(PQgetvalue(res, i, i_tableoid));
4264  subinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4265  AssignDumpId(&subinfo[i].dobj);
4266  subinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_subname));
4267  subinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4268  subinfo[i].subconninfo = pg_strdup(PQgetvalue(res, i, i_subconninfo));
4269  if (PQgetisnull(res, i, i_subslotname))
4270  subinfo[i].subslotname = NULL;
4271  else
4272  subinfo[i].subslotname = pg_strdup(PQgetvalue(res, i, i_subslotname));
4273  subinfo[i].subsynccommit =
4274  pg_strdup(PQgetvalue(res, i, i_subsynccommit));
4275  subinfo[i].subpublications =
4276  pg_strdup(PQgetvalue(res, i, i_subpublications));
4277 
4278  if (strlen(subinfo[i].rolname) == 0)
4279  pg_log_warning("owner of subscription \"%s\" appears to be invalid",
4280  subinfo[i].dobj.name);
4281 
4282  /* Decide whether we want to dump it */
4283  selectDumpableObject(&(subinfo[i].dobj), fout);
4284  }
4285  PQclear(res);
4286 
4287  destroyPQExpBuffer(query);
4288 }
4289 
4290 /*
4291  * dumpSubscription
4292  * dump the definition of the given subscription
4293  */
4294 static void
4296 {
4297  PQExpBuffer delq;
4298  PQExpBuffer query;
4299  PQExpBuffer publications;
4300  char *qsubname;
4301  char **pubnames = NULL;
4302  int npubnames = 0;
4303  int i;
4304 
4305  if (!(subinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
4306  return;
4307 
4308  delq = createPQExpBuffer();
4309  query = createPQExpBuffer();
4310 
4311  qsubname = pg_strdup(fmtId(subinfo->dobj.name));
4312 
4313  appendPQExpBuffer(delq, "DROP SUBSCRIPTION %s;\n",
4314  qsubname);
4315 
4316  appendPQExpBuffer(query, "CREATE SUBSCRIPTION %s CONNECTION ",
4317  qsubname);
4318  appendStringLiteralAH(query, subinfo->subconninfo, fout);
4319 
4320  /* Build list of quoted publications and append them to query. */
4321  if (!parsePGArray(subinfo->subpublications, &pubnames, &npubnames))
4322  {
4323  pg_log_warning("could not parse subpublications array");
4324  if (pubnames)
4325  free(pubnames);
4326  pubnames = NULL;
4327  npubnames = 0;
4328  }
4329 
4330  publications = createPQExpBuffer();
4331  for (i = 0; i < npubnames; i++)
4332  {
4333  if (i > 0)
4334  appendPQExpBufferStr(publications, ", ");
4335 
4336  appendPQExpBufferStr(publications, fmtId(pubnames[i]));
4337  }
4338 
4339  appendPQExpBuffer(query, " PUBLICATION %s WITH (connect = false, slot_name = ", publications->data);
4340  if (subinfo->subslotname)
4341  appendStringLiteralAH(query, subinfo->subslotname, fout);
4342  else
4343  appendPQExpBufferStr(query, "NONE");
4344 
4345  if (strcmp(subinfo->subsynccommit, "off") != 0)
4346  appendPQExpBuffer(query, ", synchronous_commit = %s", fmtId(subinfo->subsynccommit));
4347 
4348  appendPQExpBufferStr(query, ");\n");
4349 
4350  ArchiveEntry(fout, subinfo->dobj.catId, subinfo->dobj.dumpId,
4351  ARCHIVE_OPTS(.tag = subinfo->dobj.name,
4352  .owner = subinfo->rolname,
4353  .description = "SUBSCRIPTION",
4354  .section = SECTION_POST_DATA,
4355  .createStmt = query->data,
4356  .dropStmt = delq->data));
4357 
4358  if (subinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
4359  dumpComment(fout, "SUBSCRIPTION", qsubname,
4360  NULL, subinfo->rolname,
4361  subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
4362 
4363  if (subinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
4364  dumpSecLabel(fout, "SUBSCRIPTION", qsubname,
4365  NULL, subinfo->rolname,
4366  subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
4367 
4368  destroyPQExpBuffer(publications);
4369  if (pubnames)
4370  free(pubnames);
4371 
4372  destroyPQExpBuffer(delq);
4373  destroyPQExpBuffer(query);
4374  free(qsubname);
4375 }
4376 
4377 /*
4378  * Given a "create query", append as many ALTER ... DEPENDS ON EXTENSION as
4379  * the object needs.
4380  */
4381 static void
4383  PQExpBuffer create,
4384  DumpableObject *dobj,
4385  const char *catalog,
4386  const char *keyword,
4387  const char *objname)
4388 {
4389  if (dobj->depends_on_ext)
4390  {
4391  char *nm;
4392  PGresult *res;
4393  PQExpBuffer query;
4394  int ntups;
4395  int i_extname;
4396  int i;
4397 
4398  /* dodge fmtId() non-reentrancy */
4399  nm = pg_strdup(objname);
4400 
4401  query = createPQExpBuffer();
4402  appendPQExpBuffer(query,
4403  "SELECT e.extname "
4404  "FROM pg_catalog.pg_depend d, pg_catalog.pg_extension e "
4405  "WHERE d.refobjid = e.oid AND classid = '%s'::pg_catalog.regclass "
4406  "AND objid = '%u'::pg_catalog.oid AND deptype = 'x' "
4407  "AND refclassid = 'pg_catalog.pg_extension'::pg_catalog.regclass",
4408  catalog,
4409  dobj->catId.oid);
4410  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4411  ntups = PQntuples(res);
4412  i_extname = PQfnumber(res, "extname");
4413  for (i = 0; i < ntups; i++)
4414  {
4415  appendPQExpBuffer(create, "ALTER %s %s DEPENDS ON EXTENSION %s;\n",
4416  keyword, nm,
4417  fmtId(PQgetvalue(res, i, i_extname)));
4418  }
4419 
4420  PQclear(res);
4421  destroyPQExpBuffer(query);
4422  pg_free(nm);
4423  }
4424 }
4425 
4426 
4427 static void
4429  PQExpBuffer upgrade_buffer,
4430  Oid pg_type_oid,
4431  bool force_array_type)
4432 {
4433  PQExpBuffer upgrade_query = createPQExpBuffer();
4434  PGresult *res;
4435  Oid pg_type_array_oid;
4436 
4437  appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type oid\n");
4438  appendPQExpBuffer(upgrade_buffer,
4439  "SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4440  pg_type_oid);
4441 
4442  /* we only support old >= 8.3 for binary upgrades */
4443  appendPQExpBuffer(upgrade_query,
4444  "SELECT typarray "
4445  "FROM pg_catalog.pg_type "
4446  "WHERE oid = '%u'::pg_catalog.oid;",
4447  pg_type_oid);
4448 
4449  res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4450 
4451  pg_type_array_oid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typarray")));
4452 
4453  PQclear(res);
4454 
4455  if (!OidIsValid(pg_type_array_oid) && force_array_type)
4456  {
4457  /*
4458  * If the old version didn't assign an array type, but the new version
4459  * does, we must select an unused type OID to assign. This currently
4460  * only happens for domains, when upgrading pre-v11 to v11 and up.
4461  *
4462  * Note: local state here is kind of ugly, but we must have some,
4463  * since we mustn't choose the same unused OID more than once.
4464  */
4465  static Oid next_possible_free_oid = FirstNormalObjectId;
4466  bool is_dup;
4467 
4468  do
4469  {
4470  ++next_possible_free_oid;
4471  printfPQExpBuffer(upgrade_query,
4472  "SELECT EXISTS(SELECT 1 "
4473  "FROM pg_catalog.pg_type "
4474  "WHERE oid = '%u'::pg_catalog.oid);",
4475  next_possible_free_oid);
4476  res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4477  is_dup = (PQgetvalue(res, 0, 0)[0] == 't');
4478  PQclear(res);
4479  } while (is_dup);
4480 
4481  pg_type_array_oid = next_possible_free_oid;
4482  }
4483 
4484  if (OidIsValid(pg_type_array_oid))
4485  {
4486  appendPQExpBufferStr(upgrade_buffer,
4487  "\n-- For binary upgrade, must preserve pg_type array oid\n");
4488  appendPQExpBuffer(upgrade_buffer,
4489  "SELECT pg_catalog.binary_upgrade_set_next_array_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4490  pg_type_array_oid);
4491  }
4492 
4493  destroyPQExpBuffer(upgrade_query);
4494 }
4495 
4496 static bool
4498  PQExpBuffer upgrade_buffer,
4499  Oid pg_rel_oid)
4500 {
4501  PQExpBuffer upgrade_query = createPQExpBuffer();
4502  PGresult *upgrade_res;
4503  Oid pg_type_oid;
4504  bool toast_set = false;
4505 
4506  /*
4507  * We only support old >= 8.3 for binary upgrades.
4508  *
4509  * We purposefully ignore toast OIDs for partitioned tables; the reason is
4510  * that versions 10 and 11 have them, but 12 does not, so emitting them
4511  * causes the upgrade to fail.
4512  */
4513  appendPQExpBuffer(upgrade_query,
4514  "SELECT c.reltype AS crel, t.reltype AS trel "
4515  "FROM pg_catalog.pg_class c "
4516  "LEFT JOIN pg_catalog.pg_class t ON "
4517  " (c.reltoastrelid = t.oid AND c.relkind <> '%c') "
4518  "WHERE c.oid = '%u'::pg_catalog.oid;",
4519  RELKIND_PARTITIONED_TABLE, pg_rel_oid);
4520 
4521  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4522 
4523  pg_type_oid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "crel")));
4524 
4525  binary_upgrade_set_type_oids_by_type_oid(fout, upgrade_buffer,
4526  pg_type_oid, false);
4527 
4528  if (!PQgetisnull(upgrade_res, 0, PQfnumber(upgrade_res, "trel")))
4529  {
4530  /* Toast tables do not have pg_type array rows */
4531  Oid pg_type_toast_oid = atooid(PQgetvalue(upgrade_res, 0,
4532  PQfnumber(upgrade_res, "trel")));
4533 
4534  appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type toast oid\n");
4535  appendPQExpBuffer(upgrade_buffer,
4536  "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4537  pg_type_toast_oid);
4538 
4539  toast_set = true;
4540  }
4541 
4542  PQclear(upgrade_res);
4543  destroyPQExpBuffer(upgrade_query);
4544 
4545  return toast_set;
4546 }
4547 
4548 static void
4550  PQExpBuffer upgrade_buffer, Oid pg_class_oid,
4551  bool is_index)
4552 {
4553  PQExpBuffer upgrade_query = createPQExpBuffer();
4554  PGresult *upgrade_res;
4555  Oid pg_class_reltoastrelid;
4556  Oid pg_index_indexrelid;
4557 
4558  appendPQExpBuffer(upgrade_query,
4559  "SELECT c.reltoastrelid, i.indexrelid "
4560  "FROM pg_catalog.pg_class c LEFT JOIN "
4561  "pg_catalog.pg_index i ON (c.reltoastrelid = i.indrelid AND i.indisvalid) "
4562  "WHERE c.oid = '%u'::pg_catalog.oid;",
4563  pg_class_oid);
4564 
4565  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4566 
4567  pg_class_reltoastrelid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "reltoastrelid")));
4568  pg_index_indexrelid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "indexrelid")));
4569 
4570  appendPQExpBufferStr(upgrade_buffer,
4571  "\n-- For binary upgrade, must preserve pg_class oids\n");
4572 
4573  if (!is_index)
4574  {
4575  appendPQExpBuffer(upgrade_buffer,
4576  "SELECT pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('%u'::pg_catalog.oid);\n",
4577  pg_class_oid);
4578  /* only tables have toast tables, not indexes */
4579  if (OidIsValid(pg_class_reltoastrelid))
4580  {
4581  /*
4582  * One complexity is that the table definition might not require
4583  * the creation of a TOAST table, and the TOAST table might have
4584  * been created long after table creation, when the table was
4585  * loaded with wide data. By setting the TOAST oid we force
4586  * creation of the TOAST heap and TOAST index by the backend so we
4587  * can cleanly copy the files during binary upgrade.
4588  */
4589 
4590  appendPQExpBuffer(upgrade_buffer,
4591  "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('%u'::pg_catalog.oid);\n",
4592  pg_class_reltoastrelid);
4593 
4594  /* every toast table has an index */
4595  appendPQExpBuffer(upgrade_buffer,
4596  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
4597  pg_index_indexrelid);
4598  }
4599  }
4600  else
4601  appendPQExpBuffer(upgrade_buffer,
4602  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
4603  pg_class_oid);
4604 
4605  appendPQExpBufferChar(upgrade_buffer, '\n');
4606 
4607  PQclear(upgrade_res);
4608  destroyPQExpBuffer(upgrade_query);
4609 }
4610 
4611 /*
4612  * If the DumpableObject is a member of an extension, add a suitable
4613  * ALTER EXTENSION ADD command to the creation commands in upgrade_buffer.
4614  *
4615  * For somewhat historical reasons, objname should already be quoted,
4616  * but not objnamespace (if any).
4617  */
4618 static void
4620  DumpableObject *dobj,
4621  const char *objtype,
4622  const char *objname,
4623  const char *objnamespace)
4624 {
4625  DumpableObject *extobj = NULL;
4626  int i;
4627 
4628  if (!dobj->ext_member)
4629  return;
4630 
4631  /*
4632  * Find the parent extension. We could avoid this search if we wanted to
4633  * add a link field to DumpableObject, but the space costs of that would
4634  * be considerable. We assume that member objects could only have a
4635  * direct dependency on their own extension, not any others.
4636  */
4637  for (i = 0; i < dobj->nDeps; i++)
4638  {
4639  extobj = findObjectByDumpId(dobj->dependencies[i]);
4640  if (extobj && extobj->objType == DO_EXTENSION)
4641  break;
4642  extobj = NULL;
4643  }
4644  if (extobj == NULL)
4645  fatal("could not find parent extension for %s %s",
4646  objtype, objname);
4647 
4648  appendPQExpBufferStr(upgrade_buffer,
4649  "\n-- For binary upgrade, handle extension membership the hard way\n");
4650  appendPQExpBuffer(upgrade_buffer, "ALTER EXTENSION %s ADD %s ",
4651  fmtId(extobj->name),
4652  objtype);
4653  if (objnamespace && *objnamespace)
4654  appendPQExpBuffer(upgrade_buffer, "%s.", fmtId(objnamespace));
4655  appendPQExpBuffer(upgrade_buffer, "%s;\n", objname);
4656 }
4657 
4658 /*
4659  * getNamespaces:
4660  * read all namespaces in the system catalogs and return them in the
4661  * NamespaceInfo* structure
4662  *
4663  * numNamespaces is set to the number of namespaces read in
4664  */
4665 NamespaceInfo *
4667 {
4668  DumpOptions *dopt = fout->dopt;
4669  PGresult *res;
4670  int ntups;
4671  int i;
4672  PQExpBuffer query;
4673  NamespaceInfo *nsinfo;
4674  int i_tableoid;
4675  int i_oid;
4676  int i_nspname;
4677  int i_rolname;
4678  int i_nspacl;
4679  int i_rnspacl;
4680  int i_initnspacl;
4681  int i_initrnspacl;
4682 
4683  query = createPQExpBuffer();
4684 
4685  /*
4686  * we fetch all namespaces including system ones, so that every object we
4687  * read in can be linked to a containing namespace.
4688  */
4689  if (fout->remoteVersion >= 90600)
4690  {
4691  PQExpBuffer acl_subquery = createPQExpBuffer();
4692  PQExpBuffer racl_subquery = createPQExpBuffer();
4693  PQExpBuffer init_acl_subquery = createPQExpBuffer();
4694  PQExpBuffer init_racl_subquery = createPQExpBuffer();
4695 
4696  buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
4697  init_racl_subquery, "n.nspacl", "n.nspowner", "'n'",
4698  dopt->binary_upgrade);
4699 
4700  appendPQExpBuffer(query, "SELECT n.tableoid, n.oid, n.nspname, "
4701  "(%s nspowner) AS rolname, "
4702  "%s as nspacl, "
4703  "%s as rnspacl, "
4704  "%s as initnspacl, "
4705  "%s as initrnspacl "
4706  "FROM pg_namespace n "
4707  "LEFT JOIN pg_init_privs pip "
4708  "ON (n.oid = pip.objoid "
4709  "AND pip.classoid = 'pg_namespace'::regclass "
4710  "AND pip.objsubid = 0",
4712  acl_subquery->data,
4713  racl_subquery->data,
4714  init_acl_subquery->data,
4715  init_racl_subquery->data);
4716 
4717  appendPQExpBufferStr(query, ") ");
4718 
4719  destroyPQExpBuffer(acl_subquery);
4720  destroyPQExpBuffer(racl_subquery);
4721  destroyPQExpBuffer(init_acl_subquery);
4722  destroyPQExpBuffer(init_racl_subquery);
4723  }
4724  else
4725  appendPQExpBuffer(query, "SELECT tableoid, oid, nspname, "
4726  "(%s nspowner) AS rolname, "
4727  "nspacl, NULL as rnspacl, "
4728  "NULL AS initnspacl, NULL as initrnspacl "
4729  "FROM pg_namespace",
4731 
4732  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4733 
4734  ntups = PQntuples(res);
4735 
4736  nsinfo = (NamespaceInfo *) pg_malloc(ntups * sizeof(NamespaceInfo));
4737 
4738  i_tableoid = PQfnumber(res, "tableoid");
4739  i_oid = PQfnumber(res, "oid");
4740  i_nspname = PQfnumber(res, "nspname");
4741  i_rolname = PQfnumber(res, "rolname");
4742  i_nspacl = PQfnumber(res, "nspacl");
4743  i_rnspacl = PQfnumber(res, "rnspacl");
4744  i_initnspacl = PQfnumber(res, "initnspacl");
4745  i_initrnspacl = PQfnumber(res, "initrnspacl");
4746 
4747  for (i = 0; i < ntups; i++)
4748  {
4749  nsinfo[i].dobj.objType = DO_NAMESPACE;
4750  nsinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4751  nsinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4752  AssignDumpId(&nsinfo[i].dobj);
4753  nsinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_nspname));
4754  nsinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4755  nsinfo[i].nspacl = pg_strdup(PQgetvalue(res, i, i_nspacl));
4756  nsinfo[i].rnspacl = pg_strdup(PQgetvalue(res, i, i_rnspacl));
4757  nsinfo[i].initnspacl = pg_strdup(PQgetvalue(res, i, i_initnspacl));
4758  nsinfo[i].initrnspacl = pg_strdup(PQgetvalue(res, i, i_initrnspacl));
4759 
4760  /* Decide whether to dump this namespace */
4761  selectDumpableNamespace(&nsinfo[i], fout);
4762 
4763  /*
4764  * Do not try to dump ACL if the ACL is empty or the default.
4765  *
4766  * This is useful because, for some schemas/objects, the only
4767  * component we are going to try and dump is the ACL and if we can
4768  * remove that then 'dump' goes to zero/false and we don't consider
4769  * this object for dumping at all later on.
4770  */
4771  if (PQgetisnull(res, i, i_nspacl) && PQgetisnull(res, i, i_rnspacl) &&
4772  PQgetisnull(res, i, i_initnspacl) &&
4773  PQgetisnull(res, i, i_initrnspacl))
4774  nsinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4775 
4776  if (strlen(nsinfo[i].rolname) == 0)
4777  pg_log_warning("owner of schema \"%s\" appears to be invalid",
4778  nsinfo[i].dobj.name);
4779  }
4780 
4781  PQclear(res);
4782  destroyPQExpBuffer(query);
4783 
4784  *numNamespaces = ntups;
4785 
4786  return nsinfo;
4787 }
4788 
4789 /*
4790  * findNamespace:
4791  * given a namespace OID, look up the info read by getNamespaces
4792  */
4793 static NamespaceInfo *
4795 {
4796  NamespaceInfo *nsinfo;
4797 
4798  nsinfo = findNamespaceByOid(nsoid);
4799  if (nsinfo == NULL)
4800  fatal("schema with OID %u does not exist", nsoid);
4801  return nsinfo;
4802 }
4803 
4804 /*
4805  * getExtensions:
4806  * read all extensions in the system catalogs and return them in the
4807  * ExtensionInfo* structure
4808  *
4809  * numExtensions is set to the number of extensions read in
4810  */
4811 ExtensionInfo *
4813 {
4814  DumpOptions *dopt = fout->dopt;
4815  PGresult *res;
4816  int ntups;
4817  int i;
4818  PQExpBuffer query;
4819  ExtensionInfo *extinfo;
4820  int i_tableoid;
4821  int i_oid;
4822  int i_extname;
4823  int i_nspname;
4824  int i_extrelocatable;
4825  int i_extversion;
4826  int i_extconfig;
4827  int i_extcondition;
4828 
4829  /*
4830  * Before 9.1, there are no extensions.
4831  */
4832  if (fout->remoteVersion < 90100)
4833  {
4834  *numExtensions = 0;
4835  return NULL;
4836  }
4837 
4838  query = createPQExpBuffer();
4839 
4840  appendPQExpBufferStr(query, "SELECT x.tableoid, x.oid, "
4841  "x.extname, n.nspname, x.extrelocatable, x.extversion, x.extconfig, x.extcondition "
4842  "FROM pg_extension x "
4843  "JOIN pg_namespace n ON n.oid = x.extnamespace");
4844 
4845  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4846 
4847  ntups = PQntuples(res);
4848 
4849  extinfo = (ExtensionInfo *) pg_malloc(ntups * sizeof(ExtensionInfo));
4850 
4851  i_tableoid = PQfnumber(res, "tableoid");
4852  i_oid = PQfnumber(res, "oid");
4853  i_extname = PQfnumber(res, "extname");
4854  i_nspname = PQfnumber(res, "nspname");
4855  i_extrelocatable = PQfnumber(res, "extrelocatable");
4856  i_extversion = PQfnumber(res, "extversion");
4857  i_extconfig = PQfnumber(res, "extconfig");
4858  i_extcondition = PQfnumber(res, "extcondition");
4859 
4860  for (i = 0; i < ntups; i++)
4861  {
4862  extinfo[i].dobj.objType = DO_EXTENSION;
4863  extinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4864  extinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4865  AssignDumpId(&extinfo[i].dobj);
4866  extinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_extname));
4867  extinfo[i].namespace = pg_strdup(PQgetvalue(res, i, i_nspname));
4868  extinfo[i].relocatable = *(PQgetvalue(res, i, i_extrelocatable)) == 't';
4869  extinfo[i].extversion = pg_strdup(PQgetvalue(res, i, i_extversion));
4870  extinfo[i].extconfig = pg_strdup(PQgetvalue(res, i, i_extconfig));
4871  extinfo[i].extcondition = pg_strdup(PQgetvalue(res, i, i_extcondition));
4872 
4873  /* Decide whether we want to dump it */
4874  selectDumpableExtension(&(extinfo[i]), dopt);
4875  }
4876 
4877  PQclear(res);
4878  destroyPQExpBuffer(query);
4879 
4880  *numExtensions = ntups;
4881 
4882  return extinfo;
4883 }
4884 
4885 /*
4886  * getTypes:
4887  * read all types in the system catalogs and return them in the
4888  * TypeInfo* structure
4889  *
4890  * numTypes is set to the number of types read in
4891  *
4892  * NB: this must run after getFuncs() because we assume we can do
4893  * findFuncByOid().
4894  */
4895 TypeInfo *
4897 {
4898  DumpOptions *dopt = fout->dopt;
4899  PGresult *res;
4900  int ntups;
4901  int i;
4902  PQExpBuffer query = createPQExpBuffer();
4903  TypeInfo *tyinfo;
4904  ShellTypeInfo *stinfo;
4905  int i_tableoid;
4906  int i_oid;
4907  int i_typname;
4908  int i_typnamespace;
4909  int i_typacl;
4910  int i_rtypacl;
4911  int i_inittypacl;
4912  int i_initrtypacl;
4913  int i_rolname;
4914  int i_typelem;
4915  int i_typrelid;
4916  int i_typrelkind;
4917  int i_typtype;
4918  int i_typisdefined;
4919  int i_isarray;
4920 
4921  /*
4922  * we include even the built-in types because those may be used as array
4923  * elements by user-defined types
4924  *
4925  * we filter out the built-in types when we dump out the types
4926  *
4927  * same approach for undefined (shell) types and array types
4928  *
4929  * Note: as of 8.3 we can reliably detect whether a type is an
4930  * auto-generated array type by checking the element type's typarray.
4931  * (Before that the test is capable of generating false positives.) We
4932  * still check for name beginning with '_', though, so as to avoid the
4933  * cost of the subselect probe for all standard types. This would have to
4934  * be revisited if the backend ever allows renaming of array types.
4935  */
4936 
4937  if (fout->remoteVersion >= 90600)
4938  {
4939  PQExpBuffer acl_subquery = createPQExpBuffer();
4940  PQExpBuffer racl_subquery = createPQExpBuffer();
4941  PQExpBuffer initacl_subquery = createPQExpBuffer();
4942  PQExpBuffer initracl_subquery = createPQExpBuffer();
4943 
4944  buildACLQueries(acl_subquery, racl_subquery, initacl_subquery,
4945  initracl_subquery, "t.typacl", "t.typowner", "'T'",
4946  dopt->binary_upgrade);
4947 
4948  appendPQExpBuffer(query, "SELECT t.tableoid, t.oid, t.typname, "
4949  "t.typnamespace, "
4950  "%s AS typacl, "
4951  "%s AS rtypacl, "
4952  "%s AS inittypacl, "
4953  "%s AS initrtypacl, "
4954  "(%s t.typowner) AS rolname, "
4955  "t.typelem, t.typrelid, "
4956  "CASE WHEN t.typrelid = 0 THEN ' '::\"char\" "
4957  "ELSE (SELECT relkind FROM pg_class WHERE oid = t.typrelid) END AS typrelkind, "
4958  "t.typtype, t.typisdefined, "
4959  "t.typname[0] = '_' AND t.typelem != 0 AND "
4960  "(SELECT typarray FROM pg_type te WHERE oid = t.typelem) = t.oid AS isarray "
4961  "FROM pg_type t "
4962  "LEFT JOIN pg_init_privs pip ON "
4963  "(t.oid = pip.objoid "
4964  "AND pip.classoid = 'pg_type'::regclass "
4965  "AND pip.objsubid = 0) ",
4966  acl_subquery->data,
4967  racl_subquery->data,
4968  initacl_subquery->data,
4969  initracl_subquery->data,
4971 
4972  destroyPQExpBuffer(acl_subquery);
4973  destroyPQExpBuffer(racl_subquery);
4974  destroyPQExpBuffer(initacl_subquery);
4975  destroyPQExpBuffer(initracl_subquery);
4976  }
4977  else if (fout->remoteVersion >= 90200)
4978  {
4979  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4980  "typnamespace, typacl, NULL as rtypacl, "
4981  "NULL AS inittypacl, NULL AS initrtypacl, "
4982  "(%s typowner) AS rolname, "
4983  "typelem, typrelid, "
4984  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4985  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4986  "typtype, typisdefined, "
4987  "typname[0] = '_' AND typelem != 0 AND "
4988  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
4989  "FROM pg_type",
4991  }
4992  else if (fout->remoteVersion >= 80300)
4993  {
4994  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4995  "typnamespace, NULL AS typacl, NULL as rtypacl, "
4996  "NULL AS inittypacl, NULL AS initrtypacl, "
4997  "(%s typowner) AS rolname, "
4998  "typelem, typrelid, "
4999  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
5000  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
5001  "typtype, typisdefined, "
5002  "typname[0] = '_' AND typelem != 0 AND "
5003  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
5004  "FROM pg_type",
5006  }
5007  else
5008  {
5009  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
5010  "typnamespace, NULL AS typacl, NULL as rtypacl, "
5011  "NULL AS inittypacl, NULL AS initrtypacl, "
5012  "(%s typowner) AS rolname, "
5013  "typelem, typrelid, "
5014  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
5015  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
5016  "typtype, typisdefined, "
5017  "typname[0] = '_' AND typelem != 0 AS isarray "
5018  "FROM pg_type",
5020  }
5021 
5022  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5023 
5024  ntups = PQntuples(res);
5025 
5026  tyinfo = (TypeInfo *) pg_malloc(ntups * sizeof(TypeInfo));
5027 
5028  i_tableoid = PQfnumber(res, "tableoid");
5029  i_oid = PQfnumber(res, "oid");
5030  i_typname = PQfnumber(res, "typname");
5031  i_typnamespace = PQfnumber(res, "typnamespace");
5032  i_typacl = PQfnumber(res, "typacl");
5033  i_rtypacl = PQfnumber(res, "rtypacl");
5034  i_inittypacl = PQfnumber(res, "inittypacl");
5035  i_initrtypacl = PQfnumber(res, "initrtypacl");
5036  i_rolname = PQfnumber(res, "rolname");
5037  i_typelem = PQfnumber(res, "typelem");
5038  i_typrelid = PQfnumber(res, "typrelid");
5039  i_typrelkind = PQfnumber(res, "typrelkind");
5040  i_typtype = PQfnumber(res, "typtype");
5041  i_typisdefined = PQfnumber(res, "typisdefined");
5042  i_isarray = PQfnumber(res, "isarray");
5043 
5044  for (i = 0; i < ntups; i++)
5045  {
5046  tyinfo[i].dobj.objType = DO_TYPE;
5047  tyinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5048  tyinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5049  AssignDumpId(&tyinfo[i].dobj);
5050  tyinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_typname));
5051  tyinfo[i].dobj.namespace =
5052  findNamespace(fout,
5053  atooid(PQgetvalue(res, i, i_typnamespace)));
5054  tyinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5055  tyinfo[i].typacl = pg_strdup(PQgetvalue(res, i, i_typacl));
5056  tyinfo[i].rtypacl = pg_strdup(PQgetvalue(res, i, i_rtypacl));
5057  tyinfo[i].inittypacl = pg_strdup(PQgetvalue(res, i, i_inittypacl));
5058  tyinfo[i].initrtypacl = pg_strdup(PQgetvalue(res, i, i_initrtypacl));
5059  tyinfo[i].typelem = atooid(PQgetvalue(res, i, i_typelem));
5060  tyinfo[i].typrelid = atooid(PQgetvalue(res, i, i_typrelid));
5061  tyinfo[i].typrelkind = *PQgetvalue(res, i, i_typrelkind);
5062  tyinfo[i].typtype = *PQgetvalue(res, i, i_typtype);
5063  tyinfo[i].shellType = NULL;
5064 
5065  if (strcmp(PQgetvalue(res, i, i_typisdefined), "t") == 0)
5066  tyinfo[i].isDefined = true;
5067  else
5068  tyinfo[i].isDefined = false;
5069 
5070  if (strcmp(PQgetvalue(res, i, i_isarray), "t") == 0)
5071  tyinfo[i].isArray = true;
5072  else
5073  tyinfo[i].isArray = false;
5074 
5075  /* Decide whether we want to dump it */
5076  selectDumpableType(&tyinfo[i], fout);
5077 
5078  /* Do not try to dump ACL if no ACL exists. */
5079  if (PQgetisnull(res, i, i_typacl) && PQgetisnull(res, i, i_rtypacl) &&
5080  PQgetisnull(res, i, i_inittypacl) &&
5081  PQgetisnull(res, i, i_initrtypacl))
5082  tyinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5083 
5084  /*
5085  * If it's a domain, fetch info about its constraints, if any
5086  */
5087  tyinfo[i].nDomChecks = 0;
5088  tyinfo[i].domChecks = NULL;
5089  if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
5090  tyinfo[i].typtype == TYPTYPE_DOMAIN)
5091  getDomainConstraints(fout, &(tyinfo[i]));
5092 
5093  /*
5094  * If it's a base type, make a DumpableObject representing a shell
5095  * definition of the type. We will need to dump that ahead of the I/O
5096  * functions for the type. Similarly, range types need a shell
5097  * definition in case they have a canonicalize function.
5098  *
5099  * Note: the shell type doesn't have a catId. You might think it
5100  * should copy the base type's catId, but then it might capture the
5101  * pg_depend entries for the type, which we don't want.
5102  */
5103  if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
5104  (tyinfo[i].typtype == TYPTYPE_BASE ||
5105  tyinfo[i].typtype == TYPTYPE_RANGE))
5106  {
5107  stinfo = (ShellTypeInfo *) pg_malloc(sizeof(ShellTypeInfo));
5108  stinfo->dobj.objType = DO_SHELL_TYPE;
5109  stinfo->dobj.catId = nilCatalogId;
5110  AssignDumpId(&stinfo->dobj);
5111  stinfo->dobj.name = pg_strdup(tyinfo[i].dobj.name);
5112  stinfo->dobj.namespace = tyinfo[i].dobj.namespace;
5113  stinfo->baseType = &(tyinfo[i]);
5114  tyinfo[i].shellType = stinfo;
5115 
5116  /*
5117  * Initially mark the shell type as not to be dumped. We'll only
5118  * dump it if the I/O or canonicalize functions need to be dumped;
5119  * this is taken care of while sorting dependencies.
5120  */
5121  stinfo->dobj.dump = DUMP_COMPONENT_NONE;
5122  }
5123 
5124  if (strlen(tyinfo[i].rolname) == 0)
5125  pg_log_warning("owner of data type \"%s\" appears to be invalid",
5126  tyinfo[i].dobj.name);
5127  }
5128 
5129  *numTypes = ntups;
5130 
5131  PQclear(res);
5132 
5133  destroyPQExpBuffer(query);
5134 
5135  return tyinfo;
5136 }
5137 
5138 /*
5139  * getOperators:
5140  * read all operators in the system catalogs and return them in the
5141  * OprInfo* structure
5142  *
5143  * numOprs is set to the number of operators read in
5144  */
5145 OprInfo *
5146 getOperators(Archive *fout, int *numOprs)
5147 {
5148  PGresult *res;
5149  int ntups;
5150  int i;
5151  PQExpBuffer query = createPQExpBuffer();
5152  OprInfo *oprinfo;
5153  int i_tableoid;
5154  int i_oid;
5155  int i_oprname;
5156  int i_oprnamespace;
5157  int i_rolname;
5158  int i_oprkind;
5159  int i_oprcode;
5160 
5161  /*
5162  * find all operators, including builtin operators; we filter out
5163  * system-defined operators at dump-out time.
5164  */
5165 
5166  appendPQExpBuffer(query, "SELECT tableoid, oid, oprname, "
5167  "oprnamespace, "
5168  "(%s oprowner) AS rolname, "
5169  "oprkind, "
5170  "oprcode::oid AS oprcode "
5171  "FROM pg_operator",
5173 
5174  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5175 
5176  ntups = PQntuples(res);
5177  *numOprs = ntups;
5178 
5179  oprinfo = (OprInfo *) pg_malloc(ntups * sizeof(OprInfo));
5180 
5181  i_tableoid = PQfnumber(res, "tableoid");
5182  i_oid = PQfnumber(res, "oid");
5183  i_oprname = PQfnumber(res, "oprname");
5184  i_oprnamespace = PQfnumber(res, "oprnamespace");
5185  i_rolname = PQfnumber(res, "rolname");
5186  i_oprkind = PQfnumber(res, "oprkind");
5187  i_oprcode = PQfnumber(res, "oprcode");
5188 
5189  for (i = 0; i < ntups; i++)
5190  {
5191  oprinfo[i].dobj.objType = DO_OPERATOR;
5192  oprinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5193  oprinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5194  AssignDumpId(&oprinfo[i].dobj);
5195  oprinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oprname));
5196  oprinfo[i].dobj.namespace =
5197  findNamespace(fout,
5198  atooid(PQgetvalue(res, i, i_oprnamespace)));
5199  oprinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5200  oprinfo[i].oprkind = (PQgetvalue(res, i, i_oprkind))[0];
5201  oprinfo[i].oprcode = atooid(PQgetvalue(res, i, i_oprcode));
5202 
5203  /* Decide whether we want to dump it */
5204  selectDumpableObject(&(oprinfo[i].dobj), fout);
5205 
5206  /* Operators do not currently have ACLs. */
5207  oprinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5208 
5209  if (strlen(oprinfo[i].rolname) == 0)
5210  pg_log_warning("owner of operator \"%s\" appears to be invalid",
5211  oprinfo[i].dobj.name);
5212  }
5213 
5214  PQclear(res);
5215 
5216  destroyPQExpBuffer(query);
5217 
5218  return oprinfo;
5219 }
5220 
5221 /*
5222  * getCollations:
5223  * read all collations in the system catalogs and return them in the
5224  * CollInfo* structure
5225  *
5226  * numCollations is set to the number of collations read in
5227  */
5228 CollInfo *
5230 {
5231  PGresult *res;
5232  int ntups;
5233  int i;
5234  PQExpBuffer query;
5235  CollInfo *collinfo;
5236  int i_tableoid;
5237  int i_oid;
5238  int i_collname;
5239  int i_collnamespace;
5240  int i_rolname;
5241 
5242  /* Collations didn't exist pre-9.1 */
5243  if (fout->remoteVersion < 90100)
5244  {
5245  *numCollations = 0;
5246  return NULL;
5247  }
5248 
5249  query = createPQExpBuffer();
5250 
5251  /*
5252  * find all collations, including builtin collations; we filter out
5253  * system-defined collations at dump-out time.
5254  */
5255 
5256  appendPQExpBuffer(query, "SELECT tableoid, oid, collname, "
5257  "collnamespace, "
5258  "(%s collowner) AS rolname "
5259  "FROM pg_collation",
5261 
5262  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5263 
5264  ntups = PQntuples(res);
5265  *numCollations = ntups;
5266 
5267  collinfo = (CollInfo *) pg_malloc(ntups * sizeof(CollInfo));
5268 
5269  i_tableoid = PQfnumber(res, "tableoid");
5270  i_oid = PQfnumber(res, "oid");
5271  i_collname = PQfnumber(res, "collname");
5272  i_collnamespace = PQfnumber(res, "collnamespace");
5273  i_rolname = PQfnumber(res, "rolname");
5274 
5275  for (i = 0; i < ntups; i++)
5276  {
5277  collinfo[i].dobj.objType = DO_COLLATION;
5278  collinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5279  collinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5280  AssignDumpId(&collinfo[i].dobj);
5281  collinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_collname));
5282  collinfo[i].dobj.namespace =
5283  findNamespace(fout,
5284  atooid(PQgetvalue(res, i, i_collnamespace)));
5285  collinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5286 
5287  /* Decide whether we want to dump it */
5288  selectDumpableObject(&(collinfo[i].dobj), fout);
5289 
5290  /* Collations do not currently have ACLs. */
5291  collinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5292  }
5293 
5294  PQclear(res);
5295 
5296  destroyPQExpBuffer(query);
5297 
5298  return collinfo;
5299 }
5300 
5301 /*
5302  * getConversions:
5303  * read all conversions in the system catalogs and return them in the
5304  * ConvInfo* structure
5305  *
5306  * numConversions is set to the number of conversions read in
5307  */
5308 ConvInfo *