PostgreSQL Source Code  git master
pg_dump.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * pg_dump.c
4  * pg_dump is a utility for dumping out a postgres database
5  * into a script file.
6  *
7  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994, Regents of the University of California
9  *
10  * pg_dump will read the system catalogs in a database and dump out a
11  * script that reproduces the schema in terms of SQL that is understood
12  * by PostgreSQL
13  *
14  * Note that pg_dump runs in a transaction-snapshot mode transaction,
15  * so it sees a consistent snapshot of the database including system
16  * catalogs. However, it relies in part on various specialized backend
17  * functions like pg_get_indexdef(), and those things tend to look at
18  * the currently committed state. So it is possible to get 'cache
19  * lookup failed' error if someone performs DDL changes while a dump is
20  * happening. The window for this sort of thing is from the acquisition
21  * of the transaction snapshot to getSchemaData() (when pg_dump acquires
22  * AccessShareLock on every table it intends to dump). It isn't very large,
23  * but it can happen.
24  *
25  * http://archives.postgresql.org/pgsql-bugs/2010-02/msg00187.php
26  *
27  * IDENTIFICATION
28  * src/bin/pg_dump/pg_dump.c
29  *
30  *-------------------------------------------------------------------------
31  */
32 #include "postgres_fe.h"
33 
34 #include <unistd.h>
35 #include <ctype.h>
36 #include <limits.h>
37 #ifdef HAVE_TERMIOS_H
38 #include <termios.h>
39 #endif
40 
41 #include "access/attnum.h"
42 #include "access/sysattr.h"
43 #include "access/transam.h"
44 #include "catalog/pg_aggregate_d.h"
45 #include "catalog/pg_am_d.h"
46 #include "catalog/pg_attribute_d.h"
47 #include "catalog/pg_cast_d.h"
48 #include "catalog/pg_class_d.h"
49 #include "catalog/pg_collation_d.h"
50 #include "catalog/pg_default_acl_d.h"
51 #include "catalog/pg_largeobject_d.h"
52 #include "catalog/pg_largeobject_metadata_d.h"
53 #include "catalog/pg_proc_d.h"
54 #include "catalog/pg_trigger_d.h"
55 #include "catalog/pg_type_d.h"
56 #include "common/connect.h"
57 #include "dumputils.h"
58 #include "fe_utils/string_utils.h"
59 #include "getopt_long.h"
60 #include "libpq/libpq-fs.h"
61 #include "parallel.h"
62 #include "pg_backup_db.h"
63 #include "pg_backup_utils.h"
64 #include "pg_dump.h"
65 #include "storage/block.h"
66 
/*
 * One collected comment ("description") for a database object.
 * Consumed by findComments()/collectComments() below.
 * NOTE(review): the catalog these rows come from is not visible in this
 * excerpt -- presumably pg_description; confirm against collectComments().
 */
67 typedef struct
68 {
69  const char *descr; /* comment for an object */
70  Oid classoid; /* object class (catalog OID) */
71  Oid objoid; /* object OID */
72  int objsubid; /* subobject (table column #) */
73 } CommentItem;
74 
/*
 * One collected security label for a database object.
 * Consumed by findSecLabels()/collectSecLabels() below.
 * NOTE(review): the catalog these rows come from is not visible in this
 * excerpt -- presumably pg_seclabel; confirm against collectSecLabels().
 */
75 typedef struct
76 {
77  const char *provider; /* label provider of this security label */
78  const char *label; /* security label for an object */
79  Oid classoid; /* object class (catalog OID) */
80  Oid objoid; /* object OID */
81  int objsubid; /* subobject (table column #) */
82 } SecLabelItem;
83 
84 typedef enum OidOptions
85 {
89 } OidOptions;
90 
91 /* global decls */
92 static bool dosync = true; /* Issue fsync() to make dump durable on disk. */
93 
94 /* subquery used to convert user ID (eg, datdba) to user name */
95 static const char *username_subquery;
96 
97 /*
98  * For 8.0 and earlier servers, pulled from pg_database, for 8.1+ we use
99  * FirstNormalObjectId - 1.
100  */
101 static Oid g_last_builtin_oid; /* value of the last builtin oid */
102 
103 /* When set (--strict-names), each specified name/pattern must match at least one entity */
104 static int strict_names = 0;
105 
106 /*
107  * Object inclusion/exclusion lists
108  *
109  * The string lists record the patterns given by command-line switches,
110  * which we then convert to lists of OIDs of matching objects.
111  */
113 static SimpleOidList schema_include_oids = {NULL, NULL};
115 static SimpleOidList schema_exclude_oids = {NULL, NULL};
116 
118 static SimpleOidList table_include_oids = {NULL, NULL};
120 static SimpleOidList table_exclude_oids = {NULL, NULL};
122 static SimpleOidList tabledata_exclude_oids = {NULL, NULL};
125 
126 static const CatalogId nilCatalogId = {0, 0};
127 
128 /* override for standard extra_float_digits setting */
129 static bool have_extra_float_digits = false;
131 
132 /*
133  * The default number of rows per INSERT when
134  * --inserts is specified without --rows-per-insert
135  */
136 #define DUMP_DEFAULT_ROWS_PER_INSERT 1
137 
138 /*
139  * Macro for producing quoted, schema-qualified name of a dumpable object.
 *
 * Note: "obj" is expanded twice, so do not pass an argument with side
 * effects.
140  */
141 #define fmtQualifiedDumpable(obj) \
142  fmtQualifiedId((obj)->dobj.namespace->dobj.name, \
143  (obj)->dobj.name)
144 
145 static void help(const char *progname);
146 static void setup_connection(Archive *AH,
147  const char *dumpencoding, const char *dumpsnapshot,
148  char *use_role);
150 static void expand_schema_name_patterns(Archive *fout,
151  SimpleStringList *patterns,
152  SimpleOidList *oids,
153  bool strict_names);
155  SimpleStringList *patterns,
156  SimpleOidList *oids);
157 static void expand_table_name_patterns(Archive *fout,
158  SimpleStringList *patterns,
159  SimpleOidList *oids,
160  bool strict_names);
161 static NamespaceInfo *findNamespace(Oid nsoid);
162 static void dumpTableData(Archive *fout, const TableDataInfo *tdinfo);
163 static void refreshMatViewData(Archive *fout, const TableDataInfo *tdinfo);
164 static void guessConstraintInheritance(TableInfo *tblinfo, int numTables);
165 static void dumpComment(Archive *fout, const char *type, const char *name,
166  const char *namespace, const char *owner,
167  CatalogId catalogId, int subid, DumpId dumpId);
168 static int findComments(Archive *fout, Oid classoid, Oid objoid,
169  CommentItem **items);
170 static int collectComments(Archive *fout, CommentItem **items);
171 static void dumpSecLabel(Archive *fout, const char *type, const char *name,
172  const char *namespace, const char *owner,
173  CatalogId catalogId, int subid, DumpId dumpId);
174 static int findSecLabels(Archive *fout, Oid classoid, Oid objoid,
175  SecLabelItem **items);
176 static int collectSecLabels(Archive *fout, SecLabelItem **items);
177 static void dumpDumpableObject(Archive *fout, const DumpableObject *dobj);
178 static void dumpNamespace(Archive *fout, const NamespaceInfo *nspinfo);
179 static void dumpExtension(Archive *fout, const ExtensionInfo *extinfo);
180 static void dumpType(Archive *fout, const TypeInfo *tyinfo);
181 static void dumpBaseType(Archive *fout, const TypeInfo *tyinfo);
182 static void dumpEnumType(Archive *fout, const TypeInfo *tyinfo);
183 static void dumpRangeType(Archive *fout, const TypeInfo *tyinfo);
184 static void dumpUndefinedType(Archive *fout, const TypeInfo *tyinfo);
185 static void dumpDomain(Archive *fout, const TypeInfo *tyinfo);
186 static void dumpCompositeType(Archive *fout, const TypeInfo *tyinfo);
187 static void dumpCompositeTypeColComments(Archive *fout, const TypeInfo *tyinfo);
188 static void dumpShellType(Archive *fout, const ShellTypeInfo *stinfo);
189 static void dumpProcLang(Archive *fout, const ProcLangInfo *plang);
190 static void dumpFunc(Archive *fout, const FuncInfo *finfo);
191 static void dumpCast(Archive *fout, const CastInfo *cast);
192 static void dumpTransform(Archive *fout, const TransformInfo *transform);
193 static void dumpOpr(Archive *fout, const OprInfo *oprinfo);
194 static void dumpAccessMethod(Archive *fout, const AccessMethodInfo *oprinfo);
195 static void dumpOpclass(Archive *fout, const OpclassInfo *opcinfo);
196 static void dumpOpfamily(Archive *fout, const OpfamilyInfo *opfinfo);
197 static void dumpCollation(Archive *fout, const CollInfo *collinfo);
198 static void dumpConversion(Archive *fout, const ConvInfo *convinfo);
199 static void dumpRule(Archive *fout, const RuleInfo *rinfo);
200 static void dumpAgg(Archive *fout, const AggInfo *agginfo);
201 static void dumpTrigger(Archive *fout, const TriggerInfo *tginfo);
202 static void dumpEventTrigger(Archive *fout, const EventTriggerInfo *evtinfo);
203 static void dumpTable(Archive *fout, const TableInfo *tbinfo);
204 static void dumpTableSchema(Archive *fout, const TableInfo *tbinfo);
205 static void dumpTableAttach(Archive *fout, const TableAttachInfo *tbinfo);
206 static void dumpAttrDef(Archive *fout, const AttrDefInfo *adinfo);
207 static void dumpSequence(Archive *fout, const TableInfo *tbinfo);
208 static void dumpSequenceData(Archive *fout, const TableDataInfo *tdinfo);
209 static void dumpIndex(Archive *fout, const IndxInfo *indxinfo);
210 static void dumpIndexAttach(Archive *fout, const IndexAttachInfo *attachinfo);
211 static void dumpStatisticsExt(Archive *fout, const StatsExtInfo *statsextinfo);
212 static void dumpConstraint(Archive *fout, const ConstraintInfo *coninfo);
213 static void dumpTableConstraintComment(Archive *fout, const ConstraintInfo *coninfo);
214 static void dumpTSParser(Archive *fout, const TSParserInfo *prsinfo);
215 static void dumpTSDictionary(Archive *fout, const TSDictInfo *dictinfo);
216 static void dumpTSTemplate(Archive *fout, const TSTemplateInfo *tmplinfo);
217 static void dumpTSConfig(Archive *fout, const TSConfigInfo *cfginfo);
218 static void dumpForeignDataWrapper(Archive *fout, const FdwInfo *fdwinfo);
219 static void dumpForeignServer(Archive *fout, const ForeignServerInfo *srvinfo);
220 static void dumpUserMappings(Archive *fout,
221  const char *servername, const char *namespace,
222  const char *owner, CatalogId catalogId, DumpId dumpId);
223 static void dumpDefaultACL(Archive *fout, const DefaultACLInfo *daclinfo);
224 
225 static DumpId dumpACL(Archive *fout, DumpId objDumpId, DumpId altDumpId,
226  const char *type, const char *name, const char *subname,
227  const char *nspname, const char *owner,
228  const char *acls, const char *racls,
229  const char *initacls, const char *initracls);
230 
231 static void getDependencies(Archive *fout);
232 static void BuildArchiveDependencies(Archive *fout);
233 static void findDumpableDependencies(ArchiveHandle *AH, const DumpableObject *dobj,
234  DumpId **dependencies, int *nDeps, int *allocDeps);
235 
237 static void addBoundaryDependencies(DumpableObject **dobjs, int numObjs,
238  DumpableObject *boundaryObjs);
239 
240 static void addConstrChildIdxDeps(DumpableObject *dobj, const IndxInfo *refidx);
241 static void getDomainConstraints(Archive *fout, TypeInfo *tyinfo);
242 static void getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, char relkind);
243 static void makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo);
244 static void buildMatViewRefreshDependencies(Archive *fout);
245 static void getTableDataFKConstraints(void);
246 static char *format_function_arguments(const FuncInfo *finfo, const char *funcargs,
247  bool is_agg);
248 static char *format_function_arguments_old(Archive *fout,
249  const FuncInfo *finfo, int nallargs,
250  char **allargtypes,
251  char **argmodes,
252  char **argnames);
253 static char *format_function_signature(Archive *fout,
254  const FuncInfo *finfo, bool honor_quotes);
255 static char *convertRegProcReference(const char *proc);
256 static char *getFormattedOperatorName(const char *oproid);
257 static char *convertTSFunction(Archive *fout, Oid funcOid);
258 static Oid findLastBuiltinOid_V71(Archive *fout);
259 static char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts);
260 static void getBlobs(Archive *fout);
261 static void dumpBlob(Archive *fout, const BlobInfo *binfo);
262 static int dumpBlobs(Archive *fout, const void *arg);
263 static void dumpPolicy(Archive *fout, const PolicyInfo *polinfo);
264 static void dumpPublication(Archive *fout, const PublicationInfo *pubinfo);
265 static void dumpPublicationTable(Archive *fout, const PublicationRelInfo *pubrinfo);
266 static void dumpSubscription(Archive *fout, const SubscriptionInfo *subinfo);
267 static void dumpDatabase(Archive *AH);
268 static void dumpDatabaseConfig(Archive *AH, PQExpBuffer outbuf,
269  const char *dbname, Oid dboid);
270 static void dumpEncoding(Archive *AH);
271 static void dumpStdStrings(Archive *AH);
272 static void dumpSearchPath(Archive *AH);
274  PQExpBuffer upgrade_buffer,
275  Oid pg_type_oid,
276  bool force_array_type,
277  bool include_multirange_type);
279  PQExpBuffer upgrade_buffer, Oid pg_rel_oid);
280 static void binary_upgrade_set_pg_class_oids(Archive *fout,
281  PQExpBuffer upgrade_buffer,
282  Oid pg_class_oid, bool is_index);
283 static void binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
284  const DumpableObject *dobj,
285  const char *objtype,
286  const char *objname,
287  const char *objnamespace);
288 static const char *getAttrName(int attrnum, const TableInfo *tblInfo);
289 static const char *fmtCopyColumnList(const TableInfo *ti, PQExpBuffer buffer);
290 static bool nonemptyReloptions(const char *reloptions);
291 static void appendIndexCollationVersion(PQExpBuffer buffer, const IndxInfo *indxinfo,
292  int enc, bool coll_unknown,
293  Archive *fout);
294 static void appendReloptionsArrayAH(PQExpBuffer buffer, const char *reloptions,
295  const char *prefix, Archive *fout);
296 static char *get_synchronized_snapshot(Archive *fout);
297 static void setupDumpWorker(Archive *AHX);
298 static TableInfo *getRootTableInfo(const TableInfo *tbinfo);
299 
300 
301 int
302 main(int argc, char **argv)
303 {
304  int c;
305  const char *filename = NULL;
306  const char *format = "p";
307  TableInfo *tblinfo;
308  int numTables;
309  DumpableObject **dobjs;
310  int numObjs;
311  DumpableObject *boundaryObjs;
312  int i;
313  int optindex;
314  char *endptr;
315  RestoreOptions *ropt;
316  Archive *fout; /* the script file */
317  bool g_verbose = false;
318  const char *dumpencoding = NULL;
319  const char *dumpsnapshot = NULL;
320  char *use_role = NULL;
321  long rowsPerInsert;
322  int numWorkers = 1;
323  int compressLevel = -1;
324  int plainText = 0;
325  ArchiveFormat archiveFormat = archUnknown;
326  ArchiveMode archiveMode;
327 
328  static DumpOptions dopt;
329 
330  static struct option long_options[] = {
331  {"data-only", no_argument, NULL, 'a'},
332  {"blobs", no_argument, NULL, 'b'},
333  {"no-blobs", no_argument, NULL, 'B'},
334  {"clean", no_argument, NULL, 'c'},
335  {"create", no_argument, NULL, 'C'},
336  {"dbname", required_argument, NULL, 'd'},
337  {"file", required_argument, NULL, 'f'},
338  {"format", required_argument, NULL, 'F'},
339  {"host", required_argument, NULL, 'h'},
340  {"jobs", 1, NULL, 'j'},
341  {"no-reconnect", no_argument, NULL, 'R'},
342  {"no-owner", no_argument, NULL, 'O'},
343  {"port", required_argument, NULL, 'p'},
344  {"schema", required_argument, NULL, 'n'},
345  {"exclude-schema", required_argument, NULL, 'N'},
346  {"schema-only", no_argument, NULL, 's'},
347  {"superuser", required_argument, NULL, 'S'},
348  {"table", required_argument, NULL, 't'},
349  {"exclude-table", required_argument, NULL, 'T'},
350  {"no-password", no_argument, NULL, 'w'},
351  {"password", no_argument, NULL, 'W'},
352  {"username", required_argument, NULL, 'U'},
353  {"verbose", no_argument, NULL, 'v'},
354  {"no-privileges", no_argument, NULL, 'x'},
355  {"no-acl", no_argument, NULL, 'x'},
356  {"compress", required_argument, NULL, 'Z'},
357  {"encoding", required_argument, NULL, 'E'},
358  {"help", no_argument, NULL, '?'},
359  {"version", no_argument, NULL, 'V'},
360 
361  /*
362  * the following options don't have an equivalent short option letter
363  */
364  {"attribute-inserts", no_argument, &dopt.column_inserts, 1},
365  {"binary-upgrade", no_argument, &dopt.binary_upgrade, 1},
366  {"column-inserts", no_argument, &dopt.column_inserts, 1},
367  {"disable-dollar-quoting", no_argument, &dopt.disable_dollar_quoting, 1},
368  {"disable-triggers", no_argument, &dopt.disable_triggers, 1},
369  {"enable-row-security", no_argument, &dopt.enable_row_security, 1},
370  {"exclude-table-data", required_argument, NULL, 4},
371  {"extra-float-digits", required_argument, NULL, 8},
372  {"if-exists", no_argument, &dopt.if_exists, 1},
373  {"inserts", no_argument, NULL, 9},
374  {"lock-wait-timeout", required_argument, NULL, 2},
375  {"no-tablespaces", no_argument, &dopt.outputNoTablespaces, 1},
376  {"quote-all-identifiers", no_argument, &quote_all_identifiers, 1},
377  {"load-via-partition-root", no_argument, &dopt.load_via_partition_root, 1},
378  {"role", required_argument, NULL, 3},
379  {"section", required_argument, NULL, 5},
380  {"serializable-deferrable", no_argument, &dopt.serializable_deferrable, 1},
381  {"snapshot", required_argument, NULL, 6},
382  {"strict-names", no_argument, &strict_names, 1},
383  {"use-set-session-authorization", no_argument, &dopt.use_setsessauth, 1},
384  {"no-comments", no_argument, &dopt.no_comments, 1},
385  {"no-publications", no_argument, &dopt.no_publications, 1},
386  {"no-security-labels", no_argument, &dopt.no_security_labels, 1},
387  {"no-synchronized-snapshots", no_argument, &dopt.no_synchronized_snapshots, 1},
388  {"no-unlogged-table-data", no_argument, &dopt.no_unlogged_table_data, 1},
389  {"no-subscriptions", no_argument, &dopt.no_subscriptions, 1},
390  {"no-sync", no_argument, NULL, 7},
391  {"on-conflict-do-nothing", no_argument, &dopt.do_nothing, 1},
392  {"rows-per-insert", required_argument, NULL, 10},
393  {"include-foreign-data", required_argument, NULL, 11},
394  {"index-collation-versions-unknown", no_argument, &dopt.coll_unknown, 1},
395 
396  {NULL, 0, NULL, 0}
397  };
398 
399  pg_logging_init(argv[0]);
401  set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_dump"));
402 
403  /*
404  * Initialize what we need for parallel execution, especially for thread
405  * support on Windows.
406  */
408 
409  progname = get_progname(argv[0]);
410 
411  if (argc > 1)
412  {
413  if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
414  {
415  help(progname);
416  exit_nicely(0);
417  }
418  if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
419  {
420  puts("pg_dump (PostgreSQL) " PG_VERSION);
421  exit_nicely(0);
422  }
423  }
424 
425  InitDumpOptions(&dopt);
426 
427  while ((c = getopt_long(argc, argv, "abBcCd:E:f:F:h:j:n:N:Op:RsS:t:T:U:vwWxZ:",
428  long_options, &optindex)) != -1)
429  {
430  switch (c)
431  {
432  case 'a': /* Dump data only */
433  dopt.dataOnly = true;
434  break;
435 
436  case 'b': /* Dump blobs */
437  dopt.outputBlobs = true;
438  break;
439 
440  case 'B': /* Don't dump blobs */
441  dopt.dontOutputBlobs = true;
442  break;
443 
444  case 'c': /* clean (i.e., drop) schema prior to create */
445  dopt.outputClean = 1;
446  break;
447 
448  case 'C': /* Create DB */
449  dopt.outputCreateDB = 1;
450  break;
451 
452  case 'd': /* database name */
453  dopt.cparams.dbname = pg_strdup(optarg);
454  break;
455 
456  case 'E': /* Dump encoding */
457  dumpencoding = pg_strdup(optarg);
458  break;
459 
460  case 'f':
461  filename = pg_strdup(optarg);
462  break;
463 
464  case 'F':
465  format = pg_strdup(optarg);
466  break;
467 
468  case 'h': /* server host */
469  dopt.cparams.pghost = pg_strdup(optarg);
470  break;
471 
472  case 'j': /* number of dump jobs */
473  numWorkers = atoi(optarg);
474  break;
475 
476  case 'n': /* include schema(s) */
477  simple_string_list_append(&schema_include_patterns, optarg);
478  dopt.include_everything = false;
479  break;
480 
481  case 'N': /* exclude schema(s) */
482  simple_string_list_append(&schema_exclude_patterns, optarg);
483  break;
484 
485  case 'O': /* Don't reconnect to match owner */
486  dopt.outputNoOwner = 1;
487  break;
488 
489  case 'p': /* server port */
490  dopt.cparams.pgport = pg_strdup(optarg);
491  break;
492 
493  case 'R':
494  /* no-op, still accepted for backwards compatibility */
495  break;
496 
497  case 's': /* dump schema only */
498  dopt.schemaOnly = true;
499  break;
500 
501  case 'S': /* Username for superuser in plain text output */
503  break;
504 
505  case 't': /* include table(s) */
506  simple_string_list_append(&table_include_patterns, optarg);
507  dopt.include_everything = false;
508  break;
509 
510  case 'T': /* exclude table(s) */
511  simple_string_list_append(&table_exclude_patterns, optarg);
512  break;
513 
514  case 'U':
516  break;
517 
518  case 'v': /* verbose */
519  g_verbose = true;
521  break;
522 
523  case 'w':
525  break;
526 
527  case 'W':
529  break;
530 
531  case 'x': /* skip ACL dump */
532  dopt.aclsSkip = true;
533  break;
534 
535  case 'Z': /* Compression Level */
536  compressLevel = atoi(optarg);
537  if (compressLevel < 0 || compressLevel > 9)
538  {
539  pg_log_error("compression level must be in range 0..9");
540  exit_nicely(1);
541  }
542  break;
543 
544  case 0:
545  /* This covers the long options. */
546  break;
547 
548  case 2: /* lock-wait-timeout */
550  break;
551 
552  case 3: /* SET ROLE */
553  use_role = pg_strdup(optarg);
554  break;
555 
556  case 4: /* exclude table(s) data */
557  simple_string_list_append(&tabledata_exclude_patterns, optarg);
558  break;
559 
560  case 5: /* section */
562  break;
563 
564  case 6: /* snapshot */
565  dumpsnapshot = pg_strdup(optarg);
566  break;
567 
568  case 7: /* no-sync */
569  dosync = false;
570  break;
571 
572  case 8:
574  extra_float_digits = atoi(optarg);
575  if (extra_float_digits < -15 || extra_float_digits > 3)
576  {
577  pg_log_error("extra_float_digits must be in range -15..3");
578  exit_nicely(1);
579  }
580  break;
581 
582  case 9: /* inserts */
583 
584  /*
585  * dump_inserts also stores --rows-per-insert, careful not to
586  * overwrite that.
587  */
588  if (dopt.dump_inserts == 0)
590  break;
591 
592  case 10: /* rows per insert */
593  errno = 0;
594  rowsPerInsert = strtol(optarg, &endptr, 10);
595 
596  if (endptr == optarg || *endptr != '\0' ||
597  rowsPerInsert <= 0 || rowsPerInsert > INT_MAX ||
598  errno == ERANGE)
599  {
600  pg_log_error("rows-per-insert must be in range %d..%d",
601  1, INT_MAX);
602  exit_nicely(1);
603  }
604  dopt.dump_inserts = (int) rowsPerInsert;
605  break;
606 
607  case 11: /* include foreign data */
608  simple_string_list_append(&foreign_servers_include_patterns,
609  optarg);
610  break;
611 
612  default:
613  fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
614  exit_nicely(1);
615  }
616  }
617 
618  /*
619  * Non-option argument specifies database name as long as it wasn't
620  * already specified with -d / --dbname
621  */
622  if (optind < argc && dopt.cparams.dbname == NULL)
623  dopt.cparams.dbname = argv[optind++];
624 
625  /* Complain if any arguments remain */
626  if (optind < argc)
627  {
628  pg_log_error("too many command-line arguments (first is \"%s\")",
629  argv[optind]);
630  fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
631  progname);
632  exit_nicely(1);
633  }
634 
635  /* --column-inserts implies --inserts */
636  if (dopt.column_inserts && dopt.dump_inserts == 0)
638 
639  /*
640  * Binary upgrade mode implies dumping sequence data even in schema-only
641  * mode. This is not exposed as a separate option, but kept separate
642  * internally for clarity.
643  */
644  if (dopt.binary_upgrade)
645  dopt.sequence_data = 1;
646 
647  if (dopt.dataOnly && dopt.schemaOnly)
648  {
649  pg_log_error("options -s/--schema-only and -a/--data-only cannot be used together");
650  exit_nicely(1);
651  }
652 
653  if (dopt.schemaOnly && foreign_servers_include_patterns.head != NULL)
654  fatal("options -s/--schema-only and --include-foreign-data cannot be used together");
655 
656  if (numWorkers > 1 && foreign_servers_include_patterns.head != NULL)
657  fatal("option --include-foreign-data is not supported with parallel backup");
658 
659  if (dopt.dataOnly && dopt.outputClean)
660  {
661  pg_log_error("options -c/--clean and -a/--data-only cannot be used together");
662  exit_nicely(1);
663  }
664 
665  if (dopt.if_exists && !dopt.outputClean)
666  fatal("option --if-exists requires option -c/--clean");
667 
668  /*
669  * --inserts are already implied above if --column-inserts or
670  * --rows-per-insert were specified.
671  */
672  if (dopt.do_nothing && dopt.dump_inserts == 0)
673  fatal("option --on-conflict-do-nothing requires option --inserts, --rows-per-insert, or --column-inserts");
674 
675  /* Identify archive format to emit */
676  archiveFormat = parseArchiveFormat(format, &archiveMode);
677 
678  /* archiveFormat specific setup */
679  if (archiveFormat == archNull)
680  plainText = 1;
681 
682  /* Custom and directory formats are compressed by default, others not */
683  if (compressLevel == -1)
684  {
685 #ifdef HAVE_LIBZ
686  if (archiveFormat == archCustom || archiveFormat == archDirectory)
687  compressLevel = Z_DEFAULT_COMPRESSION;
688  else
689 #endif
690  compressLevel = 0;
691  }
692 
693 #ifndef HAVE_LIBZ
694  if (compressLevel != 0)
695  pg_log_warning("requested compression not available in this installation -- archive will be uncompressed");
696  compressLevel = 0;
697 #endif
698 
699  /*
700  * If emitting an archive format, we always want to emit a DATABASE item,
701  * in case --create is specified at pg_restore time.
702  */
703  if (!plainText)
704  dopt.outputCreateDB = 1;
705 
706  /*
707  * On Windows we can only have at most MAXIMUM_WAIT_OBJECTS (= 64 usually)
708  * parallel jobs because that's the maximum limit for the
709  * WaitForMultipleObjects() call.
710  */
711  if (numWorkers <= 0
712 #ifdef WIN32
713  || numWorkers > MAXIMUM_WAIT_OBJECTS
714 #endif
715  )
716  fatal("invalid number of parallel jobs");
717 
718  /* Parallel backup only in the directory archive format so far */
719  if (archiveFormat != archDirectory && numWorkers > 1)
720  fatal("parallel backup only supported by the directory format");
721 
722  /* Unknown collation versions only relevant in binary upgrade mode */
723  if (dopt.coll_unknown && !dopt.binary_upgrade)
724  fatal("option --index-collation-versions-unknown only works in binary upgrade mode");
725 
726  /* Open the output file */
727  fout = CreateArchive(filename, archiveFormat, compressLevel, dosync,
728  archiveMode, setupDumpWorker);
729 
730  /* Make dump options accessible right away */
731  SetArchiveOptions(fout, &dopt, NULL);
732 
733  /* Register the cleanup hook */
734  on_exit_close_archive(fout);
735 
736  /* Let the archiver know how noisy to be */
737  fout->verbose = g_verbose;
738 
739 
740  /*
741  * We allow the server to be back to 8.0, and up to any minor release of
742  * our own major version. (See also version check in pg_dumpall.c.)
743  */
744  fout->minRemoteVersion = 80000;
745  fout->maxRemoteVersion = (PG_VERSION_NUM / 100) * 100 + 99;
746 
747  fout->numWorkers = numWorkers;
748 
749  /*
750  * Open the database using the Archiver, so it knows about it. Errors mean
751  * death.
752  */
753  ConnectDatabase(fout, &dopt.cparams, false);
754  setup_connection(fout, dumpencoding, dumpsnapshot, use_role);
755 
756  /*
757  * Disable security label support if server version < v9.1.x (prevents
758  * access to nonexistent pg_seclabel catalog)
759  */
760  if (fout->remoteVersion < 90100)
761  dopt.no_security_labels = 1;
762 
763  /*
764  * On hot standbys, never try to dump unlogged table data, since it will
765  * just throw an error.
766  */
767  if (fout->isStandby)
768  dopt.no_unlogged_table_data = true;
769 
770  /* Select the appropriate subquery to convert user IDs to names */
771  if (fout->remoteVersion >= 80100)
772  username_subquery = "SELECT rolname FROM pg_catalog.pg_roles WHERE oid =";
773  else
774  username_subquery = "SELECT usename FROM pg_catalog.pg_user WHERE usesysid =";
775 
776  /* check the version for the synchronized snapshots feature */
777  if (numWorkers > 1 && fout->remoteVersion < 90200
778  && !dopt.no_synchronized_snapshots)
779  fatal("Synchronized snapshots are not supported by this server version.\n"
780  "Run with --no-synchronized-snapshots instead if you do not need\n"
781  "synchronized snapshots.");
782 
783  /* check the version when a snapshot is explicitly specified by user */
784  if (dumpsnapshot && fout->remoteVersion < 90200)
785  fatal("Exported snapshots are not supported by this server version.");
786 
787  /*
788  * Find the last built-in OID, if needed (prior to 8.1)
789  *
790  * With 8.1 and above, we can just use FirstNormalObjectId - 1.
791  */
792  if (fout->remoteVersion < 80100)
794  else
796 
797  pg_log_info("last built-in OID is %u", g_last_builtin_oid);
798 
799  /* Expand schema selection patterns into OID lists */
800  if (schema_include_patterns.head != NULL)
801  {
802  expand_schema_name_patterns(fout, &schema_include_patterns,
803  &schema_include_oids,
804  strict_names);
805  if (schema_include_oids.head == NULL)
806  fatal("no matching schemas were found");
807  }
808  expand_schema_name_patterns(fout, &schema_exclude_patterns,
809  &schema_exclude_oids,
810  false);
811  /* non-matching exclusion patterns aren't an error */
812 
813  /* Expand table selection patterns into OID lists */
814  if (table_include_patterns.head != NULL)
815  {
816  expand_table_name_patterns(fout, &table_include_patterns,
817  &table_include_oids,
818  strict_names);
819  if (table_include_oids.head == NULL)
820  fatal("no matching tables were found");
821  }
822  expand_table_name_patterns(fout, &table_exclude_patterns,
823  &table_exclude_oids,
824  false);
825 
826  expand_table_name_patterns(fout, &tabledata_exclude_patterns,
827  &tabledata_exclude_oids,
828  false);
829 
830  expand_foreign_server_name_patterns(fout, &foreign_servers_include_patterns,
831  &foreign_servers_include_oids);
832 
833  /* non-matching exclusion patterns aren't an error */
834 
835  /*
836  * Dumping blobs is the default for dumps where an inclusion switch is not
837  * used (an "include everything" dump). -B can be used to exclude blobs
838  * from those dumps. -b can be used to include blobs even when an
839  * inclusion switch is used.
840  *
841  * -s means "schema only" and blobs are data, not schema, so we never
842  * include blobs when -s is used.
843  */
844  if (dopt.include_everything && !dopt.schemaOnly && !dopt.dontOutputBlobs)
845  dopt.outputBlobs = true;
846 
847  /*
848  * Now scan the database and create DumpableObject structs for all the
849  * objects we intend to dump.
850  */
851  tblinfo = getSchemaData(fout, &numTables);
852 
853  if (fout->remoteVersion < 80400)
854  guessConstraintInheritance(tblinfo, numTables);
855 
856  if (!dopt.schemaOnly)
857  {
858  getTableData(&dopt, tblinfo, numTables, 0);
860  if (dopt.dataOnly)
862  }
863 
864  if (dopt.schemaOnly && dopt.sequence_data)
865  getTableData(&dopt, tblinfo, numTables, RELKIND_SEQUENCE);
866 
867  /*
868  * In binary-upgrade mode, we do not have to worry about the actual blob
869  * data or the associated metadata that resides in the pg_largeobject and
870  * pg_largeobject_metadata tables, respectively.
871  *
872  * However, we do need to collect blob information as there may be
873  * comments or other information on blobs that we do need to dump out.
874  */
875  if (dopt.outputBlobs || dopt.binary_upgrade)
876  getBlobs(fout);
877 
878  /*
879  * Collect dependency data to assist in ordering the objects.
880  */
881  getDependencies(fout);
882 
883  /* Lastly, create dummy objects to represent the section boundaries */
884  boundaryObjs = createBoundaryObjects();
885 
886  /* Get pointers to all the known DumpableObjects */
887  getDumpableObjects(&dobjs, &numObjs);
888 
889  /*
890  * Add dummy dependencies to enforce the dump section ordering.
891  */
892  addBoundaryDependencies(dobjs, numObjs, boundaryObjs);
893 
894  /*
895  * Sort the objects into a safe dump order (no forward references).
896  *
897  * We rely on dependency information to help us determine a safe order, so
898  * the initial sort is mostly for cosmetic purposes: we sort by name to
899  * ensure that logically identical schemas will dump identically.
900  */
901  sortDumpableObjectsByTypeName(dobjs, numObjs);
902 
903  sortDumpableObjects(dobjs, numObjs,
904  boundaryObjs[0].dumpId, boundaryObjs[1].dumpId);
905 
906  /*
907  * Create archive TOC entries for all the objects to be dumped, in a safe
908  * order.
909  */
910 
911  /* First the special ENCODING, STDSTRINGS, and SEARCHPATH entries. */
912  dumpEncoding(fout);
913  dumpStdStrings(fout);
914  dumpSearchPath(fout);
915 
916  /* The database items are always next, unless we don't want them at all */
917  if (dopt.outputCreateDB)
918  dumpDatabase(fout);
919 
920  /* Now the rearrangeable objects. */
921  for (i = 0; i < numObjs; i++)
922  dumpDumpableObject(fout, dobjs[i]);
923 
924  /*
925  * Set up options info to ensure we dump what we want.
926  */
927  ropt = NewRestoreOptions();
928  ropt->filename = filename;
929 
930  /* if you change this list, see dumpOptionsFromRestoreOptions */
931  ropt->cparams.dbname = dopt.cparams.dbname ? pg_strdup(dopt.cparams.dbname) : NULL;
932  ropt->cparams.pgport = dopt.cparams.pgport ? pg_strdup(dopt.cparams.pgport) : NULL;
933  ropt->cparams.pghost = dopt.cparams.pghost ? pg_strdup(dopt.cparams.pghost) : NULL;
934  ropt->cparams.username = dopt.cparams.username ? pg_strdup(dopt.cparams.username) : NULL;
936  ropt->dropSchema = dopt.outputClean;
937  ropt->dataOnly = dopt.dataOnly;
938  ropt->schemaOnly = dopt.schemaOnly;
939  ropt->if_exists = dopt.if_exists;
940  ropt->column_inserts = dopt.column_inserts;
941  ropt->dumpSections = dopt.dumpSections;
942  ropt->aclsSkip = dopt.aclsSkip;
943  ropt->superuser = dopt.outputSuperuser;
944  ropt->createDB = dopt.outputCreateDB;
945  ropt->noOwner = dopt.outputNoOwner;
946  ropt->noTablespace = dopt.outputNoTablespaces;
947  ropt->disable_triggers = dopt.disable_triggers;
948  ropt->use_setsessauth = dopt.use_setsessauth;
950  ropt->dump_inserts = dopt.dump_inserts;
951  ropt->no_comments = dopt.no_comments;
952  ropt->no_publications = dopt.no_publications;
954  ropt->no_subscriptions = dopt.no_subscriptions;
955  ropt->lockWaitTimeout = dopt.lockWaitTimeout;
958  ropt->sequence_data = dopt.sequence_data;
959  ropt->binary_upgrade = dopt.binary_upgrade;
960 
961  if (compressLevel == -1)
962  ropt->compression = 0;
963  else
964  ropt->compression = compressLevel;
965 
966  ropt->suppressDumpWarnings = true; /* We've already shown them */
967 
968  SetArchiveOptions(fout, &dopt, ropt);
969 
970  /* Mark which entries should be output */
972 
973  /*
974  * The archive's TOC entries are now marked as to which ones will actually
975  * be output, so we can set up their dependency lists properly. This isn't
976  * necessary for plain-text output, though.
977  */
978  if (!plainText)
980 
981  /*
982  * And finally we can do the actual output.
983  *
984  * Note: for non-plain-text output formats, the output file is written
985  * inside CloseArchive(). This is, um, bizarre; but not worth changing
986  * right now.
987  */
988  if (plainText)
989  RestoreArchive(fout);
990 
991  CloseArchive(fout);
992 
993  exit_nicely(0);
994 }
995 
996 
/*
 * help
 *		Print the pg_dump command-line usage summary to stdout.
 *
 * Every user-facing string is wrapped in _() for NLS translation; the
 * strings must remain literal printf arguments so gettext can extract them.
 */
static void
help(const char *progname)
{
	printf(_("%s dumps a database as a text file or to other formats.\n\n"), progname);
	printf(_("Usage:\n"));
	printf(_("  %s [OPTION]... [DBNAME]\n"), progname);

	/* General options: output destination, format, parallelism, etc. */
	printf(_("\nGeneral options:\n"));
	printf(_("  -f, --file=FILENAME          output file or directory name\n"));
	printf(_("  -F, --format=c|d|t|p         output file format (custom, directory, tar,\n"
			 "                               plain text (default))\n"));
	printf(_("  -j, --jobs=NUM               use this many parallel jobs to dump\n"));
	printf(_("  -v, --verbose                verbose mode\n"));
	printf(_("  -V, --version                output version information, then exit\n"));
	printf(_("  -Z, --compress=0-9           compression level for compressed formats\n"));
	printf(_("  --lock-wait-timeout=TIMEOUT  fail after waiting TIMEOUT for a table lock\n"));
	printf(_("  --no-sync                    do not wait for changes to be written safely to disk\n"));
	printf(_("  -?, --help                   show this help, then exit\n"));

	/* Options that select or shape what gets dumped. */
	printf(_("\nOptions controlling the output content:\n"));
	printf(_("  -a, --data-only              dump only the data, not the schema\n"));
	printf(_("  -b, --blobs                  include large objects in dump\n"));
	printf(_("  -B, --no-blobs               exclude large objects in dump\n"));
	printf(_("  -c, --clean                  clean (drop) database objects before recreating\n"));
	printf(_("  -C, --create                 include commands to create database in dump\n"));
	printf(_("  -E, --encoding=ENCODING      dump the data in encoding ENCODING\n"));
	printf(_("  -n, --schema=PATTERN         dump the specified schema(s) only\n"));
	printf(_("  -N, --exclude-schema=PATTERN do NOT dump the specified schema(s)\n"));
	printf(_("  -O, --no-owner               skip restoration of object ownership in\n"
			 "                               plain-text format\n"));
	printf(_("  -s, --schema-only            dump only the schema, no data\n"));
	printf(_("  -S, --superuser=NAME         superuser user name to use in plain-text format\n"));
	printf(_("  -t, --table=PATTERN          dump the specified table(s) only\n"));
	printf(_("  -T, --exclude-table=PATTERN  do NOT dump the specified table(s)\n"));
	printf(_("  -x, --no-privileges          do not dump privileges (grant/revoke)\n"));
	printf(_("  --binary-upgrade             for use by upgrade utilities only\n"));
	printf(_("  --column-inserts             dump data as INSERT commands with column names\n"));
	printf(_("  --disable-dollar-quoting     disable dollar quoting, use SQL standard quoting\n"));
	printf(_("  --disable-triggers           disable triggers during data-only restore\n"));
	printf(_("  --enable-row-security        enable row security (dump only content user has\n"
			 "                               access to)\n"));
	printf(_("  --exclude-table-data=PATTERN do NOT dump data for the specified table(s)\n"));
	printf(_("  --extra-float-digits=NUM     override default setting for extra_float_digits\n"));
	printf(_("  --if-exists                  use IF EXISTS when dropping objects\n"));
	printf(_("  --include-foreign-data=PATTERN\n"
			 "                               include data of foreign tables on foreign\n"
			 "                               servers matching PATTERN\n"));
	printf(_("  --inserts                    dump data as INSERT commands, rather than COPY\n"));
	printf(_("  --load-via-partition-root    load partitions via the root table\n"));
	printf(_("  --no-comments                do not dump comments\n"));
	printf(_("  --no-publications            do not dump publications\n"));
	printf(_("  --no-security-labels         do not dump security label assignments\n"));
	printf(_("  --no-subscriptions           do not dump subscriptions\n"));
	printf(_("  --no-synchronized-snapshots  do not use synchronized snapshots in parallel jobs\n"));
	printf(_("  --no-tablespaces             do not dump tablespace assignments\n"));
	printf(_("  --no-unlogged-table-data     do not dump unlogged table data\n"));
	printf(_("  --on-conflict-do-nothing     add ON CONFLICT DO NOTHING to INSERT commands\n"));
	printf(_("  --quote-all-identifiers      quote all identifiers, even if not key words\n"));
	printf(_("  --rows-per-insert=NROWS      number of rows per INSERT; implies --inserts\n"));
	printf(_("  --section=SECTION            dump named section (pre-data, data, or post-data)\n"));
	printf(_("  --serializable-deferrable    wait until the dump can run without anomalies\n"));
	printf(_("  --snapshot=SNAPSHOT          use given snapshot for the dump\n"));
	printf(_("  --strict-names               require table and/or schema include patterns to\n"
			 "                               match at least one entity each\n"));
	printf(_("  --use-set-session-authorization\n"
			 "                               use SET SESSION AUTHORIZATION commands instead of\n"
			 "                               ALTER OWNER commands to set ownership\n"));

	/* Options describing how to reach the server. */
	printf(_("\nConnection options:\n"));
	printf(_("  -d, --dbname=DBNAME      database to dump\n"));
	printf(_("  -h, --host=HOSTNAME      database server host or socket directory\n"));
	printf(_("  -p, --port=PORT          database server port number\n"));
	printf(_("  -U, --username=NAME      connect as specified database user\n"));
	printf(_("  -w, --no-password        never prompt for password\n"));
	printf(_("  -W, --password           force password prompt (should happen automatically)\n"));
	printf(_("  --role=ROLENAME          do SET ROLE before dump\n"));

	printf(_("\nIf no database name is supplied, then the PGDATABASE environment\n"
			 "variable value is used.\n\n"));
	printf(_("Report bugs to <%s>.\n"), PACKAGE_BUGREPORT);
	printf(_("%s home page: <%s>\n"), PACKAGE_NAME, PACKAGE_URL);
}
1079 
1080 static void
1081 setup_connection(Archive *AH, const char *dumpencoding,
1082  const char *dumpsnapshot, char *use_role)
1083 {
1084  DumpOptions *dopt = AH->dopt;
1085  PGconn *conn = GetConnection(AH);
1086  const char *std_strings;
1087 
1089 
1090  /*
1091  * Set the client encoding if requested.
1092  */
1093  if (dumpencoding)
1094  {
1095  if (PQsetClientEncoding(conn, dumpencoding) < 0)
1096  fatal("invalid client encoding \"%s\" specified",
1097  dumpencoding);
1098  }
1099 
1100  /*
1101  * Get the active encoding and the standard_conforming_strings setting, so
1102  * we know how to escape strings.
1103  */
1104  AH->encoding = PQclientEncoding(conn);
1105 
1106  std_strings = PQparameterStatus(conn, "standard_conforming_strings");
1107  AH->std_strings = (std_strings && strcmp(std_strings, "on") == 0);
1108 
1109  /*
1110  * Set the role if requested. In a parallel dump worker, we'll be passed
1111  * use_role == NULL, but AH->use_role is already set (if user specified it
1112  * originally) and we should use that.
1113  */
1114  if (!use_role && AH->use_role)
1115  use_role = AH->use_role;
1116 
1117  /* Set the role if requested */
1118  if (use_role && AH->remoteVersion >= 80100)
1119  {
1120  PQExpBuffer query = createPQExpBuffer();
1121 
1122  appendPQExpBuffer(query, "SET ROLE %s", fmtId(use_role));
1123  ExecuteSqlStatement(AH, query->data);
1124  destroyPQExpBuffer(query);
1125 
1126  /* save it for possible later use by parallel workers */
1127  if (!AH->use_role)
1128  AH->use_role = pg_strdup(use_role);
1129  }
1130 
1131  /* Set the datestyle to ISO to ensure the dump's portability */
1132  ExecuteSqlStatement(AH, "SET DATESTYLE = ISO");
1133 
1134  /* Likewise, avoid using sql_standard intervalstyle */
1135  if (AH->remoteVersion >= 80400)
1136  ExecuteSqlStatement(AH, "SET INTERVALSTYLE = POSTGRES");
1137 
1138  /*
1139  * Use an explicitly specified extra_float_digits if it has been provided.
1140  * Otherwise, set extra_float_digits so that we can dump float data
1141  * exactly (given correctly implemented float I/O code, anyway).
1142  */
1144  {
1146 
1147  appendPQExpBuffer(q, "SET extra_float_digits TO %d",
1149  ExecuteSqlStatement(AH, q->data);
1150  destroyPQExpBuffer(q);
1151  }
1152  else if (AH->remoteVersion >= 90000)
1153  ExecuteSqlStatement(AH, "SET extra_float_digits TO 3");
1154  else
1155  ExecuteSqlStatement(AH, "SET extra_float_digits TO 2");
1156 
1157  /*
1158  * If synchronized scanning is supported, disable it, to prevent
1159  * unpredictable changes in row ordering across a dump and reload.
1160  */
1161  if (AH->remoteVersion >= 80300)
1162  ExecuteSqlStatement(AH, "SET synchronize_seqscans TO off");
1163 
1164  /*
1165  * Disable timeouts if supported.
1166  */
1167  ExecuteSqlStatement(AH, "SET statement_timeout = 0");
1168  if (AH->remoteVersion >= 90300)
1169  ExecuteSqlStatement(AH, "SET lock_timeout = 0");
1170  if (AH->remoteVersion >= 90600)
1171  ExecuteSqlStatement(AH, "SET idle_in_transaction_session_timeout = 0");
1172 
1173  /*
1174  * Quote all identifiers, if requested.
1175  */
1176  if (quote_all_identifiers && AH->remoteVersion >= 90100)
1177  ExecuteSqlStatement(AH, "SET quote_all_identifiers = true");
1178 
1179  /*
1180  * Adjust row-security mode, if supported.
1181  */
1182  if (AH->remoteVersion >= 90500)
1183  {
1184  if (dopt->enable_row_security)
1185  ExecuteSqlStatement(AH, "SET row_security = on");
1186  else
1187  ExecuteSqlStatement(AH, "SET row_security = off");
1188  }
1189 
1190  /*
1191  * Start transaction-snapshot mode transaction to dump consistent data.
1192  */
1193  ExecuteSqlStatement(AH, "BEGIN");
1194  if (AH->remoteVersion >= 90100)
1195  {
1196  /*
1197  * To support the combination of serializable_deferrable with the jobs
1198  * option we use REPEATABLE READ for the worker connections that are
1199  * passed a snapshot. As long as the snapshot is acquired in a
1200  * SERIALIZABLE, READ ONLY, DEFERRABLE transaction, its use within a
1201  * REPEATABLE READ transaction provides the appropriate integrity
1202  * guarantees. This is a kluge, but safe for back-patching.
1203  */
1204  if (dopt->serializable_deferrable && AH->sync_snapshot_id == NULL)
1206  "SET TRANSACTION ISOLATION LEVEL "
1207  "SERIALIZABLE, READ ONLY, DEFERRABLE");
1208  else
1210  "SET TRANSACTION ISOLATION LEVEL "
1211  "REPEATABLE READ, READ ONLY");
1212  }
1213  else
1214  {
1216  "SET TRANSACTION ISOLATION LEVEL "
1217  "SERIALIZABLE, READ ONLY");
1218  }
1219 
1220  /*
1221  * If user specified a snapshot to use, select that. In a parallel dump
1222  * worker, we'll be passed dumpsnapshot == NULL, but AH->sync_snapshot_id
1223  * is already set (if the server can handle it) and we should use that.
1224  */
1225  if (dumpsnapshot)
1226  AH->sync_snapshot_id = pg_strdup(dumpsnapshot);
1227 
1228  if (AH->sync_snapshot_id)
1229  {
1230  PQExpBuffer query = createPQExpBuffer();
1231 
1232  appendPQExpBufferStr(query, "SET TRANSACTION SNAPSHOT ");
1233  appendStringLiteralConn(query, AH->sync_snapshot_id, conn);
1234  ExecuteSqlStatement(AH, query->data);
1235  destroyPQExpBuffer(query);
1236  }
1237  else if (AH->numWorkers > 1 &&
1238  AH->remoteVersion >= 90200 &&
1240  {
1241  if (AH->isStandby && AH->remoteVersion < 100000)
1242  fatal("Synchronized snapshots on standby servers are not supported by this server version.\n"
1243  "Run with --no-synchronized-snapshots instead if you do not need\n"
1244  "synchronized snapshots.");
1245 
1246 
1248  }
1249 }
1250 
1251 /* Set up connection for a parallel worker process */
1252 static void
1254 {
1255  /*
1256  * We want to re-select all the same values the leader connection is
1257  * using. We'll have inherited directly-usable values in
1258  * AH->sync_snapshot_id and AH->use_role, but we need to translate the
1259  * inherited encoding value back to a string to pass to setup_connection.
1260  */
1261  setup_connection(AH,
1263  NULL,
1264  NULL);
1265 }
1266 
1267 static char *
1269 {
1270  char *query = "SELECT pg_catalog.pg_export_snapshot()";
1271  char *result;
1272  PGresult *res;
1273 
1274  res = ExecuteSqlQueryForSingleRow(fout, query);
1275  result = pg_strdup(PQgetvalue(res, 0, 0));
1276  PQclear(res);
1277 
1278  return result;
1279 }
1280 
1281 static ArchiveFormat
1283 {
1284  ArchiveFormat archiveFormat;
1285 
1286  *mode = archModeWrite;
1287 
1288  if (pg_strcasecmp(format, "a") == 0 || pg_strcasecmp(format, "append") == 0)
1289  {
1290  /* This is used by pg_dumpall, and is not documented */
1291  archiveFormat = archNull;
1292  *mode = archModeAppend;
1293  }
1294  else if (pg_strcasecmp(format, "c") == 0)
1295  archiveFormat = archCustom;
1296  else if (pg_strcasecmp(format, "custom") == 0)
1297  archiveFormat = archCustom;
1298  else if (pg_strcasecmp(format, "d") == 0)
1299  archiveFormat = archDirectory;
1300  else if (pg_strcasecmp(format, "directory") == 0)
1301  archiveFormat = archDirectory;
1302  else if (pg_strcasecmp(format, "p") == 0)
1303  archiveFormat = archNull;
1304  else if (pg_strcasecmp(format, "plain") == 0)
1305  archiveFormat = archNull;
1306  else if (pg_strcasecmp(format, "t") == 0)
1307  archiveFormat = archTar;
1308  else if (pg_strcasecmp(format, "tar") == 0)
1309  archiveFormat = archTar;
1310  else
1311  fatal("invalid output format \"%s\" specified", format);
1312  return archiveFormat;
1313 }
1314 
1315 /*
1316  * Find the OIDs of all schemas matching the given list of patterns,
1317  * and append them to the given OID list.
1318  */
1319 static void
1321  SimpleStringList *patterns,
1322  SimpleOidList *oids,
1323  bool strict_names)
1324 {
1325  PQExpBuffer query;
1326  PGresult *res;
1327  SimpleStringListCell *cell;
1328  int i;
1329 
1330  if (patterns->head == NULL)
1331  return; /* nothing to do */
1332 
1333  query = createPQExpBuffer();
1334 
1335  /*
1336  * The loop below runs multiple SELECTs might sometimes result in
1337  * duplicate entries in the OID list, but we don't care.
1338  */
1339 
1340  for (cell = patterns->head; cell; cell = cell->next)
1341  {
1342  appendPQExpBufferStr(query,
1343  "SELECT oid FROM pg_catalog.pg_namespace n\n");
1344  processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1345  false, NULL, "n.nspname", NULL, NULL);
1346 
1347  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1348  if (strict_names && PQntuples(res) == 0)
1349  fatal("no matching schemas were found for pattern \"%s\"", cell->val);
1350 
1351  for (i = 0; i < PQntuples(res); i++)
1352  {
1353  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1354  }
1355 
1356  PQclear(res);
1357  resetPQExpBuffer(query);
1358  }
1359 
1360  destroyPQExpBuffer(query);
1361 }
1362 
1363 /*
1364  * Find the OIDs of all foreign servers matching the given list of patterns,
1365  * and append them to the given OID list.
1366  */
1367 static void
1369  SimpleStringList *patterns,
1370  SimpleOidList *oids)
1371 {
1372  PQExpBuffer query;
1373  PGresult *res;
1374  SimpleStringListCell *cell;
1375  int i;
1376 
1377  if (patterns->head == NULL)
1378  return; /* nothing to do */
1379 
1380  query = createPQExpBuffer();
1381 
1382  /*
1383  * The loop below runs multiple SELECTs might sometimes result in
1384  * duplicate entries in the OID list, but we don't care.
1385  */
1386 
1387  for (cell = patterns->head; cell; cell = cell->next)
1388  {
1389  appendPQExpBufferStr(query,
1390  "SELECT oid FROM pg_catalog.pg_foreign_server s\n");
1391  processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1392  false, NULL, "s.srvname", NULL, NULL);
1393 
1394  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1395  if (PQntuples(res) == 0)
1396  fatal("no matching foreign servers were found for pattern \"%s\"", cell->val);
1397 
1398  for (i = 0; i < PQntuples(res); i++)
1399  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1400 
1401  PQclear(res);
1402  resetPQExpBuffer(query);
1403  }
1404 
1405  destroyPQExpBuffer(query);
1406 }
1407 
1408 /*
1409  * Find the OIDs of all tables matching the given list of patterns,
1410  * and append them to the given OID list. See also expand_dbname_patterns()
1411  * in pg_dumpall.c
1412  */
1413 static void
1415  SimpleStringList *patterns, SimpleOidList *oids,
1416  bool strict_names)
1417 {
1418  PQExpBuffer query;
1419  PGresult *res;
1420  SimpleStringListCell *cell;
1421  int i;
1422 
1423  if (patterns->head == NULL)
1424  return; /* nothing to do */
1425 
1426  query = createPQExpBuffer();
1427 
1428  /*
1429  * this might sometimes result in duplicate entries in the OID list, but
1430  * we don't care.
1431  */
1432 
1433  for (cell = patterns->head; cell; cell = cell->next)
1434  {
1435  /*
1436  * Query must remain ABSOLUTELY devoid of unqualified names. This
1437  * would be unnecessary given a pg_table_is_visible() variant taking a
1438  * search_path argument.
1439  */
1440  appendPQExpBuffer(query,
1441  "SELECT c.oid"
1442  "\nFROM pg_catalog.pg_class c"
1443  "\n LEFT JOIN pg_catalog.pg_namespace n"
1444  "\n ON n.oid OPERATOR(pg_catalog.=) c.relnamespace"
1445  "\nWHERE c.relkind OPERATOR(pg_catalog.=) ANY"
1446  "\n (array['%c', '%c', '%c', '%c', '%c', '%c'])\n",
1447  RELKIND_RELATION, RELKIND_SEQUENCE, RELKIND_VIEW,
1448  RELKIND_MATVIEW, RELKIND_FOREIGN_TABLE,
1449  RELKIND_PARTITIONED_TABLE);
1450  processSQLNamePattern(GetConnection(fout), query, cell->val, true,
1451  false, "n.nspname", "c.relname", NULL,
1452  "pg_catalog.pg_table_is_visible(c.oid)");
1453 
1454  ExecuteSqlStatement(fout, "RESET search_path");
1455  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1458  if (strict_names && PQntuples(res) == 0)
1459  fatal("no matching tables were found for pattern \"%s\"", cell->val);
1460 
1461  for (i = 0; i < PQntuples(res); i++)
1462  {
1463  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1464  }
1465 
1466  PQclear(res);
1467  resetPQExpBuffer(query);
1468  }
1469 
1470  destroyPQExpBuffer(query);
1471 }
1472 
1473 /*
1474  * checkExtensionMembership
1475  * Determine whether object is an extension member, and if so,
1476  * record an appropriate dependency and set the object's dump flag.
1477  *
1478  * It's important to call this for each object that could be an extension
1479  * member. Generally, we integrate this with determining the object's
1480  * to-be-dumped-ness, since extension membership overrides other rules for that.
1481  *
1482  * Returns true if object is an extension member, else false.
1483  */
1484 static bool
1486 {
1487  ExtensionInfo *ext = findOwningExtension(dobj->catId);
1488 
1489  if (ext == NULL)
1490  return false;
1491 
1492  dobj->ext_member = true;
1493 
1494  /* Record dependency so that getDependencies needn't deal with that */
1495  addObjectDependency(dobj, ext->dobj.dumpId);
1496 
1497  /*
1498  * In 9.6 and above, mark the member object to have any non-initial ACL,
1499  * policies, and security labels dumped.
1500  *
1501  * Note that any initial ACLs (see pg_init_privs) will be removed when we
1502  * extract the information about the object. We don't provide support for
1503  * initial policies and security labels and it seems unlikely for those to
1504  * ever exist, but we may have to revisit this later.
1505  *
1506  * Prior to 9.6, we do not include any extension member components.
1507  *
1508  * In binary upgrades, we still dump all components of the members
1509  * individually, since the idea is to exactly reproduce the database
1510  * contents rather than replace the extension contents with something
1511  * different.
1512  */
1513  if (fout->dopt->binary_upgrade)
1514  dobj->dump = ext->dobj.dump;
1515  else
1516  {
1517  if (fout->remoteVersion < 90600)
1518  dobj->dump = DUMP_COMPONENT_NONE;
1519  else
1520  dobj->dump = ext->dobj.dump_contains & (DUMP_COMPONENT_ACL |
1523  }
1524 
1525  return true;
1526 }
1527 
1528 /*
1529  * selectDumpableNamespace: policy-setting subroutine
1530  * Mark a namespace as to be dumped or not
1531  */
1532 static void
1534 {
1535  /*
1536  * If specific tables are being dumped, do not dump any complete
1537  * namespaces. If specific namespaces are being dumped, dump just those
1538  * namespaces. Otherwise, dump all non-system namespaces.
1539  */
1540  if (table_include_oids.head != NULL)
1541  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1542  else if (schema_include_oids.head != NULL)
1543  nsinfo->dobj.dump_contains = nsinfo->dobj.dump =
1544  simple_oid_list_member(&schema_include_oids,
1545  nsinfo->dobj.catId.oid) ?
1547  else if (fout->remoteVersion >= 90600 &&
1548  strcmp(nsinfo->dobj.name, "pg_catalog") == 0)
1549  {
1550  /*
1551  * In 9.6 and above, we dump out any ACLs defined in pg_catalog, if
1552  * they are interesting (and not the original ACLs which were set at
1553  * initdb time, see pg_init_privs).
1554  */
1555  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
1556  }
1557  else if (strncmp(nsinfo->dobj.name, "pg_", 3) == 0 ||
1558  strcmp(nsinfo->dobj.name, "information_schema") == 0)
1559  {
1560  /* Other system schemas don't get dumped */
1561  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1562  }
1563  else if (strcmp(nsinfo->dobj.name, "public") == 0)
1564  {
1565  /*
1566  * The public schema is a strange beast that sits in a sort of
1567  * no-mans-land between being a system object and a user object. We
1568  * don't want to dump creation or comment commands for it, because
1569  * that complicates matters for non-superuser use of pg_dump. But we
1570  * should dump any ACL changes that have occurred for it, and of
1571  * course we should dump contained objects.
1572  */
1573  nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
1575  }
1576  else
1577  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ALL;
1578 
1579  /*
1580  * In any case, a namespace can be excluded by an exclusion switch
1581  */
1582  if (nsinfo->dobj.dump_contains &&
1583  simple_oid_list_member(&schema_exclude_oids,
1584  nsinfo->dobj.catId.oid))
1585  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1586 
1587  /*
1588  * If the schema belongs to an extension, allow extension membership to
1589  * override the dump decision for the schema itself. However, this does
1590  * not change dump_contains, so this won't change what we do with objects
1591  * within the schema. (If they belong to the extension, they'll get
1592  * suppressed by it, otherwise not.)
1593  */
1594  (void) checkExtensionMembership(&nsinfo->dobj, fout);
1595 }
1596 
1597 /*
1598  * selectDumpableTable: policy-setting subroutine
1599  * Mark a table as to be dumped or not
1600  */
1601 static void
1603 {
1604  if (checkExtensionMembership(&tbinfo->dobj, fout))
1605  return; /* extension membership overrides all else */
1606 
1607  /*
1608  * If specific tables are being dumped, dump just those tables; else, dump
1609  * according to the parent namespace's dump flag.
1610  */
1611  if (table_include_oids.head != NULL)
1612  tbinfo->dobj.dump = simple_oid_list_member(&table_include_oids,
1613  tbinfo->dobj.catId.oid) ?
1615  else
1616  tbinfo->dobj.dump = tbinfo->dobj.namespace->dobj.dump_contains;
1617 
1618  /*
1619  * In any case, a table can be excluded by an exclusion switch
1620  */
1621  if (tbinfo->dobj.dump &&
1622  simple_oid_list_member(&table_exclude_oids,
1623  tbinfo->dobj.catId.oid))
1624  tbinfo->dobj.dump = DUMP_COMPONENT_NONE;
1625 }
1626 
1627 /*
1628  * selectDumpableType: policy-setting subroutine
1629  * Mark a type as to be dumped or not
1630  *
1631  * If it's a table's rowtype or an autogenerated array type, we also apply a
1632  * special type code to facilitate sorting into the desired order. (We don't
1633  * want to consider those to be ordinary types because that would bring tables
1634  * up into the datatype part of the dump order.) We still set the object's
1635  * dump flag; that's not going to cause the dummy type to be dumped, but we
1636  * need it so that casts involving such types will be dumped correctly -- see
1637  * dumpCast. This means the flag should be set the same as for the underlying
1638  * object (the table or base type).
1639  */
1640 static void
1642 {
1643  /* skip complex types, except for standalone composite types */
1644  if (OidIsValid(tyinfo->typrelid) &&
1645  tyinfo->typrelkind != RELKIND_COMPOSITE_TYPE)
1646  {
1647  TableInfo *tytable = findTableByOid(tyinfo->typrelid);
1648 
1649  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1650  if (tytable != NULL)
1651  tyinfo->dobj.dump = tytable->dobj.dump;
1652  else
1653  tyinfo->dobj.dump = DUMP_COMPONENT_NONE;
1654  return;
1655  }
1656 
1657  /* skip auto-generated array types */
1658  if (tyinfo->isArray || tyinfo->isMultirange)
1659  {
1660  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1661 
1662  /*
1663  * Fall through to set the dump flag; we assume that the subsequent
1664  * rules will do the same thing as they would for the array's base
1665  * type. (We cannot reliably look up the base type here, since
1666  * getTypes may not have processed it yet.)
1667  */
1668  }
1669 
1670  if (checkExtensionMembership(&tyinfo->dobj, fout))
1671  return; /* extension membership overrides all else */
1672 
1673  /* Dump based on if the contents of the namespace are being dumped */
1674  tyinfo->dobj.dump = tyinfo->dobj.namespace->dobj.dump_contains;
1675 }
1676 
1677 /*
1678  * selectDumpableDefaultACL: policy-setting subroutine
1679  * Mark a default ACL as to be dumped or not
1680  *
1681  * For per-schema default ACLs, dump if the schema is to be dumped.
1682  * Otherwise dump if we are dumping "everything". Note that dataOnly
1683  * and aclsSkip are checked separately.
1684  */
1685 static void
1687 {
1688  /* Default ACLs can't be extension members */
1689 
1690  if (dinfo->dobj.namespace)
1691  /* default ACLs are considered part of the namespace */
1692  dinfo->dobj.dump = dinfo->dobj.namespace->dobj.dump_contains;
1693  else
1694  dinfo->dobj.dump = dopt->include_everything ?
1696 }
1697 
1698 /*
1699  * selectDumpableCast: policy-setting subroutine
1700  * Mark a cast as to be dumped or not
1701  *
1702  * Casts do not belong to any particular namespace (since they haven't got
1703  * names), nor do they have identifiable owners. To distinguish user-defined
1704  * casts from built-in ones, we must resort to checking whether the cast's
1705  * OID is in the range reserved for initdb.
1706  */
1707 static void
1709 {
1710  if (checkExtensionMembership(&cast->dobj, fout))
1711  return; /* extension membership overrides all else */
1712 
1713  /*
1714  * This would be DUMP_COMPONENT_ACL for from-initdb casts, but they do not
1715  * support ACLs currently.
1716  */
1717  if (cast->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1718  cast->dobj.dump = DUMP_COMPONENT_NONE;
1719  else
1720  cast->dobj.dump = fout->dopt->include_everything ?
1722 }
1723 
1724 /*
1725  * selectDumpableProcLang: policy-setting subroutine
1726  * Mark a procedural language as to be dumped or not
1727  *
1728  * Procedural languages do not belong to any particular namespace. To
1729  * identify built-in languages, we must resort to checking whether the
1730  * language's OID is in the range reserved for initdb.
1731  */
1732 static void
1734 {
1735  if (checkExtensionMembership(&plang->dobj, fout))
1736  return; /* extension membership overrides all else */
1737 
1738  /*
1739  * Only include procedural languages when we are dumping everything.
1740  *
1741  * For from-initdb procedural languages, only include ACLs, as we do for
1742  * the pg_catalog namespace. We need this because procedural languages do
1743  * not live in any namespace.
1744  */
1745  if (!fout->dopt->include_everything)
1746  plang->dobj.dump = DUMP_COMPONENT_NONE;
1747  else
1748  {
1749  if (plang->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1750  plang->dobj.dump = fout->remoteVersion < 90600 ?
1752  else
1753  plang->dobj.dump = DUMP_COMPONENT_ALL;
1754  }
1755 }
1756 
1757 /*
1758  * selectDumpableAccessMethod: policy-setting subroutine
1759  * Mark an access method as to be dumped or not
1760  *
1761  * Access methods do not belong to any particular namespace. To identify
1762  * built-in access methods, we must resort to checking whether the
1763  * method's OID is in the range reserved for initdb.
1764  */
1765 static void
1767 {
1768  if (checkExtensionMembership(&method->dobj, fout))
1769  return; /* extension membership overrides all else */
1770 
1771  /*
1772  * This would be DUMP_COMPONENT_ACL for from-initdb access methods, but
1773  * they do not support ACLs currently.
1774  */
1775  if (method->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1776  method->dobj.dump = DUMP_COMPONENT_NONE;
1777  else
1778  method->dobj.dump = fout->dopt->include_everything ?
1780 }
1781 
1782 /*
1783  * selectDumpableExtension: policy-setting subroutine
1784  * Mark an extension as to be dumped or not
1785  *
1786  * Built-in extensions should be skipped except for checking ACLs, since we
1787  * assume those will already be installed in the target database. We identify
1788  * such extensions by their having OIDs in the range reserved for initdb.
1789  * We dump all user-added extensions by default, or none of them if
1790  * include_everything is false (i.e., a --schema or --table switch was given).
1791  */
1792 static void
1794 {
1795  /*
1796  * Use DUMP_COMPONENT_ACL for built-in extensions, to allow users to
1797  * change permissions on their member objects, if they wish to, and have
1798  * those changes preserved.
1799  */
1800  if (extinfo->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1801  extinfo->dobj.dump = extinfo->dobj.dump_contains = DUMP_COMPONENT_ACL;
1802  else
1803  extinfo->dobj.dump = extinfo->dobj.dump_contains =
1806 }
1807 
1808 /*
1809  * selectDumpablePublicationTable: policy-setting subroutine
1810  * Mark a publication table as to be dumped or not
1811  *
1812  * Publication tables have schemas, but those are ignored in decision making,
1813  * because publications are only dumped when we are dumping everything.
1814  */
1815 static void
1817 {
1818  if (checkExtensionMembership(dobj, fout))
1819  return; /* extension membership overrides all else */
1820 
1821  dobj->dump = fout->dopt->include_everything ?
1823 }
1824 
1825 /*
1826  * selectDumpableObject: policy-setting subroutine
1827  * Mark a generic dumpable object as to be dumped or not
1828  *
1829  * Use this only for object types without a special-case routine above.
1830  */
1831 static void
1833 {
1834  if (checkExtensionMembership(dobj, fout))
1835  return; /* extension membership overrides all else */
1836 
1837  /*
1838  * Default policy is to dump if parent namespace is dumpable, or for
1839  * non-namespace-associated items, dump if we're dumping "everything".
1840  */
1841  if (dobj->namespace)
1842  dobj->dump = dobj->namespace->dobj.dump_contains;
1843  else
1844  dobj->dump = fout->dopt->include_everything ?
1846 }
1847 
1848 /*
1849  * Dump a table's contents for loading using the COPY command
1850  * - this routine is called by the Archiver when it wants the table
1851  * to be dumped.
1852  */
1853 static int
1854 dumpTableData_copy(Archive *fout, const void *dcontext)
1855 {
1856  TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
1857  TableInfo *tbinfo = tdinfo->tdtable;
1858  const char *classname = tbinfo->dobj.name;
1860 
1861  /*
1862  * Note: can't use getThreadLocalPQExpBuffer() here, we're calling fmtId
1863  * which uses it already.
1864  */
1865  PQExpBuffer clistBuf = createPQExpBuffer();
1866  PGconn *conn = GetConnection(fout);
1867  PGresult *res;
1868  int ret;
1869  char *copybuf;
1870  const char *column_list;
1871 
1872  pg_log_info("dumping contents of table \"%s.%s\"",
1873  tbinfo->dobj.namespace->dobj.name, classname);
1874 
1875  /*
1876  * Specify the column list explicitly so that we have no possibility of
1877  * retrieving data in the wrong column order. (The default column
1878  * ordering of COPY will not be what we want in certain corner cases
1879  * involving ADD COLUMN and inheritance.)
1880  */
1881  column_list = fmtCopyColumnList(tbinfo, clistBuf);
1882 
1883  /*
1884  * Use COPY (SELECT ...) TO when dumping a foreign table's data, and when
1885  * a filter condition was specified. For other cases a simple COPY
1886  * suffices.
1887  */
1888  if (tdinfo->filtercond || tbinfo->relkind == RELKIND_FOREIGN_TABLE)
1889  {
1890  /* Note: this syntax is only supported in 8.2 and up */
1891  appendPQExpBufferStr(q, "COPY (SELECT ");
1892  /* klugery to get rid of parens in column list */
1893  if (strlen(column_list) > 2)
1894  {
1895  appendPQExpBufferStr(q, column_list + 1);
1896  q->data[q->len - 1] = ' ';
1897  }
1898  else
1899  appendPQExpBufferStr(q, "* ");
1900 
1901  appendPQExpBuffer(q, "FROM %s %s) TO stdout;",
1902  fmtQualifiedDumpable(tbinfo),
1903  tdinfo->filtercond ? tdinfo->filtercond : "");
1904  }
1905  else
1906  {
1907  appendPQExpBuffer(q, "COPY %s %s TO stdout;",
1908  fmtQualifiedDumpable(tbinfo),
1909  column_list);
1910  }
1911  res = ExecuteSqlQuery(fout, q->data, PGRES_COPY_OUT);
1912  PQclear(res);
1913  destroyPQExpBuffer(clistBuf);
1914 
1915  for (;;)
1916  {
1917  ret = PQgetCopyData(conn, &copybuf, 0);
1918 
1919  if (ret < 0)
1920  break; /* done or error */
1921 
1922  if (copybuf)
1923  {
1924  WriteData(fout, copybuf, ret);
1925  PQfreemem(copybuf);
1926  }
1927 
1928  /* ----------
1929  * THROTTLE:
1930  *
1931  * There was considerable discussion in late July, 2000 regarding
1932  * slowing down pg_dump when backing up large tables. Users with both
1933  * slow & fast (multi-processor) machines experienced performance
1934  * degradation when doing a backup.
1935  *
1936  * Initial attempts based on sleeping for a number of ms for each ms
1937  * of work were deemed too complex, then a simple 'sleep in each loop'
1938  * implementation was suggested. The latter failed because the loop
1939  * was too tight. Finally, the following was implemented:
1940  *
1941  * If throttle is non-zero, then
1942  * See how long since the last sleep.
1943  * Work out how long to sleep (based on ratio).
1944  * If sleep is more than 100ms, then
1945  * sleep
1946  * reset timer
1947  * EndIf
1948  * EndIf
1949  *
1950  * where the throttle value was the number of ms to sleep per ms of
1951  * work. The calculation was done in each loop.
1952  *
1953  * Most of the hard work is done in the backend, and this solution
1954  * still did not work particularly well: on slow machines, the ratio
1955  * was 50:1, and on medium paced machines, 1:1, and on fast
1956  * multi-processor machines, it had little or no effect, for reasons
1957  * that were unclear.
1958  *
1959  * Further discussion ensued, and the proposal was dropped.
1960  *
1961  * For those people who want this feature, it can be implemented using
1962  * gettimeofday in each loop, calculating the time since last sleep,
1963  * multiplying that by the sleep ratio, then if the result is more
1964  * than a preset 'minimum sleep time' (say 100ms), call the 'select'
1965  * function to sleep for a subsecond period ie.
1966  *
1967  * select(0, NULL, NULL, NULL, &tvi);
1968  *
1969  * This will return after the interval specified in the structure tvi.
1970  * Finally, call gettimeofday again to save the 'last sleep time'.
1971  * ----------
1972  */
1973  }
1974  archprintf(fout, "\\.\n\n\n");
1975 
1976  if (ret == -2)
1977  {
1978  /* copy data transfer failed */
1979  pg_log_error("Dumping the contents of table \"%s\" failed: PQgetCopyData() failed.", classname);
1980  pg_log_error("Error message from server: %s", PQerrorMessage(conn));
1981  pg_log_error("The command was: %s", q->data);
1982  exit_nicely(1);
1983  }
1984 
1985  /* Check command status and return to normal libpq state */
1986  res = PQgetResult(conn);
1987  if (PQresultStatus(res) != PGRES_COMMAND_OK)
1988  {
1989  pg_log_error("Dumping the contents of table \"%s\" failed: PQgetResult() failed.", classname);
1990  pg_log_error("Error message from server: %s", PQerrorMessage(conn));
1991  pg_log_error("The command was: %s", q->data);
1992  exit_nicely(1);
1993  }
1994  PQclear(res);
1995 
1996  /* Do this to ensure we've pumped libpq back to idle state */
1997  if (PQgetResult(conn) != NULL)
1998  pg_log_warning("unexpected extra results during COPY of table \"%s\"",
1999  classname);
2000 
2001  destroyPQExpBuffer(q);
2002  return 1;
2003 }
2004 
2005 /*
2006  * Dump table data using INSERT commands.
2007  *
2008  * Caution: when we restore from an archive file direct to database, the
2009  * INSERT commands emitted by this function have to be parsed by
2010  * pg_backup_db.c's ExecuteSimpleCommands(), which will not handle comments,
2011  * E'' strings, or dollar-quoted strings. So don't emit anything like that.
2012  */
2013 static int
2014 dumpTableData_insert(Archive *fout, const void *dcontext)
2015 {
2016  TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
2017  TableInfo *tbinfo = tdinfo->tdtable;
2018  DumpOptions *dopt = fout->dopt;
2020  PQExpBuffer insertStmt = NULL;
2021  PGresult *res;
2022  int nfields;
2023  int rows_per_statement = dopt->dump_inserts;
2024  int rows_this_statement = 0;
2025 
2026  appendPQExpBuffer(q, "DECLARE _pg_dump_cursor CURSOR FOR "
2027  "SELECT * FROM ONLY %s",
2028  fmtQualifiedDumpable(tbinfo));
2029  if (tdinfo->filtercond)
2030  appendPQExpBuffer(q, " %s", tdinfo->filtercond);
2031 
2032  ExecuteSqlStatement(fout, q->data);
2033 
2034  while (1)
2035  {
2036  res = ExecuteSqlQuery(fout, "FETCH 100 FROM _pg_dump_cursor",
2037  PGRES_TUPLES_OK);
2038  nfields = PQnfields(res);
2039 
2040  /*
2041  * First time through, we build as much of the INSERT statement as
2042  * possible in "insertStmt", which we can then just print for each
2043  * statement. If the table happens to have zero columns then this will
2044  * be a complete statement, otherwise it will end in "VALUES" and be
2045  * ready to have the row's column values printed.
2046  */
2047  if (insertStmt == NULL)
2048  {
2049  TableInfo *targettab;
2050 
2051  insertStmt = createPQExpBuffer();
2052 
2053  /*
2054  * When load-via-partition-root is set, get the root table name
2055  * for the partition table, so that we can reload data through the
2056  * root table.
2057  */
2058  if (dopt->load_via_partition_root && tbinfo->ispartition)
2059  targettab = getRootTableInfo(tbinfo);
2060  else
2061  targettab = tbinfo;
2062 
2063  appendPQExpBuffer(insertStmt, "INSERT INTO %s ",
2064  fmtQualifiedDumpable(targettab));
2065 
2066  /* corner case for zero-column table */
2067  if (nfields == 0)
2068  {
2069  appendPQExpBufferStr(insertStmt, "DEFAULT VALUES;\n");
2070  }
2071  else
2072  {
2073  /* append the list of column names if required */
2074  if (dopt->column_inserts)
2075  {
2076  appendPQExpBufferChar(insertStmt, '(');
2077  for (int field = 0; field < nfields; field++)
2078  {
2079  if (field > 0)
2080  appendPQExpBufferStr(insertStmt, ", ");
2081  appendPQExpBufferStr(insertStmt,
2082  fmtId(PQfname(res, field)));
2083  }
2084  appendPQExpBufferStr(insertStmt, ") ");
2085  }
2086 
2087  if (tbinfo->needs_override)
2088  appendPQExpBufferStr(insertStmt, "OVERRIDING SYSTEM VALUE ");
2089 
2090  appendPQExpBufferStr(insertStmt, "VALUES");
2091  }
2092  }
2093 
2094  for (int tuple = 0; tuple < PQntuples(res); tuple++)
2095  {
2096  /* Write the INSERT if not in the middle of a multi-row INSERT. */
2097  if (rows_this_statement == 0)
2098  archputs(insertStmt->data, fout);
2099 
2100  /*
2101  * If it is zero-column table then we've already written the
2102  * complete statement, which will mean we've disobeyed
2103  * --rows-per-insert when it's set greater than 1. We do support
2104  * a way to make this multi-row with: SELECT UNION ALL SELECT
2105  * UNION ALL ... but that's non-standard so we should avoid it
2106  * given that using INSERTs is mostly only ever needed for
2107  * cross-database exports.
2108  */
2109  if (nfields == 0)
2110  continue;
2111 
2112  /* Emit a row heading */
2113  if (rows_per_statement == 1)
2114  archputs(" (", fout);
2115  else if (rows_this_statement > 0)
2116  archputs(",\n\t(", fout);
2117  else
2118  archputs("\n\t(", fout);
2119 
2120  for (int field = 0; field < nfields; field++)
2121  {
2122  if (field > 0)
2123  archputs(", ", fout);
2124  if (tbinfo->attgenerated[field])
2125  {
2126  archputs("DEFAULT", fout);
2127  continue;
2128  }
2129  if (PQgetisnull(res, tuple, field))
2130  {
2131  archputs("NULL", fout);
2132  continue;
2133  }
2134 
2135  /* XXX This code is partially duplicated in ruleutils.c */
2136  switch (PQftype(res, field))
2137  {
2138  case INT2OID:
2139  case INT4OID:
2140  case INT8OID:
2141  case OIDOID:
2142  case FLOAT4OID:
2143  case FLOAT8OID:
2144  case NUMERICOID:
2145  {
2146  /*
2147  * These types are printed without quotes unless
2148  * they contain values that aren't accepted by the
2149  * scanner unquoted (e.g., 'NaN'). Note that
2150  * strtod() and friends might accept NaN, so we
2151  * can't use that to test.
2152  *
2153  * In reality we only need to defend against
2154  * infinity and NaN, so we need not get too crazy
2155  * about pattern matching here.
2156  */
2157  const char *s = PQgetvalue(res, tuple, field);
2158 
2159  if (strspn(s, "0123456789 +-eE.") == strlen(s))
2160  archputs(s, fout);
2161  else
2162  archprintf(fout, "'%s'", s);
2163  }
2164  break;
2165 
2166  case BITOID:
2167  case VARBITOID:
2168  archprintf(fout, "B'%s'",
2169  PQgetvalue(res, tuple, field));
2170  break;
2171 
2172  case BOOLOID:
2173  if (strcmp(PQgetvalue(res, tuple, field), "t") == 0)
2174  archputs("true", fout);
2175  else
2176  archputs("false", fout);
2177  break;
2178 
2179  default:
2180  /* All other types are printed as string literals. */
2181  resetPQExpBuffer(q);
2183  PQgetvalue(res, tuple, field),
2184  fout);
2185  archputs(q->data, fout);
2186  break;
2187  }
2188  }
2189 
2190  /* Terminate the row ... */
2191  archputs(")", fout);
2192 
2193  /* ... and the statement, if the target no. of rows is reached */
2194  if (++rows_this_statement >= rows_per_statement)
2195  {
2196  if (dopt->do_nothing)
2197  archputs(" ON CONFLICT DO NOTHING;\n", fout);
2198  else
2199  archputs(";\n", fout);
2200  /* Reset the row counter */
2201  rows_this_statement = 0;
2202  }
2203  }
2204 
2205  if (PQntuples(res) <= 0)
2206  {
2207  PQclear(res);
2208  break;
2209  }
2210  PQclear(res);
2211  }
2212 
2213  /* Terminate any statements that didn't make the row count. */
2214  if (rows_this_statement > 0)
2215  {
2216  if (dopt->do_nothing)
2217  archputs(" ON CONFLICT DO NOTHING;\n", fout);
2218  else
2219  archputs(";\n", fout);
2220  }
2221 
2222  archputs("\n\n", fout);
2223 
2224  ExecuteSqlStatement(fout, "CLOSE _pg_dump_cursor");
2225 
2226  destroyPQExpBuffer(q);
2227  if (insertStmt != NULL)
2228  destroyPQExpBuffer(insertStmt);
2229 
2230  return 1;
2231 }
2232 
2233 /*
2234  * getRootTableInfo:
2235  * get the root TableInfo for the given partition table.
2236  */
2237 static TableInfo *
2239 {
2240  TableInfo *parentTbinfo;
2241 
2242  Assert(tbinfo->ispartition);
2243  Assert(tbinfo->numParents == 1);
2244 
2245  parentTbinfo = tbinfo->parents[0];
2246  while (parentTbinfo->ispartition)
2247  {
2248  Assert(parentTbinfo->numParents == 1);
2249  parentTbinfo = parentTbinfo->parents[0];
2250  }
2251 
2252  return parentTbinfo;
2253 }
2254 
2255 /*
2256  * dumpTableData -
2257  * dump the contents of a single table
2258  *
2259  * Actually, this just makes an ArchiveEntry for the table contents.
2260  */
2261 static void
2262 dumpTableData(Archive *fout, const TableDataInfo *tdinfo)
2263 {
2264  DumpOptions *dopt = fout->dopt;
2265  TableInfo *tbinfo = tdinfo->tdtable;
2266  PQExpBuffer copyBuf = createPQExpBuffer();
2267  PQExpBuffer clistBuf = createPQExpBuffer();
2268  DataDumperPtr dumpFn;
2269  char *copyStmt;
2270  const char *copyFrom;
2271 
2272  /* We had better have loaded per-column details about this table */
2273  Assert(tbinfo->interesting);
2274 
2275  if (dopt->dump_inserts == 0)
2276  {
2277  /* Dump/restore using COPY */
2278  dumpFn = dumpTableData_copy;
2279 
2280  /*
2281  * When load-via-partition-root is set, get the root table name for
2282  * the partition table, so that we can reload data through the root
2283  * table.
2284  */
2285  if (dopt->load_via_partition_root && tbinfo->ispartition)
2286  {
2287  TableInfo *parentTbinfo;
2288 
2289  parentTbinfo = getRootTableInfo(tbinfo);
2290  copyFrom = fmtQualifiedDumpable(parentTbinfo);
2291  }
2292  else
2293  copyFrom = fmtQualifiedDumpable(tbinfo);
2294 
2295  /* must use 2 steps here 'cause fmtId is nonreentrant */
2296  appendPQExpBuffer(copyBuf, "COPY %s ",
2297  copyFrom);
2298  appendPQExpBuffer(copyBuf, "%s FROM stdin;\n",
2299  fmtCopyColumnList(tbinfo, clistBuf));
2300  copyStmt = copyBuf->data;
2301  }
2302  else
2303  {
2304  /* Restore using INSERT */
2305  dumpFn = dumpTableData_insert;
2306  copyStmt = NULL;
2307  }
2308 
2309  /*
2310  * Note: although the TableDataInfo is a full DumpableObject, we treat its
2311  * dependency on its table as "special" and pass it to ArchiveEntry now.
2312  * See comments for BuildArchiveDependencies.
2313  */
2314  if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2315  {
2316  TocEntry *te;
2317 
2318  te = ArchiveEntry(fout, tdinfo->dobj.catId, tdinfo->dobj.dumpId,
2319  ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
2320  .namespace = tbinfo->dobj.namespace->dobj.name,
2321  .owner = tbinfo->rolname,
2322  .description = "TABLE DATA",
2323  .section = SECTION_DATA,
2324  .copyStmt = copyStmt,
2325  .deps = &(tbinfo->dobj.dumpId),
2326  .nDeps = 1,
2327  .dumpFn = dumpFn,
2328  .dumpArg = tdinfo));
2329 
2330  /*
2331  * Set the TocEntry's dataLength in case we are doing a parallel dump
2332  * and want to order dump jobs by table size. We choose to measure
2333  * dataLength in table pages during dump, so no scaling is needed.
2334  * However, relpages is declared as "integer" in pg_class, and hence
2335  * also in TableInfo, but it's really BlockNumber a/k/a unsigned int.
2336  * Cast so that we get the right interpretation of table sizes
2337  * exceeding INT_MAX pages.
2338  */
2339  te->dataLength = (BlockNumber) tbinfo->relpages;
2340  }
2341 
2342  destroyPQExpBuffer(copyBuf);
2343  destroyPQExpBuffer(clistBuf);
2344 }
2345 
2346 /*
2347  * refreshMatViewData -
2348  * load or refresh the contents of a single materialized view
2349  *
2350  * Actually, this just makes an ArchiveEntry for the REFRESH MATERIALIZED VIEW
2351  * statement.
2352  */
2353 static void
2355 {
2356  TableInfo *tbinfo = tdinfo->tdtable;
2357  PQExpBuffer q;
2358 
2359  /* If the materialized view is not flagged as populated, skip this. */
2360  if (!tbinfo->relispopulated)
2361  return;
2362 
2363  q = createPQExpBuffer();
2364 
2365  appendPQExpBuffer(q, "REFRESH MATERIALIZED VIEW %s;\n",
2366  fmtQualifiedDumpable(tbinfo));
2367 
2368  if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2369  ArchiveEntry(fout,
2370  tdinfo->dobj.catId, /* catalog ID */
2371  tdinfo->dobj.dumpId, /* dump ID */
2372  ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
2373  .namespace = tbinfo->dobj.namespace->dobj.name,
2374  .owner = tbinfo->rolname,
2375  .description = "MATERIALIZED VIEW DATA",
2376  .section = SECTION_POST_DATA,
2377  .createStmt = q->data,
2378  .deps = tdinfo->dobj.dependencies,
2379  .nDeps = tdinfo->dobj.nDeps));
2380 
2381  destroyPQExpBuffer(q);
2382 }
2383 
2384 /*
2385  * getTableData -
2386  * set up dumpable objects representing the contents of tables
2387  */
2388 static void
2389 getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, char relkind)
2390 {
2391  int i;
2392 
2393  for (i = 0; i < numTables; i++)
2394  {
2395  if (tblinfo[i].dobj.dump & DUMP_COMPONENT_DATA &&
2396  (!relkind || tblinfo[i].relkind == relkind))
2397  makeTableDataInfo(dopt, &(tblinfo[i]));
2398  }
2399 }
2400 
2401 /*
2402  * Make a dumpable object for the data of this specific table
2403  *
2404  * Note: we make a TableDataInfo if and only if we are going to dump the
2405  * table data; the "dump" flag in such objects isn't used.
2406  */
2407 static void
2409 {
2410  TableDataInfo *tdinfo;
2411 
2412  /*
2413  * Nothing to do if we already decided to dump the table. This will
2414  * happen for "config" tables.
2415  */
2416  if (tbinfo->dataObj != NULL)
2417  return;
2418 
2419  /* Skip VIEWs (no data to dump) */
2420  if (tbinfo->relkind == RELKIND_VIEW)
2421  return;
2422  /* Skip FOREIGN TABLEs (no data to dump) unless requested explicitly */
2423  if (tbinfo->relkind == RELKIND_FOREIGN_TABLE &&
2424  (foreign_servers_include_oids.head == NULL ||
2425  !simple_oid_list_member(&foreign_servers_include_oids,
2426  tbinfo->foreign_server)))
2427  return;
2428  /* Skip partitioned tables (data in partitions) */
2429  if (tbinfo->relkind == RELKIND_PARTITIONED_TABLE)
2430  return;
2431 
2432  /* Don't dump data in unlogged tables, if so requested */
2433  if (tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED &&
2434  dopt->no_unlogged_table_data)
2435  return;
2436 
2437  /* Check that the data is not explicitly excluded */
2438  if (simple_oid_list_member(&tabledata_exclude_oids,
2439  tbinfo->dobj.catId.oid))
2440  return;
2441 
2442  /* OK, let's dump it */
2443  tdinfo = (TableDataInfo *) pg_malloc(sizeof(TableDataInfo));
2444 
2445  if (tbinfo->relkind == RELKIND_MATVIEW)
2446  tdinfo->dobj.objType = DO_REFRESH_MATVIEW;
2447  else if (tbinfo->relkind == RELKIND_SEQUENCE)
2448  tdinfo->dobj.objType = DO_SEQUENCE_SET;
2449  else
2450  tdinfo->dobj.objType = DO_TABLE_DATA;
2451 
2452  /*
2453  * Note: use tableoid 0 so that this object won't be mistaken for
2454  * something that pg_depend entries apply to.
2455  */
2456  tdinfo->dobj.catId.tableoid = 0;
2457  tdinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
2458  AssignDumpId(&tdinfo->dobj);
2459  tdinfo->dobj.name = tbinfo->dobj.name;
2460  tdinfo->dobj.namespace = tbinfo->dobj.namespace;
2461  tdinfo->tdtable = tbinfo;
2462  tdinfo->filtercond = NULL; /* might get set later */
2463  addObjectDependency(&tdinfo->dobj, tbinfo->dobj.dumpId);
2464 
2465  tbinfo->dataObj = tdinfo;
2466 
2467  /* Make sure that we'll collect per-column info for this table. */
2468  tbinfo->interesting = true;
2469 }
2470 
2471 /*
2472  * The refresh for a materialized view must be dependent on the refresh for
2473  * any materialized view that this one is dependent on.
2474  *
2475  * This must be called after all the objects are created, but before they are
2476  * sorted.
2477  */
2478 static void
2480 {
2481  PQExpBuffer query;
2482  PGresult *res;
2483  int ntups,
2484  i;
2485  int i_classid,
2486  i_objid,
2487  i_refobjid;
2488 
2489  /* No Mat Views before 9.3. */
2490  if (fout->remoteVersion < 90300)
2491  return;
2492 
2493  query = createPQExpBuffer();
2494 
2495  appendPQExpBufferStr(query, "WITH RECURSIVE w AS "
2496  "( "
2497  "SELECT d1.objid, d2.refobjid, c2.relkind AS refrelkind "
2498  "FROM pg_depend d1 "
2499  "JOIN pg_class c1 ON c1.oid = d1.objid "
2500  "AND c1.relkind = " CppAsString2(RELKIND_MATVIEW)
2501  " JOIN pg_rewrite r1 ON r1.ev_class = d1.objid "
2502  "JOIN pg_depend d2 ON d2.classid = 'pg_rewrite'::regclass "
2503  "AND d2.objid = r1.oid "
2504  "AND d2.refobjid <> d1.objid "
2505  "JOIN pg_class c2 ON c2.oid = d2.refobjid "
2506  "AND c2.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
2507  CppAsString2(RELKIND_VIEW) ") "
2508  "WHERE d1.classid = 'pg_class'::regclass "
2509  "UNION "
2510  "SELECT w.objid, d3.refobjid, c3.relkind "
2511  "FROM w "
2512  "JOIN pg_rewrite r3 ON r3.ev_class = w.refobjid "
2513  "JOIN pg_depend d3 ON d3.classid = 'pg_rewrite'::regclass "
2514  "AND d3.objid = r3.oid "
2515  "AND d3.refobjid <> w.refobjid "
2516  "JOIN pg_class c3 ON c3.oid = d3.refobjid "
2517  "AND c3.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
2518  CppAsString2(RELKIND_VIEW) ") "
2519  ") "
2520  "SELECT 'pg_class'::regclass::oid AS classid, objid, refobjid "
2521  "FROM w "
2522  "WHERE refrelkind = " CppAsString2(RELKIND_MATVIEW));
2523 
2524  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
2525 
2526  ntups = PQntuples(res);
2527 
2528  i_classid = PQfnumber(res, "classid");
2529  i_objid = PQfnumber(res, "objid");
2530  i_refobjid = PQfnumber(res, "refobjid");
2531 
2532  for (i = 0; i < ntups; i++)
2533  {
2534  CatalogId objId;
2535  CatalogId refobjId;
2536  DumpableObject *dobj;
2537  DumpableObject *refdobj;
2538  TableInfo *tbinfo;
2539  TableInfo *reftbinfo;
2540 
2541  objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
2542  objId.oid = atooid(PQgetvalue(res, i, i_objid));
2543  refobjId.tableoid = objId.tableoid;
2544  refobjId.oid = atooid(PQgetvalue(res, i, i_refobjid));
2545 
2546  dobj = findObjectByCatalogId(objId);
2547  if (dobj == NULL)
2548  continue;
2549 
2550  Assert(dobj->objType == DO_TABLE);
2551  tbinfo = (TableInfo *) dobj;
2552  Assert(tbinfo->relkind == RELKIND_MATVIEW);
2553  dobj = (DumpableObject *) tbinfo->dataObj;
2554  if (dobj == NULL)
2555  continue;
2556  Assert(dobj->objType == DO_REFRESH_MATVIEW);
2557 
2558  refdobj = findObjectByCatalogId(refobjId);
2559  if (refdobj == NULL)
2560  continue;
2561 
2562  Assert(refdobj->objType == DO_TABLE);
2563  reftbinfo = (TableInfo *) refdobj;
2564  Assert(reftbinfo->relkind == RELKIND_MATVIEW);
2565  refdobj = (DumpableObject *) reftbinfo->dataObj;
2566  if (refdobj == NULL)
2567  continue;
2568  Assert(refdobj->objType == DO_REFRESH_MATVIEW);
2569 
2570  addObjectDependency(dobj, refdobj->dumpId);
2571 
2572  if (!reftbinfo->relispopulated)
2573  tbinfo->relispopulated = false;
2574  }
2575 
2576  PQclear(res);
2577 
2578  destroyPQExpBuffer(query);
2579 }
2580 
2581 /*
2582  * getTableDataFKConstraints -
2583  * add dump-order dependencies reflecting foreign key constraints
2584  *
2585  * This code is executed only in a data-only dump --- in schema+data dumps
2586  * we handle foreign key issues by not creating the FK constraints until
2587  * after the data is loaded. In a data-only dump, however, we want to
2588  * order the table data objects in such a way that a table's referenced
2589  * tables are restored first. (In the presence of circular references or
2590  * self-references this may be impossible; we'll detect and complain about
2591  * that during the dependency sorting step.)
2592  */
2593 static void
2595 {
2596  DumpableObject **dobjs;
2597  int numObjs;
2598  int i;
2599 
2600  /* Search through all the dumpable objects for FK constraints */
2601  getDumpableObjects(&dobjs, &numObjs);
2602  for (i = 0; i < numObjs; i++)
2603  {
2604  if (dobjs[i]->objType == DO_FK_CONSTRAINT)
2605  {
2606  ConstraintInfo *cinfo = (ConstraintInfo *) dobjs[i];
2607  TableInfo *ftable;
2608 
2609  /* Not interesting unless both tables are to be dumped */
2610  if (cinfo->contable == NULL ||
2611  cinfo->contable->dataObj == NULL)
2612  continue;
2613  ftable = findTableByOid(cinfo->confrelid);
2614  if (ftable == NULL ||
2615  ftable->dataObj == NULL)
2616  continue;
2617 
2618  /*
2619  * Okay, make referencing table's TABLE_DATA object depend on the
2620  * referenced table's TABLE_DATA object.
2621  */
2623  ftable->dataObj->dobj.dumpId);
2624  }
2625  }
2626  free(dobjs);
2627 }
2628 
2629 
2630 /*
2631  * guessConstraintInheritance:
2632  * In pre-8.4 databases, we can't tell for certain which constraints
2633  * are inherited. We assume a CHECK constraint is inherited if its name
2634  * matches the name of any constraint in the parent. Originally this code
2635  * tried to compare the expression texts, but that can fail for various
2636  * reasons --- for example, if the parent and child tables are in different
2637  * schemas, reverse-listing of function calls may produce different text
2638  * (schema-qualified or not) depending on search path.
2639  *
2640  * In 8.4 and up we can rely on the conislocal field to decide which
2641  * constraints must be dumped; much safer.
2642  *
2643  * This function assumes all conislocal flags were initialized to true.
2644  * It clears the flag on anything that seems to be inherited.
2645  */
2646 static void
2648 {
2649  int i,
2650  j,
2651  k;
2652 
2653  for (i = 0; i < numTables; i++)
2654  {
2655  TableInfo *tbinfo = &(tblinfo[i]);
2656  int numParents;
2657  TableInfo **parents;
2658  TableInfo *parent;
2659 
2660  /* Sequences and views never have parents */
2661  if (tbinfo->relkind == RELKIND_SEQUENCE ||
2662  tbinfo->relkind == RELKIND_VIEW)
2663  continue;
2664 
2665  /* Don't bother computing anything for non-target tables, either */
2666  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
2667  continue;
2668 
2669  numParents = tbinfo->numParents;
2670  parents = tbinfo->parents;
2671 
2672  if (numParents == 0)
2673  continue; /* nothing to see here, move along */
2674 
2675  /* scan for inherited CHECK constraints */
2676  for (j = 0; j < tbinfo->ncheck; j++)
2677  {
2678  ConstraintInfo *constr;
2679 
2680  constr = &(tbinfo->checkexprs[j]);
2681 
2682  for (k = 0; k < numParents; k++)
2683  {
2684  int l;
2685 
2686  parent = parents[k];
2687  for (l = 0; l < parent->ncheck; l++)
2688  {
2689  ConstraintInfo *pconstr = &(parent->checkexprs[l]);
2690 
2691  if (strcmp(pconstr->dobj.name, constr->dobj.name) == 0)
2692  {
2693  constr->conislocal = false;
2694  break;
2695  }
2696  }
2697  if (!constr->conislocal)
2698  break;
2699  }
2700  }
2701  }
2702 }
2703 
2704 
2705 /*
2706  * dumpDatabase:
2707  * dump the database definition
2708  */
2709 static void
2711 {
2712  DumpOptions *dopt = fout->dopt;
2713  PQExpBuffer dbQry = createPQExpBuffer();
2714  PQExpBuffer delQry = createPQExpBuffer();
2715  PQExpBuffer creaQry = createPQExpBuffer();
2716  PQExpBuffer labelq = createPQExpBuffer();
2717  PGconn *conn = GetConnection(fout);
2718  PGresult *res;
2719  int i_tableoid,
2720  i_oid,
2721  i_datname,
2722  i_dba,
2723  i_encoding,
2724  i_collate,
2725  i_ctype,
2726  i_frozenxid,
2727  i_minmxid,
2728  i_datacl,
2729  i_rdatacl,
2730  i_datistemplate,
2731  i_datconnlimit,
2732  i_tablespace;
2733  CatalogId dbCatId;
2734  DumpId dbDumpId;
2735  const char *datname,
2736  *dba,
2737  *encoding,
2738  *collate,
2739  *ctype,
2740  *datacl,
2741  *rdatacl,
2742  *datistemplate,
2743  *datconnlimit,
2744  *tablespace;
2745  uint32 frozenxid,
2746  minmxid;
2747  char *qdatname;
2748 
2749  pg_log_info("saving database definition");
2750 
2751  /*
2752  * Fetch the database-level properties for this database.
2753  *
2754  * The order in which privileges are in the ACL string (the order they
2755  * have been GRANT'd in, which the backend maintains) must be preserved to
2756  * ensure that GRANTs WITH GRANT OPTION and subsequent GRANTs based on
2757  * those are dumped in the correct order. Note that initial privileges
2758  * (pg_init_privs) are not supported on databases, so this logic cannot
2759  * make use of buildACLQueries().
2760  */
2761  if (fout->remoteVersion >= 90600)
2762  {
2763  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2764  "(%s datdba) AS dba, "
2765  "pg_encoding_to_char(encoding) AS encoding, "
2766  "datcollate, datctype, datfrozenxid, datminmxid, "
2767  "(SELECT array_agg(acl ORDER BY row_n) FROM "
2768  " (SELECT acl, row_n FROM "
2769  " unnest(coalesce(datacl,acldefault('d',datdba))) "
2770  " WITH ORDINALITY AS perm(acl,row_n) "
2771  " WHERE NOT EXISTS ( "
2772  " SELECT 1 "
2773  " FROM unnest(acldefault('d',datdba)) "
2774  " AS init(init_acl) "
2775  " WHERE acl = init_acl)) AS datacls) "
2776  " AS datacl, "
2777  "(SELECT array_agg(acl ORDER BY row_n) FROM "
2778  " (SELECT acl, row_n FROM "
2779  " unnest(acldefault('d',datdba)) "
2780  " WITH ORDINALITY AS initp(acl,row_n) "
2781  " WHERE NOT EXISTS ( "
2782  " SELECT 1 "
2783  " FROM unnest(coalesce(datacl,acldefault('d',datdba))) "
2784  " AS permp(orig_acl) "
2785  " WHERE acl = orig_acl)) AS rdatacls) "
2786  " AS rdatacl, "
2787  "datistemplate, datconnlimit, "
2788  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2789  "shobj_description(oid, 'pg_database') AS description "
2790 
2791  "FROM pg_database "
2792  "WHERE datname = current_database()",
2794  }
2795  else if (fout->remoteVersion >= 90300)
2796  {
2797  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2798  "(%s datdba) AS dba, "
2799  "pg_encoding_to_char(encoding) AS encoding, "
2800  "datcollate, datctype, datfrozenxid, datminmxid, "
2801  "datacl, '' as rdatacl, datistemplate, datconnlimit, "
2802  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2803  "shobj_description(oid, 'pg_database') AS description "
2804 
2805  "FROM pg_database "
2806  "WHERE datname = current_database()",
2808  }
2809  else if (fout->remoteVersion >= 80400)
2810  {
2811  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2812  "(%s datdba) AS dba, "
2813  "pg_encoding_to_char(encoding) AS encoding, "
2814  "datcollate, datctype, datfrozenxid, 0 AS datminmxid, "
2815  "datacl, '' as rdatacl, datistemplate, datconnlimit, "
2816  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2817  "shobj_description(oid, 'pg_database') AS description "
2818 
2819  "FROM pg_database "
2820  "WHERE datname = current_database()",
2822  }
2823  else if (fout->remoteVersion >= 80200)
2824  {
2825  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2826  "(%s datdba) AS dba, "
2827  "pg_encoding_to_char(encoding) AS encoding, "
2828  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2829  "datacl, '' as rdatacl, datistemplate, datconnlimit, "
2830  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2831  "shobj_description(oid, 'pg_database') AS description "
2832 
2833  "FROM pg_database "
2834  "WHERE datname = current_database()",
2836  }
2837  else
2838  {
2839  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2840  "(%s datdba) AS dba, "
2841  "pg_encoding_to_char(encoding) AS encoding, "
2842  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2843  "datacl, '' as rdatacl, datistemplate, "
2844  "-1 as datconnlimit, "
2845  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace "
2846  "FROM pg_database "
2847  "WHERE datname = current_database()",
2849  }
2850 
2851  res = ExecuteSqlQueryForSingleRow(fout, dbQry->data);
2852 
2853  i_tableoid = PQfnumber(res, "tableoid");
2854  i_oid = PQfnumber(res, "oid");
2855  i_datname = PQfnumber(res, "datname");
2856  i_dba = PQfnumber(res, "dba");
2857  i_encoding = PQfnumber(res, "encoding");
2858  i_collate = PQfnumber(res, "datcollate");
2859  i_ctype = PQfnumber(res, "datctype");
2860  i_frozenxid = PQfnumber(res, "datfrozenxid");
2861  i_minmxid = PQfnumber(res, "datminmxid");
2862  i_datacl = PQfnumber(res, "datacl");
2863  i_rdatacl = PQfnumber(res, "rdatacl");
2864  i_datistemplate = PQfnumber(res, "datistemplate");
2865  i_datconnlimit = PQfnumber(res, "datconnlimit");
2866  i_tablespace = PQfnumber(res, "tablespace");
2867 
2868  dbCatId.tableoid = atooid(PQgetvalue(res, 0, i_tableoid));
2869  dbCatId.oid = atooid(PQgetvalue(res, 0, i_oid));
2870  datname = PQgetvalue(res, 0, i_datname);
2871  dba = PQgetvalue(res, 0, i_dba);
2872  encoding = PQgetvalue(res, 0, i_encoding);
2873  collate = PQgetvalue(res, 0, i_collate);
2874  ctype = PQgetvalue(res, 0, i_ctype);
2875  frozenxid = atooid(PQgetvalue(res, 0, i_frozenxid));
2876  minmxid = atooid(PQgetvalue(res, 0, i_minmxid));
2877  datacl = PQgetvalue(res, 0, i_datacl);
2878  rdatacl = PQgetvalue(res, 0, i_rdatacl);
2879  datistemplate = PQgetvalue(res, 0, i_datistemplate);
2880  datconnlimit = PQgetvalue(res, 0, i_datconnlimit);
2881  tablespace = PQgetvalue(res, 0, i_tablespace);
2882 
2883  qdatname = pg_strdup(fmtId(datname));
2884 
2885  /*
2886  * Prepare the CREATE DATABASE command. We must specify encoding, locale,
2887  * and tablespace since those can't be altered later. Other DB properties
2888  * are left to the DATABASE PROPERTIES entry, so that they can be applied
2889  * after reconnecting to the target DB.
2890  */
2891  appendPQExpBuffer(creaQry, "CREATE DATABASE %s WITH TEMPLATE = template0",
2892  qdatname);
2893  if (strlen(encoding) > 0)
2894  {
2895  appendPQExpBufferStr(creaQry, " ENCODING = ");
2896  appendStringLiteralAH(creaQry, encoding, fout);
2897  }
2898  if (strlen(collate) > 0 && strcmp(collate, ctype) == 0)
2899  {
2900  appendPQExpBufferStr(creaQry, " LOCALE = ");
2901  appendStringLiteralAH(creaQry, collate, fout);
2902  }
2903  else
2904  {
2905  if (strlen(collate) > 0)
2906  {
2907  appendPQExpBufferStr(creaQry, " LC_COLLATE = ");
2908  appendStringLiteralAH(creaQry, collate, fout);
2909  }
2910  if (strlen(ctype) > 0)
2911  {
2912  appendPQExpBufferStr(creaQry, " LC_CTYPE = ");
2913  appendStringLiteralAH(creaQry, ctype, fout);
2914  }
2915  }
2916 
2917  /*
2918  * Note: looking at dopt->outputNoTablespaces here is completely the wrong
2919  * thing; the decision whether to specify a tablespace should be left till
2920  * pg_restore, so that pg_restore --no-tablespaces applies. Ideally we'd
2921  * label the DATABASE entry with the tablespace and let the normal
2922  * tablespace selection logic work ... but CREATE DATABASE doesn't pay
2923  * attention to default_tablespace, so that won't work.
2924  */
2925  if (strlen(tablespace) > 0 && strcmp(tablespace, "pg_default") != 0 &&
2926  !dopt->outputNoTablespaces)
2927  appendPQExpBuffer(creaQry, " TABLESPACE = %s",
2928  fmtId(tablespace));
2929  appendPQExpBufferStr(creaQry, ";\n");
2930 
2931  appendPQExpBuffer(delQry, "DROP DATABASE %s;\n",
2932  qdatname);
2933 
2934  dbDumpId = createDumpId();
2935 
2936  ArchiveEntry(fout,
2937  dbCatId, /* catalog ID */
2938  dbDumpId, /* dump ID */
2939  ARCHIVE_OPTS(.tag = datname,
2940  .owner = dba,
2941  .description = "DATABASE",
2942  .section = SECTION_PRE_DATA,
2943  .createStmt = creaQry->data,
2944  .dropStmt = delQry->data));
2945 
2946  /* Compute correct tag for archive entry */
2947  appendPQExpBuffer(labelq, "DATABASE %s", qdatname);
2948 
2949  /* Dump DB comment if any */
2950  if (fout->remoteVersion >= 80200)
2951  {
2952  /*
2953  * 8.2 and up keep comments on shared objects in a shared table, so we
2954  * cannot use the dumpComment() code used for other database objects.
2955  * Be careful that the ArchiveEntry parameters match that function.
2956  */
2957  char *comment = PQgetvalue(res, 0, PQfnumber(res, "description"));
2958 
2959  if (comment && *comment && !dopt->no_comments)
2960  {
2961  resetPQExpBuffer(dbQry);
2962 
2963  /*
2964  * Generates warning when loaded into a differently-named
2965  * database.
2966  */
2967  appendPQExpBuffer(dbQry, "COMMENT ON DATABASE %s IS ", qdatname);
2968  appendStringLiteralAH(dbQry, comment, fout);
2969  appendPQExpBufferStr(dbQry, ";\n");
2970 
2971  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2972  ARCHIVE_OPTS(.tag = labelq->data,
2973  .owner = dba,
2974  .description = "COMMENT",
2975  .section = SECTION_NONE,
2976  .createStmt = dbQry->data,
2977  .deps = &dbDumpId,
2978  .nDeps = 1));
2979  }
2980  }
2981  else
2982  {
2983  dumpComment(fout, "DATABASE", qdatname, NULL, dba,
2984  dbCatId, 0, dbDumpId);
2985  }
2986 
2987  /* Dump DB security label, if enabled */
2988  if (!dopt->no_security_labels && fout->remoteVersion >= 90200)
2989  {
2990  PGresult *shres;
2991  PQExpBuffer seclabelQry;
2992 
2993  seclabelQry = createPQExpBuffer();
2994 
2995  buildShSecLabelQuery("pg_database", dbCatId.oid, seclabelQry);
2996  shres = ExecuteSqlQuery(fout, seclabelQry->data, PGRES_TUPLES_OK);
2997  resetPQExpBuffer(seclabelQry);
2998  emitShSecLabels(conn, shres, seclabelQry, "DATABASE", datname);
2999  if (seclabelQry->len > 0)
3000  ArchiveEntry(fout, nilCatalogId, createDumpId(),
3001  ARCHIVE_OPTS(.tag = labelq->data,
3002  .owner = dba,
3003  .description = "SECURITY LABEL",
3004  .section = SECTION_NONE,
3005  .createStmt = seclabelQry->data,
3006  .deps = &dbDumpId,
3007  .nDeps = 1));
3008  destroyPQExpBuffer(seclabelQry);
3009  PQclear(shres);
3010  }
3011 
3012  /*
3013  * Dump ACL if any. Note that we do not support initial privileges
3014  * (pg_init_privs) on databases.
3015  */
3016  dumpACL(fout, dbDumpId, InvalidDumpId, "DATABASE",
3017  qdatname, NULL, NULL,
3018  dba, datacl, rdatacl, "", "");
3019 
3020  /*
3021  * Now construct a DATABASE PROPERTIES archive entry to restore any
3022  * non-default database-level properties. (The reason this must be
3023  * separate is that we cannot put any additional commands into the TOC
3024  * entry that has CREATE DATABASE. pg_restore would execute such a group
3025  * in an implicit transaction block, and the backend won't allow CREATE
3026  * DATABASE in that context.)
3027  */
3028  resetPQExpBuffer(creaQry);
3029  resetPQExpBuffer(delQry);
3030 
3031  if (strlen(datconnlimit) > 0 && strcmp(datconnlimit, "-1") != 0)
3032  appendPQExpBuffer(creaQry, "ALTER DATABASE %s CONNECTION LIMIT = %s;\n",
3033  qdatname, datconnlimit);
3034 
3035  if (strcmp(datistemplate, "t") == 0)
3036  {
3037  appendPQExpBuffer(creaQry, "ALTER DATABASE %s IS_TEMPLATE = true;\n",
3038  qdatname);
3039 
3040  /*
3041  * The backend won't accept DROP DATABASE on a template database. We
3042  * can deal with that by removing the template marking before the DROP
3043  * gets issued. We'd prefer to use ALTER DATABASE IF EXISTS here, but
3044  * since no such command is currently supported, fake it with a direct
3045  * UPDATE on pg_database.
3046  */
3047  appendPQExpBufferStr(delQry, "UPDATE pg_catalog.pg_database "
3048  "SET datistemplate = false WHERE datname = ");
3049  appendStringLiteralAH(delQry, datname, fout);
3050  appendPQExpBufferStr(delQry, ";\n");
3051  }
3052 
3053  /* Add database-specific SET options */
3054  dumpDatabaseConfig(fout, creaQry, datname, dbCatId.oid);
3055 
3056  /*
3057  * We stick this binary-upgrade query into the DATABASE PROPERTIES archive
3058  * entry, too, for lack of a better place.
3059  */
3060  if (dopt->binary_upgrade)
3061  {
3062  appendPQExpBufferStr(creaQry, "\n-- For binary upgrade, set datfrozenxid and datminmxid.\n");
3063  appendPQExpBuffer(creaQry, "UPDATE pg_catalog.pg_database\n"
3064  "SET datfrozenxid = '%u', datminmxid = '%u'\n"
3065  "WHERE datname = ",
3066  frozenxid, minmxid);
3067  appendStringLiteralAH(creaQry, datname, fout);
3068  appendPQExpBufferStr(creaQry, ";\n");
3069  }
3070 
3071  if (creaQry->len > 0)
3072  ArchiveEntry(fout, nilCatalogId, createDumpId(),
3073  ARCHIVE_OPTS(.tag = datname,
3074  .owner = dba,
3075  .description = "DATABASE PROPERTIES",
3076  .section = SECTION_PRE_DATA,
3077  .createStmt = creaQry->data,
3078  .dropStmt = delQry->data,
3079  .deps = &dbDumpId));
3080 
3081  /*
3082  * pg_largeobject comes from the old system intact, so set its
3083  * relfrozenxids and relminmxids.
3084  */
3085  if (dopt->binary_upgrade)
3086  {
3087  PGresult *lo_res;
3088  PQExpBuffer loFrozenQry = createPQExpBuffer();
3089  PQExpBuffer loOutQry = createPQExpBuffer();
3090  int i_relfrozenxid,
3091  i_relminmxid;
3092 
3093  /*
3094  * pg_largeobject
3095  */
3096  if (fout->remoteVersion >= 90300)
3097  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid\n"
3098  "FROM pg_catalog.pg_class\n"
3099  "WHERE oid = %u;\n",
3100  LargeObjectRelationId);
3101  else
3102  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid\n"
3103  "FROM pg_catalog.pg_class\n"
3104  "WHERE oid = %u;\n",
3105  LargeObjectRelationId);
3106 
3107  lo_res = ExecuteSqlQueryForSingleRow(fout, loFrozenQry->data);
3108 
3109  i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
3110  i_relminmxid = PQfnumber(lo_res, "relminmxid");
3111 
3112  appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, set pg_largeobject relfrozenxid and relminmxid\n");
3113  appendPQExpBuffer(loOutQry, "UPDATE pg_catalog.pg_class\n"
3114  "SET relfrozenxid = '%u', relminmxid = '%u'\n"
3115  "WHERE oid = %u;\n",
3116  atooid(PQgetvalue(lo_res, 0, i_relfrozenxid)),
3117  atooid(PQgetvalue(lo_res, 0, i_relminmxid)),
3118  LargeObjectRelationId);
3119  ArchiveEntry(fout, nilCatalogId, createDumpId(),
3120  ARCHIVE_OPTS(.tag = "pg_largeobject",
3121  .description = "pg_largeobject",
3122  .section = SECTION_PRE_DATA,
3123  .createStmt = loOutQry->data));
3124 
3125  PQclear(lo_res);
3126 
3127  destroyPQExpBuffer(loFrozenQry);
3128  destroyPQExpBuffer(loOutQry);
3129  }
3130 
3131  PQclear(res);
3132 
3133  free(qdatname);
3134  destroyPQExpBuffer(dbQry);
3135  destroyPQExpBuffer(delQry);
3136  destroyPQExpBuffer(creaQry);
3137  destroyPQExpBuffer(labelq);
3138 }
3139 
/*
 * Collect any database-specific or role-and-database-specific SET options
 * for this database, and append them to outbuf.
 */
static void
/*
 * NOTE(review): the function-name/parameter line (presumably
 * "dumpDatabaseConfig(Archive *AH, PQExpBuffer outbuf," per the header
 * comment and the uses of AH/outbuf below) appears to have been lost in
 * this extraction -- confirm against the original file.
 */
				   const char *dbname, Oid dboid)
{
	PGconn	   *conn = GetConnection(AH);
	/*
	 * NOTE(review): a declaration of the scratch query buffer "buf"
	 * (presumably "PQExpBuffer buf = createPQExpBuffer();") appears to be
	 * missing here -- "buf" is used below and destroyed at the end.
	 */
	PGresult   *res;
	int			count = 1;		/* 1-based array subscript for setconfig */

	/*
	 * First collect database-specific options.  Pre-8.4 server versions lack
	 * unnest(), so we do this the hard way by querying once per subscript.
	 */
	for (;;)
	{
		/* 9.0+ keeps per-database settings in pg_db_role_setting */
		if (AH->remoteVersion >= 90000)
			printfPQExpBuffer(buf, "SELECT setconfig[%d] FROM pg_db_role_setting "
							  "WHERE setrole = 0 AND setdatabase = '%u'::oid",
							  count, dboid);
		else
			printfPQExpBuffer(buf, "SELECT datconfig[%d] FROM pg_database WHERE oid = '%u'::oid", count, dboid);

		res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);

		/* A NULL element (or no row) means we ran off the end of the array */
		if (PQntuples(res) == 1 &&
			!PQgetisnull(res, 0, 0))
		{
			/* Turn "name=value" into an ALTER DATABASE ... SET command */
			makeAlterConfigCommand(conn, PQgetvalue(res, 0, 0),
								   "DATABASE", dbname, NULL, NULL,
								   outbuf);
			PQclear(res);
			count++;
		}
		else
		{
			PQclear(res);
			break;
		}
	}

	/* Now look for role-and-database-specific options */
	if (AH->remoteVersion >= 90000)
	{
		/* Here we can assume we have unnest() */
		printfPQExpBuffer(buf, "SELECT rolname, unnest(setconfig) "
						  "FROM pg_db_role_setting s, pg_roles r "
						  "WHERE setrole = r.oid AND setdatabase = '%u'::oid",
						  dboid);

		res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);

		if (PQntuples(res) > 0)
		{
			int			i;

			/* One ALTER ROLE ... IN DATABASE ... SET per (role, setting) */
			for (i = 0; i < PQntuples(res); i++)
				makeAlterConfigCommand(conn, PQgetvalue(res, i, 1),
									   "ROLE", PQgetvalue(res, i, 0),
									   "DATABASE", dbname,
									   outbuf);
		}

		PQclear(res);
	}

	destroyPQExpBuffer(buf);
}
3211 
/*
 * dumpEncoding: put the correct encoding into the archive
 */
static void
/*
 * NOTE(review): the signature line (presumably "dumpEncoding(Archive *AH)")
 * is missing from this extraction -- confirm against the original file.
 */
{
	const char *encname = pg_encoding_to_char(AH->encoding);
	/*
	 * NOTE(review): a "PQExpBuffer qry = createPQExpBuffer();" declaration
	 * appears to be missing here; qry is used and destroyed below.
	 */

	pg_log_info("saving encoding = %s", encname);

	/* Build a SET client_encoding command with a properly quoted value */
	appendPQExpBufferStr(qry, "SET client_encoding = ");
	appendStringLiteralAH(qry, encname, AH);
	appendPQExpBufferStr(qry, ";\n");

	/* Record it as a SECTION_PRE_DATA TOC entry */
	ArchiveEntry(AH, nilCatalogId, createDumpId(),
				 ARCHIVE_OPTS(.tag = "ENCODING",
							  .description = "ENCODING",
							  .section = SECTION_PRE_DATA,
							  .createStmt = qry->data));

	destroyPQExpBuffer(qry);
}
3235 
3236 
/*
 * dumpStdStrings: put the correct escape string behavior into the archive
 */
static void
/*
 * NOTE(review): the signature line (presumably "dumpStdStrings(Archive *AH)")
 * is missing from this extraction -- confirm against the original file.
 */
{
	const char *stdstrings = AH->std_strings ? "on" : "off";
	/*
	 * NOTE(review): a "PQExpBuffer qry = createPQExpBuffer();" declaration
	 * appears to be missing here; qry is used and destroyed below.
	 */

	pg_log_info("saving standard_conforming_strings = %s",
				stdstrings);

	/* The value is only ever "on" or "off", so no quoting worries here */
	appendPQExpBuffer(qry, "SET standard_conforming_strings = '%s';\n",
					  stdstrings);

	/* Record it as a SECTION_PRE_DATA TOC entry */
	ArchiveEntry(AH, nilCatalogId, createDumpId(),
				 ARCHIVE_OPTS(.tag = "STDSTRINGS",
							  .description = "STDSTRINGS",
							  .section = SECTION_PRE_DATA,
							  .createStmt = qry->data));

	destroyPQExpBuffer(qry);
}
3260 
/*
 * dumpSearchPath: record the active search_path in the archive
 */
static void
/*
 * NOTE(review): the signature line (presumably "dumpSearchPath(Archive *AH)")
 * is missing from this extraction -- confirm against the original file.
 */
{
	/*
	 * NOTE(review): a "PQExpBuffer qry = createPQExpBuffer();" declaration
	 * appears to be missing here; qry is used and destroyed below.
	 */
	PQExpBuffer path = createPQExpBuffer();
	PGresult   *res;
	char	  **schemanames = NULL;
	int			nschemanames = 0;
	int			i;

	/*
	 * We use the result of current_schemas(), not the search_path GUC,
	 * because that might contain wildcards such as "$user", which won't
	 * necessarily have the same value during restore.  Also, this way avoids
	 * listing schemas that may appear in search_path but not actually exist,
	 * which seems like a prudent exclusion.
	 */
	res = ExecuteSqlQueryForSingleRow(AH,
									  "SELECT pg_catalog.current_schemas(false)");

	if (!parsePGArray(PQgetvalue(res, 0, 0), &schemanames, &nschemanames))
		fatal("could not parse result of current_schemas()");

	/*
	 * We use set_config(), not a simple "SET search_path" command, because
	 * the latter has less-clean behavior if the search path is empty.  While
	 * that's likely to get fixed at some point, it seems like a good idea to
	 * be as backwards-compatible as possible in what we put into archives.
	 */
	for (i = 0; i < nschemanames; i++)
	{
		if (i > 0)
			appendPQExpBufferStr(path, ", ");
		/* fmtId quotes the schema name if needed */
		appendPQExpBufferStr(path, fmtId(schemanames[i]));
	}

	appendPQExpBufferStr(qry, "SELECT pg_catalog.set_config('search_path', ");
	appendStringLiteralAH(qry, path->data, AH);
	appendPQExpBufferStr(qry, ", false);\n");

	pg_log_info("saving search_path = %s", path->data);

	/* Record it as a SECTION_PRE_DATA TOC entry */
	ArchiveEntry(AH, nilCatalogId, createDumpId(),
				 ARCHIVE_OPTS(.tag = "SEARCHPATH",
							  .description = "SEARCHPATH",
							  .section = SECTION_PRE_DATA,
							  .createStmt = qry->data));

	/* Also save it in AH->searchpath, in case we're doing plain text dump */
	AH->searchpath = pg_strdup(qry->data);

	if (schemanames)
		free(schemanames);
	PQclear(res);
	destroyPQExpBuffer(qry);
	destroyPQExpBuffer(path);
}
3321 
3322 
/*
 * getBlobs:
 *	Collect schema-level data about large objects
 */
static void
/*
 * NOTE(review): the signature line (presumably "getBlobs(Archive *fout)")
 * is missing from this extraction -- fout is used throughout.  Confirm
 * against the original file.
 */
{
	DumpOptions *dopt = fout->dopt;
	PQExpBuffer blobQry = createPQExpBuffer();
	BlobInfo   *binfo;
	DumpableObject *bdata;
	PGresult   *res;
	int			ntups;
	int			i;
	/* result-column indexes */
	int			i_oid;
	int			i_lomowner;
	int			i_lomacl;
	int			i_rlomacl;
	int			i_initlomacl;
	int			i_initrlomacl;

	pg_log_info("reading large objects");

	/* Fetch BLOB OIDs, and owner/ACL data if >= 9.0 */
	if (fout->remoteVersion >= 90600)
	{
		/* 9.6+: compute ACLs relative to pg_init_privs initial privileges */
		PQExpBuffer acl_subquery = createPQExpBuffer();
		PQExpBuffer racl_subquery = createPQExpBuffer();
		PQExpBuffer init_acl_subquery = createPQExpBuffer();
		PQExpBuffer init_racl_subquery = createPQExpBuffer();

		buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
						init_racl_subquery, "l.lomacl", "l.lomowner", "'L'",
						dopt->binary_upgrade);

		appendPQExpBuffer(blobQry,
						  "SELECT l.oid, (%s l.lomowner) AS rolname, "
						  "%s AS lomacl, "
						  "%s AS rlomacl, "
						  "%s AS initlomacl, "
						  "%s AS initrlomacl "
						  "FROM pg_largeobject_metadata l "
						  "LEFT JOIN pg_init_privs pip ON "
						  "(l.oid = pip.objoid "
						  "AND pip.classoid = 'pg_largeobject'::regclass "
						  "AND pip.objsubid = 0) ",
						  /*
						   * NOTE(review): the argument for the first %s (the
						   * owner-name subquery, likely "username_subquery,")
						   * appears to be missing from this extraction.
						   */
						  acl_subquery->data,
						  racl_subquery->data,
						  init_acl_subquery->data,
						  init_racl_subquery->data);

		destroyPQExpBuffer(acl_subquery);
		destroyPQExpBuffer(racl_subquery);
		destroyPQExpBuffer(init_acl_subquery);
		destroyPQExpBuffer(init_racl_subquery);
	}
	else if (fout->remoteVersion >= 90000)
		/* 9.0-9.5: pg_largeobject_metadata exists, but no pg_init_privs */
		appendPQExpBuffer(blobQry,
						  "SELECT oid, (%s lomowner) AS rolname, lomacl, "
						  "NULL AS rlomacl, NULL AS initlomacl, "
						  "NULL AS initrlomacl "
						  " FROM pg_largeobject_metadata",
						  /*
						   * NOTE(review): the %s argument (likely
						   * "username_subquery);") appears to be missing
						   * from this extraction.
						   */
	else
		/* pre-9.0: no per-blob metadata at all; just collect the OIDs */
		appendPQExpBufferStr(blobQry,
							 "SELECT DISTINCT loid AS oid, "
							 "NULL::name AS rolname, NULL::oid AS lomacl, "
							 "NULL::oid AS rlomacl, NULL::oid AS initlomacl, "
							 "NULL::oid AS initrlomacl "
							 " FROM pg_largeobject");

	res = ExecuteSqlQuery(fout, blobQry->data, PGRES_TUPLES_OK);

	i_oid = PQfnumber(res, "oid");
	i_lomowner = PQfnumber(res, "rolname");
	i_lomacl = PQfnumber(res, "lomacl");
	i_rlomacl = PQfnumber(res, "rlomacl");
	i_initlomacl = PQfnumber(res, "initlomacl");
	i_initrlomacl = PQfnumber(res, "initrlomacl");

	ntups = PQntuples(res);

	/*
	 * Each large object has its own BLOB archive entry.
	 */
	binfo = (BlobInfo *) pg_malloc(ntups * sizeof(BlobInfo));

	for (i = 0; i < ntups; i++)
	{
		binfo[i].dobj.objType = DO_BLOB;
		binfo[i].dobj.catId.tableoid = LargeObjectRelationId;
		binfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
		AssignDumpId(&binfo[i].dobj);

		/* the blob's "name" is just its OID as a string */
		binfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oid));
		binfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_lomowner));
		binfo[i].blobacl = pg_strdup(PQgetvalue(res, i, i_lomacl));
		binfo[i].rblobacl = pg_strdup(PQgetvalue(res, i, i_rlomacl));
		binfo[i].initblobacl = pg_strdup(PQgetvalue(res, i, i_initlomacl));
		binfo[i].initrblobacl = pg_strdup(PQgetvalue(res, i, i_initrlomacl));

		/* With no ACL info at all, skip the ACL dump component entirely */
		if (PQgetisnull(res, i, i_lomacl) &&
			PQgetisnull(res, i, i_rlomacl) &&
			PQgetisnull(res, i, i_initlomacl) &&
			PQgetisnull(res, i, i_initrlomacl))
			binfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;

		/*
		 * In binary-upgrade mode for blobs, we do *not* dump out the blob
		 * data, as it will be copied by pg_upgrade, which simply copies the
		 * pg_largeobject table. We *do* however dump out anything but the
		 * data, as pg_upgrade copies just pg_largeobject, but not
		 * pg_largeobject_metadata, after the dump is restored.
		 */
		if (dopt->binary_upgrade)
			binfo[i].dobj.dump &= ~DUMP_COMPONENT_DATA;
	}

	/*
	 * If we have any large objects, a "BLOBS" archive entry is needed. This
	 * is just a placeholder for sorting; it carries no data now.
	 */
	if (ntups > 0)
	{
		bdata = (DumpableObject *) pg_malloc(sizeof(DumpableObject));
		bdata->objType = DO_BLOB_DATA;
		bdata->catId = nilCatalogId;
		AssignDumpId(bdata);
		bdata->name = pg_strdup("BLOBS");
	}

	PQclear(res);
	destroyPQExpBuffer(blobQry);
}
3458 
3459 /*
3460  * dumpBlob
3461  *
3462  * dump the definition (metadata) of the given large object
3463  */
3464 static void
3465 dumpBlob(Archive *fout, const BlobInfo *binfo)
3466 {
3467  PQExpBuffer cquery = createPQExpBuffer();
3468  PQExpBuffer dquery = createPQExpBuffer();
3469 
3470  appendPQExpBuffer(cquery,
3471  "SELECT pg_catalog.lo_create('%s');\n",
3472  binfo->dobj.name);
3473 
3474  appendPQExpBuffer(dquery,
3475  "SELECT pg_catalog.lo_unlink('%s');\n",
3476  binfo->dobj.name);
3477 
3478  if (binfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
3479  ArchiveEntry(fout, binfo->dobj.catId, binfo->dobj.dumpId,
3480  ARCHIVE_OPTS(.tag = binfo->dobj.name,
3481  .owner = binfo->rolname,
3482  .description = "BLOB",
3483  .section = SECTION_PRE_DATA,
3484  .createStmt = cquery->data,
3485  .dropStmt = dquery->data));
3486 
3487  /* Dump comment if any */
3488  if (binfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3489  dumpComment(fout, "LARGE OBJECT", binfo->dobj.name,
3490  NULL, binfo->rolname,
3491  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3492 
3493  /* Dump security label if any */
3494  if (binfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3495  dumpSecLabel(fout, "LARGE OBJECT", binfo->dobj.name,
3496  NULL, binfo->rolname,
3497  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3498 
3499  /* Dump ACL if any */
3500  if (binfo->blobacl && (binfo->dobj.dump & DUMP_COMPONENT_ACL))
3501  dumpACL(fout, binfo->dobj.dumpId, InvalidDumpId, "LARGE OBJECT",
3502  binfo->dobj.name, NULL,
3503  NULL, binfo->rolname, binfo->blobacl, binfo->rblobacl,
3504  binfo->initblobacl, binfo->initrblobacl);
3505 
3506  destroyPQExpBuffer(cquery);
3507  destroyPQExpBuffer(dquery);
3508 }
3509 
3510 /*
3511  * dumpBlobs:
3512  * dump the data contents of all large objects
3513  */
3514 static int
3515 dumpBlobs(Archive *fout, const void *arg)
3516 {
3517  const char *blobQry;
3518  const char *blobFetchQry;
3519  PGconn *conn = GetConnection(fout);
3520  PGresult *res;
3521  char buf[LOBBUFSIZE];
3522  int ntups;
3523  int i;
3524  int cnt;
3525 
3526  pg_log_info("saving large objects");
3527 
3528  /*
3529  * Currently, we re-fetch all BLOB OIDs using a cursor. Consider scanning
3530  * the already-in-memory dumpable objects instead...
3531  */
3532  if (fout->remoteVersion >= 90000)
3533  blobQry =
3534  "DECLARE bloboid CURSOR FOR "
3535  "SELECT oid FROM pg_largeobject_metadata ORDER BY 1";
3536  else
3537  blobQry =
3538  "DECLARE bloboid CURSOR FOR "
3539  "SELECT DISTINCT loid FROM pg_largeobject ORDER BY 1";
3540 
3541  ExecuteSqlStatement(fout, blobQry);
3542 
3543  /* Command to fetch from cursor */
3544  blobFetchQry = "FETCH 1000 IN bloboid";
3545 
3546  do
3547  {
3548  /* Do a fetch */
3549  res = ExecuteSqlQuery(fout, blobFetchQry, PGRES_TUPLES_OK);
3550 
3551  /* Process the tuples, if any */
3552  ntups = PQntuples(res);
3553  for (i = 0; i < ntups; i++)
3554  {
3555  Oid blobOid;
3556  int loFd;
3557 
3558  blobOid = atooid(PQgetvalue(res, i, 0));
3559  /* Open the BLOB */
3560  loFd = lo_open(conn, blobOid, INV_READ);
3561  if (loFd == -1)
3562  fatal("could not open large object %u: %s",
3563  blobOid, PQerrorMessage(conn));
3564 
3565  StartBlob(fout, blobOid);
3566 
3567  /* Now read it in chunks, sending data to archive */
3568  do
3569  {
3570  cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
3571  if (cnt < 0)
3572  fatal("error reading large object %u: %s",
3573  blobOid, PQerrorMessage(conn));
3574 
3575  WriteData(fout, buf, cnt);
3576  } while (cnt > 0);
3577 
3578  lo_close(conn, loFd);
3579 
3580  EndBlob(fout, blobOid);
3581  }
3582 
3583  PQclear(res);
3584  } while (ntups > 0);
3585 
3586  return 1;
3587 }
3588 
/*
 * getPolicies
 *	  get information about policies on a dumpable table.
 *
 * For each table whose POLICY component is to be dumped, this creates
 * PolicyInfo objects for (a) the table having RLS enabled (polname = NULL)
 * and (b) each explicit policy found in pg_policy.
 */
void
getPolicies(Archive *fout, TableInfo tblinfo[], int numTables)
{
	PQExpBuffer query;
	PGresult   *res;
	PolicyInfo *polinfo;
	/* result-column indexes */
	int			i_oid;
	int			i_tableoid;
	int			i_polname;
	int			i_polcmd;
	int			i_polpermissive;
	int			i_polroles;
	int			i_polqual;
	int			i_polwithcheck;
	int			i,
				j,
				ntups;

	/* Row-level security requires a 9.5 or later server */
	if (fout->remoteVersion < 90500)
		return;

	query = createPQExpBuffer();

	for (i = 0; i < numTables; i++)
	{
		TableInfo  *tbinfo = &tblinfo[i];

		/* Ignore row security on tables not to be dumped */
		if (!(tbinfo->dobj.dump & DUMP_COMPONENT_POLICY))
			continue;

		pg_log_info("reading row security enabled for table \"%s.%s\"",
					tbinfo->dobj.namespace->dobj.name,
					tbinfo->dobj.name);

		/*
		 * Get row security enabled information for the table. We represent
		 * RLS being enabled on a table by creating a PolicyInfo object with
		 * null polname.
		 */
		if (tbinfo->rowsec)
		{
			/*
			 * Note: use tableoid 0 so that this object won't be mistaken for
			 * something that pg_depend entries apply to.
			 */
			polinfo = pg_malloc(sizeof(PolicyInfo));
			polinfo->dobj.objType = DO_POLICY;
			polinfo->dobj.catId.tableoid = 0;
			polinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
			AssignDumpId(&polinfo->dobj);
			polinfo->dobj.namespace = tbinfo->dobj.namespace;
			polinfo->dobj.name = pg_strdup(tbinfo->dobj.name);
			polinfo->poltable = tbinfo;
			/* NULL polname marks this as the "RLS enabled" pseudo-policy */
			polinfo->polname = NULL;
			polinfo->polcmd = '\0';
			polinfo->polpermissive = 0;
			polinfo->polroles = NULL;
			polinfo->polqual = NULL;
			polinfo->polwithcheck = NULL;
		}

		pg_log_info("reading policies for table \"%s.%s\"",
					tbinfo->dobj.namespace->dobj.name,
					tbinfo->dobj.name);

		resetPQExpBuffer(query);

		/*
		 * Get the policies for the table.  The two branches differ only in
		 * polpermissive: servers before v10 have no such column, so every
		 * policy there is reported as permissive ('t').  polroles = '{0}'
		 * means PUBLIC, which we represent as NULL.
		 */
		if (fout->remoteVersion >= 100000)
			appendPQExpBuffer(query,
							  "SELECT oid, tableoid, pol.polname, pol.polcmd, pol.polpermissive, "
							  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
							  "	 pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
							  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
							  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
							  "FROM pg_catalog.pg_policy pol "
							  "WHERE polrelid = '%u'",
							  tbinfo->dobj.catId.oid);
		else
			appendPQExpBuffer(query,
							  "SELECT oid, tableoid, pol.polname, pol.polcmd, 't' as polpermissive, "
							  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
							  "	 pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
							  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
							  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
							  "FROM pg_catalog.pg_policy pol "
							  "WHERE polrelid = '%u'",
							  tbinfo->dobj.catId.oid);
		res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);

		ntups = PQntuples(res);

		if (ntups == 0)
		{
			/*
			 * No explicit policies to handle (only the default-deny policy,
			 * which is handled as part of the table definition). Clean up
			 * and return.
			 */
			PQclear(res);
			continue;
		}

		i_oid = PQfnumber(res, "oid");
		i_tableoid = PQfnumber(res, "tableoid");
		i_polname = PQfnumber(res, "polname");
		i_polcmd = PQfnumber(res, "polcmd");
		i_polpermissive = PQfnumber(res, "polpermissive");
		i_polroles = PQfnumber(res, "polroles");
		i_polqual = PQfnumber(res, "polqual");
		i_polwithcheck = PQfnumber(res, "polwithcheck");

		polinfo = pg_malloc(ntups * sizeof(PolicyInfo));

		/* Build one PolicyInfo per explicit policy */
		for (j = 0; j < ntups; j++)
		{
			polinfo[j].dobj.objType = DO_POLICY;
			polinfo[j].dobj.catId.tableoid =
				atooid(PQgetvalue(res, j, i_tableoid));
			polinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
			AssignDumpId(&polinfo[j].dobj);
			polinfo[j].dobj.namespace = tbinfo->dobj.namespace;
			polinfo[j].poltable = tbinfo;
			polinfo[j].polname = pg_strdup(PQgetvalue(res, j, i_polname));
			polinfo[j].dobj.name = pg_strdup(polinfo[j].polname);

			/* polcmd is a single-character command code ('*','r','a','w','d') */
			polinfo[j].polcmd = *(PQgetvalue(res, j, i_polcmd));
			polinfo[j].polpermissive = *(PQgetvalue(res, j, i_polpermissive)) == 't';

			/* NULL polroles means the policy applies to PUBLIC */
			if (PQgetisnull(res, j, i_polroles))
				polinfo[j].polroles = NULL;
			else
				polinfo[j].polroles = pg_strdup(PQgetvalue(res, j, i_polroles));

			if (PQgetisnull(res, j, i_polqual))
				polinfo[j].polqual = NULL;
			else
				polinfo[j].polqual = pg_strdup(PQgetvalue(res, j, i_polqual));

			if (PQgetisnull(res, j, i_polwithcheck))
				polinfo[j].polwithcheck = NULL;
			else
				polinfo[j].polwithcheck
					= pg_strdup(PQgetvalue(res, j, i_polwithcheck));
		}
		PQclear(res);
	}
	destroyPQExpBuffer(query);
}
3743 
3744 /*
3745  * dumpPolicy
3746  * dump the definition of the given policy
3747  */
3748 static void
3749 dumpPolicy(Archive *fout, const PolicyInfo *polinfo)
3750 {
3751  DumpOptions *dopt = fout->dopt;
3752  TableInfo *tbinfo = polinfo->poltable;
3753  PQExpBuffer query;
3754  PQExpBuffer delqry;
3755  PQExpBuffer polprefix;
3756  char *qtabname;
3757  const char *cmd;
3758  char *tag;
3759 
3760  if (dopt->dataOnly)
3761  return;
3762 
3763  /*
3764  * If polname is NULL, then this record is just indicating that ROW LEVEL
3765  * SECURITY is enabled for the table. Dump as ALTER TABLE <table> ENABLE
3766  * ROW LEVEL SECURITY.
3767  */
3768  if (polinfo->polname == NULL)
3769  {
3770  query = createPQExpBuffer();
3771 
3772  appendPQExpBuffer(query, "ALTER TABLE %s ENABLE ROW LEVEL SECURITY;",
3773  fmtQualifiedDumpable(tbinfo));
3774 
3775  /*
3776  * We must emit the ROW SECURITY object's dependency on its table
3777  * explicitly, because it will not match anything in pg_depend (unlike
3778  * the case for other PolicyInfo objects).
3779  */
3780  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3781  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3782  ARCHIVE_OPTS(.tag = polinfo->dobj.name,
3783  .namespace = polinfo->dobj.namespace->dobj.name,
3784  .owner = tbinfo->rolname,
3785  .description = "ROW SECURITY",
3786  .section = SECTION_POST_DATA,
3787  .createStmt = query->data,
3788  .deps = &(tbinfo->dobj.dumpId),
3789  .nDeps = 1));
3790 
3791  destroyPQExpBuffer(query);
3792  return;
3793  }
3794 
3795  if (polinfo->polcmd == '*')
3796  cmd = "";
3797  else if (polinfo->polcmd == 'r')
3798  cmd = " FOR SELECT";
3799  else if (polinfo->polcmd == 'a')
3800  cmd = " FOR INSERT";
3801  else if (polinfo->polcmd == 'w')
3802  cmd = " FOR UPDATE";
3803  else if (polinfo->polcmd == 'd')
3804  cmd = " FOR DELETE";
3805  else
3806  {
3807  pg_log_error("unexpected policy command type: %c",
3808  polinfo->polcmd);
3809  exit_nicely(1);
3810  }
3811 
3812  query = createPQExpBuffer();
3813  delqry = createPQExpBuffer();
3814  polprefix = createPQExpBuffer();
3815 
3816  qtabname = pg_strdup(fmtId(tbinfo->dobj.name));
3817 
3818  appendPQExpBuffer(query, "CREATE POLICY %s", fmtId(polinfo->polname));
3819 
3820  appendPQExpBuffer(query, " ON %s%s%s", fmtQualifiedDumpable(tbinfo),
3821  !polinfo->polpermissive ? " AS RESTRICTIVE" : "", cmd);
3822 
3823  if (polinfo->polroles != NULL)
3824  appendPQExpBuffer(query, " TO %s", polinfo->polroles);
3825 
3826  if (polinfo->polqual != NULL)
3827  appendPQExpBuffer(query, " USING (%s)", polinfo->polqual);
3828 
3829  if (polinfo->polwithcheck != NULL)
3830  appendPQExpBuffer(query, " WITH CHECK (%s)", polinfo->polwithcheck);
3831 
3832  appendPQExpBufferStr(query, ";\n");
3833 
3834  appendPQExpBuffer(delqry, "DROP POLICY %s", fmtId(polinfo->polname));
3835  appendPQExpBuffer(delqry, " ON %s;\n", fmtQualifiedDumpable(tbinfo));
3836 
3837  appendPQExpBuffer(polprefix, "POLICY %s ON",
3838  fmtId(polinfo->polname));
3839 
3840  tag = psprintf("%s %s", tbinfo->dobj.name, polinfo->dobj.name);
3841 
3842  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3843  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3844  ARCHIVE_OPTS(.tag = tag,
3845  .namespace = polinfo->dobj.namespace->dobj.name,
3846  .owner = tbinfo->rolname,
3847  .description = "POLICY",
3848  .section = SECTION_POST_DATA,
3849  .createStmt = query->data,
3850  .dropStmt = delqry->data));
3851 
3852  if (polinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3853  dumpComment(fout, polprefix->data, qtabname,
3854  tbinfo->dobj.namespace->dobj.name, tbinfo->rolname,
3855  polinfo->dobj.catId, 0, polinfo->dobj.dumpId);
3856 
3857  free(tag);
3858  destroyPQExpBuffer(query);
3859  destroyPQExpBuffer(delqry);
3860  destroyPQExpBuffer(polprefix);
3861  free(qtabname);
3862 }
3863 
/*
 * getPublications
 *	  get information about publications
 */
/*
 * NOTE(review): the signature line (function name and parameter list) appears
 * to have been dropped during extraction; the body reads "fout" (an Archive *)
 * and writes "*numPublications" and returns a PublicationInfo * — confirm
 * against upstream pg_dump.c.
 */
{
	DumpOptions *dopt = fout->dopt;
	PQExpBuffer query;
	PGresult   *res;
	PublicationInfo *pubinfo;
	int			i_tableoid;
	int			i_oid;
	int			i_pubname;
	int			i_rolname;
	int			i_puballtables;
	int			i_pubinsert;
	int			i_pubupdate;
	int			i_pubdelete;
	int			i_pubtruncate;
	int			i_pubviaroot;
	int			i,
				ntups;

	/* Publications exist only in v10 and later; also honor --no-publications */
	if (dopt->no_publications || fout->remoteVersion < 100000)
	{
		*numPublications = 0;
		return NULL;
	}

	query = createPQExpBuffer();

	resetPQExpBuffer(query);

	/*
	 * Get the publications.  Columns added after v10 (pubtruncate in v11,
	 * pubviaroot in v13) are faked as false when dumping older servers.
	 */
	if (fout->remoteVersion >= 130000)
		appendPQExpBuffer(query,
						  "SELECT p.tableoid, p.oid, p.pubname, "
						  "(%s p.pubowner) AS rolname, "
						  "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, p.pubtruncate, p.pubviaroot "
						  "FROM pg_publication p",
	/*
	 * NOTE(review): the trailing argument that fills the "%s" owner subquery
	 * above — and in the two branches below — appears to be a dropped line in
	 * this extraction; verify against upstream before compiling.
	 */
	else if (fout->remoteVersion >= 110000)
		appendPQExpBuffer(query,
						  "SELECT p.tableoid, p.oid, p.pubname, "
						  "(%s p.pubowner) AS rolname, "
						  "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, p.pubtruncate, false AS pubviaroot "
						  "FROM pg_publication p",
	else
		appendPQExpBuffer(query,
						  "SELECT p.tableoid, p.oid, p.pubname, "
						  "(%s p.pubowner) AS rolname, "
						  "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, false AS pubtruncate, false AS pubviaroot "
						  "FROM pg_publication p",

	res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);

	ntups = PQntuples(res);

	/* Resolve result-column indexes once, outside the row loop. */
	i_tableoid = PQfnumber(res, "tableoid");
	i_oid = PQfnumber(res, "oid");
	i_pubname = PQfnumber(res, "pubname");
	i_rolname = PQfnumber(res, "rolname");
	i_puballtables = PQfnumber(res, "puballtables");
	i_pubinsert = PQfnumber(res, "pubinsert");
	i_pubupdate = PQfnumber(res, "pubupdate");
	i_pubdelete = PQfnumber(res, "pubdelete");
	i_pubtruncate = PQfnumber(res, "pubtruncate");
	i_pubviaroot = PQfnumber(res, "pubviaroot");

	pubinfo = pg_malloc(ntups * sizeof(PublicationInfo));

	for (i = 0; i < ntups; i++)
	{
		pubinfo[i].dobj.objType = DO_PUBLICATION;
		pubinfo[i].dobj.catId.tableoid =
			atooid(PQgetvalue(res, i, i_tableoid));
		pubinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
		AssignDumpId(&pubinfo[i].dobj);
		pubinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_pubname));
		pubinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
		/* libpq returns booleans as the strings "t"/"f" */
		pubinfo[i].puballtables =
			(strcmp(PQgetvalue(res, i, i_puballtables), "t") == 0);
		pubinfo[i].pubinsert =
			(strcmp(PQgetvalue(res, i, i_pubinsert), "t") == 0);
		pubinfo[i].pubupdate =
			(strcmp(PQgetvalue(res, i, i_pubupdate), "t") == 0);
		pubinfo[i].pubdelete =
			(strcmp(PQgetvalue(res, i, i_pubdelete), "t") == 0);
		pubinfo[i].pubtruncate =
			(strcmp(PQgetvalue(res, i, i_pubtruncate), "t") == 0);
		pubinfo[i].pubviaroot =
			(strcmp(PQgetvalue(res, i, i_pubviaroot), "t") == 0);

		/* An empty rolname means the owner role could not be resolved. */
		if (strlen(pubinfo[i].rolname) == 0)
			pg_log_warning("owner of publication \"%s\" appears to be invalid",
						   pubinfo[i].dobj.name);

		/* Decide whether we want to dump it */
		selectDumpableObject(&(pubinfo[i].dobj), fout);
	}
	PQclear(res);

	destroyPQExpBuffer(query);

	*numPublications = ntups;
	return pubinfo;
}
3974 
/*
 * dumpPublication
 *	  dump the definition of the given publication
 */
static void
/*
 * NOTE(review): the signature line appears lost in extraction; the body
 * reads "fout" (Archive *) and "pubinfo" (a PublicationInfo *) — confirm
 * against upstream pg_dump.c.
 */
{
	PQExpBuffer delq;
	PQExpBuffer query;
	char	   *qpubname;
	bool		first = true;	/* tracks comma placement in publish list */

	if (!(pubinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
		return;

	delq = createPQExpBuffer();
	query = createPQExpBuffer();

	/* copy fmtId() result since its static buffer gets reused */
	qpubname = pg_strdup(fmtId(pubinfo->dobj.name));

	appendPQExpBuffer(delq, "DROP PUBLICATION %s;\n",
					  qpubname);

	appendPQExpBuffer(query, "CREATE PUBLICATION %s",
					  qpubname);

	if (pubinfo->puballtables)
		appendPQExpBufferStr(query, " FOR ALL TABLES");

	/*
	 * Always emit an explicit publish list so the restored publication
	 * matches exactly, regardless of future server defaults.
	 */
	appendPQExpBufferStr(query, " WITH (publish = '");
	if (pubinfo->pubinsert)
	{
		appendPQExpBufferStr(query, "insert");
		first = false;
	}

	if (pubinfo->pubupdate)
	{
		if (!first)
			appendPQExpBufferStr(query, ", ");

		appendPQExpBufferStr(query, "update");
		first = false;
	}

	if (pubinfo->pubdelete)
	{
		if (!first)
			appendPQExpBufferStr(query, ", ");

		appendPQExpBufferStr(query, "delete");
		first = false;
	}

	if (pubinfo->pubtruncate)
	{
		if (!first)
			appendPQExpBufferStr(query, ", ");

		appendPQExpBufferStr(query, "truncate");
		first = false;
	}

	appendPQExpBufferStr(query, "'");

	if (pubinfo->pubviaroot)
		appendPQExpBufferStr(query, ", publish_via_partition_root = true");

	appendPQExpBufferStr(query, ");\n");

	ArchiveEntry(fout, pubinfo->dobj.catId, pubinfo->dobj.dumpId,
				 ARCHIVE_OPTS(.tag = pubinfo->dobj.name,
							  .owner = pubinfo->rolname,
							  .description = "PUBLICATION",
							  .section = SECTION_POST_DATA,
							  .createStmt = query->data,
							  .dropStmt = delq->data));

	if (pubinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
		dumpComment(fout, "PUBLICATION", qpubname,
					NULL, pubinfo->rolname,
					pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);

	if (pubinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
		dumpSecLabel(fout, "PUBLICATION", qpubname,
					 NULL, pubinfo->rolname,
					 pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);

	destroyPQExpBuffer(delq);
	destroyPQExpBuffer(query);
	free(qpubname);
}
4067 
/*
 * getPublicationTables
 *	  get information about publication membership for dumpable tables.
 */
void
/*
 * NOTE(review): the signature line appears lost in extraction; the body
 * reads "fout" (Archive *) — confirm the full parameter list against
 * upstream pg_dump.c.
 */
{
	PQExpBuffer query;
	PGresult   *res;
	PublicationRelInfo *pubrinfo;
	DumpOptions *dopt = fout->dopt;
	int			i_tableoid;
	int			i_oid;
	int			i_prpubid;
	int			i_prrelid;
	int			i,
				j,
				ntups;

	/* Publications exist only in v10 and later; also honor --no-publications */
	if (dopt->no_publications || fout->remoteVersion < 100000)
		return;

	query = createPQExpBuffer();

	/* Collect all publication membership info. */
	appendPQExpBufferStr(query,
						 "SELECT tableoid, oid, prpubid, prrelid "
						 "FROM pg_catalog.pg_publication_rel");
	res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);

	ntups = PQntuples(res);

	i_tableoid = PQfnumber(res, "tableoid");
	i_oid = PQfnumber(res, "oid");
	i_prpubid = PQfnumber(res, "prpubid");
	i_prrelid = PQfnumber(res, "prrelid");

	/* this allocation may be more than we need */
	pubrinfo = pg_malloc(ntups * sizeof(PublicationRelInfo));
	j = 0;						/* count of entries actually kept */

	for (i = 0; i < ntups; i++)
	{
		Oid			prpubid = atooid(PQgetvalue(res, i, i_prpubid));
		Oid			prrelid = atooid(PQgetvalue(res, i, i_prrelid));
		PublicationInfo *pubinfo;
		TableInfo  *tbinfo;

		/*
		 * Ignore any entries for which we aren't interested in either the
		 * publication or the rel.
		 */
		pubinfo = findPublicationByOid(prpubid);
		if (pubinfo == NULL)
			continue;
		tbinfo = findTableByOid(prrelid);
		if (tbinfo == NULL)
			continue;

		/*
		 * Ignore publication membership of tables whose definitions are not
		 * to be dumped.
		 */
		if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
			continue;

		/* OK, make a DumpableObject for this relationship */
		pubrinfo[j].dobj.objType = DO_PUBLICATION_REL;
		pubrinfo[j].dobj.catId.tableoid =
			atooid(PQgetvalue(res, i, i_tableoid));
		pubrinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
		AssignDumpId(&pubrinfo[j].dobj);
		/* membership object borrows the table's namespace and name */
		pubrinfo[j].dobj.namespace = tbinfo->dobj.namespace;
		pubrinfo[j].dobj.name = tbinfo->dobj.name;
		pubrinfo[j].publication = pubinfo;
		pubrinfo[j].pubtable = tbinfo;

		/* Decide whether we want to dump it */
		selectDumpablePublicationTable(&(pubrinfo[j].dobj), fout);

		j++;
	}

	PQclear(res);
	destroyPQExpBuffer(query);
}
4154 
/*
 * dumpPublicationTable
 *	  dump the definition of the given publication table mapping
 */
static void
/*
 * NOTE(review): the signature line appears lost in extraction; the body
 * reads "fout" (Archive *) and "pubrinfo" (a PublicationRelInfo *) —
 * confirm against upstream pg_dump.c.
 */
{
	PublicationInfo *pubinfo = pubrinfo->publication;
	TableInfo  *tbinfo = pubrinfo->pubtable;
	PQExpBuffer query;
	char	   *tag;

	if (!(pubrinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
		return;

	tag = psprintf("%s %s", pubinfo->dobj.name, tbinfo->dobj.name);

	query = createPQExpBuffer();

	/* ONLY: do not implicitly add partitions/children of the named table */
	appendPQExpBuffer(query, "ALTER PUBLICATION %s ADD TABLE ONLY",
					  fmtId(pubinfo->dobj.name));
	appendPQExpBuffer(query, " %s;\n",
					  fmtQualifiedDumpable(tbinfo));

	/*
	 * There is no point in creating a drop query as the drop is done by table
	 * drop.  (If you think to change this, see also _printTocEntry().)
	 * Although this object doesn't really have ownership as such, set the
	 * owner field anyway to ensure that the command is run by the correct
	 * role at restore time.
	 */
	ArchiveEntry(fout, pubrinfo->dobj.catId, pubrinfo->dobj.dumpId,
				 ARCHIVE_OPTS(.tag = tag,
							  .namespace = tbinfo->dobj.namespace->dobj.name,
							  .owner = pubinfo->rolname,
							  .description = "PUBLICATION TABLE",
							  .section = SECTION_POST_DATA,
							  .createStmt = query->data));

	free(tag);
	destroyPQExpBuffer(query);
}
4197 
/*
 * Is the currently connected user a superuser?
 */
static bool
/*
 * NOTE(review): the signature line appears lost in extraction; the body
 * reads "fout" (an Archive *) — confirm against upstream pg_dump.c.
 */
{
	ArchiveHandle *AH = (ArchiveHandle *) fout;
	const char *val;

	/*
	 * "is_superuser" is a GUC the server reports to the client, so this
	 * needs no extra query round trip.
	 */
	val = PQparameterStatus(AH->connection, "is_superuser");

	if (val && strcmp(val, "on") == 0)
		return true;

	/* Treat an unknown/unreported status as "not superuser". */
	return false;
}
4214 
/*
 * getSubscriptions
 *	  get information about subscriptions
 */
void
/*
 * NOTE(review): the signature line appears lost in extraction; the body
 * reads "fout" (an Archive *) — confirm against upstream pg_dump.c.
 */
{
	DumpOptions *dopt = fout->dopt;
	PQExpBuffer query;
	PGresult   *res;
	SubscriptionInfo *subinfo;
	int			i_tableoid;
	int			i_oid;
	int			i_subname;
	int			i_rolname;
	int			i_substream;
	int			i_subconninfo;
	int			i_subslotname;
	int			i_subsynccommit;
	int			i_subpublications;
	int			i_subbinary;
	int			i,
				ntups;

	/* Subscriptions exist only in v10 and later; also honor --no-subscriptions */
	if (dopt->no_subscriptions || fout->remoteVersion < 100000)
		return;

	/*
	 * Reading pg_subscription fully requires superuser; a non-superuser can
	 * only count rows, which we do to decide whether a warning is needed.
	 */
	if (!is_superuser(fout))
	{
		int			n;

		res = ExecuteSqlQuery(fout,
							  "SELECT count(*) FROM pg_subscription "
							  "WHERE subdbid = (SELECT oid FROM pg_database"
							  "                 WHERE datname = current_database())",
							  PGRES_TUPLES_OK);
		n = atoi(PQgetvalue(res, 0, 0));
		if (n > 0)
			pg_log_warning("subscriptions not dumped because current user is not a superuser");
		PQclear(res);
		return;
	}

	query = createPQExpBuffer();

	/* Get the subscriptions in current database. */
	appendPQExpBuffer(query,
					  "SELECT s.tableoid, s.oid, s.subname,\n"
					  " (%s s.subowner) AS rolname,\n"
					  " s.subconninfo, s.subslotname, s.subsynccommit,\n"
					  " s.subpublications,\n",
	/*
	 * NOTE(review): the trailing argument that fills the "%s" owner subquery
	 * above appears to be a dropped line in this extraction; verify against
	 * upstream before compiling.
	 */

	/* subbinary and substream were both added in v14; fake them otherwise */
	if (fout->remoteVersion >= 140000)
		appendPQExpBufferStr(query, " s.subbinary,\n");
	else
		appendPQExpBufferStr(query, " false AS subbinary,\n");

	if (fout->remoteVersion >= 140000)
		appendPQExpBufferStr(query, " s.substream\n");
	else
		appendPQExpBufferStr(query, " false AS substream\n");

	appendPQExpBufferStr(query,
						 "FROM pg_subscription s\n"
						 "WHERE s.subdbid = (SELECT oid FROM pg_database\n"
						 "                   WHERE datname = current_database())");

	res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);

	ntups = PQntuples(res);

	i_tableoid = PQfnumber(res, "tableoid");
	i_oid = PQfnumber(res, "oid");
	i_subname = PQfnumber(res, "subname");
	i_rolname = PQfnumber(res, "rolname");
	i_subconninfo = PQfnumber(res, "subconninfo");
	i_subslotname = PQfnumber(res, "subslotname");
	i_subsynccommit = PQfnumber(res, "subsynccommit");
	i_subpublications = PQfnumber(res, "subpublications");
	i_subbinary = PQfnumber(res, "subbinary");
	i_substream = PQfnumber(res, "substream");

	subinfo = pg_malloc(ntups * sizeof(SubscriptionInfo));

	for (i = 0; i < ntups; i++)
	{
		subinfo[i].dobj.objType = DO_SUBSCRIPTION;
		subinfo[i].dobj.catId.tableoid =
			atooid(PQgetvalue(res, i, i_tableoid));
		subinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
		AssignDumpId(&subinfo[i].dobj);
		subinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_subname));
		subinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
		subinfo[i].subconninfo = pg_strdup(PQgetvalue(res, i, i_subconninfo));
		/* slot name can be NULL (subscription created with slot_name = NONE) */
		if (PQgetisnull(res, i, i_subslotname))
			subinfo[i].subslotname = NULL;
		else
			subinfo[i].subslotname = pg_strdup(PQgetvalue(res, i, i_subslotname));
		subinfo[i].subsynccommit =
			pg_strdup(PQgetvalue(res, i, i_subsynccommit));
		subinfo[i].subpublications =
			pg_strdup(PQgetvalue(res, i, i_subpublications));
		subinfo[i].subbinary =
			pg_strdup(PQgetvalue(res, i, i_subbinary));
		subinfo[i].substream =
			pg_strdup(PQgetvalue(res, i, i_substream));

		/* An empty rolname means the owner role could not be resolved. */
		if (strlen(subinfo[i].rolname) == 0)
			pg_log_warning("owner of subscription \"%s\" appears to be invalid",
						   subinfo[i].dobj.name);

		/* Decide whether we want to dump it */
		selectDumpableObject(&(subinfo[i].dobj), fout);
	}
	PQclear(res);

	destroyPQExpBuffer(query);
}
4334 
/*
 * dumpSubscription
 *	  dump the definition of the given subscription
 */
static void
/*
 * NOTE(review): the signature line appears lost in extraction; the body
 * reads "fout" (Archive *) and "subinfo" (a SubscriptionInfo *) — confirm
 * against upstream pg_dump.c.
 */
{
	PQExpBuffer delq;
	PQExpBuffer query;
	PQExpBuffer publications;
	char	   *qsubname;
	char	  **pubnames = NULL;
	int			npubnames = 0;
	int			i;

	if (!(subinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
		return;

	delq = createPQExpBuffer();
	query = createPQExpBuffer();

	/* copy fmtId() result since its static buffer gets reused */
	qsubname = pg_strdup(fmtId(subinfo->dobj.name));

	appendPQExpBuffer(delq, "DROP SUBSCRIPTION %s;\n",
					  qsubname);

	appendPQExpBuffer(query, "CREATE SUBSCRIPTION %s CONNECTION ",
					  qsubname);
	/* conninfo is emitted as a properly escaped string literal */
	appendStringLiteralAH(query, subinfo->subconninfo, fout);

	/* Build list of quoted publications and append them to query. */
	if (!parsePGArray(subinfo->subpublications, &pubnames, &npubnames))
		fatal("could not parse subpublications array");

	publications = createPQExpBuffer();
	for (i = 0; i < npubnames; i++)
	{
		if (i > 0)
			appendPQExpBufferStr(publications, ", ");

		appendPQExpBufferStr(publications, fmtId(pubnames[i]));
	}

	/*
	 * connect = false keeps the restored subscription from immediately
	 * connecting to the remote server; the slot is assumed to exist already.
	 */
	appendPQExpBuffer(query, " PUBLICATION %s WITH (connect = false, slot_name = ", publications->data);
	if (subinfo->subslotname)
		appendStringLiteralAH(query, subinfo->subslotname, fout);
	else
		appendPQExpBufferStr(query, "NONE");

	/* Emit non-default options only. */
	if (strcmp(subinfo->subbinary, "t") == 0)
		appendPQExpBufferStr(query, ", binary = true");

	if (strcmp(subinfo->substream, "f") != 0)
		appendPQExpBufferStr(query, ", streaming = on");

	if (strcmp(subinfo->subsynccommit, "off") != 0)
		appendPQExpBuffer(query, ", synchronous_commit = %s", fmtId(subinfo->subsynccommit));

	appendPQExpBufferStr(query, ");\n");

	ArchiveEntry(fout, subinfo->dobj.catId, subinfo->dobj.dumpId,
				 ARCHIVE_OPTS(.tag = subinfo->dobj.name,
							  .owner = subinfo->rolname,
							  .description = "SUBSCRIPTION",
							  .section = SECTION_POST_DATA,
							  .createStmt = query->data,
							  .dropStmt = delq->data));

	if (subinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
		dumpComment(fout, "SUBSCRIPTION", qsubname,
					NULL, subinfo->rolname,
					subinfo->dobj.catId, 0, subinfo->dobj.dumpId);

	if (subinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
		dumpSecLabel(fout, "SUBSCRIPTION", qsubname,
					 NULL, subinfo->rolname,
					 subinfo->dobj.catId, 0, subinfo->dobj.dumpId);

	destroyPQExpBuffer(publications);
	/* frees only the pointer array; element strings presumably point into
	 * storage managed elsewhere — TODO confirm parsePGArray's contract */
	if (pubnames)
		free(pubnames);

	destroyPQExpBuffer(delq);
	destroyPQExpBuffer(query);
	free(qsubname);
}
4421 
/*
 * Given a "create query", append as many ALTER ... DEPENDS ON EXTENSION as
 * the object needs.
 */
static void
/*
 * NOTE(review): the first line of the parameter list (function name and
 * leading "Archive *fout" parameter) appears lost in extraction — confirm
 * against upstream pg_dump.c.
 */
			 PQExpBuffer create,
			 const DumpableObject *dobj,
			 const char *catalog,
			 const char *keyword,
			 const char *objname)
{
	if (dobj->depends_on_ext)
	{
		char	   *nm;
		PGresult   *res;
		PQExpBuffer query;
		int			ntups;
		int			i_extname;
		int			i;

		/* dodge fmtId() non-reentrancy */
		nm = pg_strdup(objname);

		/* Look up 'x' (extension) dependencies of this object in pg_depend. */
		query = createPQExpBuffer();
		appendPQExpBuffer(query,
						  "SELECT e.extname "
						  "FROM pg_catalog.pg_depend d, pg_catalog.pg_extension e "
						  "WHERE d.refobjid = e.oid AND classid = '%s'::pg_catalog.regclass "
						  "AND objid = '%u'::pg_catalog.oid AND deptype = 'x' "
						  "AND refclassid = 'pg_catalog.pg_extension'::pg_catalog.regclass",
						  catalog,
						  dobj->catId.oid);
		res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
		ntups = PQntuples(res);
		i_extname = PQfnumber(res, "extname");
		for (i = 0; i < ntups; i++)
		{
			/* one ALTER ... DEPENDS ON EXTENSION per referencing extension */
			appendPQExpBuffer(create, "ALTER %s %s DEPENDS ON EXTENSION %s;\n",
							  keyword, nm,
							  fmtId(PQgetvalue(res, i, i_extname)));
		}

		PQclear(res);
		destroyPQExpBuffer(query);
		pg_free(nm);
	}
}
4470 
static Oid
/*
 * NOTE(review): the signature line appears lost in extraction; the body
 * reads "fout" (Archive *) and "upgrade_query" (a PQExpBuffer) — confirm
 * against upstream pg_dump.c.
 */
{
	/*
	 * If the old version didn't assign an array type, but the new version
	 * does, we must select an unused type OID to assign.  This currently only
	 * happens for domains, when upgrading pre-v11 to v11 and up.
	 *
	 * Note: local state here is kind of ugly, but we must have some, since we
	 * mustn't choose the same unused OID more than once.
	 */
	static Oid	next_possible_free_oid = FirstNormalObjectId;
	PGresult   *res;
	bool		is_dup;

	/* Probe successive OIDs until one is absent from pg_type. */
	do
	{
		++next_possible_free_oid;
		printfPQExpBuffer(upgrade_query,
						  "SELECT EXISTS(SELECT 1 "
						  "FROM pg_catalog.pg_type "
						  "WHERE oid = '%u'::pg_catalog.oid);",
						  next_possible_free_oid);
		res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
		/* EXISTS() comes back as the string "t" or "f" */
		is_dup = (PQgetvalue(res, 0, 0)[0] == 't');
		PQclear(res);
	} while (is_dup);

	return next_possible_free_oid;
}
4501 
static void
/*
 * NOTE(review): the first line of the parameter list (function name and
 * leading "Archive *fout" parameter) appears lost in extraction — confirm
 * against upstream pg_dump.c.
 */
			 PQExpBuffer upgrade_buffer,
			 Oid pg_type_oid,
			 bool force_array_type,
			 bool include_multirange_type)
{
	PQExpBuffer upgrade_query = createPQExpBuffer();
	PGresult   *res;
	Oid			pg_type_array_oid;
	Oid			pg_type_multirange_oid;
	Oid			pg_type_multirange_array_oid;

	/* Emit the call that pins the base type's OID for the new cluster. */
	appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type oid\n");
	appendPQExpBuffer(upgrade_buffer,
					  "SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('%u'::pg_catalog.oid);\n\n",
					  pg_type_oid);

	/* we only support old >= 8.3 for binary upgrades */
	appendPQExpBuffer(upgrade_query,
					  "SELECT typarray "
					  "FROM pg_catalog.pg_type "
					  "WHERE oid = '%u'::pg_catalog.oid;",
					  pg_type_oid);

	res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);

	pg_type_array_oid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typarray")));

	PQclear(res);

	/*
	 * No array type recorded but caller says the new version will create
	 * one: pick an unused OID for it.
	 */
	if (!OidIsValid(pg_type_array_oid) && force_array_type)
		pg_type_array_oid = get_next_possible_free_pg_type_oid(fout, upgrade_query);

	if (OidIsValid(pg_type_array_oid))
	{
		appendPQExpBufferStr(upgrade_buffer,
							 "\n-- For binary upgrade, must preserve pg_type array oid\n");
		appendPQExpBuffer(upgrade_buffer,
						  "SELECT pg_catalog.binary_upgrade_set_next_array_pg_type_oid('%u'::pg_catalog.oid);\n\n",
						  pg_type_array_oid);
	}

	/*
	 * Pre-set the multirange type oid and its own array type oid.
	 */
	if (include_multirange_type)
	{
		if (fout->remoteVersion >= 140000)
		{
			/* Multiranges exist in v14+: read the real OIDs from pg_range. */
			appendPQExpBuffer(upgrade_query,
							  "SELECT t.oid, t.typarray "
							  "FROM pg_catalog.pg_type t "
							  "JOIN pg_catalog.pg_range r "
							  "ON t.oid = r.rngmultitypid "
							  "WHERE r.rngtypid = '%u'::pg_catalog.oid;",
							  pg_type_oid);

			res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);

			pg_type_multirange_oid = atooid(PQgetvalue(res, 0, PQfnumber(res, "oid")));
			pg_type_multirange_array_oid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typarray")));

			PQclear(res);
		}
		else
		{
			/* Older source server: multirange OIDs don't exist yet; invent them. */
			pg_type_multirange_oid = get_next_possible_free_pg_type_oid(fout, upgrade_query);
			pg_type_multirange_array_oid = get_next_possible_free_pg_type_oid(fout, upgrade_query);
		}

		appendPQExpBufferStr(upgrade_buffer,
							 "\n-- For binary upgrade, must preserve multirange pg_type oid\n");
		appendPQExpBuffer(upgrade_buffer,
						  "SELECT pg_catalog.binary_upgrade_set_next_multirange_pg_type_oid('%u'::pg_catalog.oid);\n\n",
						  pg_type_multirange_oid);
		appendPQExpBufferStr(upgrade_buffer,
							 "\n-- For binary upgrade, must preserve multirange pg_type array oid\n");
		appendPQExpBuffer(upgrade_buffer,
						  "SELECT pg_catalog.binary_upgrade_set_next_multirange_array_pg_type_oid('%u'::pg_catalog.oid);\n\n",
						  pg_type_multirange_array_oid);
	}

	destroyPQExpBuffer(upgrade_query);
}
4587 
static void
/*
 * NOTE(review): the first line of the parameter list (function name and
 * leading "Archive *fout" parameter) appears lost in extraction — confirm
 * against upstream pg_dump.c.
 */
			 PQExpBuffer upgrade_buffer,
			 Oid pg_rel_oid)
{
	PQExpBuffer upgrade_query = createPQExpBuffer();
	PGresult   *upgrade_res;
	Oid			pg_type_oid;

	/* Map the relation OID to its composite row type's OID. */
	appendPQExpBuffer(upgrade_query,
					  "SELECT c.reltype AS crel "
					  "FROM pg_catalog.pg_class c "
					  "WHERE c.oid = '%u'::pg_catalog.oid;",
					  pg_rel_oid);

	upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);

	pg_type_oid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "crel")));

	/* Delegate to the by-type-oid variant; no array/multirange forcing here. */
	if (OidIsValid(pg_type_oid))
		binary_upgrade_set_type_oids_by_type_oid(fout, upgrade_buffer,
												 pg_type_oid, false, false);

	PQclear(upgrade_res);
	destroyPQExpBuffer(upgrade_query);
}
4614 
static void
/*
 * NOTE(review): the first line of the parameter list (function name and
 * leading "Archive *fout" parameter) appears lost in extraction — confirm
 * against upstream pg_dump.c.
 */
			 PQExpBuffer upgrade_buffer, Oid pg_class_oid,
			 bool is_index)
{
	appendPQExpBufferStr(upgrade_buffer,
						 "\n-- For binary upgrade, must preserve pg_class oids\n");

	if (!is_index)
	{
		PQExpBuffer upgrade_query = createPQExpBuffer();
		PGresult   *upgrade_res;
		Oid			pg_class_reltoastrelid;
		char		pg_class_relkind;
		Oid			pg_index_indexrelid;

		appendPQExpBuffer(upgrade_buffer,
						  "SELECT pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('%u'::pg_catalog.oid);\n",
						  pg_class_oid);

		/*
		 * Preserve the OIDs of the table's toast table and index, if any.
		 * Indexes cannot have toast tables, so we need not make this probe in
		 * the index code path.
		 *
		 * One complexity is that the current table definition might not
		 * require the creation of a TOAST table, but the old database might
		 * have a TOAST table that was created earlier, before some wide
		 * columns were dropped.  By setting the TOAST oid we force creation
		 * of the TOAST heap and index by the new backend, so we can copy the
		 * files during binary upgrade without worrying about this case.
		 */
		appendPQExpBuffer(upgrade_query,
						  "SELECT c.reltoastrelid, c.relkind, i.indexrelid "
						  "FROM pg_catalog.pg_class c LEFT JOIN "
						  "pg_catalog.pg_index i ON (c.reltoastrelid = i.indrelid AND i.indisvalid) "
						  "WHERE c.oid = '%u'::pg_catalog.oid;",
						  pg_class_oid);

		upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);

		pg_class_reltoastrelid = atooid(PQgetvalue(upgrade_res, 0,
												   PQfnumber(upgrade_res, "reltoastrelid")));
		/* relkind is a single char; dereference the first byte */
		pg_class_relkind = *PQgetvalue(upgrade_res, 0,
									   PQfnumber(upgrade_res, "relkind"));
		pg_index_indexrelid = atooid(PQgetvalue(upgrade_res, 0,
												PQfnumber(upgrade_res, "indexrelid")));

		/*
		 * In a pre-v12 database, partitioned tables might be marked as having
		 * toast tables, but we should ignore them if so.
		 */
		if (OidIsValid(pg_class_reltoastrelid) &&
			pg_class_relkind != RELKIND_PARTITIONED_TABLE)
		{
			appendPQExpBuffer(upgrade_buffer,
							  "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('%u'::pg_catalog.oid);\n",
							  pg_class_reltoastrelid);

			/* every toast table has an index */
			appendPQExpBuffer(upgrade_buffer,
							  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
							  pg_index_indexrelid);
		}

		PQclear(upgrade_res);
		destroyPQExpBuffer(upgrade_query);
	}
	else
		appendPQExpBuffer(upgrade_buffer,
						  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
						  pg_class_oid);

	appendPQExpBufferChar(upgrade_buffer, '\n');
}
4690 
/*
 * If the DumpableObject is a member of an extension, add a suitable
 * ALTER EXTENSION ADD command to the creation commands in upgrade_buffer.
 *
 * For somewhat historical reasons, objname should already be quoted,
 * but not objnamespace (if any).
 */
static void
/*
 * NOTE(review): the first line of the parameter list (function name and
 * leading "PQExpBuffer upgrade_buffer" parameter) appears lost in
 * extraction — confirm against upstream pg_dump.c.
 */
			 const DumpableObject *dobj,
			 const char *objtype,
			 const char *objname,
			 const char *objnamespace)
{
	DumpableObject *extobj = NULL;
	int			i;

	if (!dobj->ext_member)
		return;

	/*
	 * Find the parent extension.  We could avoid this search if we wanted to
	 * add a link field to DumpableObject, but the space costs of that would
	 * be considerable.  We assume that member objects could only have a
	 * direct dependency on their own extension, not any others.
	 */
	for (i = 0; i < dobj->nDeps; i++)
	{
		extobj = findObjectByDumpId(dobj->dependencies[i]);
		if (extobj && extobj->objType == DO_EXTENSION)
			break;
		extobj = NULL;			/* keep looking; reset so post-loop test works */
	}
	if (extobj == NULL)
		fatal("could not find parent extension for %s %s",
			  objtype, objname);

	appendPQExpBufferStr(upgrade_buffer,
						 "\n-- For binary upgrade, handle extension membership the hard way\n");
	appendPQExpBuffer(upgrade_buffer, "ALTER EXTENSION %s ADD %s ",
					  fmtId(extobj->name),
					  objtype);
	/* schema-qualify when a namespace was supplied (objname is pre-quoted) */
	if (objnamespace && *objnamespace)
		appendPQExpBuffer(upgrade_buffer, "%s.", fmtId(objnamespace));
	appendPQExpBuffer(upgrade_buffer, "%s;\n", objname);
}
4737 
4738 /*
4739  * getNamespaces:
4740  * read all namespaces in the system catalogs and return them in the
4741  * NamespaceInfo* structure
4742  *
4743  * numNamespaces is set to the number of namespaces read in
4744  */
4745 NamespaceInfo *
4747 {
4748  DumpOptions *dopt = fout->dopt;
4749  PGresult *res;
4750  int ntups;
4751  int i;
4752  PQExpBuffer query;
4753  NamespaceInfo *nsinfo;
4754  int i_tableoid;
4755  int i_oid;
4756  int i_nspname;
4757  int i_rolname;
4758  int i_nspacl;
4759  int i_rnspacl;
4760  int i_initnspacl;
4761  int i_initrnspacl;
4762 
4763  query = createPQExpBuffer();
4764 
4765  /*
4766  * we fetch all namespaces including system ones, so that every object we
4767  * read in can be linked to a containing namespace.
4768  */
4769  if (fout->remoteVersion >= 90600)
4770  {
4771  PQExpBuffer acl_subquery = createPQExpBuffer();
4772  PQExpBuffer racl_subquery = createPQExpBuffer();
4773  PQExpBuffer init_acl_subquery = createPQExpBuffer();
4774  PQExpBuffer init_racl_subquery = createPQExpBuffer();
4775 
4776  buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
4777  init_racl_subquery, "n.nspacl", "n.nspowner", "'n'",
4778  dopt->binary_upgrade);
4779 
4780  appendPQExpBuffer(query, "SELECT n.tableoid, n.oid, n.nspname, "
4781  "(%s nspowner) AS rolname, "
4782  "%s as nspacl, "
4783  "%s as rnspacl, "
4784  "%s as initnspacl, "
4785  "%s as initrnspacl "
4786  "FROM pg_namespace n "
4787  "LEFT JOIN pg_init_privs pip "
4788  "ON (n.oid = pip.objoid "
4789  "AND pip.classoid = 'pg_namespace'::regclass "
4790  "AND pip.objsubid = 0",
4792  acl_subquery->data,
4793  racl_subquery->data,
4794  init_acl_subquery->data,
4795  init_racl_subquery->data);
4796 
4797  appendPQExpBufferStr(query, ") ");
4798 
4799  destroyPQExpBuffer(acl_subquery);
4800  destroyPQExpBuffer(racl_subquery);
4801  destroyPQExpBuffer(init_acl_subquery);
4802  destroyPQExpBuffer(init_racl_subquery);
4803  }
4804  else
4805  appendPQExpBuffer(query, "SELECT tableoid, oid, nspname, "
4806  "(%s nspowner) AS rolname, "
4807  "nspacl, NULL as rnspacl, "
4808  "NULL AS initnspacl, NULL as initrnspacl "
4809  "FROM pg_namespace",
4811 
4812  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4813 
4814  ntups = PQntuples(res);
4815 
4816  nsinfo = (NamespaceInfo *) pg_malloc(ntups * sizeof(NamespaceInfo));
4817 
4818  i_tableoid = PQfnumber(res, "tableoid");
4819  i_oid = PQfnumber(res, "oid");
4820  i_nspname = PQfnumber(res, "nspname");
4821  i_rolname = PQfnumber(res, "rolname");
4822  i_nspacl = PQfnumber(res, "nspacl");
4823  i_rnspacl = PQfnumber(res, "rnspacl");
4824  i_initnspacl = PQfnumber(res, "initnspacl");
4825  i_initrnspacl = PQfnumber(res, "initrnspacl");
4826 
4827  for (i = 0; i < ntups; i++)
4828  {
4829  nsinfo[i].dobj.objType = DO_NAMESPACE;
4830  nsinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4831  nsinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4832  AssignDumpId(&nsinfo[i].dobj);
4833  nsinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_nspname));
4834  nsinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4835  nsinfo[i].nspacl = pg_strdup(PQgetvalue(res, i, i_nspacl));
4836  nsinfo[i].rnspacl = pg_strdup(PQgetvalue(res, i, i_rnspacl));
4837  nsinfo[i].initnspacl = pg_strdup(PQgetvalue(res, i, i_initnspacl));
4838  nsinfo[i].initrnspacl = pg_strdup(PQgetvalue(res, i, i_initrnspacl));
4839 
4840  /* Decide whether to dump this namespace */
4841  selectDumpableNamespace(&nsinfo[i], fout);
4842 
4843  /*
4844  * Do not try to dump ACL if the ACL is empty or the default.
4845  *
4846  * This is useful because, for some schemas/objects, the only
4847  * component we are going to try and dump is the ACL and if we can
4848  * remove that then 'dump' goes to zero/false and we don't consider
4849  * this object for dumping at all later on.
4850  */
4851  if (PQgetisnull(res, i, i_nspacl) && PQgetisnull(res, i, i_rnspacl) &&
4852  PQgetisnull(res, i, i_initnspacl) &&
4853  PQgetisnull(res, i, i_initrnspacl))
4854  nsinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4855 
4856  if (strlen(nsinfo[i].rolname) == 0)
4857  pg_log_warning("owner of schema \"%s\" appears to be invalid",
4858  nsinfo[i].dobj.name);
4859  }
4860 
4861  PQclear(res);
4862  destroyPQExpBuffer(query);
4863 
4864  *numNamespaces = ntups;
4865 
4866  return nsinfo;
4867 }
4868 
4869 /*
4870  * findNamespace:
4871  * given a namespace OID, look up the info read by getNamespaces
4872  */
4873 static NamespaceInfo *
4875 {
4876  NamespaceInfo *nsinfo;
4877 
4878  nsinfo = findNamespaceByOid(nsoid);
4879  if (nsinfo == NULL)
4880  fatal("schema with OID %u does not exist", nsoid);
4881  return nsinfo;
4882 }
4883 
4884 /*
4885  * getExtensions:
4886  * read all extensions in the system catalogs and return them in the
4887  * ExtensionInfo* structure
4888  *
4889  * numExtensions is set to the number of extensions read in
4890  */
4891 ExtensionInfo *
4893 {
4894  DumpOptions *dopt = fout->dopt;
4895  PGresult *res;
4896  int ntups;
4897  int i;
4898  PQExpBuffer query;
4899  ExtensionInfo *extinfo;
4900  int i_tableoid;
4901  int i_oid;
4902  int i_extname;
4903  int i_nspname;
4904  int i_extrelocatable;
4905  int i_extversion;
4906  int i_extconfig;
4907  int i_extcondition;
4908 
4909  /*
4910  * Before 9.1, there are no extensions.
4911  */
4912  if (fout->remoteVersion < 90100)
4913  {
4914  *numExtensions = 0;
4915  return NULL;
4916  }
4917 
4918  query = createPQExpBuffer();
4919 
4920  appendPQExpBufferStr(query, "SELECT x.tableoid, x.oid, "
4921  "x.extname, n.nspname, x.extrelocatable, x.extversion, x.extconfig, x.extcondition "
4922  "FROM pg_extension x "
4923  "JOIN pg_namespace n ON n.oid = x.extnamespace");
4924 
4925  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4926 
4927  ntups = PQntuples(res);
4928 
4929  extinfo = (ExtensionInfo *) pg_malloc(ntups * sizeof(ExtensionInfo));
4930 
4931  i_tableoid = PQfnumber(res, "tableoid");
4932  i_oid = PQfnumber(res, "oid");
4933  i_extname = PQfnumber(res, "extname");
4934  i_nspname = PQfnumber(res, "nspname");
4935  i_extrelocatable = PQfnumber(res, "extrelocatable");
4936  i_extversion = PQfnumber(res, "extversion");
4937  i_extconfig = PQfnumber(res, "extconfig");
4938  i_extcondition = PQfnumber(res, "extcondition");
4939 
4940  for (i = 0; i < ntups; i++)
4941  {
4942  extinfo[i].dobj.objType = DO_EXTENSION;
4943  extinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4944  extinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4945  AssignDumpId(&extinfo[i].dobj);
4946  extinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_extname));
4947  extinfo[i].namespace = pg_strdup(PQgetvalue(res, i, i_nspname));
4948  extinfo[i].relocatable = *(PQgetvalue(res, i, i_extrelocatable)) == 't';
4949  extinfo[i].extversion = pg_strdup(PQgetvalue(res, i, i_extversion));
4950  extinfo[i].extconfig = pg_strdup(PQgetvalue(res, i, i_extconfig));
4951  extinfo[i].extcondition = pg_strdup(PQgetvalue(res, i, i_extcondition));
4952 
4953  /* Decide whether we want to dump it */
4954  selectDumpableExtension(&(extinfo[i]), dopt);
4955  }
4956 
4957  PQclear(res);
4958  destroyPQExpBuffer(query);
4959 
4960  *numExtensions = ntups;
4961 
4962  return extinfo;
4963 }
4964 
4965 /*
4966  * getTypes:
4967  * read all types in the system catalogs and return them in the
4968  * TypeInfo* structure
4969  *
4970  * numTypes is set to the number of types read in
4971  *
4972  * NB: this must run after getFuncs() because we assume we can do
4973  * findFuncByOid().
4974  */
4975 TypeInfo *
4977 {
4978  DumpOptions *dopt = fout->dopt;
4979  PGresult *res;
4980  int ntups;
4981  int i;
4982  PQExpBuffer query = createPQExpBuffer();
4983  TypeInfo *tyinfo;
4984  ShellTypeInfo *stinfo;
4985  int i_tableoid;
4986  int i_oid;
4987  int i_typname;
4988  int i_typnamespace;
4989  int i_typacl;
4990  int i_rtypacl;
4991  int i_inittypacl;
4992  int i_initrtypacl;
4993  int i_rolname;
4994  int i_typelem;
4995  int i_typrelid;
4996  int i_typrelkind;
4997  int i_typtype;
4998  int i_typisdefined;
4999  int i_isarray;
5000 
5001  /*
5002  * we include even the built-in types because those may be used as array
5003  * elements by user-defined types
5004  *
5005  * we filter out the built-in types when we dump out the types
5006  *
5007  * same approach for undefined (shell) types and array types
5008  *
5009  * Note: as of 8.3 we can reliably detect whether a type is an
5010  * auto-generated array type by checking the element type's typarray.
5011  * (Before that the test is capable of generating false positives.) We
5012  * still check for name beginning with '_', though, so as to avoid the
5013  * cost of the subselect probe for all standard types. This would have to
5014  * be revisited if the backend ever allows renaming of array types.
5015  */
5016 
5017  if (fout->remoteVersion >= 90600)
5018  {
5019  PQExpBuffer acl_subquery = createPQExpBuffer();
5020  PQExpBuffer racl_subquery = createPQExpBuffer();
5021  PQExpBuffer initacl_subquery = createPQExpBuffer();
5022  PQExpBuffer initracl_subquery = createPQExpBuffer();
5023 
5024  buildACLQueries(acl_subquery, racl_subquery, initacl_subquery,
5025  initracl_subquery, "t.typacl", "t.typowner", "'T'",
5026  dopt->binary_upgrade);
5027 
5028  appendPQExpBuffer(query, "SELECT t.tableoid, t.oid, t.typname, "
5029  "t.typnamespace, "
5030  "%s AS typacl, "
5031  "%s AS rtypacl, "
5032  "%s AS inittypacl, "
5033  "%s AS initrtypacl, "
5034  "(%s t.typowner) AS rolname, "
5035  "t.typelem, t.typrelid, "
5036  "CASE WHEN t.typrelid = 0 THEN ' '::\"char\" "
5037  "ELSE (SELECT relkind FROM pg_class WHERE oid = t.typrelid) END AS typrelkind, "
5038  "t.typtype, t.typisdefined, "
5039  "t.typname[0] = '_' AND t.typelem != 0 AND "
5040  "(SELECT typarray FROM pg_type te WHERE oid = t.typelem) = t.oid AS isarray "
5041  "FROM pg_type t "
5042  "LEFT JOIN pg_init_privs pip ON "
5043  "(t.oid = pip.objoid "
5044  "AND pip.classoid = 'pg_type'::regclass "
5045  "AND pip.objsubid = 0) ",
5046  acl_subquery->data,
5047  racl_subquery->data,
5048  initacl_subquery->data,
5049  initracl_subquery->data,
5051 
5052  destroyPQExpBuffer(acl_subquery);
5053  destroyPQExpBuffer(racl_subquery);
5054  destroyPQExpBuffer(initacl_subquery);
5055  destroyPQExpBuffer(initracl_subquery);
5056  }
5057  else if (fout->remoteVersion >= 90200)
5058  {
5059  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
5060  "typnamespace, typacl, NULL as rtypacl, "
5061  "NULL AS inittypacl, NULL AS initrtypacl, "
5062  "(%s typowner) AS rolname, "
5063  "typelem, typrelid, "
5064  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
5065  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
5066  "typtype, typisdefined, "
5067  "typname[0] = '_' AND typelem != 0 AND "
5068  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
5069  "FROM pg_type",
5071  }
5072  else if (fout->remoteVersion >= 80300)
5073  {
5074  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
5075  "typnamespace, NULL AS typacl, NULL as rtypacl, "
5076  "NULL AS inittypacl, NULL AS initrtypacl, "
5077  "(%s typowner) AS rolname, "
5078  "typelem, typrelid, "
5079  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
5080  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
5081  "typtype, typisdefined, "
5082  "typname[0] = '_' AND typelem != 0 AND "
5083  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
5084  "FROM pg_type",
5086  }
5087  else
5088  {
5089  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
5090  "typnamespace, NULL AS typacl, NULL as rtypacl, "
5091  "NULL AS inittypacl, NULL AS initrtypacl, "
5092  "(%s typowner) AS rolname, "
5093  "typelem, typrelid, "
5094  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
5095  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
5096  "typtype, typisdefined, "
5097  "typname[0] = '_' AND typelem != 0 AS isarray "
5098  "FROM pg_type",
5100  }
5101 
5102  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5103 
5104  ntups = PQntuples(res);
5105 
5106  tyinfo = (TypeInfo *) pg_malloc(ntups * sizeof(TypeInfo));
5107 
5108  i_tableoid = PQfnumber(res, "tableoid");
5109  i_oid = PQfnumber(res, "oid");
5110  i_typname = PQfnumber(res, "typname");
5111  i_typnamespace = PQfnumber(res, "typnamespace");
5112  i_typacl = PQfnumber(res, "typacl");
5113  i_rtypacl = PQfnumber(res, "rtypacl");
5114  i_inittypacl = PQfnumber(res, "inittypacl");
5115  i_initrtypacl = PQfnumber(res, "initrtypacl");
5116  i_rolname = PQfnumber(res, "rolname");
5117  i_typelem = PQfnumber(res, "typelem");
5118  i_typrelid = PQfnumber(res, "typrelid");
5119  i_typrelkind = PQfnumber(res, "typrelkind");
5120  i_typtype = PQfnumber(res, "typtype");
5121  i_typisdefined = PQfnumber(res, "typisdefined");
5122  i_isarray = PQfnumber(res, "isarray");
5123 
5124  for (i = 0; i < ntups; i++)
5125  {
5126  tyinfo[i].dobj.objType = DO_TYPE;
5127  tyinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5128  tyinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5129  AssignDumpId(&tyinfo[i].dobj);
5130  tyinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_typname));
5131  tyinfo[i].dobj.namespace =
5132  findNamespace(atooid(PQgetvalue(res, i, i_typnamespace)));
5133  tyinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5134  tyinfo[i].typacl = pg_strdup(PQgetvalue(res, i, i_typacl));
5135  tyinfo[i].rtypacl = pg_strdup(PQgetvalue(res, i, i_rtypacl));
5136  tyinfo[i].inittypacl = pg_strdup(PQgetvalue(res, i, i_inittypacl));
5137  tyinfo[i].initrtypacl = pg_strdup(PQgetvalue(res, i, i_initrtypacl));
5138  tyinfo[i].typelem = atooid(PQgetvalue(res, i, i_typelem));
5139  tyinfo[i].typrelid = atooid(PQgetvalue(res, i, i_typrelid));
5140  tyinfo[i].typrelkind = *PQgetvalue(res, i, i_typrelkind);
5141  tyinfo[i].typtype = *PQgetvalue(res, i, i_typtype);
5142  tyinfo[i].shellType = NULL;
5143 
5144  if (strcmp(PQgetvalue(res, i, i_typisdefined), "t") == 0)
5145  tyinfo[i].isDefined = true;
5146  else
5147  tyinfo[i].isDefined = false;
5148 
5149  if (strcmp(PQgetvalue(res, i, i_isarray), "t") == 0)
5150  tyinfo[i].isArray = true;
5151  else
5152  tyinfo[i].isArray = false;
5153 
5154  if (tyinfo[i].typtype == 'm')
5155  tyinfo[i].isMultirange = true;
5156  else
5157  tyinfo[i].isMultirange = false;
5158 
5159  /* Decide whether we want to dump it */
5160  selectDumpableType(&tyinfo[i], fout);
5161 
5162  /* Do not try to dump ACL if no ACL exists. */
5163  if (PQgetisnull(res, i, i_typacl) && PQgetisnull(res, i, i_rtypacl) &&
5164  PQgetisnull(res, i, i_inittypacl) &&
5165  PQgetisnull(res, i, i_initrtypacl))
5166  tyinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5167 
5168  /*
5169  * If it's a domain, fetch info about its constraints, if any
5170  */
5171  tyinfo[i].nDomChecks = 0;
5172  tyinfo[i].domChecks = NULL;
5173  if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
5174  tyinfo[i].typtype == TYPTYPE_DOMAIN)
5175  getDomainConstraints(fout, &(tyinfo[i]));
5176 
5177  /*
5178  * If it's a base type, make a DumpableObject representing a shell
5179  * definition of the type. We will need to dump that ahead of the I/O
5180  * functions for the type. Similarly, range types need a shell
5181  * definition in case they have a canonicalize function.
5182  *
5183  * Note: the shell type doesn't have a catId. You might think it
5184  * should copy the base type's catId, but then it might capture the
5185  * pg_depend entries for the type, which we don't want.
5186  */
5187  if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
5188  (tyinfo[i].typtype == TYPTYPE_BASE ||
5189  tyinfo[i].typtype == TYPTYPE_RANGE))
5190  {
5191  stinfo = (ShellTypeInfo *) pg_malloc(sizeof(ShellTypeInfo));
5192  stinfo->dobj.objType = DO_SHELL_TYPE;
5193  stinfo->dobj.catId = nilCatalogId;
5194  AssignDumpId(&stinfo->dobj);
5195  stinfo->dobj.name = pg_strdup(tyinfo[i].dobj.name);
5196  stinfo->dobj.namespace = tyinfo[i].dobj.namespace;
5197  stinfo->baseType = &(tyinfo[i]);
5198  tyinfo[i].shellType = stinfo;
5199 
5200  /*
5201  * Initially mark the shell type as not to be dumped. We'll only
5202  * dump it if the I/O or canonicalize functions need to be dumped;
5203  * this is taken care of while sorting dependencies.
5204  */
5205  stinfo->dobj.dump = DUMP_COMPONENT_NONE;
5206  }
5207 
5208  if (strlen(tyinfo[i].rolname) == 0)
5209  pg_log_warning("owner of data type \"%s\" appears to be invalid",
5210  tyinfo[i].dobj.name);
5211  }
5212 
5213  *numTypes = ntups;
5214 
5215  PQclear(res);
5216 
5217  destroyPQExpBuffer(query);
5218 
5219  return tyinfo;
5220 }
5221 
5222 /*
5223  * getOperators:
5224  * read all operators in the system catalogs and return them in the
5225  * OprInfo* structure
5226  *
5227  * numOprs is set to the number of operators read in
5228  */
5229 OprInfo *
5230 getOperators(Archive *fout, int *numOprs)
5231 {
5232  PGresult *res;
5233  int ntups;
5234  int i;
5235  PQExpBuffer query = createPQExpBuffer();
5236  OprInfo *oprinfo;
5237  int i_tableoid;
5238  int i_oid;
5239  int i_oprname;
5240  int i_oprnamespace;
5241  int i_rolname;
5242  int i_oprkind;
5243  int i_oprcode;
5244 
5245  /*
5246  * find all operators, including builtin operators; we filter out
5247  * system-defined operators at dump-out time.
5248  */
5249 
5250  appendPQExpBuffer(query, "SELECT tableoid, oid, oprname, "
5251  "oprnamespace, "
5252  "(%s oprowner) AS rolname, "
5253  "oprkind, "
5254  "oprcode::oid AS oprcode "
5255  "FROM pg_operator",
5257 
5258  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5259 
5260  ntups = PQntuples(res);
5261  *numOprs = ntups;
5262 
5263  oprinfo = (OprInfo *) pg_malloc(ntups * sizeof(OprInfo));
5264 
5265  i_tableoid = PQfnumber(res, "tableoid");
5266  i_oid = PQfnumber(res, "oid");
5267  i_oprname = PQfnumber(res, "oprname");
5268  i_oprnamespace = PQfnumber(res, "oprnamespace");
5269  i_rolname = PQfnumber(res, "rolname");
5270  i_oprkind = PQfnumber(res, "oprkind");
5271  i_oprcode = PQfnumber(res, "oprcode");
5272 
5273  for (i = 0; i < ntups; i++)
5274  {
5275  oprinfo[i].dobj.objType = DO_OPERATOR;
5276  oprinfo[i].dobj.catId.tableoid =