PostgreSQL source code (git master) — pg_dump.c, captured from the Doxygen source listing; embedded per-line numbers below are extraction artifacts.
/*-------------------------------------------------------------------------
 *
 * pg_dump.c
 *	  pg_dump is a utility for dumping out a postgres database
 *	  into a script file.
 *
 * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * pg_dump will read the system catalogs in a database and dump out a
 * script that reproduces the schema in terms of SQL that is understood
 * by PostgreSQL
 *
 * Note that pg_dump runs in a transaction-snapshot mode transaction,
 * so it sees a consistent snapshot of the database including system
 * catalogs.  However, it relies in part on various specialized backend
 * functions like pg_get_indexdef(), and those things tend to look at
 * the currently committed state.  So it is possible to get 'cache
 * lookup failed' error if someone performs DDL changes while a dump is
 * happening.  The window for this sort of thing is from the acquisition
 * of the transaction snapshot to getSchemaData() (when pg_dump acquires
 * AccessShareLock on every table it intends to dump).  It isn't very large,
 * but it can happen.
 *
 * http://archives.postgresql.org/pgsql-bugs/2010-02/msg00187.php
 *
 * IDENTIFICATION
 *	  src/bin/pg_dump/pg_dump.c
 *
 *-------------------------------------------------------------------------
 */
32 #include "postgres_fe.h"
33 
34 #include <unistd.h>
35 #include <ctype.h>
36 #include <limits.h>
37 #ifdef HAVE_TERMIOS_H
38 #include <termios.h>
39 #endif
40 
41 #include "access/attnum.h"
42 #include "access/sysattr.h"
43 #include "access/transam.h"
44 #include "catalog/pg_aggregate_d.h"
45 #include "catalog/pg_am_d.h"
46 #include "catalog/pg_attribute_d.h"
47 #include "catalog/pg_cast_d.h"
48 #include "catalog/pg_class_d.h"
49 #include "catalog/pg_default_acl_d.h"
50 #include "catalog/pg_largeobject_d.h"
51 #include "catalog/pg_largeobject_metadata_d.h"
52 #include "catalog/pg_proc_d.h"
53 #include "catalog/pg_trigger_d.h"
54 #include "catalog/pg_type_d.h"
55 #include "common/connect.h"
56 #include "dumputils.h"
57 #include "fe_utils/string_utils.h"
58 #include "getopt_long.h"
59 #include "libpq/libpq-fs.h"
60 #include "parallel.h"
61 #include "pg_backup_db.h"
62 #include "pg_backup_utils.h"
63 #include "pg_dump.h"
64 #include "storage/block.h"
65 
/*
 * One collected comment on a database object, keyed by the classic
 * (classoid, objoid, objsubid) triple used throughout the catalogs.
 * Presumably populated from pg_description by collectComments() —
 * that function's body is not visible in this extract; confirm there.
 */
typedef struct
{
	const char *descr;			/* comment for an object */
	Oid			classoid;		/* object class (catalog OID) */
	Oid			objoid;			/* object OID */
	int			objsubid;		/* subobject (table column #), 0 for the
								 * object as a whole */
} CommentItem;
73 
/*
 * One collected security label on a database object, identified the same
 * way as CommentItem but additionally carrying the label provider name.
 * Presumably populated from pg_seclabel by collectSecLabels() — that
 * function's body is not visible in this extract; confirm there.
 */
typedef struct
{
	const char *provider;		/* label provider of this security label */
	const char *label;			/* security label for an object */
	Oid			classoid;		/* object class (catalog OID) */
	Oid			objoid;			/* object OID */
	int			objsubid;		/* subobject (table column #), 0 for the
								 * object as a whole */
} SecLabelItem;
82 
83 typedef enum OidOptions
84 {
88 } OidOptions;
89 
/* global decls */
static bool dosync = true;		/* Issue fsync() to make dump durable on disk. */

/* subquery used to convert user ID (eg, datdba) to user name */
static const char *username_subquery;

/*
 * For 8.0 and earlier servers, pulled from pg_database, for 8.1+ we use
 * FirstNormalObjectId - 1.
 */
static Oid	g_last_builtin_oid; /* value of the last builtin oid */

/* The specified names/patterns should match at least one entity */
static int	strict_names = 0;

/*
 * Object inclusion/exclusion lists
 *
 * The string lists record the patterns given by command-line switches,
 * which we then convert to lists of OIDs of matching objects.
 *
 * NOTE(review): the SimpleStringList pattern variables that main() appends
 * to (schema_include_patterns, table_exclude_patterns, etc.) appear to be
 * elided from this extract — confirm against the original file.
 */
static SimpleOidList schema_include_oids = {NULL, NULL};
static SimpleOidList schema_exclude_oids = {NULL, NULL};

static SimpleOidList table_include_oids = {NULL, NULL};
static SimpleOidList table_exclude_oids = {NULL, NULL};
static SimpleOidList tabledata_exclude_oids = {NULL, NULL};

/* placeholder catalog ID used for TOC entries with no real catalog object */
static const CatalogId nilCatalogId = {0, 0};

/* override for standard extra_float_digits setting */
static bool have_extra_float_digits = false;

/*
 * The default number of rows per INSERT when
 * --inserts is specified without --rows-per-insert
 */
#define DUMP_DEFAULT_ROWS_PER_INSERT 1

/*
 * Macro for producing quoted, schema-qualified name of a dumpable object.
 */
#define fmtQualifiedDumpable(obj) \
	fmtQualifiedId((obj)->dobj.namespace->dobj.name, \
				   (obj)->dobj.name)
143 
/*
 * Forward declarations for pg_dump's internal helpers.
 *
 * NOTE(review): this extract has lost the opening line of several
 * declarations (their parameter continuations remain); each such
 * truncated fragment is flagged below — confirm the missing names
 * against the original file rather than guessing them here.
 */
static void help(const char *progname);
static void setup_connection(Archive *AH,
							 const char *dumpencoding, const char *dumpsnapshot,
							 char *use_role);
/* pattern expansion: convert command-line name patterns into OID lists */
static void expand_schema_name_patterns(Archive *fout,
										SimpleStringList *patterns,
										SimpleOidList *oids,
										bool strict_names);
/* NOTE(review): truncated declaration — opening line(s) missing */
										SimpleStringList *patterns,
										SimpleOidList *oids);
static void expand_table_name_patterns(Archive *fout,
									   SimpleStringList *patterns,
									   SimpleOidList *oids,
									   bool strict_names);
static NamespaceInfo *findNamespace(Oid nsoid);
static void dumpTableData(Archive *fout, TableDataInfo *tdinfo);
static void refreshMatViewData(Archive *fout, TableDataInfo *tdinfo);
static void guessConstraintInheritance(TableInfo *tblinfo, int numTables);
/* comment / security-label collection and emission */
static void dumpComment(Archive *fout, const char *type, const char *name,
						const char *namespace, const char *owner,
						CatalogId catalogId, int subid, DumpId dumpId);
static int	findComments(Archive *fout, Oid classoid, Oid objoid,
						 CommentItem **items);
static int	collectComments(Archive *fout, CommentItem **items);
static void dumpSecLabel(Archive *fout, const char *type, const char *name,
						 const char *namespace, const char *owner,
						 CatalogId catalogId, int subid, DumpId dumpId);
static int	findSecLabels(Archive *fout, Oid classoid, Oid objoid,
						  SecLabelItem **items);
static int	collectSecLabels(Archive *fout, SecLabelItem **items);
/* per-object-class dump routines, dispatched from dumpDumpableObject() */
static void dumpDumpableObject(Archive *fout, DumpableObject *dobj);
static void dumpNamespace(Archive *fout, NamespaceInfo *nspinfo);
static void dumpExtension(Archive *fout, ExtensionInfo *extinfo);
static void dumpType(Archive *fout, TypeInfo *tyinfo);
static void dumpBaseType(Archive *fout, TypeInfo *tyinfo);
static void dumpEnumType(Archive *fout, TypeInfo *tyinfo);
static void dumpRangeType(Archive *fout, TypeInfo *tyinfo);
static void dumpUndefinedType(Archive *fout, TypeInfo *tyinfo);
static void dumpDomain(Archive *fout, TypeInfo *tyinfo);
static void dumpCompositeType(Archive *fout, TypeInfo *tyinfo);
static void dumpCompositeTypeColComments(Archive *fout, TypeInfo *tyinfo);
static void dumpShellType(Archive *fout, ShellTypeInfo *stinfo);
static void dumpProcLang(Archive *fout, ProcLangInfo *plang);
static void dumpFunc(Archive *fout, FuncInfo *finfo);
static void dumpCast(Archive *fout, CastInfo *cast);
static void dumpTransform(Archive *fout, TransformInfo *transform);
static void dumpOpr(Archive *fout, OprInfo *oprinfo);
static void dumpAccessMethod(Archive *fout, AccessMethodInfo *oprinfo);
static void dumpOpclass(Archive *fout, OpclassInfo *opcinfo);
static void dumpOpfamily(Archive *fout, OpfamilyInfo *opfinfo);
static void dumpCollation(Archive *fout, CollInfo *collinfo);
static void dumpConversion(Archive *fout, ConvInfo *convinfo);
static void dumpRule(Archive *fout, RuleInfo *rinfo);
static void dumpAgg(Archive *fout, AggInfo *agginfo);
static void dumpTrigger(Archive *fout, TriggerInfo *tginfo);
static void dumpEventTrigger(Archive *fout, EventTriggerInfo *evtinfo);
static void dumpTable(Archive *fout, TableInfo *tbinfo);
static void dumpTableSchema(Archive *fout, TableInfo *tbinfo);
static void dumpAttrDef(Archive *fout, AttrDefInfo *adinfo);
static void dumpSequence(Archive *fout, TableInfo *tbinfo);
static void dumpSequenceData(Archive *fout, TableDataInfo *tdinfo);
static void dumpIndex(Archive *fout, IndxInfo *indxinfo);
static void dumpIndexAttach(Archive *fout, IndexAttachInfo *attachinfo);
static void dumpStatisticsExt(Archive *fout, StatsExtInfo *statsextinfo);
static void dumpConstraint(Archive *fout, ConstraintInfo *coninfo);
static void dumpTableConstraintComment(Archive *fout, ConstraintInfo *coninfo);
static void dumpTSParser(Archive *fout, TSParserInfo *prsinfo);
static void dumpTSDictionary(Archive *fout, TSDictInfo *dictinfo);
static void dumpTSTemplate(Archive *fout, TSTemplateInfo *tmplinfo);
static void dumpTSConfig(Archive *fout, TSConfigInfo *cfginfo);
static void dumpForeignDataWrapper(Archive *fout, FdwInfo *fdwinfo);
static void dumpForeignServer(Archive *fout, ForeignServerInfo *srvinfo);
static void dumpUserMappings(Archive *fout,
							 const char *servername, const char *namespace,
							 const char *owner, CatalogId catalogId, DumpId dumpId);
static void dumpDefaultACL(Archive *fout, DefaultACLInfo *daclinfo);

static DumpId dumpACL(Archive *fout, DumpId objDumpId, DumpId altDumpId,
					  const char *type, const char *name, const char *subname,
					  const char *nspname, const char *owner,
					  const char *acls, const char *racls,
					  const char *initacls, const char *initracls);

/* dependency collection and ordering support */
static void getDependencies(Archive *fout);
static void BuildArchiveDependencies(Archive *fout);
/* NOTE(review): truncated declaration — opening line (function name) missing */
									DumpId **dependencies, int *nDeps, int *allocDeps);

static void addBoundaryDependencies(DumpableObject **dobjs, int numObjs,
									DumpableObject *boundaryObjs);

static void addConstrChildIdxDeps(DumpableObject *dobj, IndxInfo *refidx);
static void getDomainConstraints(Archive *fout, TypeInfo *tyinfo);
static void getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, char relkind);
static void makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo);
static void buildMatViewRefreshDependencies(Archive *fout);
static void getTableDataFKConstraints(void);
/* formatting helpers for function signatures and catalog references */
static char *format_function_arguments(FuncInfo *finfo, char *funcargs,
									   bool is_agg);
static char *format_function_arguments_old(Archive *fout,
										   FuncInfo *finfo, int nallargs,
										   char **allargtypes,
										   char **argmodes,
										   char **argnames);
static char *format_function_signature(Archive *fout,
									   FuncInfo *finfo, bool honor_quotes);
static char *convertRegProcReference(const char *proc);
static char *getFormattedOperatorName(const char *oproid);
static char *convertTSFunction(Archive *fout, Oid funcOid);
static Oid	findLastBuiltinOid_V71(Archive *fout);
static char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts);
static void getBlobs(Archive *fout);
static void dumpBlob(Archive *fout, BlobInfo *binfo);
static int	dumpBlobs(Archive *fout, void *arg);
static void dumpPolicy(Archive *fout, PolicyInfo *polinfo);
static void dumpPublication(Archive *fout, PublicationInfo *pubinfo);
static void dumpPublicationTable(Archive *fout, PublicationRelInfo *pubrinfo);
static void dumpSubscription(Archive *fout, SubscriptionInfo *subinfo);
static void dumpDatabase(Archive *AH);
static void dumpDatabaseConfig(Archive *AH, PQExpBuffer outbuf,
							   const char *dbname, Oid dboid);
static void dumpEncoding(Archive *AH);
static void dumpStdStrings(Archive *AH);
static void dumpSearchPath(Archive *AH);
/* NOTE(review): truncated declaration — opening line (function name) missing */
							 PQExpBuffer upgrade_buffer,
							 Oid pg_type_oid,
							 bool force_array_type);
/* NOTE(review): truncated declaration — opening line (function name) missing */
							PQExpBuffer upgrade_buffer, Oid pg_rel_oid);
static void binary_upgrade_set_pg_class_oids(Archive *fout,
											 PQExpBuffer upgrade_buffer,
											 Oid pg_class_oid, bool is_index);
static void binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
											DumpableObject *dobj,
											const char *objtype,
											const char *objname,
											const char *objnamespace);
static const char *getAttrName(int attrnum, TableInfo *tblInfo);
static const char *fmtCopyColumnList(const TableInfo *ti, PQExpBuffer buffer);
static bool nonemptyReloptions(const char *reloptions);
static void appendReloptionsArrayAH(PQExpBuffer buffer, const char *reloptions,
									const char *prefix, Archive *fout);
static char *get_synchronized_snapshot(Archive *fout);
static void setupDumpWorker(Archive *AHX);
static TableInfo *getRootTableInfo(TableInfo *tbinfo);
294 
295 int
296 main(int argc, char **argv)
297 {
298  int c;
299  const char *filename = NULL;
300  const char *format = "p";
301  TableInfo *tblinfo;
302  int numTables;
303  DumpableObject **dobjs;
304  int numObjs;
305  DumpableObject *boundaryObjs;
306  int i;
307  int optindex;
308  char *endptr;
309  RestoreOptions *ropt;
310  Archive *fout; /* the script file */
311  bool g_verbose = false;
312  const char *dumpencoding = NULL;
313  const char *dumpsnapshot = NULL;
314  char *use_role = NULL;
315  long rowsPerInsert;
316  int numWorkers = 1;
317  int compressLevel = -1;
318  int plainText = 0;
319  ArchiveFormat archiveFormat = archUnknown;
320  ArchiveMode archiveMode;
321 
322  static DumpOptions dopt;
323 
324  static struct option long_options[] = {
325  {"data-only", no_argument, NULL, 'a'},
326  {"blobs", no_argument, NULL, 'b'},
327  {"no-blobs", no_argument, NULL, 'B'},
328  {"clean", no_argument, NULL, 'c'},
329  {"create", no_argument, NULL, 'C'},
330  {"dbname", required_argument, NULL, 'd'},
331  {"file", required_argument, NULL, 'f'},
332  {"format", required_argument, NULL, 'F'},
333  {"host", required_argument, NULL, 'h'},
334  {"jobs", 1, NULL, 'j'},
335  {"no-reconnect", no_argument, NULL, 'R'},
336  {"no-owner", no_argument, NULL, 'O'},
337  {"port", required_argument, NULL, 'p'},
338  {"schema", required_argument, NULL, 'n'},
339  {"exclude-schema", required_argument, NULL, 'N'},
340  {"schema-only", no_argument, NULL, 's'},
341  {"superuser", required_argument, NULL, 'S'},
342  {"table", required_argument, NULL, 't'},
343  {"exclude-table", required_argument, NULL, 'T'},
344  {"no-password", no_argument, NULL, 'w'},
345  {"password", no_argument, NULL, 'W'},
346  {"username", required_argument, NULL, 'U'},
347  {"verbose", no_argument, NULL, 'v'},
348  {"no-privileges", no_argument, NULL, 'x'},
349  {"no-acl", no_argument, NULL, 'x'},
350  {"compress", required_argument, NULL, 'Z'},
351  {"encoding", required_argument, NULL, 'E'},
352  {"help", no_argument, NULL, '?'},
353  {"version", no_argument, NULL, 'V'},
354 
355  /*
356  * the following options don't have an equivalent short option letter
357  */
358  {"attribute-inserts", no_argument, &dopt.column_inserts, 1},
359  {"binary-upgrade", no_argument, &dopt.binary_upgrade, 1},
360  {"column-inserts", no_argument, &dopt.column_inserts, 1},
361  {"disable-dollar-quoting", no_argument, &dopt.disable_dollar_quoting, 1},
362  {"disable-triggers", no_argument, &dopt.disable_triggers, 1},
363  {"enable-row-security", no_argument, &dopt.enable_row_security, 1},
364  {"exclude-table-data", required_argument, NULL, 4},
365  {"extra-float-digits", required_argument, NULL, 8},
366  {"if-exists", no_argument, &dopt.if_exists, 1},
367  {"inserts", no_argument, NULL, 9},
368  {"lock-wait-timeout", required_argument, NULL, 2},
369  {"no-tablespaces", no_argument, &dopt.outputNoTablespaces, 1},
370  {"quote-all-identifiers", no_argument, &quote_all_identifiers, 1},
371  {"load-via-partition-root", no_argument, &dopt.load_via_partition_root, 1},
372  {"role", required_argument, NULL, 3},
373  {"section", required_argument, NULL, 5},
374  {"serializable-deferrable", no_argument, &dopt.serializable_deferrable, 1},
375  {"snapshot", required_argument, NULL, 6},
376  {"strict-names", no_argument, &strict_names, 1},
377  {"use-set-session-authorization", no_argument, &dopt.use_setsessauth, 1},
378  {"no-comments", no_argument, &dopt.no_comments, 1},
379  {"no-publications", no_argument, &dopt.no_publications, 1},
380  {"no-security-labels", no_argument, &dopt.no_security_labels, 1},
381  {"no-synchronized-snapshots", no_argument, &dopt.no_synchronized_snapshots, 1},
382  {"no-unlogged-table-data", no_argument, &dopt.no_unlogged_table_data, 1},
383  {"no-subscriptions", no_argument, &dopt.no_subscriptions, 1},
384  {"no-sync", no_argument, NULL, 7},
385  {"on-conflict-do-nothing", no_argument, &dopt.do_nothing, 1},
386  {"rows-per-insert", required_argument, NULL, 10},
387  {"include-foreign-data", required_argument, NULL, 11},
388 
389  {NULL, 0, NULL, 0}
390  };
391 
392  pg_logging_init(argv[0]);
394  set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_dump"));
395 
396  /*
397  * Initialize what we need for parallel execution, especially for thread
398  * support on Windows.
399  */
401 
402  progname = get_progname(argv[0]);
403 
404  if (argc > 1)
405  {
406  if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
407  {
408  help(progname);
409  exit_nicely(0);
410  }
411  if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
412  {
413  puts("pg_dump (PostgreSQL) " PG_VERSION);
414  exit_nicely(0);
415  }
416  }
417 
418  InitDumpOptions(&dopt);
419 
420  while ((c = getopt_long(argc, argv, "abBcCd:E:f:F:h:j:n:N:Op:RsS:t:T:U:vwWxZ:",
421  long_options, &optindex)) != -1)
422  {
423  switch (c)
424  {
425  case 'a': /* Dump data only */
426  dopt.dataOnly = true;
427  break;
428 
429  case 'b': /* Dump blobs */
430  dopt.outputBlobs = true;
431  break;
432 
433  case 'B': /* Don't dump blobs */
434  dopt.dontOutputBlobs = true;
435  break;
436 
437  case 'c': /* clean (i.e., drop) schema prior to create */
438  dopt.outputClean = 1;
439  break;
440 
441  case 'C': /* Create DB */
442  dopt.outputCreateDB = 1;
443  break;
444 
445  case 'd': /* database name */
446  dopt.cparams.dbname = pg_strdup(optarg);
447  break;
448 
449  case 'E': /* Dump encoding */
450  dumpencoding = pg_strdup(optarg);
451  break;
452 
453  case 'f':
454  filename = pg_strdup(optarg);
455  break;
456 
457  case 'F':
458  format = pg_strdup(optarg);
459  break;
460 
461  case 'h': /* server host */
462  dopt.cparams.pghost = pg_strdup(optarg);
463  break;
464 
465  case 'j': /* number of dump jobs */
466  numWorkers = atoi(optarg);
467  break;
468 
469  case 'n': /* include schema(s) */
470  simple_string_list_append(&schema_include_patterns, optarg);
471  dopt.include_everything = false;
472  break;
473 
474  case 'N': /* exclude schema(s) */
475  simple_string_list_append(&schema_exclude_patterns, optarg);
476  break;
477 
478  case 'O': /* Don't reconnect to match owner */
479  dopt.outputNoOwner = 1;
480  break;
481 
482  case 'p': /* server port */
483  dopt.cparams.pgport = pg_strdup(optarg);
484  break;
485 
486  case 'R':
487  /* no-op, still accepted for backwards compatibility */
488  break;
489 
490  case 's': /* dump schema only */
491  dopt.schemaOnly = true;
492  break;
493 
494  case 'S': /* Username for superuser in plain text output */
496  break;
497 
498  case 't': /* include table(s) */
499  simple_string_list_append(&table_include_patterns, optarg);
500  dopt.include_everything = false;
501  break;
502 
503  case 'T': /* exclude table(s) */
504  simple_string_list_append(&table_exclude_patterns, optarg);
505  break;
506 
507  case 'U':
509  break;
510 
511  case 'v': /* verbose */
512  g_verbose = true;
514  break;
515 
516  case 'w':
518  break;
519 
520  case 'W':
522  break;
523 
524  case 'x': /* skip ACL dump */
525  dopt.aclsSkip = true;
526  break;
527 
528  case 'Z': /* Compression Level */
529  compressLevel = atoi(optarg);
530  if (compressLevel < 0 || compressLevel > 9)
531  {
532  pg_log_error("compression level must be in range 0..9");
533  exit_nicely(1);
534  }
535  break;
536 
537  case 0:
538  /* This covers the long options. */
539  break;
540 
541  case 2: /* lock-wait-timeout */
543  break;
544 
545  case 3: /* SET ROLE */
546  use_role = pg_strdup(optarg);
547  break;
548 
549  case 4: /* exclude table(s) data */
550  simple_string_list_append(&tabledata_exclude_patterns, optarg);
551  break;
552 
553  case 5: /* section */
555  break;
556 
557  case 6: /* snapshot */
558  dumpsnapshot = pg_strdup(optarg);
559  break;
560 
561  case 7: /* no-sync */
562  dosync = false;
563  break;
564 
565  case 8:
567  extra_float_digits = atoi(optarg);
568  if (extra_float_digits < -15 || extra_float_digits > 3)
569  {
570  pg_log_error("extra_float_digits must be in range -15..3");
571  exit_nicely(1);
572  }
573  break;
574 
575  case 9: /* inserts */
576 
577  /*
578  * dump_inserts also stores --rows-per-insert, careful not to
579  * overwrite that.
580  */
581  if (dopt.dump_inserts == 0)
583  break;
584 
585  case 10: /* rows per insert */
586  errno = 0;
587  rowsPerInsert = strtol(optarg, &endptr, 10);
588 
589  if (endptr == optarg || *endptr != '\0' ||
590  rowsPerInsert <= 0 || rowsPerInsert > INT_MAX ||
591  errno == ERANGE)
592  {
593  pg_log_error("rows-per-insert must be in range %d..%d",
594  1, INT_MAX);
595  exit_nicely(1);
596  }
597  dopt.dump_inserts = (int) rowsPerInsert;
598  break;
599 
600  case 11: /* include foreign data */
601  simple_string_list_append(&foreign_servers_include_patterns,
602  optarg);
603  break;
604 
605  default:
606  fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
607  exit_nicely(1);
608  }
609  }
610 
611  /*
612  * Non-option argument specifies database name as long as it wasn't
613  * already specified with -d / --dbname
614  */
615  if (optind < argc && dopt.cparams.dbname == NULL)
616  dopt.cparams.dbname = argv[optind++];
617 
618  /* Complain if any arguments remain */
619  if (optind < argc)
620  {
621  pg_log_error("too many command-line arguments (first is \"%s\")",
622  argv[optind]);
623  fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
624  progname);
625  exit_nicely(1);
626  }
627 
628  /* --column-inserts implies --inserts */
629  if (dopt.column_inserts && dopt.dump_inserts == 0)
631 
632  /*
633  * Binary upgrade mode implies dumping sequence data even in schema-only
634  * mode. This is not exposed as a separate option, but kept separate
635  * internally for clarity.
636  */
637  if (dopt.binary_upgrade)
638  dopt.sequence_data = 1;
639 
640  if (dopt.dataOnly && dopt.schemaOnly)
641  {
642  pg_log_error("options -s/--schema-only and -a/--data-only cannot be used together");
643  exit_nicely(1);
644  }
645 
646  if (dopt.schemaOnly && foreign_servers_include_patterns.head != NULL)
647  fatal("options -s/--schema-only and --include-foreign-data cannot be used together");
648 
649  if (numWorkers > 1 && foreign_servers_include_patterns.head != NULL)
650  fatal("option --include-foreign-data is not supported with parallel backup");
651 
652  if (dopt.dataOnly && dopt.outputClean)
653  {
654  pg_log_error("options -c/--clean and -a/--data-only cannot be used together");
655  exit_nicely(1);
656  }
657 
658  if (dopt.if_exists && !dopt.outputClean)
659  fatal("option --if-exists requires option -c/--clean");
660 
661  /*
662  * --inserts are already implied above if --column-inserts or
663  * --rows-per-insert were specified.
664  */
665  if (dopt.do_nothing && dopt.dump_inserts == 0)
666  fatal("option --on-conflict-do-nothing requires option --inserts, --rows-per-insert, or --column-inserts");
667 
668  /* Identify archive format to emit */
669  archiveFormat = parseArchiveFormat(format, &archiveMode);
670 
671  /* archiveFormat specific setup */
672  if (archiveFormat == archNull)
673  plainText = 1;
674 
675  /* Custom and directory formats are compressed by default, others not */
676  if (compressLevel == -1)
677  {
678 #ifdef HAVE_LIBZ
679  if (archiveFormat == archCustom || archiveFormat == archDirectory)
680  compressLevel = Z_DEFAULT_COMPRESSION;
681  else
682 #endif
683  compressLevel = 0;
684  }
685 
686 #ifndef HAVE_LIBZ
687  if (compressLevel != 0)
688  pg_log_warning("requested compression not available in this installation -- archive will be uncompressed");
689  compressLevel = 0;
690 #endif
691 
692  /*
693  * If emitting an archive format, we always want to emit a DATABASE item,
694  * in case --create is specified at pg_restore time.
695  */
696  if (!plainText)
697  dopt.outputCreateDB = 1;
698 
699  /*
700  * On Windows we can only have at most MAXIMUM_WAIT_OBJECTS (= 64 usually)
701  * parallel jobs because that's the maximum limit for the
702  * WaitForMultipleObjects() call.
703  */
704  if (numWorkers <= 0
705 #ifdef WIN32
706  || numWorkers > MAXIMUM_WAIT_OBJECTS
707 #endif
708  )
709  fatal("invalid number of parallel jobs");
710 
711  /* Parallel backup only in the directory archive format so far */
712  if (archiveFormat != archDirectory && numWorkers > 1)
713  fatal("parallel backup only supported by the directory format");
714 
715  /* Open the output file */
716  fout = CreateArchive(filename, archiveFormat, compressLevel, dosync,
717  archiveMode, setupDumpWorker);
718 
719  /* Make dump options accessible right away */
720  SetArchiveOptions(fout, &dopt, NULL);
721 
722  /* Register the cleanup hook */
723  on_exit_close_archive(fout);
724 
725  /* Let the archiver know how noisy to be */
726  fout->verbose = g_verbose;
727 
728 
729  /*
730  * We allow the server to be back to 8.0, and up to any minor release of
731  * our own major version. (See also version check in pg_dumpall.c.)
732  */
733  fout->minRemoteVersion = 80000;
734  fout->maxRemoteVersion = (PG_VERSION_NUM / 100) * 100 + 99;
735 
736  fout->numWorkers = numWorkers;
737 
738  /*
739  * Open the database using the Archiver, so it knows about it. Errors mean
740  * death.
741  */
742  ConnectDatabase(fout, &dopt.cparams, false);
743  setup_connection(fout, dumpencoding, dumpsnapshot, use_role);
744 
745  /*
746  * Disable security label support if server version < v9.1.x (prevents
747  * access to nonexistent pg_seclabel catalog)
748  */
749  if (fout->remoteVersion < 90100)
750  dopt.no_security_labels = 1;
751 
752  /*
753  * On hot standbys, never try to dump unlogged table data, since it will
754  * just throw an error.
755  */
756  if (fout->isStandby)
757  dopt.no_unlogged_table_data = true;
758 
759  /* Select the appropriate subquery to convert user IDs to names */
760  if (fout->remoteVersion >= 80100)
761  username_subquery = "SELECT rolname FROM pg_catalog.pg_roles WHERE oid =";
762  else
763  username_subquery = "SELECT usename FROM pg_catalog.pg_user WHERE usesysid =";
764 
765  /* check the version for the synchronized snapshots feature */
766  if (numWorkers > 1 && fout->remoteVersion < 90200
767  && !dopt.no_synchronized_snapshots)
768  fatal("Synchronized snapshots are not supported by this server version.\n"
769  "Run with --no-synchronized-snapshots instead if you do not need\n"
770  "synchronized snapshots.");
771 
772  /* check the version when a snapshot is explicitly specified by user */
773  if (dumpsnapshot && fout->remoteVersion < 90200)
774  fatal("Exported snapshots are not supported by this server version.");
775 
776  /*
777  * Find the last built-in OID, if needed (prior to 8.1)
778  *
779  * With 8.1 and above, we can just use FirstNormalObjectId - 1.
780  */
781  if (fout->remoteVersion < 80100)
783  else
785 
786  pg_log_info("last built-in OID is %u", g_last_builtin_oid);
787 
788  /* Expand schema selection patterns into OID lists */
789  if (schema_include_patterns.head != NULL)
790  {
791  expand_schema_name_patterns(fout, &schema_include_patterns,
792  &schema_include_oids,
793  strict_names);
794  if (schema_include_oids.head == NULL)
795  fatal("no matching schemas were found");
796  }
797  expand_schema_name_patterns(fout, &schema_exclude_patterns,
798  &schema_exclude_oids,
799  false);
800  /* non-matching exclusion patterns aren't an error */
801 
802  /* Expand table selection patterns into OID lists */
803  if (table_include_patterns.head != NULL)
804  {
805  expand_table_name_patterns(fout, &table_include_patterns,
806  &table_include_oids,
807  strict_names);
808  if (table_include_oids.head == NULL)
809  fatal("no matching tables were found");
810  }
811  expand_table_name_patterns(fout, &table_exclude_patterns,
812  &table_exclude_oids,
813  false);
814 
815  expand_table_name_patterns(fout, &tabledata_exclude_patterns,
816  &tabledata_exclude_oids,
817  false);
818 
819  expand_foreign_server_name_patterns(fout, &foreign_servers_include_patterns,
820  &foreign_servers_include_oids);
821 
822  /* non-matching exclusion patterns aren't an error */
823 
824  /*
825  * Dumping blobs is the default for dumps where an inclusion switch is not
826  * used (an "include everything" dump). -B can be used to exclude blobs
827  * from those dumps. -b can be used to include blobs even when an
828  * inclusion switch is used.
829  *
830  * -s means "schema only" and blobs are data, not schema, so we never
831  * include blobs when -s is used.
832  */
833  if (dopt.include_everything && !dopt.schemaOnly && !dopt.dontOutputBlobs)
834  dopt.outputBlobs = true;
835 
836  /*
837  * Now scan the database and create DumpableObject structs for all the
838  * objects we intend to dump.
839  */
840  tblinfo = getSchemaData(fout, &numTables);
841 
842  if (fout->remoteVersion < 80400)
843  guessConstraintInheritance(tblinfo, numTables);
844 
845  if (!dopt.schemaOnly)
846  {
847  getTableData(&dopt, tblinfo, numTables, 0);
849  if (dopt.dataOnly)
851  }
852 
853  if (dopt.schemaOnly && dopt.sequence_data)
854  getTableData(&dopt, tblinfo, numTables, RELKIND_SEQUENCE);
855 
856  /*
857  * In binary-upgrade mode, we do not have to worry about the actual blob
858  * data or the associated metadata that resides in the pg_largeobject and
859  * pg_largeobject_metadata tables, respectively.
860  *
861  * However, we do need to collect blob information as there may be
862  * comments or other information on blobs that we do need to dump out.
863  */
864  if (dopt.outputBlobs || dopt.binary_upgrade)
865  getBlobs(fout);
866 
867  /*
868  * Collect dependency data to assist in ordering the objects.
869  */
870  getDependencies(fout);
871 
872  /* Lastly, create dummy objects to represent the section boundaries */
873  boundaryObjs = createBoundaryObjects();
874 
875  /* Get pointers to all the known DumpableObjects */
876  getDumpableObjects(&dobjs, &numObjs);
877 
878  /*
879  * Add dummy dependencies to enforce the dump section ordering.
880  */
881  addBoundaryDependencies(dobjs, numObjs, boundaryObjs);
882 
883  /*
884  * Sort the objects into a safe dump order (no forward references).
885  *
886  * We rely on dependency information to help us determine a safe order, so
887  * the initial sort is mostly for cosmetic purposes: we sort by name to
888  * ensure that logically identical schemas will dump identically.
889  */
890  sortDumpableObjectsByTypeName(dobjs, numObjs);
891 
892  sortDumpableObjects(dobjs, numObjs,
893  boundaryObjs[0].dumpId, boundaryObjs[1].dumpId);
894 
895  /*
896  * Create archive TOC entries for all the objects to be dumped, in a safe
897  * order.
898  */
899 
900  /* First the special ENCODING, STDSTRINGS, and SEARCHPATH entries. */
901  dumpEncoding(fout);
902  dumpStdStrings(fout);
903  dumpSearchPath(fout);
904 
905  /* The database items are always next, unless we don't want them at all */
906  if (dopt.outputCreateDB)
907  dumpDatabase(fout);
908 
909  /* Now the rearrangeable objects. */
910  for (i = 0; i < numObjs; i++)
911  dumpDumpableObject(fout, dobjs[i]);
912 
913  /*
914  * Set up options info to ensure we dump what we want.
915  */
916  ropt = NewRestoreOptions();
917  ropt->filename = filename;
918 
919  /* if you change this list, see dumpOptionsFromRestoreOptions */
920  ropt->cparams.dbname = dopt.cparams.dbname ? pg_strdup(dopt.cparams.dbname) : NULL;
921  ropt->cparams.pgport = dopt.cparams.pgport ? pg_strdup(dopt.cparams.pgport) : NULL;
922  ropt->cparams.pghost = dopt.cparams.pghost ? pg_strdup(dopt.cparams.pghost) : NULL;
923  ropt->cparams.username = dopt.cparams.username ? pg_strdup(dopt.cparams.username) : NULL;
925  ropt->dropSchema = dopt.outputClean;
926  ropt->dataOnly = dopt.dataOnly;
927  ropt->schemaOnly = dopt.schemaOnly;
928  ropt->if_exists = dopt.if_exists;
929  ropt->column_inserts = dopt.column_inserts;
930  ropt->dumpSections = dopt.dumpSections;
931  ropt->aclsSkip = dopt.aclsSkip;
932  ropt->superuser = dopt.outputSuperuser;
933  ropt->createDB = dopt.outputCreateDB;
934  ropt->noOwner = dopt.outputNoOwner;
935  ropt->noTablespace = dopt.outputNoTablespaces;
936  ropt->disable_triggers = dopt.disable_triggers;
937  ropt->use_setsessauth = dopt.use_setsessauth;
939  ropt->dump_inserts = dopt.dump_inserts;
940  ropt->no_comments = dopt.no_comments;
941  ropt->no_publications = dopt.no_publications;
943  ropt->no_subscriptions = dopt.no_subscriptions;
944  ropt->lockWaitTimeout = dopt.lockWaitTimeout;
947  ropt->sequence_data = dopt.sequence_data;
948  ropt->binary_upgrade = dopt.binary_upgrade;
949 
950  if (compressLevel == -1)
951  ropt->compression = 0;
952  else
953  ropt->compression = compressLevel;
954 
955  ropt->suppressDumpWarnings = true; /* We've already shown them */
956 
957  SetArchiveOptions(fout, &dopt, ropt);
958 
959  /* Mark which entries should be output */
961 
962  /*
963  * The archive's TOC entries are now marked as to which ones will actually
964  * be output, so we can set up their dependency lists properly. This isn't
965  * necessary for plain-text output, though.
966  */
967  if (!plainText)
969 
970  /*
971  * And finally we can do the actual output.
972  *
973  * Note: for non-plain-text output formats, the output file is written
974  * inside CloseArchive(). This is, um, bizarre; but not worth changing
975  * right now.
976  */
977  if (plainText)
978  RestoreArchive(fout);
979 
980  CloseArchive(fout);
981 
982  exit_nicely(0);
983 }
984 
985 
/*
 * help
 *		Print a usage/options summary for pg_dump to stdout.
 *
 * All message strings go through _() so they can be translated; the literal
 * strings must stay as-is for xgettext extraction to work.
 */
static void
help(const char *progname)
{
	printf(_("%s dumps a database as a text file or to other formats.\n\n"), progname);
	printf(_("Usage:\n"));
	printf(_(" %s [OPTION]... [DBNAME]\n"), progname);

	printf(_("\nGeneral options:\n"));
	printf(_(" -f, --file=FILENAME output file or directory name\n"));
	printf(_(" -F, --format=c|d|t|p output file format (custom, directory, tar,\n"
			 " plain text (default))\n"));
	printf(_(" -j, --jobs=NUM use this many parallel jobs to dump\n"));
	printf(_(" -v, --verbose verbose mode\n"));
	printf(_(" -V, --version output version information, then exit\n"));
	printf(_(" -Z, --compress=0-9 compression level for compressed formats\n"));
	printf(_(" --lock-wait-timeout=TIMEOUT fail after waiting TIMEOUT for a table lock\n"));
	printf(_(" --no-sync do not wait for changes to be written safely to disk\n"));
	printf(_(" -?, --help show this help, then exit\n"));

	printf(_("\nOptions controlling the output content:\n"));
	printf(_(" -a, --data-only dump only the data, not the schema\n"));
	printf(_(" -b, --blobs include large objects in dump\n"));
	printf(_(" -B, --no-blobs exclude large objects in dump\n"));
	printf(_(" -c, --clean clean (drop) database objects before recreating\n"));
	printf(_(" -C, --create include commands to create database in dump\n"));
	printf(_(" -E, --encoding=ENCODING dump the data in encoding ENCODING\n"));
	printf(_(" -n, --schema=PATTERN dump the specified schema(s) only\n"));
	printf(_(" -N, --exclude-schema=PATTERN do NOT dump the specified schema(s)\n"));
	printf(_(" -O, --no-owner skip restoration of object ownership in\n"
			 " plain-text format\n"));
	printf(_(" -s, --schema-only dump only the schema, no data\n"));
	printf(_(" -S, --superuser=NAME superuser user name to use in plain-text format\n"));
	printf(_(" -t, --table=PATTERN dump the specified table(s) only\n"));
	printf(_(" -T, --exclude-table=PATTERN do NOT dump the specified table(s)\n"));
	printf(_(" -x, --no-privileges do not dump privileges (grant/revoke)\n"));
	printf(_(" --binary-upgrade for use by upgrade utilities only\n"));
	printf(_(" --column-inserts dump data as INSERT commands with column names\n"));
	printf(_(" --disable-dollar-quoting disable dollar quoting, use SQL standard quoting\n"));
	printf(_(" --disable-triggers disable triggers during data-only restore\n"));
	printf(_(" --enable-row-security enable row security (dump only content user has\n"
			 " access to)\n"));
	printf(_(" --exclude-table-data=PATTERN do NOT dump data for the specified table(s)\n"));
	printf(_(" --extra-float-digits=NUM override default setting for extra_float_digits\n"));
	printf(_(" --if-exists use IF EXISTS when dropping objects\n"));
	printf(_(" --include-foreign-data=PATTERN\n"
			 " include data of foreign tables on foreign\n"
			 " servers matching PATTERN\n"));
	printf(_(" --inserts dump data as INSERT commands, rather than COPY\n"));
	printf(_(" --load-via-partition-root load partitions via the root table\n"));
	printf(_(" --no-comments do not dump comments\n"));
	printf(_(" --no-publications do not dump publications\n"));
	printf(_(" --no-security-labels do not dump security label assignments\n"));
	printf(_(" --no-subscriptions do not dump subscriptions\n"));
	printf(_(" --no-synchronized-snapshots do not use synchronized snapshots in parallel jobs\n"));
	printf(_(" --no-tablespaces do not dump tablespace assignments\n"));
	printf(_(" --no-unlogged-table-data do not dump unlogged table data\n"));
	printf(_(" --on-conflict-do-nothing add ON CONFLICT DO NOTHING to INSERT commands\n"));
	printf(_(" --quote-all-identifiers quote all identifiers, even if not key words\n"));
	printf(_(" --rows-per-insert=NROWS number of rows per INSERT; implies --inserts\n"));
	printf(_(" --section=SECTION dump named section (pre-data, data, or post-data)\n"));
	printf(_(" --serializable-deferrable wait until the dump can run without anomalies\n"));
	printf(_(" --snapshot=SNAPSHOT use given snapshot for the dump\n"));
	printf(_(" --strict-names require table and/or schema include patterns to\n"
			 " match at least one entity each\n"));
	printf(_(" --use-set-session-authorization\n"
			 " use SET SESSION AUTHORIZATION commands instead of\n"
			 " ALTER OWNER commands to set ownership\n"));

	printf(_("\nConnection options:\n"));
	printf(_(" -d, --dbname=DBNAME database to dump\n"));
	printf(_(" -h, --host=HOSTNAME database server host or socket directory\n"));
	printf(_(" -p, --port=PORT database server port number\n"));
	printf(_(" -U, --username=NAME connect as specified database user\n"));
	printf(_(" -w, --no-password never prompt for password\n"));
	printf(_(" -W, --password force password prompt (should happen automatically)\n"));
	printf(_(" --role=ROLENAME do SET ROLE before dump\n"));

	printf(_("\nIf no database name is supplied, then the PGDATABASE environment\n"
			 "variable value is used.\n\n"));
	printf(_("Report bugs to <%s>.\n"), PACKAGE_BUGREPORT);
	printf(_("%s home page: <%s>\n"), PACKAGE_NAME, PACKAGE_URL);
}
1068 
1069 static void
1070 setup_connection(Archive *AH, const char *dumpencoding,
1071  const char *dumpsnapshot, char *use_role)
1072 {
1073  DumpOptions *dopt = AH->dopt;
1074  PGconn *conn = GetConnection(AH);
1075  const char *std_strings;
1076 
1078 
1079  /*
1080  * Set the client encoding if requested.
1081  */
1082  if (dumpencoding)
1083  {
1084  if (PQsetClientEncoding(conn, dumpencoding) < 0)
1085  fatal("invalid client encoding \"%s\" specified",
1086  dumpencoding);
1087  }
1088 
1089  /*
1090  * Get the active encoding and the standard_conforming_strings setting, so
1091  * we know how to escape strings.
1092  */
1093  AH->encoding = PQclientEncoding(conn);
1094 
1095  std_strings = PQparameterStatus(conn, "standard_conforming_strings");
1096  AH->std_strings = (std_strings && strcmp(std_strings, "on") == 0);
1097 
1098  /*
1099  * Set the role if requested. In a parallel dump worker, we'll be passed
1100  * use_role == NULL, but AH->use_role is already set (if user specified it
1101  * originally) and we should use that.
1102  */
1103  if (!use_role && AH->use_role)
1104  use_role = AH->use_role;
1105 
1106  /* Set the role if requested */
1107  if (use_role && AH->remoteVersion >= 80100)
1108  {
1109  PQExpBuffer query = createPQExpBuffer();
1110 
1111  appendPQExpBuffer(query, "SET ROLE %s", fmtId(use_role));
1112  ExecuteSqlStatement(AH, query->data);
1113  destroyPQExpBuffer(query);
1114 
1115  /* save it for possible later use by parallel workers */
1116  if (!AH->use_role)
1117  AH->use_role = pg_strdup(use_role);
1118  }
1119 
1120  /* Set the datestyle to ISO to ensure the dump's portability */
1121  ExecuteSqlStatement(AH, "SET DATESTYLE = ISO");
1122 
1123  /* Likewise, avoid using sql_standard intervalstyle */
1124  if (AH->remoteVersion >= 80400)
1125  ExecuteSqlStatement(AH, "SET INTERVALSTYLE = POSTGRES");
1126 
1127  /*
1128  * Use an explicitly specified extra_float_digits if it has been provided.
1129  * Otherwise, set extra_float_digits so that we can dump float data
1130  * exactly (given correctly implemented float I/O code, anyway).
1131  */
1133  {
1135 
1136  appendPQExpBuffer(q, "SET extra_float_digits TO %d",
1138  ExecuteSqlStatement(AH, q->data);
1139  destroyPQExpBuffer(q);
1140  }
1141  else if (AH->remoteVersion >= 90000)
1142  ExecuteSqlStatement(AH, "SET extra_float_digits TO 3");
1143  else
1144  ExecuteSqlStatement(AH, "SET extra_float_digits TO 2");
1145 
1146  /*
1147  * If synchronized scanning is supported, disable it, to prevent
1148  * unpredictable changes in row ordering across a dump and reload.
1149  */
1150  if (AH->remoteVersion >= 80300)
1151  ExecuteSqlStatement(AH, "SET synchronize_seqscans TO off");
1152 
1153  /*
1154  * Disable timeouts if supported.
1155  */
1156  ExecuteSqlStatement(AH, "SET statement_timeout = 0");
1157  if (AH->remoteVersion >= 90300)
1158  ExecuteSqlStatement(AH, "SET lock_timeout = 0");
1159  if (AH->remoteVersion >= 90600)
1160  ExecuteSqlStatement(AH, "SET idle_in_transaction_session_timeout = 0");
1161 
1162  /*
1163  * Quote all identifiers, if requested.
1164  */
1165  if (quote_all_identifiers && AH->remoteVersion >= 90100)
1166  ExecuteSqlStatement(AH, "SET quote_all_identifiers = true");
1167 
1168  /*
1169  * Adjust row-security mode, if supported.
1170  */
1171  if (AH->remoteVersion >= 90500)
1172  {
1173  if (dopt->enable_row_security)
1174  ExecuteSqlStatement(AH, "SET row_security = on");
1175  else
1176  ExecuteSqlStatement(AH, "SET row_security = off");
1177  }
1178 
1179  /*
1180  * Start transaction-snapshot mode transaction to dump consistent data.
1181  */
1182  ExecuteSqlStatement(AH, "BEGIN");
1183  if (AH->remoteVersion >= 90100)
1184  {
1185  /*
1186  * To support the combination of serializable_deferrable with the jobs
1187  * option we use REPEATABLE READ for the worker connections that are
1188  * passed a snapshot. As long as the snapshot is acquired in a
1189  * SERIALIZABLE, READ ONLY, DEFERRABLE transaction, its use within a
1190  * REPEATABLE READ transaction provides the appropriate integrity
1191  * guarantees. This is a kluge, but safe for back-patching.
1192  */
1193  if (dopt->serializable_deferrable && AH->sync_snapshot_id == NULL)
1195  "SET TRANSACTION ISOLATION LEVEL "
1196  "SERIALIZABLE, READ ONLY, DEFERRABLE");
1197  else
1199  "SET TRANSACTION ISOLATION LEVEL "
1200  "REPEATABLE READ, READ ONLY");
1201  }
1202  else
1203  {
1205  "SET TRANSACTION ISOLATION LEVEL "
1206  "SERIALIZABLE, READ ONLY");
1207  }
1208 
1209  /*
1210  * If user specified a snapshot to use, select that. In a parallel dump
1211  * worker, we'll be passed dumpsnapshot == NULL, but AH->sync_snapshot_id
1212  * is already set (if the server can handle it) and we should use that.
1213  */
1214  if (dumpsnapshot)
1215  AH->sync_snapshot_id = pg_strdup(dumpsnapshot);
1216 
1217  if (AH->sync_snapshot_id)
1218  {
1219  PQExpBuffer query = createPQExpBuffer();
1220 
1221  appendPQExpBufferStr(query, "SET TRANSACTION SNAPSHOT ");
1222  appendStringLiteralConn(query, AH->sync_snapshot_id, conn);
1223  ExecuteSqlStatement(AH, query->data);
1224  destroyPQExpBuffer(query);
1225  }
1226  else if (AH->numWorkers > 1 &&
1227  AH->remoteVersion >= 90200 &&
1229  {
1230  if (AH->isStandby && AH->remoteVersion < 100000)
1231  fatal("Synchronized snapshots on standby servers are not supported by this server version.\n"
1232  "Run with --no-synchronized-snapshots instead if you do not need\n"
1233  "synchronized snapshots.");
1234 
1235 
1237  }
1238 }
1239 
1240 /* Set up connection for a parallel worker process */
1241 static void
1243 {
1244  /*
1245  * We want to re-select all the same values the leader connection is
1246  * using. We'll have inherited directly-usable values in
1247  * AH->sync_snapshot_id and AH->use_role, but we need to translate the
1248  * inherited encoding value back to a string to pass to setup_connection.
1249  */
1250  setup_connection(AH,
1252  NULL,
1253  NULL);
1254 }
1255 
1256 static char *
1258 {
1259  char *query = "SELECT pg_catalog.pg_export_snapshot()";
1260  char *result;
1261  PGresult *res;
1262 
1263  res = ExecuteSqlQueryForSingleRow(fout, query);
1264  result = pg_strdup(PQgetvalue(res, 0, 0));
1265  PQclear(res);
1266 
1267  return result;
1268 }
1269 
1270 static ArchiveFormat
1272 {
1273  ArchiveFormat archiveFormat;
1274 
1275  *mode = archModeWrite;
1276 
1277  if (pg_strcasecmp(format, "a") == 0 || pg_strcasecmp(format, "append") == 0)
1278  {
1279  /* This is used by pg_dumpall, and is not documented */
1280  archiveFormat = archNull;
1281  *mode = archModeAppend;
1282  }
1283  else if (pg_strcasecmp(format, "c") == 0)
1284  archiveFormat = archCustom;
1285  else if (pg_strcasecmp(format, "custom") == 0)
1286  archiveFormat = archCustom;
1287  else if (pg_strcasecmp(format, "d") == 0)
1288  archiveFormat = archDirectory;
1289  else if (pg_strcasecmp(format, "directory") == 0)
1290  archiveFormat = archDirectory;
1291  else if (pg_strcasecmp(format, "p") == 0)
1292  archiveFormat = archNull;
1293  else if (pg_strcasecmp(format, "plain") == 0)
1294  archiveFormat = archNull;
1295  else if (pg_strcasecmp(format, "t") == 0)
1296  archiveFormat = archTar;
1297  else if (pg_strcasecmp(format, "tar") == 0)
1298  archiveFormat = archTar;
1299  else
1300  fatal("invalid output format \"%s\" specified", format);
1301  return archiveFormat;
1302 }
1303 
1304 /*
1305  * Find the OIDs of all schemas matching the given list of patterns,
1306  * and append them to the given OID list.
1307  */
1308 static void
1310  SimpleStringList *patterns,
1311  SimpleOidList *oids,
1312  bool strict_names)
1313 {
1314  PQExpBuffer query;
1315  PGresult *res;
1316  SimpleStringListCell *cell;
1317  int i;
1318 
1319  if (patterns->head == NULL)
1320  return; /* nothing to do */
1321 
1322  query = createPQExpBuffer();
1323 
1324  /*
1325  * The loop below runs multiple SELECTs might sometimes result in
1326  * duplicate entries in the OID list, but we don't care.
1327  */
1328 
1329  for (cell = patterns->head; cell; cell = cell->next)
1330  {
1331  appendPQExpBufferStr(query,
1332  "SELECT oid FROM pg_catalog.pg_namespace n\n");
1333  processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1334  false, NULL, "n.nspname", NULL, NULL);
1335 
1336  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1337  if (strict_names && PQntuples(res) == 0)
1338  fatal("no matching schemas were found for pattern \"%s\"", cell->val);
1339 
1340  for (i = 0; i < PQntuples(res); i++)
1341  {
1342  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1343  }
1344 
1345  PQclear(res);
1346  resetPQExpBuffer(query);
1347  }
1348 
1349  destroyPQExpBuffer(query);
1350 }
1351 
1352 /*
1353  * Find the OIDs of all foreign servers matching the given list of patterns,
1354  * and append them to the given OID list.
1355  */
1356 static void
1358  SimpleStringList *patterns,
1359  SimpleOidList *oids)
1360 {
1361  PQExpBuffer query;
1362  PGresult *res;
1363  SimpleStringListCell *cell;
1364  int i;
1365 
1366  if (patterns->head == NULL)
1367  return; /* nothing to do */
1368 
1369  query = createPQExpBuffer();
1370 
1371  /*
1372  * The loop below runs multiple SELECTs might sometimes result in
1373  * duplicate entries in the OID list, but we don't care.
1374  */
1375 
1376  for (cell = patterns->head; cell; cell = cell->next)
1377  {
1378  appendPQExpBufferStr(query,
1379  "SELECT oid FROM pg_catalog.pg_foreign_server s\n");
1380  processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1381  false, NULL, "s.srvname", NULL, NULL);
1382 
1383  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1384  if (PQntuples(res) == 0)
1385  fatal("no matching foreign servers were found for pattern \"%s\"", cell->val);
1386 
1387  for (i = 0; i < PQntuples(res); i++)
1388  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1389 
1390  PQclear(res);
1391  resetPQExpBuffer(query);
1392  }
1393 
1394  destroyPQExpBuffer(query);
1395 }
1396 
1397 /*
1398  * Find the OIDs of all tables matching the given list of patterns,
1399  * and append them to the given OID list. See also expand_dbname_patterns()
1400  * in pg_dumpall.c
1401  */
1402 static void
1404  SimpleStringList *patterns, SimpleOidList *oids,
1405  bool strict_names)
1406 {
1407  PQExpBuffer query;
1408  PGresult *res;
1409  SimpleStringListCell *cell;
1410  int i;
1411 
1412  if (patterns->head == NULL)
1413  return; /* nothing to do */
1414 
1415  query = createPQExpBuffer();
1416 
1417  /*
1418  * this might sometimes result in duplicate entries in the OID list, but
1419  * we don't care.
1420  */
1421 
1422  for (cell = patterns->head; cell; cell = cell->next)
1423  {
1424  /*
1425  * Query must remain ABSOLUTELY devoid of unqualified names. This
1426  * would be unnecessary given a pg_table_is_visible() variant taking a
1427  * search_path argument.
1428  */
1429  appendPQExpBuffer(query,
1430  "SELECT c.oid"
1431  "\nFROM pg_catalog.pg_class c"
1432  "\n LEFT JOIN pg_catalog.pg_namespace n"
1433  "\n ON n.oid OPERATOR(pg_catalog.=) c.relnamespace"
1434  "\nWHERE c.relkind OPERATOR(pg_catalog.=) ANY"
1435  "\n (array['%c', '%c', '%c', '%c', '%c', '%c'])\n",
1436  RELKIND_RELATION, RELKIND_SEQUENCE, RELKIND_VIEW,
1437  RELKIND_MATVIEW, RELKIND_FOREIGN_TABLE,
1438  RELKIND_PARTITIONED_TABLE);
1439  processSQLNamePattern(GetConnection(fout), query, cell->val, true,
1440  false, "n.nspname", "c.relname", NULL,
1441  "pg_catalog.pg_table_is_visible(c.oid)");
1442 
1443  ExecuteSqlStatement(fout, "RESET search_path");
1444  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1447  if (strict_names && PQntuples(res) == 0)
1448  fatal("no matching tables were found for pattern \"%s\"", cell->val);
1449 
1450  for (i = 0; i < PQntuples(res); i++)
1451  {
1452  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1453  }
1454 
1455  PQclear(res);
1456  resetPQExpBuffer(query);
1457  }
1458 
1459  destroyPQExpBuffer(query);
1460 }
1461 
1462 /*
1463  * checkExtensionMembership
1464  * Determine whether object is an extension member, and if so,
1465  * record an appropriate dependency and set the object's dump flag.
1466  *
1467  * It's important to call this for each object that could be an extension
1468  * member. Generally, we integrate this with determining the object's
1469  * to-be-dumped-ness, since extension membership overrides other rules for that.
1470  *
1471  * Returns true if object is an extension member, else false.
1472  */
1473 static bool
1475 {
1476  ExtensionInfo *ext = findOwningExtension(dobj->catId);
1477 
1478  if (ext == NULL)
1479  return false;
1480 
1481  dobj->ext_member = true;
1482 
1483  /* Record dependency so that getDependencies needn't deal with that */
1484  addObjectDependency(dobj, ext->dobj.dumpId);
1485 
1486  /*
1487  * In 9.6 and above, mark the member object to have any non-initial ACL,
1488  * policies, and security labels dumped.
1489  *
1490  * Note that any initial ACLs (see pg_init_privs) will be removed when we
1491  * extract the information about the object. We don't provide support for
1492  * initial policies and security labels and it seems unlikely for those to
1493  * ever exist, but we may have to revisit this later.
1494  *
1495  * Prior to 9.6, we do not include any extension member components.
1496  *
1497  * In binary upgrades, we still dump all components of the members
1498  * individually, since the idea is to exactly reproduce the database
1499  * contents rather than replace the extension contents with something
1500  * different.
1501  */
1502  if (fout->dopt->binary_upgrade)
1503  dobj->dump = ext->dobj.dump;
1504  else
1505  {
1506  if (fout->remoteVersion < 90600)
1507  dobj->dump = DUMP_COMPONENT_NONE;
1508  else
1509  dobj->dump = ext->dobj.dump_contains & (DUMP_COMPONENT_ACL |
1512  }
1513 
1514  return true;
1515 }
1516 
1517 /*
1518  * selectDumpableNamespace: policy-setting subroutine
1519  * Mark a namespace as to be dumped or not
1520  */
1521 static void
1523 {
1524  /*
1525  * If specific tables are being dumped, do not dump any complete
1526  * namespaces. If specific namespaces are being dumped, dump just those
1527  * namespaces. Otherwise, dump all non-system namespaces.
1528  */
1529  if (table_include_oids.head != NULL)
1530  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1531  else if (schema_include_oids.head != NULL)
1532  nsinfo->dobj.dump_contains = nsinfo->dobj.dump =
1533  simple_oid_list_member(&schema_include_oids,
1534  nsinfo->dobj.catId.oid) ?
1536  else if (fout->remoteVersion >= 90600 &&
1537  strcmp(nsinfo->dobj.name, "pg_catalog") == 0)
1538  {
1539  /*
1540  * In 9.6 and above, we dump out any ACLs defined in pg_catalog, if
1541  * they are interesting (and not the original ACLs which were set at
1542  * initdb time, see pg_init_privs).
1543  */
1544  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
1545  }
1546  else if (strncmp(nsinfo->dobj.name, "pg_", 3) == 0 ||
1547  strcmp(nsinfo->dobj.name, "information_schema") == 0)
1548  {
1549  /* Other system schemas don't get dumped */
1550  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1551  }
1552  else if (strcmp(nsinfo->dobj.name, "public") == 0)
1553  {
1554  /*
1555  * The public schema is a strange beast that sits in a sort of
1556  * no-mans-land between being a system object and a user object. We
1557  * don't want to dump creation or comment commands for it, because
1558  * that complicates matters for non-superuser use of pg_dump. But we
1559  * should dump any ACL changes that have occurred for it, and of
1560  * course we should dump contained objects.
1561  */
1562  nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
1564  }
1565  else
1566  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ALL;
1567 
1568  /*
1569  * In any case, a namespace can be excluded by an exclusion switch
1570  */
1571  if (nsinfo->dobj.dump_contains &&
1572  simple_oid_list_member(&schema_exclude_oids,
1573  nsinfo->dobj.catId.oid))
1574  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1575 
1576  /*
1577  * If the schema belongs to an extension, allow extension membership to
1578  * override the dump decision for the schema itself. However, this does
1579  * not change dump_contains, so this won't change what we do with objects
1580  * within the schema. (If they belong to the extension, they'll get
1581  * suppressed by it, otherwise not.)
1582  */
1583  (void) checkExtensionMembership(&nsinfo->dobj, fout);
1584 }
1585 
1586 /*
1587  * selectDumpableTable: policy-setting subroutine
1588  * Mark a table as to be dumped or not
1589  */
1590 static void
1592 {
1593  if (checkExtensionMembership(&tbinfo->dobj, fout))
1594  return; /* extension membership overrides all else */
1595 
1596  /*
1597  * If specific tables are being dumped, dump just those tables; else, dump
1598  * according to the parent namespace's dump flag.
1599  */
1600  if (table_include_oids.head != NULL)
1601  tbinfo->dobj.dump = simple_oid_list_member(&table_include_oids,
1602  tbinfo->dobj.catId.oid) ?
1604  else
1605  tbinfo->dobj.dump = tbinfo->dobj.namespace->dobj.dump_contains;
1606 
1607  /*
1608  * In any case, a table can be excluded by an exclusion switch
1609  */
1610  if (tbinfo->dobj.dump &&
1611  simple_oid_list_member(&table_exclude_oids,
1612  tbinfo->dobj.catId.oid))
1613  tbinfo->dobj.dump = DUMP_COMPONENT_NONE;
1614 }
1615 
1616 /*
1617  * selectDumpableType: policy-setting subroutine
1618  * Mark a type as to be dumped or not
1619  *
1620  * If it's a table's rowtype or an autogenerated array type, we also apply a
1621  * special type code to facilitate sorting into the desired order. (We don't
1622  * want to consider those to be ordinary types because that would bring tables
1623  * up into the datatype part of the dump order.) We still set the object's
1624  * dump flag; that's not going to cause the dummy type to be dumped, but we
1625  * need it so that casts involving such types will be dumped correctly -- see
1626  * dumpCast. This means the flag should be set the same as for the underlying
1627  * object (the table or base type).
1628  */
1629 static void
1631 {
1632  /* skip complex types, except for standalone composite types */
1633  if (OidIsValid(tyinfo->typrelid) &&
1634  tyinfo->typrelkind != RELKIND_COMPOSITE_TYPE)
1635  {
1636  TableInfo *tytable = findTableByOid(tyinfo->typrelid);
1637 
1638  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1639  if (tytable != NULL)
1640  tyinfo->dobj.dump = tytable->dobj.dump;
1641  else
1642  tyinfo->dobj.dump = DUMP_COMPONENT_NONE;
1643  return;
1644  }
1645 
1646  /* skip auto-generated array types */
1647  if (tyinfo->isArray)
1648  {
1649  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1650 
1651  /*
1652  * Fall through to set the dump flag; we assume that the subsequent
1653  * rules will do the same thing as they would for the array's base
1654  * type. (We cannot reliably look up the base type here, since
1655  * getTypes may not have processed it yet.)
1656  */
1657  }
1658 
1659  if (checkExtensionMembership(&tyinfo->dobj, fout))
1660  return; /* extension membership overrides all else */
1661 
1662  /* Dump based on if the contents of the namespace are being dumped */
1663  tyinfo->dobj.dump = tyinfo->dobj.namespace->dobj.dump_contains;
1664 }
1665 
1666 /*
1667  * selectDumpableDefaultACL: policy-setting subroutine
1668  * Mark a default ACL as to be dumped or not
1669  *
1670  * For per-schema default ACLs, dump if the schema is to be dumped.
1671  * Otherwise dump if we are dumping "everything". Note that dataOnly
1672  * and aclsSkip are checked separately.
1673  */
1674 static void
1676 {
1677  /* Default ACLs can't be extension members */
1678 
1679  if (dinfo->dobj.namespace)
1680  /* default ACLs are considered part of the namespace */
1681  dinfo->dobj.dump = dinfo->dobj.namespace->dobj.dump_contains;
1682  else
1683  dinfo->dobj.dump = dopt->include_everything ?
1685 }
1686 
1687 /*
1688  * selectDumpableCast: policy-setting subroutine
1689  * Mark a cast as to be dumped or not
1690  *
1691  * Casts do not belong to any particular namespace (since they haven't got
1692  * names), nor do they have identifiable owners. To distinguish user-defined
1693  * casts from built-in ones, we must resort to checking whether the cast's
1694  * OID is in the range reserved for initdb.
1695  */
1696 static void
1698 {
1699  if (checkExtensionMembership(&cast->dobj, fout))
1700  return; /* extension membership overrides all else */
1701 
1702  /*
1703  * This would be DUMP_COMPONENT_ACL for from-initdb casts, but they do not
1704  * support ACLs currently.
1705  */
1706  if (cast->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1707  cast->dobj.dump = DUMP_COMPONENT_NONE;
1708  else
1709  cast->dobj.dump = fout->dopt->include_everything ?
1711 }
1712 
1713 /*
1714  * selectDumpableProcLang: policy-setting subroutine
1715  * Mark a procedural language as to be dumped or not
1716  *
1717  * Procedural languages do not belong to any particular namespace. To
1718  * identify built-in languages, we must resort to checking whether the
1719  * language's OID is in the range reserved for initdb.
1720  */
1721 static void
1723 {
1724  if (checkExtensionMembership(&plang->dobj, fout))
1725  return; /* extension membership overrides all else */
1726 
1727  /*
1728  * Only include procedural languages when we are dumping everything.
1729  *
1730  * For from-initdb procedural languages, only include ACLs, as we do for
1731  * the pg_catalog namespace. We need this because procedural languages do
1732  * not live in any namespace.
1733  */
1734  if (!fout->dopt->include_everything)
1735  plang->dobj.dump = DUMP_COMPONENT_NONE;
1736  else
1737  {
1738  if (plang->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1739  plang->dobj.dump = fout->remoteVersion < 90600 ?
1741  else
1742  plang->dobj.dump = DUMP_COMPONENT_ALL;
1743  }
1744 }
1745 
1746 /*
1747  * selectDumpableAccessMethod: policy-setting subroutine
1748  * Mark an access method as to be dumped or not
1749  *
1750  * Access methods do not belong to any particular namespace. To identify
1751  * built-in access methods, we must resort to checking whether the
1752  * method's OID is in the range reserved for initdb.
1753  */
1754 static void
1756 {
1757  if (checkExtensionMembership(&method->dobj, fout))
1758  return; /* extension membership overrides all else */
1759 
1760  /*
1761  * This would be DUMP_COMPONENT_ACL for from-initdb access methods, but
1762  * they do not support ACLs currently.
1763  */
1764  if (method->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1765  method->dobj.dump = DUMP_COMPONENT_NONE;
1766  else
1767  method->dobj.dump = fout->dopt->include_everything ?
1769 }
1770 
1771 /*
1772  * selectDumpableExtension: policy-setting subroutine
1773  * Mark an extension as to be dumped or not
1774  *
1775  * Built-in extensions should be skipped except for checking ACLs, since we
1776  * assume those will already be installed in the target database. We identify
1777  * such extensions by their having OIDs in the range reserved for initdb.
1778  * We dump all user-added extensions by default, or none of them if
1779  * include_everything is false (i.e., a --schema or --table switch was given).
1780  */
1781 static void
1783 {
1784  /*
1785  * Use DUMP_COMPONENT_ACL for built-in extensions, to allow users to
1786  * change permissions on their member objects, if they wish to, and have
1787  * those changes preserved.
1788  */
1789  if (extinfo->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1790  extinfo->dobj.dump = extinfo->dobj.dump_contains = DUMP_COMPONENT_ACL;
1791  else
1792  extinfo->dobj.dump = extinfo->dobj.dump_contains =
1795 }
1796 
1797 /*
1798  * selectDumpablePublicationTable: policy-setting subroutine
1799  * Mark a publication table as to be dumped or not
1800  *
1801  * Publication tables have schemas, but those are ignored in decision making,
1802  * because publications are only dumped when we are dumping everything.
1803  */
1804 static void
1806 {
1807  if (checkExtensionMembership(dobj, fout))
1808  return; /* extension membership overrides all else */
1809 
1810  dobj->dump = fout->dopt->include_everything ?
1812 }
1813 
1814 /*
1815  * selectDumpableObject: policy-setting subroutine
1816  * Mark a generic dumpable object as to be dumped or not
1817  *
1818  * Use this only for object types without a special-case routine above.
1819  */
1820 static void
1822 {
1823  if (checkExtensionMembership(dobj, fout))
1824  return; /* extension membership overrides all else */
1825 
1826  /*
1827  * Default policy is to dump if parent namespace is dumpable, or for
1828  * non-namespace-associated items, dump if we're dumping "everything".
1829  */
1830  if (dobj->namespace)
1831  dobj->dump = dobj->namespace->dobj.dump_contains;
1832  else
1833  dobj->dump = fout->dopt->include_everything ?
1835 }
1836 
1837 /*
1838  * Dump a table's contents for loading using the COPY command
1839  * - this routine is called by the Archiver when it wants the table
1840  * to be dumped.
1841  */
1842 static int
1843 dumpTableData_copy(Archive *fout, void *dcontext)
1844 {
1845  TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
1846  TableInfo *tbinfo = tdinfo->tdtable;
1847  const char *classname = tbinfo->dobj.name;
1849 
1850  /*
1851  * Note: can't use getThreadLocalPQExpBuffer() here, we're calling fmtId
1852  * which uses it already.
1853  */
1854  PQExpBuffer clistBuf = createPQExpBuffer();
1855  PGconn *conn = GetConnection(fout);
1856  PGresult *res;
1857  int ret;
1858  char *copybuf;
1859  const char *column_list;
1860 
1861  pg_log_info("dumping contents of table \"%s.%s\"",
1862  tbinfo->dobj.namespace->dobj.name, classname);
1863 
1864  /*
1865  * Specify the column list explicitly so that we have no possibility of
1866  * retrieving data in the wrong column order. (The default column
1867  * ordering of COPY will not be what we want in certain corner cases
1868  * involving ADD COLUMN and inheritance.)
1869  */
1870  column_list = fmtCopyColumnList(tbinfo, clistBuf);
1871 
1872  /*
1873  * Use COPY (SELECT ...) TO when dumping a foreign table's data, and when
1874  * a filter condition was specified. For other cases a simple COPY
1875  * suffices.
1876  */
1877  if (tdinfo->filtercond || tbinfo->relkind == RELKIND_FOREIGN_TABLE)
1878  {
1879  /* Note: this syntax is only supported in 8.2 and up */
1880  appendPQExpBufferStr(q, "COPY (SELECT ");
1881  /* klugery to get rid of parens in column list */
1882  if (strlen(column_list) > 2)
1883  {
1884  appendPQExpBufferStr(q, column_list + 1);
1885  q->data[q->len - 1] = ' ';
1886  }
1887  else
1888  appendPQExpBufferStr(q, "* ");
1889 
1890  appendPQExpBuffer(q, "FROM %s %s) TO stdout;",
1891  fmtQualifiedDumpable(tbinfo),
1892  tdinfo->filtercond ? tdinfo->filtercond : "");
1893  }
1894  else
1895  {
1896  appendPQExpBuffer(q, "COPY %s %s TO stdout;",
1897  fmtQualifiedDumpable(tbinfo),
1898  column_list);
1899  }
1900  res = ExecuteSqlQuery(fout, q->data, PGRES_COPY_OUT);
1901  PQclear(res);
1902  destroyPQExpBuffer(clistBuf);
1903 
1904  for (;;)
1905  {
1906  ret = PQgetCopyData(conn, &copybuf, 0);
1907 
1908  if (ret < 0)
1909  break; /* done or error */
1910 
1911  if (copybuf)
1912  {
1913  WriteData(fout, copybuf, ret);
1914  PQfreemem(copybuf);
1915  }
1916 
1917  /* ----------
1918  * THROTTLE:
1919  *
1920  * There was considerable discussion in late July, 2000 regarding
1921  * slowing down pg_dump when backing up large tables. Users with both
1922  * slow & fast (multi-processor) machines experienced performance
1923  * degradation when doing a backup.
1924  *
1925  * Initial attempts based on sleeping for a number of ms for each ms
1926  * of work were deemed too complex, then a simple 'sleep in each loop'
1927  * implementation was suggested. The latter failed because the loop
1928  * was too tight. Finally, the following was implemented:
1929  *
1930  * If throttle is non-zero, then
1931  * See how long since the last sleep.
1932  * Work out how long to sleep (based on ratio).
1933  * If sleep is more than 100ms, then
1934  * sleep
1935  * reset timer
1936  * EndIf
1937  * EndIf
1938  *
1939  * where the throttle value was the number of ms to sleep per ms of
1940  * work. The calculation was done in each loop.
1941  *
1942  * Most of the hard work is done in the backend, and this solution
1943  * still did not work particularly well: on slow machines, the ratio
1944  * was 50:1, and on medium paced machines, 1:1, and on fast
1945  * multi-processor machines, it had little or no effect, for reasons
1946  * that were unclear.
1947  *
1948  * Further discussion ensued, and the proposal was dropped.
1949  *
1950  * For those people who want this feature, it can be implemented using
1951  * gettimeofday in each loop, calculating the time since last sleep,
1952  * multiplying that by the sleep ratio, then if the result is more
1953  * than a preset 'minimum sleep time' (say 100ms), call the 'select'
1954  * function to sleep for a subsecond period ie.
1955  *
1956  * select(0, NULL, NULL, NULL, &tvi);
1957  *
1958  * This will return after the interval specified in the structure tvi.
1959  * Finally, call gettimeofday again to save the 'last sleep time'.
1960  * ----------
1961  */
1962  }
1963  archprintf(fout, "\\.\n\n\n");
1964 
1965  if (ret == -2)
1966  {
1967  /* copy data transfer failed */
1968  pg_log_error("Dumping the contents of table \"%s\" failed: PQgetCopyData() failed.", classname);
1969  pg_log_error("Error message from server: %s", PQerrorMessage(conn));
1970  pg_log_error("The command was: %s", q->data);
1971  exit_nicely(1);
1972  }
1973 
1974  /* Check command status and return to normal libpq state */
1975  res = PQgetResult(conn);
1976  if (PQresultStatus(res) != PGRES_COMMAND_OK)
1977  {
1978  pg_log_error("Dumping the contents of table \"%s\" failed: PQgetResult() failed.", classname);
1979  pg_log_error("Error message from server: %s", PQerrorMessage(conn));
1980  pg_log_error("The command was: %s", q->data);
1981  exit_nicely(1);
1982  }
1983  PQclear(res);
1984 
1985  /* Do this to ensure we've pumped libpq back to idle state */
1986  if (PQgetResult(conn) != NULL)
1987  pg_log_warning("unexpected extra results during COPY of table \"%s\"",
1988  classname);
1989 
1990  destroyPQExpBuffer(q);
1991  return 1;
1992 }
1993 
1994 /*
1995  * Dump table data using INSERT commands.
1996  *
1997  * Caution: when we restore from an archive file direct to database, the
1998  * INSERT commands emitted by this function have to be parsed by
1999  * pg_backup_db.c's ExecuteSimpleCommands(), which will not handle comments,
2000  * E'' strings, or dollar-quoted strings. So don't emit anything like that.
2001  */
2002 static int
2003 dumpTableData_insert(Archive *fout, void *dcontext)
2004 {
2005  TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
2006  TableInfo *tbinfo = tdinfo->tdtable;
2007  DumpOptions *dopt = fout->dopt;
2009  PQExpBuffer insertStmt = NULL;
2010  PGresult *res;
2011  int nfields;
2012  int rows_per_statement = dopt->dump_inserts;
2013  int rows_this_statement = 0;
2014 
2015  appendPQExpBuffer(q, "DECLARE _pg_dump_cursor CURSOR FOR "
2016  "SELECT * FROM ONLY %s",
2017  fmtQualifiedDumpable(tbinfo));
2018  if (tdinfo->filtercond)
2019  appendPQExpBuffer(q, " %s", tdinfo->filtercond);
2020 
2021  ExecuteSqlStatement(fout, q->data);
2022 
2023  while (1)
2024  {
2025  res = ExecuteSqlQuery(fout, "FETCH 100 FROM _pg_dump_cursor",
2026  PGRES_TUPLES_OK);
2027  nfields = PQnfields(res);
2028 
2029  /*
2030  * First time through, we build as much of the INSERT statement as
2031  * possible in "insertStmt", which we can then just print for each
2032  * statement. If the table happens to have zero columns then this will
2033  * be a complete statement, otherwise it will end in "VALUES" and be
2034  * ready to have the row's column values printed.
2035  */
2036  if (insertStmt == NULL)
2037  {
2038  TableInfo *targettab;
2039 
2040  insertStmt = createPQExpBuffer();
2041 
2042  /*
2043  * When load-via-partition-root is set, get the root table name
2044  * for the partition table, so that we can reload data through the
2045  * root table.
2046  */
2047  if (dopt->load_via_partition_root && tbinfo->ispartition)
2048  targettab = getRootTableInfo(tbinfo);
2049  else
2050  targettab = tbinfo;
2051 
2052  appendPQExpBuffer(insertStmt, "INSERT INTO %s ",
2053  fmtQualifiedDumpable(targettab));
2054 
2055  /* corner case for zero-column table */
2056  if (nfields == 0)
2057  {
2058  appendPQExpBufferStr(insertStmt, "DEFAULT VALUES;\n");
2059  }
2060  else
2061  {
2062  /* append the list of column names if required */
2063  if (dopt->column_inserts)
2064  {
2065  appendPQExpBufferChar(insertStmt, '(');
2066  for (int field = 0; field < nfields; field++)
2067  {
2068  if (field > 0)
2069  appendPQExpBufferStr(insertStmt, ", ");
2070  appendPQExpBufferStr(insertStmt,
2071  fmtId(PQfname(res, field)));
2072  }
2073  appendPQExpBufferStr(insertStmt, ") ");
2074  }
2075 
2076  if (tbinfo->needs_override)
2077  appendPQExpBufferStr(insertStmt, "OVERRIDING SYSTEM VALUE ");
2078 
2079  appendPQExpBufferStr(insertStmt, "VALUES");
2080  }
2081  }
2082 
2083  for (int tuple = 0; tuple < PQntuples(res); tuple++)
2084  {
2085  /* Write the INSERT if not in the middle of a multi-row INSERT. */
2086  if (rows_this_statement == 0)
2087  archputs(insertStmt->data, fout);
2088 
2089  /*
2090  * If it is zero-column table then we've already written the
2091  * complete statement, which will mean we've disobeyed
2092  * --rows-per-insert when it's set greater than 1. We do support
2093  * a way to make this multi-row with: SELECT UNION ALL SELECT
2094  * UNION ALL ... but that's non-standard so we should avoid it
2095  * given that using INSERTs is mostly only ever needed for
2096  * cross-database exports.
2097  */
2098  if (nfields == 0)
2099  continue;
2100 
2101  /* Emit a row heading */
2102  if (rows_per_statement == 1)
2103  archputs(" (", fout);
2104  else if (rows_this_statement > 0)
2105  archputs(",\n\t(", fout);
2106  else
2107  archputs("\n\t(", fout);
2108 
2109  for (int field = 0; field < nfields; field++)
2110  {
2111  if (field > 0)
2112  archputs(", ", fout);
2113  if (tbinfo->attgenerated[field])
2114  {
2115  archputs("DEFAULT", fout);
2116  continue;
2117  }
2118  if (PQgetisnull(res, tuple, field))
2119  {
2120  archputs("NULL", fout);
2121  continue;
2122  }
2123 
2124  /* XXX This code is partially duplicated in ruleutils.c */
2125  switch (PQftype(res, field))
2126  {
2127  case INT2OID:
2128  case INT4OID:
2129  case INT8OID:
2130  case OIDOID:
2131  case FLOAT4OID:
2132  case FLOAT8OID:
2133  case NUMERICOID:
2134  {
2135  /*
2136  * These types are printed without quotes unless
2137  * they contain values that aren't accepted by the
2138  * scanner unquoted (e.g., 'NaN'). Note that
2139  * strtod() and friends might accept NaN, so we
2140  * can't use that to test.
2141  *
2142  * In reality we only need to defend against
2143  * infinity and NaN, so we need not get too crazy
2144  * about pattern matching here.
2145  */
2146  const char *s = PQgetvalue(res, tuple, field);
2147 
2148  if (strspn(s, "0123456789 +-eE.") == strlen(s))
2149  archputs(s, fout);
2150  else
2151  archprintf(fout, "'%s'", s);
2152  }
2153  break;
2154 
2155  case BITOID:
2156  case VARBITOID:
2157  archprintf(fout, "B'%s'",
2158  PQgetvalue(res, tuple, field));
2159  break;
2160 
2161  case BOOLOID:
2162  if (strcmp(PQgetvalue(res, tuple, field), "t") == 0)
2163  archputs("true", fout);
2164  else
2165  archputs("false", fout);
2166  break;
2167 
2168  default:
2169  /* All other types are printed as string literals. */
2170  resetPQExpBuffer(q);
2172  PQgetvalue(res, tuple, field),
2173  fout);
2174  archputs(q->data, fout);
2175  break;
2176  }
2177  }
2178 
2179  /* Terminate the row ... */
2180  archputs(")", fout);
2181 
2182  /* ... and the statement, if the target no. of rows is reached */
2183  if (++rows_this_statement >= rows_per_statement)
2184  {
2185  if (dopt->do_nothing)
2186  archputs(" ON CONFLICT DO NOTHING;\n", fout);
2187  else
2188  archputs(";\n", fout);
2189  /* Reset the row counter */
2190  rows_this_statement = 0;
2191  }
2192  }
2193 
2194  if (PQntuples(res) <= 0)
2195  {
2196  PQclear(res);
2197  break;
2198  }
2199  PQclear(res);
2200  }
2201 
2202  /* Terminate any statements that didn't make the row count. */
2203  if (rows_this_statement > 0)
2204  {
2205  if (dopt->do_nothing)
2206  archputs(" ON CONFLICT DO NOTHING;\n", fout);
2207  else
2208  archputs(";\n", fout);
2209  }
2210 
2211  archputs("\n\n", fout);
2212 
2213  ExecuteSqlStatement(fout, "CLOSE _pg_dump_cursor");
2214 
2215  destroyPQExpBuffer(q);
2216  if (insertStmt != NULL)
2217  destroyPQExpBuffer(insertStmt);
2218 
2219  return 1;
2220 }
2221 
2222 /*
2223  * getRootTableInfo:
2224  * get the root TableInfo for the given partition table.
2225  */
2226 static TableInfo *
2228 {
2229  TableInfo *parentTbinfo;
2230 
2231  Assert(tbinfo->ispartition);
2232  Assert(tbinfo->numParents == 1);
2233 
2234  parentTbinfo = tbinfo->parents[0];
2235  while (parentTbinfo->ispartition)
2236  {
2237  Assert(parentTbinfo->numParents == 1);
2238  parentTbinfo = parentTbinfo->parents[0];
2239  }
2240 
2241  return parentTbinfo;
2242 }
2243 
2244 /*
2245  * dumpTableData -
2246  * dump the contents of a single table
2247  *
2248  * Actually, this just makes an ArchiveEntry for the table contents.
2249  */
2250 static void
2252 {
2253  DumpOptions *dopt = fout->dopt;
2254  TableInfo *tbinfo = tdinfo->tdtable;
2255  PQExpBuffer copyBuf = createPQExpBuffer();
2256  PQExpBuffer clistBuf = createPQExpBuffer();
2257  DataDumperPtr dumpFn;
2258  char *copyStmt;
2259  const char *copyFrom;
2260 
2261  /* We had better have loaded per-column details about this table */
2262  Assert(tbinfo->interesting);
2263 
2264  if (dopt->dump_inserts == 0)
2265  {
2266  /* Dump/restore using COPY */
2267  dumpFn = dumpTableData_copy;
2268 
2269  /*
2270  * When load-via-partition-root is set, get the root table name for
2271  * the partition table, so that we can reload data through the root
2272  * table.
2273  */
2274  if (dopt->load_via_partition_root && tbinfo->ispartition)
2275  {
2276  TableInfo *parentTbinfo;
2277 
2278  parentTbinfo = getRootTableInfo(tbinfo);
2279  copyFrom = fmtQualifiedDumpable(parentTbinfo);
2280  }
2281  else
2282  copyFrom = fmtQualifiedDumpable(tbinfo);
2283 
2284  /* must use 2 steps here 'cause fmtId is nonreentrant */
2285  appendPQExpBuffer(copyBuf, "COPY %s ",
2286  copyFrom);
2287  appendPQExpBuffer(copyBuf, "%s FROM stdin;\n",
2288  fmtCopyColumnList(tbinfo, clistBuf));
2289  copyStmt = copyBuf->data;
2290  }
2291  else
2292  {
2293  /* Restore using INSERT */
2294  dumpFn = dumpTableData_insert;
2295  copyStmt = NULL;
2296  }
2297 
2298  /*
2299  * Note: although the TableDataInfo is a full DumpableObject, we treat its
2300  * dependency on its table as "special" and pass it to ArchiveEntry now.
2301  * See comments for BuildArchiveDependencies.
2302  */
2303  if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2304  {
2305  TocEntry *te;
2306 
2307  te = ArchiveEntry(fout, tdinfo->dobj.catId, tdinfo->dobj.dumpId,
2308  ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
2309  .namespace = tbinfo->dobj.namespace->dobj.name,
2310  .owner = tbinfo->rolname,
2311  .description = "TABLE DATA",
2312  .section = SECTION_DATA,
2313  .copyStmt = copyStmt,
2314  .deps = &(tbinfo->dobj.dumpId),
2315  .nDeps = 1,
2316  .dumpFn = dumpFn,
2317  .dumpArg = tdinfo));
2318 
2319  /*
2320  * Set the TocEntry's dataLength in case we are doing a parallel dump
2321  * and want to order dump jobs by table size. We choose to measure
2322  * dataLength in table pages during dump, so no scaling is needed.
2323  * However, relpages is declared as "integer" in pg_class, and hence
2324  * also in TableInfo, but it's really BlockNumber a/k/a unsigned int.
2325  * Cast so that we get the right interpretation of table sizes
2326  * exceeding INT_MAX pages.
2327  */
2328  te->dataLength = (BlockNumber) tbinfo->relpages;
2329  }
2330 
2331  destroyPQExpBuffer(copyBuf);
2332  destroyPQExpBuffer(clistBuf);
2333 }
2334 
2335 /*
2336  * refreshMatViewData -
2337  * load or refresh the contents of a single materialized view
2338  *
2339  * Actually, this just makes an ArchiveEntry for the REFRESH MATERIALIZED VIEW
2340  * statement.
2341  */
2342 static void
2344 {
2345  TableInfo *tbinfo = tdinfo->tdtable;
2346  PQExpBuffer q;
2347 
2348  /* If the materialized view is not flagged as populated, skip this. */
2349  if (!tbinfo->relispopulated)
2350  return;
2351 
2352  q = createPQExpBuffer();
2353 
2354  appendPQExpBuffer(q, "REFRESH MATERIALIZED VIEW %s;\n",
2355  fmtQualifiedDumpable(tbinfo));
2356 
2357  if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2358  ArchiveEntry(fout,
2359  tdinfo->dobj.catId, /* catalog ID */
2360  tdinfo->dobj.dumpId, /* dump ID */
2361  ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
2362  .namespace = tbinfo->dobj.namespace->dobj.name,
2363  .owner = tbinfo->rolname,
2364  .description = "MATERIALIZED VIEW DATA",
2365  .section = SECTION_POST_DATA,
2366  .createStmt = q->data,
2367  .deps = tdinfo->dobj.dependencies,
2368  .nDeps = tdinfo->dobj.nDeps));
2369 
2370  destroyPQExpBuffer(q);
2371 }
2372 
2373 /*
2374  * getTableData -
2375  * set up dumpable objects representing the contents of tables
2376  */
2377 static void
2378 getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, char relkind)
2379 {
2380  int i;
2381 
2382  for (i = 0; i < numTables; i++)
2383  {
2384  if (tblinfo[i].dobj.dump & DUMP_COMPONENT_DATA &&
2385  (!relkind || tblinfo[i].relkind == relkind))
2386  makeTableDataInfo(dopt, &(tblinfo[i]));
2387  }
2388 }
2389 
2390 /*
2391  * Make a dumpable object for the data of this specific table
2392  *
2393  * Note: we make a TableDataInfo if and only if we are going to dump the
2394  * table data; the "dump" flag in such objects isn't used.
2395  */
2396 static void
2398 {
2399  TableDataInfo *tdinfo;
2400 
2401  /*
2402  * Nothing to do if we already decided to dump the table. This will
2403  * happen for "config" tables.
2404  */
2405  if (tbinfo->dataObj != NULL)
2406  return;
2407 
2408  /* Skip VIEWs (no data to dump) */
2409  if (tbinfo->relkind == RELKIND_VIEW)
2410  return;
2411  /* Skip FOREIGN TABLEs (no data to dump) unless requested explicitly */
2412  if (tbinfo->relkind == RELKIND_FOREIGN_TABLE &&
2413  (foreign_servers_include_oids.head == NULL ||
2414  !simple_oid_list_member(&foreign_servers_include_oids,
2415  tbinfo->foreign_server)))
2416  return;
2417  /* Skip partitioned tables (data in partitions) */
2418  if (tbinfo->relkind == RELKIND_PARTITIONED_TABLE)
2419  return;
2420 
2421  /* Don't dump data in unlogged tables, if so requested */
2422  if (tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED &&
2423  dopt->no_unlogged_table_data)
2424  return;
2425 
2426  /* Check that the data is not explicitly excluded */
2427  if (simple_oid_list_member(&tabledata_exclude_oids,
2428  tbinfo->dobj.catId.oid))
2429  return;
2430 
2431  /* OK, let's dump it */
2432  tdinfo = (TableDataInfo *) pg_malloc(sizeof(TableDataInfo));
2433 
2434  if (tbinfo->relkind == RELKIND_MATVIEW)
2435  tdinfo->dobj.objType = DO_REFRESH_MATVIEW;
2436  else if (tbinfo->relkind == RELKIND_SEQUENCE)
2437  tdinfo->dobj.objType = DO_SEQUENCE_SET;
2438  else
2439  tdinfo->dobj.objType = DO_TABLE_DATA;
2440 
2441  /*
2442  * Note: use tableoid 0 so that this object won't be mistaken for
2443  * something that pg_depend entries apply to.
2444  */
2445  tdinfo->dobj.catId.tableoid = 0;
2446  tdinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
2447  AssignDumpId(&tdinfo->dobj);
2448  tdinfo->dobj.name = tbinfo->dobj.name;
2449  tdinfo->dobj.namespace = tbinfo->dobj.namespace;
2450  tdinfo->tdtable = tbinfo;
2451  tdinfo->filtercond = NULL; /* might get set later */
2452  addObjectDependency(&tdinfo->dobj, tbinfo->dobj.dumpId);
2453 
2454  tbinfo->dataObj = tdinfo;
2455 
2456  /* Make sure that we'll collect per-column info for this table. */
2457  tbinfo->interesting = true;
2458 }
2459 
2460 /*
2461  * The refresh for a materialized view must be dependent on the refresh for
2462  * any materialized view that this one is dependent on.
2463  *
2464  * This must be called after all the objects are created, but before they are
2465  * sorted.
2466  */
2467 static void
2469 {
2470  PQExpBuffer query;
2471  PGresult *res;
2472  int ntups,
2473  i;
2474  int i_classid,
2475  i_objid,
2476  i_refobjid;
2477 
2478  /* No Mat Views before 9.3. */
2479  if (fout->remoteVersion < 90300)
2480  return;
2481 
2482  query = createPQExpBuffer();
2483 
2484  appendPQExpBufferStr(query, "WITH RECURSIVE w AS "
2485  "( "
2486  "SELECT d1.objid, d2.refobjid, c2.relkind AS refrelkind "
2487  "FROM pg_depend d1 "
2488  "JOIN pg_class c1 ON c1.oid = d1.objid "
2489  "AND c1.relkind = " CppAsString2(RELKIND_MATVIEW)
2490  " JOIN pg_rewrite r1 ON r1.ev_class = d1.objid "
2491  "JOIN pg_depend d2 ON d2.classid = 'pg_rewrite'::regclass "
2492  "AND d2.objid = r1.oid "
2493  "AND d2.refobjid <> d1.objid "
2494  "JOIN pg_class c2 ON c2.oid = d2.refobjid "
2495  "AND c2.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
2496  CppAsString2(RELKIND_VIEW) ") "
2497  "WHERE d1.classid = 'pg_class'::regclass "
2498  "UNION "
2499  "SELECT w.objid, d3.refobjid, c3.relkind "
2500  "FROM w "
2501  "JOIN pg_rewrite r3 ON r3.ev_class = w.refobjid "
2502  "JOIN pg_depend d3 ON d3.classid = 'pg_rewrite'::regclass "
2503  "AND d3.objid = r3.oid "
2504  "AND d3.refobjid <> w.refobjid "
2505  "JOIN pg_class c3 ON c3.oid = d3.refobjid "
2506  "AND c3.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
2507  CppAsString2(RELKIND_VIEW) ") "
2508  ") "
2509  "SELECT 'pg_class'::regclass::oid AS classid, objid, refobjid "
2510  "FROM w "
2511  "WHERE refrelkind = " CppAsString2(RELKIND_MATVIEW));
2512 
2513  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
2514 
2515  ntups = PQntuples(res);
2516 
2517  i_classid = PQfnumber(res, "classid");
2518  i_objid = PQfnumber(res, "objid");
2519  i_refobjid = PQfnumber(res, "refobjid");
2520 
2521  for (i = 0; i < ntups; i++)
2522  {
2523  CatalogId objId;
2524  CatalogId refobjId;
2525  DumpableObject *dobj;
2526  DumpableObject *refdobj;
2527  TableInfo *tbinfo;
2528  TableInfo *reftbinfo;
2529 
2530  objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
2531  objId.oid = atooid(PQgetvalue(res, i, i_objid));
2532  refobjId.tableoid = objId.tableoid;
2533  refobjId.oid = atooid(PQgetvalue(res, i, i_refobjid));
2534 
2535  dobj = findObjectByCatalogId(objId);
2536  if (dobj == NULL)
2537  continue;
2538 
2539  Assert(dobj->objType == DO_TABLE);
2540  tbinfo = (TableInfo *) dobj;
2541  Assert(tbinfo->relkind == RELKIND_MATVIEW);
2542  dobj = (DumpableObject *) tbinfo->dataObj;
2543  if (dobj == NULL)
2544  continue;
2545  Assert(dobj->objType == DO_REFRESH_MATVIEW);
2546 
2547  refdobj = findObjectByCatalogId(refobjId);
2548  if (refdobj == NULL)
2549  continue;
2550 
2551  Assert(refdobj->objType == DO_TABLE);
2552  reftbinfo = (TableInfo *) refdobj;
2553  Assert(reftbinfo->relkind == RELKIND_MATVIEW);
2554  refdobj = (DumpableObject *) reftbinfo->dataObj;
2555  if (refdobj == NULL)
2556  continue;
2557  Assert(refdobj->objType == DO_REFRESH_MATVIEW);
2558 
2559  addObjectDependency(dobj, refdobj->dumpId);
2560 
2561  if (!reftbinfo->relispopulated)
2562  tbinfo->relispopulated = false;
2563  }
2564 
2565  PQclear(res);
2566 
2567  destroyPQExpBuffer(query);
2568 }
2569 
2570 /*
2571  * getTableDataFKConstraints -
2572  * add dump-order dependencies reflecting foreign key constraints
2573  *
2574  * This code is executed only in a data-only dump --- in schema+data dumps
2575  * we handle foreign key issues by not creating the FK constraints until
2576  * after the data is loaded. In a data-only dump, however, we want to
2577  * order the table data objects in such a way that a table's referenced
2578  * tables are restored first. (In the presence of circular references or
2579  * self-references this may be impossible; we'll detect and complain about
2580  * that during the dependency sorting step.)
2581  */
2582 static void
2584 {
2585  DumpableObject **dobjs;
2586  int numObjs;
2587  int i;
2588 
2589  /* Search through all the dumpable objects for FK constraints */
2590  getDumpableObjects(&dobjs, &numObjs);
2591  for (i = 0; i < numObjs; i++)
2592  {
2593  if (dobjs[i]->objType == DO_FK_CONSTRAINT)
2594  {
2595  ConstraintInfo *cinfo = (ConstraintInfo *) dobjs[i];
2596  TableInfo *ftable;
2597 
2598  /* Not interesting unless both tables are to be dumped */
2599  if (cinfo->contable == NULL ||
2600  cinfo->contable->dataObj == NULL)
2601  continue;
2602  ftable = findTableByOid(cinfo->confrelid);
2603  if (ftable == NULL ||
2604  ftable->dataObj == NULL)
2605  continue;
2606 
2607  /*
2608  * Okay, make referencing table's TABLE_DATA object depend on the
2609  * referenced table's TABLE_DATA object.
2610  */
2612  ftable->dataObj->dobj.dumpId);
2613  }
2614  }
2615  free(dobjs);
2616 }
2617 
2618 
2619 /*
2620  * guessConstraintInheritance:
2621  * In pre-8.4 databases, we can't tell for certain which constraints
2622  * are inherited. We assume a CHECK constraint is inherited if its name
2623  * matches the name of any constraint in the parent. Originally this code
2624  * tried to compare the expression texts, but that can fail for various
2625  * reasons --- for example, if the parent and child tables are in different
2626  * schemas, reverse-listing of function calls may produce different text
2627  * (schema-qualified or not) depending on search path.
2628  *
2629  * In 8.4 and up we can rely on the conislocal field to decide which
2630  * constraints must be dumped; much safer.
2631  *
2632  * This function assumes all conislocal flags were initialized to true.
2633  * It clears the flag on anything that seems to be inherited.
2634  */
2635 static void
2637 {
2638  int i,
2639  j,
2640  k;
2641 
2642  for (i = 0; i < numTables; i++)
2643  {
2644  TableInfo *tbinfo = &(tblinfo[i]);
2645  int numParents;
2646  TableInfo **parents;
2647  TableInfo *parent;
2648 
2649  /* Sequences and views never have parents */
2650  if (tbinfo->relkind == RELKIND_SEQUENCE ||
2651  tbinfo->relkind == RELKIND_VIEW)
2652  continue;
2653 
2654  /* Don't bother computing anything for non-target tables, either */
2655  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
2656  continue;
2657 
2658  numParents = tbinfo->numParents;
2659  parents = tbinfo->parents;
2660 
2661  if (numParents == 0)
2662  continue; /* nothing to see here, move along */
2663 
2664  /* scan for inherited CHECK constraints */
2665  for (j = 0; j < tbinfo->ncheck; j++)
2666  {
2667  ConstraintInfo *constr;
2668 
2669  constr = &(tbinfo->checkexprs[j]);
2670 
2671  for (k = 0; k < numParents; k++)
2672  {
2673  int l;
2674 
2675  parent = parents[k];
2676  for (l = 0; l < parent->ncheck; l++)
2677  {
2678  ConstraintInfo *pconstr = &(parent->checkexprs[l]);
2679 
2680  if (strcmp(pconstr->dobj.name, constr->dobj.name) == 0)
2681  {
2682  constr->conislocal = false;
2683  break;
2684  }
2685  }
2686  if (!constr->conislocal)
2687  break;
2688  }
2689  }
2690  }
2691 }
2692 
2693 
2694 /*
2695  * dumpDatabase:
2696  * dump the database definition
2697  */
2698 static void
2700 {
2701  DumpOptions *dopt = fout->dopt;
2702  PQExpBuffer dbQry = createPQExpBuffer();
2703  PQExpBuffer delQry = createPQExpBuffer();
2704  PQExpBuffer creaQry = createPQExpBuffer();
2705  PQExpBuffer labelq = createPQExpBuffer();
2706  PGconn *conn = GetConnection(fout);
2707  PGresult *res;
2708  int i_tableoid,
2709  i_oid,
2710  i_datname,
2711  i_dba,
2712  i_encoding,
2713  i_collate,
2714  i_ctype,
2715  i_frozenxid,
2716  i_minmxid,
2717  i_datacl,
2718  i_rdatacl,
2719  i_datistemplate,
2720  i_datconnlimit,
2721  i_tablespace;
2722  CatalogId dbCatId;
2723  DumpId dbDumpId;
2724  const char *datname,
2725  *dba,
2726  *encoding,
2727  *collate,
2728  *ctype,
2729  *datacl,
2730  *rdatacl,
2731  *datistemplate,
2732  *datconnlimit,
2733  *tablespace;
2734  uint32 frozenxid,
2735  minmxid;
2736  char *qdatname;
2737 
2738  pg_log_info("saving database definition");
2739 
2740  /*
2741  * Fetch the database-level properties for this database.
2742  *
2743  * The order in which privileges are in the ACL string (the order they
2744  * have been GRANT'd in, which the backend maintains) must be preserved to
2745  * ensure that GRANTs WITH GRANT OPTION and subsequent GRANTs based on
2746  * those are dumped in the correct order. Note that initial privileges
2747  * (pg_init_privs) are not supported on databases, so this logic cannot
2748  * make use of buildACLQueries().
2749  */
2750  if (fout->remoteVersion >= 90600)
2751  {
2752  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2753  "(%s datdba) AS dba, "
2754  "pg_encoding_to_char(encoding) AS encoding, "
2755  "datcollate, datctype, datfrozenxid, datminmxid, "
2756  "(SELECT array_agg(acl ORDER BY row_n) FROM "
2757  " (SELECT acl, row_n FROM "
2758  " unnest(coalesce(datacl,acldefault('d',datdba))) "
2759  " WITH ORDINALITY AS perm(acl,row_n) "
2760  " WHERE NOT EXISTS ( "
2761  " SELECT 1 "
2762  " FROM unnest(acldefault('d',datdba)) "
2763  " AS init(init_acl) "
2764  " WHERE acl = init_acl)) AS datacls) "
2765  " AS datacl, "
2766  "(SELECT array_agg(acl ORDER BY row_n) FROM "
2767  " (SELECT acl, row_n FROM "
2768  " unnest(acldefault('d',datdba)) "
2769  " WITH ORDINALITY AS initp(acl,row_n) "
2770  " WHERE NOT EXISTS ( "
2771  " SELECT 1 "
2772  " FROM unnest(coalesce(datacl,acldefault('d',datdba))) "
2773  " AS permp(orig_acl) "
2774  " WHERE acl = orig_acl)) AS rdatacls) "
2775  " AS rdatacl, "
2776  "datistemplate, datconnlimit, "
2777  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2778  "shobj_description(oid, 'pg_database') AS description "
2779 
2780  "FROM pg_database "
2781  "WHERE datname = current_database()",
2783  }
2784  else if (fout->remoteVersion >= 90300)
2785  {
2786  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2787  "(%s datdba) AS dba, "
2788  "pg_encoding_to_char(encoding) AS encoding, "
2789  "datcollate, datctype, datfrozenxid, datminmxid, "
2790  "datacl, '' as rdatacl, datistemplate, datconnlimit, "
2791  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2792  "shobj_description(oid, 'pg_database') AS description "
2793 
2794  "FROM pg_database "
2795  "WHERE datname = current_database()",
2797  }
2798  else if (fout->remoteVersion >= 80400)
2799  {
2800  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2801  "(%s datdba) AS dba, "
2802  "pg_encoding_to_char(encoding) AS encoding, "
2803  "datcollate, datctype, datfrozenxid, 0 AS datminmxid, "
2804  "datacl, '' as rdatacl, datistemplate, datconnlimit, "
2805  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2806  "shobj_description(oid, 'pg_database') AS description "
2807 
2808  "FROM pg_database "
2809  "WHERE datname = current_database()",
2811  }
2812  else if (fout->remoteVersion >= 80200)
2813  {
2814  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2815  "(%s datdba) AS dba, "
2816  "pg_encoding_to_char(encoding) AS encoding, "
2817  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2818  "datacl, '' as rdatacl, datistemplate, datconnlimit, "
2819  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2820  "shobj_description(oid, 'pg_database') AS description "
2821 
2822  "FROM pg_database "
2823  "WHERE datname = current_database()",
2825  }
2826  else
2827  {
2828  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2829  "(%s datdba) AS dba, "
2830  "pg_encoding_to_char(encoding) AS encoding, "
2831  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2832  "datacl, '' as rdatacl, datistemplate, "
2833  "-1 as datconnlimit, "
2834  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace "
2835  "FROM pg_database "
2836  "WHERE datname = current_database()",
2838  }
2839 
2840  res = ExecuteSqlQueryForSingleRow(fout, dbQry->data);
2841 
2842  i_tableoid = PQfnumber(res, "tableoid");
2843  i_oid = PQfnumber(res, "oid");
2844  i_datname = PQfnumber(res, "datname");
2845  i_dba = PQfnumber(res, "dba");
2846  i_encoding = PQfnumber(res, "encoding");
2847  i_collate = PQfnumber(res, "datcollate");
2848  i_ctype = PQfnumber(res, "datctype");
2849  i_frozenxid = PQfnumber(res, "datfrozenxid");
2850  i_minmxid = PQfnumber(res, "datminmxid");
2851  i_datacl = PQfnumber(res, "datacl");
2852  i_rdatacl = PQfnumber(res, "rdatacl");
2853  i_datistemplate = PQfnumber(res, "datistemplate");
2854  i_datconnlimit = PQfnumber(res, "datconnlimit");
2855  i_tablespace = PQfnumber(res, "tablespace");
2856 
2857  dbCatId.tableoid = atooid(PQgetvalue(res, 0, i_tableoid));
2858  dbCatId.oid = atooid(PQgetvalue(res, 0, i_oid));
2859  datname = PQgetvalue(res, 0, i_datname);
2860  dba = PQgetvalue(res, 0, i_dba);
2861  encoding = PQgetvalue(res, 0, i_encoding);
2862  collate = PQgetvalue(res, 0, i_collate);
2863  ctype = PQgetvalue(res, 0, i_ctype);
2864  frozenxid = atooid(PQgetvalue(res, 0, i_frozenxid));
2865  minmxid = atooid(PQgetvalue(res, 0, i_minmxid));
2866  datacl = PQgetvalue(res, 0, i_datacl);
2867  rdatacl = PQgetvalue(res, 0, i_rdatacl);
2868  datistemplate = PQgetvalue(res, 0, i_datistemplate);
2869  datconnlimit = PQgetvalue(res, 0, i_datconnlimit);
2870  tablespace = PQgetvalue(res, 0, i_tablespace);
2871 
2872  qdatname = pg_strdup(fmtId(datname));
2873 
2874  /*
2875  * Prepare the CREATE DATABASE command. We must specify encoding, locale,
2876  * and tablespace since those can't be altered later. Other DB properties
2877  * are left to the DATABASE PROPERTIES entry, so that they can be applied
2878  * after reconnecting to the target DB.
2879  */
2880  appendPQExpBuffer(creaQry, "CREATE DATABASE %s WITH TEMPLATE = template0",
2881  qdatname);
2882  if (strlen(encoding) > 0)
2883  {
2884  appendPQExpBufferStr(creaQry, " ENCODING = ");
2885  appendStringLiteralAH(creaQry, encoding, fout);
2886  }
2887  if (strlen(collate) > 0 && strcmp(collate, ctype) == 0)
2888  {
2889  appendPQExpBufferStr(creaQry, " LOCALE = ");
2890  appendStringLiteralAH(creaQry, collate, fout);
2891  }
2892  else
2893  {
2894  if (strlen(collate) > 0)
2895  {
2896  appendPQExpBufferStr(creaQry, " LC_COLLATE = ");
2897  appendStringLiteralAH(creaQry, collate, fout);
2898  }
2899  if (strlen(ctype) > 0)
2900  {
2901  appendPQExpBufferStr(creaQry, " LC_CTYPE = ");
2902  appendStringLiteralAH(creaQry, ctype, fout);
2903  }
2904  }
2905 
2906  /*
2907  * Note: looking at dopt->outputNoTablespaces here is completely the wrong
2908  * thing; the decision whether to specify a tablespace should be left till
2909  * pg_restore, so that pg_restore --no-tablespaces applies. Ideally we'd
2910  * label the DATABASE entry with the tablespace and let the normal
2911  * tablespace selection logic work ... but CREATE DATABASE doesn't pay
2912  * attention to default_tablespace, so that won't work.
2913  */
2914  if (strlen(tablespace) > 0 && strcmp(tablespace, "pg_default") != 0 &&
2915  !dopt->outputNoTablespaces)
2916  appendPQExpBuffer(creaQry, " TABLESPACE = %s",
2917  fmtId(tablespace));
2918  appendPQExpBufferStr(creaQry, ";\n");
2919 
2920  appendPQExpBuffer(delQry, "DROP DATABASE %s;\n",
2921  qdatname);
2922 
2923  dbDumpId = createDumpId();
2924 
2925  ArchiveEntry(fout,
2926  dbCatId, /* catalog ID */
2927  dbDumpId, /* dump ID */
2928  ARCHIVE_OPTS(.tag = datname,
2929  .owner = dba,
2930  .description = "DATABASE",
2931  .section = SECTION_PRE_DATA,
2932  .createStmt = creaQry->data,
2933  .dropStmt = delQry->data));
2934 
2935  /* Compute correct tag for archive entry */
2936  appendPQExpBuffer(labelq, "DATABASE %s", qdatname);
2937 
2938  /* Dump DB comment if any */
2939  if (fout->remoteVersion >= 80200)
2940  {
2941  /*
2942  * 8.2 and up keep comments on shared objects in a shared table, so we
2943  * cannot use the dumpComment() code used for other database objects.
2944  * Be careful that the ArchiveEntry parameters match that function.
2945  */
2946  char *comment = PQgetvalue(res, 0, PQfnumber(res, "description"));
2947 
2948  if (comment && *comment && !dopt->no_comments)
2949  {
2950  resetPQExpBuffer(dbQry);
2951 
2952  /*
2953  * Generates warning when loaded into a differently-named
2954  * database.
2955  */
2956  appendPQExpBuffer(dbQry, "COMMENT ON DATABASE %s IS ", qdatname);
2957  appendStringLiteralAH(dbQry, comment, fout);
2958  appendPQExpBufferStr(dbQry, ";\n");
2959 
2960  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2961  ARCHIVE_OPTS(.tag = labelq->data,
2962  .owner = dba,
2963  .description = "COMMENT",
2964  .section = SECTION_NONE,
2965  .createStmt = dbQry->data,
2966  .deps = &dbDumpId,
2967  .nDeps = 1));
2968  }
2969  }
2970  else
2971  {
2972  dumpComment(fout, "DATABASE", qdatname, NULL, dba,
2973  dbCatId, 0, dbDumpId);
2974  }
2975 
2976  /* Dump DB security label, if enabled */
2977  if (!dopt->no_security_labels && fout->remoteVersion >= 90200)
2978  {
2979  PGresult *shres;
2980  PQExpBuffer seclabelQry;
2981 
2982  seclabelQry = createPQExpBuffer();
2983 
2984  buildShSecLabelQuery("pg_database", dbCatId.oid, seclabelQry);
2985  shres = ExecuteSqlQuery(fout, seclabelQry->data, PGRES_TUPLES_OK);
2986  resetPQExpBuffer(seclabelQry);
2987  emitShSecLabels(conn, shres, seclabelQry, "DATABASE", datname);
2988  if (seclabelQry->len > 0)
2989  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2990  ARCHIVE_OPTS(.tag = labelq->data,
2991  .owner = dba,
2992  .description = "SECURITY LABEL",
2993  .section = SECTION_NONE,
2994  .createStmt = seclabelQry->data,
2995  .deps = &dbDumpId,
2996  .nDeps = 1));
2997  destroyPQExpBuffer(seclabelQry);
2998  PQclear(shres);
2999  }
3000 
3001  /*
3002  * Dump ACL if any. Note that we do not support initial privileges
3003  * (pg_init_privs) on databases.
3004  */
3005  dumpACL(fout, dbDumpId, InvalidDumpId, "DATABASE",
3006  qdatname, NULL, NULL,
3007  dba, datacl, rdatacl, "", "");
3008 
3009  /*
3010  * Now construct a DATABASE PROPERTIES archive entry to restore any
3011  * non-default database-level properties. (The reason this must be
3012  * separate is that we cannot put any additional commands into the TOC
3013  * entry that has CREATE DATABASE. pg_restore would execute such a group
3014  * in an implicit transaction block, and the backend won't allow CREATE
3015  * DATABASE in that context.)
3016  */
3017  resetPQExpBuffer(creaQry);
3018  resetPQExpBuffer(delQry);
3019 
3020  if (strlen(datconnlimit) > 0 && strcmp(datconnlimit, "-1") != 0)
3021  appendPQExpBuffer(creaQry, "ALTER DATABASE %s CONNECTION LIMIT = %s;\n",
3022  qdatname, datconnlimit);
3023 
3024  if (strcmp(datistemplate, "t") == 0)
3025  {
3026  appendPQExpBuffer(creaQry, "ALTER DATABASE %s IS_TEMPLATE = true;\n",
3027  qdatname);
3028 
3029  /*
3030  * The backend won't accept DROP DATABASE on a template database. We
3031  * can deal with that by removing the template marking before the DROP
3032  * gets issued. We'd prefer to use ALTER DATABASE IF EXISTS here, but
3033  * since no such command is currently supported, fake it with a direct
3034  * UPDATE on pg_database.
3035  */
3036  appendPQExpBufferStr(delQry, "UPDATE pg_catalog.pg_database "
3037  "SET datistemplate = false WHERE datname = ");
3038  appendStringLiteralAH(delQry, datname, fout);
3039  appendPQExpBufferStr(delQry, ";\n");
3040  }
3041 
3042  /* Add database-specific SET options */
3043  dumpDatabaseConfig(fout, creaQry, datname, dbCatId.oid);
3044 
3045  /*
3046  * We stick this binary-upgrade query into the DATABASE PROPERTIES archive
3047  * entry, too, for lack of a better place.
3048  */
3049  if (dopt->binary_upgrade)
3050  {
3051  appendPQExpBufferStr(creaQry, "\n-- For binary upgrade, set datfrozenxid and datminmxid.\n");
3052  appendPQExpBuffer(creaQry, "UPDATE pg_catalog.pg_database\n"
3053  "SET datfrozenxid = '%u', datminmxid = '%u'\n"
3054  "WHERE datname = ",
3055  frozenxid, minmxid);
3056  appendStringLiteralAH(creaQry, datname, fout);
3057  appendPQExpBufferStr(creaQry, ";\n");
3058  }
3059 
3060  if (creaQry->len > 0)
3061  ArchiveEntry(fout, nilCatalogId, createDumpId(),
3062  ARCHIVE_OPTS(.tag = datname,
3063  .owner = dba,
3064  .description = "DATABASE PROPERTIES",
3065  .section = SECTION_PRE_DATA,
3066  .createStmt = creaQry->data,
3067  .dropStmt = delQry->data,
3068  .deps = &dbDumpId));
3069 
3070  /*
3071  * pg_largeobject comes from the old system intact, so set its
3072  * relfrozenxids and relminmxids.
3073  */
3074  if (dopt->binary_upgrade)
3075  {
3076  PGresult *lo_res;
3077  PQExpBuffer loFrozenQry = createPQExpBuffer();
3078  PQExpBuffer loOutQry = createPQExpBuffer();
3079  int i_relfrozenxid,
3080  i_relminmxid;
3081 
3082  /*
3083  * pg_largeobject
3084  */
3085  if (fout->remoteVersion >= 90300)
3086  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid\n"
3087  "FROM pg_catalog.pg_class\n"
3088  "WHERE oid = %u;\n",
3089  LargeObjectRelationId);
3090  else
3091  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid\n"
3092  "FROM pg_catalog.pg_class\n"
3093  "WHERE oid = %u;\n",
3094  LargeObjectRelationId);
3095 
3096  lo_res = ExecuteSqlQueryForSingleRow(fout, loFrozenQry->data);
3097 
3098  i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
3099  i_relminmxid = PQfnumber(lo_res, "relminmxid");
3100 
3101  appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, set pg_largeobject relfrozenxid and relminmxid\n");
3102  appendPQExpBuffer(loOutQry, "UPDATE pg_catalog.pg_class\n"
3103  "SET relfrozenxid = '%u', relminmxid = '%u'\n"
3104  "WHERE oid = %u;\n",
3105  atooid(PQgetvalue(lo_res, 0, i_relfrozenxid)),
3106  atooid(PQgetvalue(lo_res, 0, i_relminmxid)),
3107  LargeObjectRelationId);
3108  ArchiveEntry(fout, nilCatalogId, createDumpId(),
3109  ARCHIVE_OPTS(.tag = "pg_largeobject",
3110  .description = "pg_largeobject",
3111  .section = SECTION_PRE_DATA,
3112  .createStmt = loOutQry->data));
3113 
3114  PQclear(lo_res);
3115 
3116  destroyPQExpBuffer(loFrozenQry);
3117  destroyPQExpBuffer(loOutQry);
3118  }
3119 
3120  PQclear(res);
3121 
3122  free(qdatname);
3123  destroyPQExpBuffer(dbQry);
3124  destroyPQExpBuffer(delQry);
3125  destroyPQExpBuffer(creaQry);
3126  destroyPQExpBuffer(labelq);
3127 }
3128 
3129 /*
3130  * Collect any database-specific or role-and-database-specific SET options
3131  * for this database, and append them to outbuf.
3132  */
3133 static void
3135  const char *dbname, Oid dboid)
3136 {
3137  PGconn *conn = GetConnection(AH);
3139  PGresult *res;
3140  int count = 1;
3141 
3142  /*
3143  * First collect database-specific options. Pre-8.4 server versions lack
3144  * unnest(), so we do this the hard way by querying once per subscript.
3145  */
3146  for (;;)
3147  {
3148  if (AH->remoteVersion >= 90000)
3149  printfPQExpBuffer(buf, "SELECT setconfig[%d] FROM pg_db_role_setting "
3150  "WHERE setrole = 0 AND setdatabase = '%u'::oid",
3151  count, dboid);
3152  else
3153  printfPQExpBuffer(buf, "SELECT datconfig[%d] FROM pg_database WHERE oid = '%u'::oid", count, dboid);
3154 
3155  res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);
3156 
3157  if (PQntuples(res) == 1 &&
3158  !PQgetisnull(res, 0, 0))
3159  {
3160  makeAlterConfigCommand(conn, PQgetvalue(res, 0, 0),
3161  "DATABASE", dbname, NULL, NULL,
3162  outbuf);
3163  PQclear(res);
3164  count++;
3165  }
3166  else
3167  {
3168  PQclear(res);
3169  break;
3170  }
3171  }
3172 
3173  /* Now look for role-and-database-specific options */
3174  if (AH->remoteVersion >= 90000)
3175  {
3176  /* Here we can assume we have unnest() */
3177  printfPQExpBuffer(buf, "SELECT rolname, unnest(setconfig) "
3178  "FROM pg_db_role_setting s, pg_roles r "
3179  "WHERE setrole = r.oid AND setdatabase = '%u'::oid",
3180  dboid);
3181 
3182  res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);
3183 
3184  if (PQntuples(res) > 0)
3185  {
3186  int i;
3187 
3188  for (i = 0; i < PQntuples(res); i++)
3189  makeAlterConfigCommand(conn, PQgetvalue(res, i, 1),
3190  "ROLE", PQgetvalue(res, i, 0),
3191  "DATABASE", dbname,
3192  outbuf);
3193  }
3194 
3195  PQclear(res);
3196  }
3197 
3198  destroyPQExpBuffer(buf);
3199 }
3200 
3201 /*
3202  * dumpEncoding: put the correct encoding into the archive
3203  */
3204 static void
3206 {
3207  const char *encname = pg_encoding_to_char(AH->encoding);
3209 
3210  pg_log_info("saving encoding = %s", encname);
3211 
3212  appendPQExpBufferStr(qry, "SET client_encoding = ");
3213  appendStringLiteralAH(qry, encname, AH);
3214  appendPQExpBufferStr(qry, ";\n");
3215 
3216  ArchiveEntry(AH, nilCatalogId, createDumpId(),
3217  ARCHIVE_OPTS(.tag = "ENCODING",
3218  .description = "ENCODING",
3219  .section = SECTION_PRE_DATA,
3220  .createStmt = qry->data));
3221 
3222  destroyPQExpBuffer(qry);
3223 }
3224 
3225 
3226 /*
3227  * dumpStdStrings: put the correct escape string behavior into the archive
3228  */
3229 static void
3231 {
3232  const char *stdstrings = AH->std_strings ? "on" : "off";
3234 
3235  pg_log_info("saving standard_conforming_strings = %s",
3236  stdstrings);
3237 
3238  appendPQExpBuffer(qry, "SET standard_conforming_strings = '%s';\n",
3239  stdstrings);
3240 
3241  ArchiveEntry(AH, nilCatalogId, createDumpId(),
3242  ARCHIVE_OPTS(.tag = "STDSTRINGS",
3243  .description = "STDSTRINGS",
3244  .section = SECTION_PRE_DATA,
3245  .createStmt = qry->data));
3246 
3247  destroyPQExpBuffer(qry);
3248 }
3249 
3250 /*
3251  * dumpSearchPath: record the active search_path in the archive
3252  */
3253 static void
3255 {
3257  PQExpBuffer path = createPQExpBuffer();
3258  PGresult *res;
3259  char **schemanames = NULL;
3260  int nschemanames = 0;
3261  int i;
3262 
3263  /*
3264  * We use the result of current_schemas(), not the search_path GUC,
3265  * because that might contain wildcards such as "$user", which won't
3266  * necessarily have the same value during restore. Also, this way avoids
3267  * listing schemas that may appear in search_path but not actually exist,
3268  * which seems like a prudent exclusion.
3269  */
3270  res = ExecuteSqlQueryForSingleRow(AH,
3271  "SELECT pg_catalog.current_schemas(false)");
3272 
3273  if (!parsePGArray(PQgetvalue(res, 0, 0), &schemanames, &nschemanames))
3274  fatal("could not parse result of current_schemas()");
3275 
3276  /*
3277  * We use set_config(), not a simple "SET search_path" command, because
3278  * the latter has less-clean behavior if the search path is empty. While
3279  * that's likely to get fixed at some point, it seems like a good idea to
3280  * be as backwards-compatible as possible in what we put into archives.
3281  */
3282  for (i = 0; i < nschemanames; i++)
3283  {
3284  if (i > 0)
3285  appendPQExpBufferStr(path, ", ");
3286  appendPQExpBufferStr(path, fmtId(schemanames[i]));
3287  }
3288 
3289  appendPQExpBufferStr(qry, "SELECT pg_catalog.set_config('search_path', ");
3290  appendStringLiteralAH(qry, path->data, AH);
3291  appendPQExpBufferStr(qry, ", false);\n");
3292 
3293  pg_log_info("saving search_path = %s", path->data);
3294 
3295  ArchiveEntry(AH, nilCatalogId, createDumpId(),
3296  ARCHIVE_OPTS(.tag = "SEARCHPATH",
3297  .description = "SEARCHPATH",
3298  .section = SECTION_PRE_DATA,
3299  .createStmt = qry->data));
3300 
3301  /* Also save it in AH->searchpath, in case we're doing plain text dump */
3302  AH->searchpath = pg_strdup(qry->data);
3303 
3304  if (schemanames)
3305  free(schemanames);
3306  PQclear(res);
3307  destroyPQExpBuffer(qry);
3308  destroyPQExpBuffer(path);
3309 }
3310 
3311 
3312 /*
3313  * getBlobs:
3314  * Collect schema-level data about large objects
3315  */
3316 static void
3318 {
3319  DumpOptions *dopt = fout->dopt;
3320  PQExpBuffer blobQry = createPQExpBuffer();
3321  BlobInfo *binfo;
3322  DumpableObject *bdata;
3323  PGresult *res;
3324  int ntups;
3325  int i;
3326  int i_oid;
3327  int i_lomowner;
3328  int i_lomacl;
3329  int i_rlomacl;
3330  int i_initlomacl;
3331  int i_initrlomacl;
3332 
3333  pg_log_info("reading large objects");
3334 
3335  /* Fetch BLOB OIDs, and owner/ACL data if >= 9.0 */
3336  if (fout->remoteVersion >= 90600)
3337  {
3338  PQExpBuffer acl_subquery = createPQExpBuffer();
3339  PQExpBuffer racl_subquery = createPQExpBuffer();
3340  PQExpBuffer init_acl_subquery = createPQExpBuffer();
3341  PQExpBuffer init_racl_subquery = createPQExpBuffer();
3342 
3343  buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
3344  init_racl_subquery, "l.lomacl", "l.lomowner", "'L'",
3345  dopt->binary_upgrade);
3346 
3347  appendPQExpBuffer(blobQry,
3348  "SELECT l.oid, (%s l.lomowner) AS rolname, "
3349  "%s AS lomacl, "
3350  "%s AS rlomacl, "
3351  "%s AS initlomacl, "
3352  "%s AS initrlomacl "
3353  "FROM pg_largeobject_metadata l "
3354  "LEFT JOIN pg_init_privs pip ON "
3355  "(l.oid = pip.objoid "
3356  "AND pip.classoid = 'pg_largeobject'::regclass "
3357  "AND pip.objsubid = 0) ",
3359  acl_subquery->data,
3360  racl_subquery->data,
3361  init_acl_subquery->data,
3362  init_racl_subquery->data);
3363 
3364  destroyPQExpBuffer(acl_subquery);
3365  destroyPQExpBuffer(racl_subquery);
3366  destroyPQExpBuffer(init_acl_subquery);
3367  destroyPQExpBuffer(init_racl_subquery);
3368  }
3369  else if (fout->remoteVersion >= 90000)
3370  appendPQExpBuffer(blobQry,
3371  "SELECT oid, (%s lomowner) AS rolname, lomacl, "
3372  "NULL AS rlomacl, NULL AS initlomacl, "
3373  "NULL AS initrlomacl "
3374  " FROM pg_largeobject_metadata",
3376  else
3377  appendPQExpBufferStr(blobQry,
3378  "SELECT DISTINCT loid AS oid, "
3379  "NULL::name AS rolname, NULL::oid AS lomacl, "
3380  "NULL::oid AS rlomacl, NULL::oid AS initlomacl, "
3381  "NULL::oid AS initrlomacl "
3382  " FROM pg_largeobject");
3383 
3384  res = ExecuteSqlQuery(fout, blobQry->data, PGRES_TUPLES_OK);
3385 
3386  i_oid = PQfnumber(res, "oid");
3387  i_lomowner = PQfnumber(res, "rolname");
3388  i_lomacl = PQfnumber(res, "lomacl");
3389  i_rlomacl = PQfnumber(res, "rlomacl");
3390  i_initlomacl = PQfnumber(res, "initlomacl");
3391  i_initrlomacl = PQfnumber(res, "initrlomacl");
3392 
3393  ntups = PQntuples(res);
3394 
3395  /*
3396  * Each large object has its own BLOB archive entry.
3397  */
3398  binfo = (BlobInfo *) pg_malloc(ntups * sizeof(BlobInfo));
3399 
3400  for (i = 0; i < ntups; i++)
3401  {
3402  binfo[i].dobj.objType = DO_BLOB;
3403  binfo[i].dobj.catId.tableoid = LargeObjectRelationId;
3404  binfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
3405  AssignDumpId(&binfo[i].dobj);
3406 
3407  binfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oid));
3408  binfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_lomowner));
3409  binfo[i].blobacl = pg_strdup(PQgetvalue(res, i, i_lomacl));
3410  binfo[i].rblobacl = pg_strdup(PQgetvalue(res, i, i_rlomacl));
3411  binfo[i].initblobacl = pg_strdup(PQgetvalue(res, i, i_initlomacl));
3412  binfo[i].initrblobacl = pg_strdup(PQgetvalue(res, i, i_initrlomacl));
3413 
3414  if (PQgetisnull(res, i, i_lomacl) &&
3415  PQgetisnull(res, i, i_rlomacl) &&
3416  PQgetisnull(res, i, i_initlomacl) &&
3417  PQgetisnull(res, i, i_initrlomacl))
3418  binfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
3419 
3420  /*
3421  * In binary-upgrade mode for blobs, we do *not* dump out the blob
3422  * data, as it will be copied by pg_upgrade, which simply copies the
3423  * pg_largeobject table. We *do* however dump out anything but the
3424  * data, as pg_upgrade copies just pg_largeobject, but not
3425  * pg_largeobject_metadata, after the dump is restored.
3426  */
3427  if (dopt->binary_upgrade)
3428  binfo[i].dobj.dump &= ~DUMP_COMPONENT_DATA;
3429  }
3430 
3431  /*
3432  * If we have any large objects, a "BLOBS" archive entry is needed. This
3433  * is just a placeholder for sorting; it carries no data now.
3434  */
3435  if (ntups > 0)
3436  {
3437  bdata = (DumpableObject *) pg_malloc(sizeof(DumpableObject));
3438  bdata->objType = DO_BLOB_DATA;
3439  bdata->catId = nilCatalogId;
3440  AssignDumpId(bdata);
3441  bdata->name = pg_strdup("BLOBS");
3442  }
3443 
3444  PQclear(res);
3445  destroyPQExpBuffer(blobQry);
3446 }
3447 
3448 /*
3449  * dumpBlob
3450  *
3451  * dump the definition (metadata) of the given large object
3452  */
3453 static void
3454 dumpBlob(Archive *fout, BlobInfo *binfo)
3455 {
3456  PQExpBuffer cquery = createPQExpBuffer();
3457  PQExpBuffer dquery = createPQExpBuffer();
3458 
3459  appendPQExpBuffer(cquery,
3460  "SELECT pg_catalog.lo_create('%s');\n",
3461  binfo->dobj.name);
3462 
3463  appendPQExpBuffer(dquery,
3464  "SELECT pg_catalog.lo_unlink('%s');\n",
3465  binfo->dobj.name);
3466 
3467  if (binfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
3468  ArchiveEntry(fout, binfo->dobj.catId, binfo->dobj.dumpId,
3469  ARCHIVE_OPTS(.tag = binfo->dobj.name,
3470  .owner = binfo->rolname,
3471  .description = "BLOB",
3472  .section = SECTION_PRE_DATA,
3473  .createStmt = cquery->data,
3474  .dropStmt = dquery->data));
3475 
3476  /* Dump comment if any */
3477  if (binfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3478  dumpComment(fout, "LARGE OBJECT", binfo->dobj.name,
3479  NULL, binfo->rolname,
3480  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3481 
3482  /* Dump security label if any */
3483  if (binfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3484  dumpSecLabel(fout, "LARGE OBJECT", binfo->dobj.name,
3485  NULL, binfo->rolname,
3486  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3487 
3488  /* Dump ACL if any */
3489  if (binfo->blobacl && (binfo->dobj.dump & DUMP_COMPONENT_ACL))
3490  dumpACL(fout, binfo->dobj.dumpId, InvalidDumpId, "LARGE OBJECT",
3491  binfo->dobj.name, NULL,
3492  NULL, binfo->rolname, binfo->blobacl, binfo->rblobacl,
3493  binfo->initblobacl, binfo->initrblobacl);
3494 
3495  destroyPQExpBuffer(cquery);
3496  destroyPQExpBuffer(dquery);
3497 }
3498 
3499 /*
3500  * dumpBlobs:
3501  * dump the data contents of all large objects
3502  */
3503 static int
3504 dumpBlobs(Archive *fout, void *arg)
3505 {
3506  const char *blobQry;
3507  const char *blobFetchQry;
3508  PGconn *conn = GetConnection(fout);
3509  PGresult *res;
3510  char buf[LOBBUFSIZE];
3511  int ntups;
3512  int i;
3513  int cnt;
3514 
3515  pg_log_info("saving large objects");
3516 
3517  /*
3518  * Currently, we re-fetch all BLOB OIDs using a cursor. Consider scanning
3519  * the already-in-memory dumpable objects instead...
3520  */
3521  if (fout->remoteVersion >= 90000)
3522  blobQry =
3523  "DECLARE bloboid CURSOR FOR "
3524  "SELECT oid FROM pg_largeobject_metadata ORDER BY 1";
3525  else
3526  blobQry =
3527  "DECLARE bloboid CURSOR FOR "
3528  "SELECT DISTINCT loid FROM pg_largeobject ORDER BY 1";
3529 
3530  ExecuteSqlStatement(fout, blobQry);
3531 
3532  /* Command to fetch from cursor */
3533  blobFetchQry = "FETCH 1000 IN bloboid";
3534 
3535  do
3536  {
3537  /* Do a fetch */
3538  res = ExecuteSqlQuery(fout, blobFetchQry, PGRES_TUPLES_OK);
3539 
3540  /* Process the tuples, if any */
3541  ntups = PQntuples(res);
3542  for (i = 0; i < ntups; i++)
3543  {
3544  Oid blobOid;
3545  int loFd;
3546 
3547  blobOid = atooid(PQgetvalue(res, i, 0));
3548  /* Open the BLOB */
3549  loFd = lo_open(conn, blobOid, INV_READ);
3550  if (loFd == -1)
3551  fatal("could not open large object %u: %s",
3552  blobOid, PQerrorMessage(conn));
3553 
3554  StartBlob(fout, blobOid);
3555 
3556  /* Now read it in chunks, sending data to archive */
3557  do
3558  {
3559  cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
3560  if (cnt < 0)
3561  fatal("error reading large object %u: %s",
3562  blobOid, PQerrorMessage(conn));
3563 
3564  WriteData(fout, buf, cnt);
3565  } while (cnt > 0);
3566 
3567  lo_close(conn, loFd);
3568 
3569  EndBlob(fout, blobOid);
3570  }
3571 
3572  PQclear(res);
3573  } while (ntups > 0);
3574 
3575  return 1;
3576 }
3577 
/*
 * getPolicies
 *	  get information about policies on a dumpable table.
 *
 * For each table to be dumped, this creates:
 *	 - one PolicyInfo with null polname if RLS is enabled on the table
 *	   (dumped later as ALTER TABLE ... ENABLE ROW LEVEL SECURITY), and
 *	 - one PolicyInfo per explicit policy found in pg_policy.
 * No-op on servers older than 9.5, which lack row-level security.
 */
void
getPolicies(Archive *fout, TableInfo tblinfo[], int numTables)
{
	PQExpBuffer query;
	PGresult   *res;
	PolicyInfo *polinfo;
	int			i_oid;
	int			i_tableoid;
	int			i_polname;
	int			i_polcmd;
	int			i_polpermissive;
	int			i_polroles;
	int			i_polqual;
	int			i_polwithcheck;
	int			i,
				j,
				ntups;

	/* Row-level security first appeared in 9.5 */
	if (fout->remoteVersion < 90500)
		return;

	query = createPQExpBuffer();

	for (i = 0; i < numTables; i++)
	{
		TableInfo  *tbinfo = &tblinfo[i];

		/* Ignore row security on tables not to be dumped */
		if (!(tbinfo->dobj.dump & DUMP_COMPONENT_POLICY))
			continue;

		pg_log_info("reading row security enabled for table \"%s.%s\"",
					tbinfo->dobj.namespace->dobj.name,
					tbinfo->dobj.name);

		/*
		 * Get row security enabled information for the table. We represent
		 * RLS being enabled on a table by creating a PolicyInfo object with
		 * null polname.
		 */
		if (tbinfo->rowsec)
		{
			/*
			 * Note: use tableoid 0 so that this object won't be mistaken for
			 * something that pg_depend entries apply to.
			 */
			polinfo = pg_malloc(sizeof(PolicyInfo));
			polinfo->dobj.objType = DO_POLICY;
			polinfo->dobj.catId.tableoid = 0;
			polinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
			AssignDumpId(&polinfo->dobj);
			polinfo->dobj.namespace = tbinfo->dobj.namespace;
			polinfo->dobj.name = pg_strdup(tbinfo->dobj.name);
			polinfo->poltable = tbinfo;
			polinfo->polname = NULL;	/* marks "RLS enabled", not a policy */
			polinfo->polcmd = '\0';
			polinfo->polpermissive = 0;
			polinfo->polroles = NULL;
			polinfo->polqual = NULL;
			polinfo->polwithcheck = NULL;
		}

		pg_log_info("reading policies for table \"%s.%s\"",
					tbinfo->dobj.namespace->dobj.name,
					tbinfo->dobj.name);

		resetPQExpBuffer(query);

		/*
		 * Get the policies for the table.  polpermissive was added in v10;
		 * for older servers every policy is permissive, so fake 't'.
		 */
		if (fout->remoteVersion >= 100000)
			appendPQExpBuffer(query,
							  "SELECT oid, tableoid, pol.polname, pol.polcmd, pol.polpermissive, "
							  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
							  "   pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
							  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
							  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
							  "FROM pg_catalog.pg_policy pol "
							  "WHERE polrelid = '%u'",
							  tbinfo->dobj.catId.oid);
		else
			appendPQExpBuffer(query,
							  "SELECT oid, tableoid, pol.polname, pol.polcmd, 't' as polpermissive, "
							  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
							  "   pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
							  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
							  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
							  "FROM pg_catalog.pg_policy pol "
							  "WHERE polrelid = '%u'",
							  tbinfo->dobj.catId.oid);
		res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);

		ntups = PQntuples(res);

		if (ntups == 0)
		{
			/*
			 * No explicit policies to handle (only the default-deny policy,
			 * which is handled as part of the table definition).  Clean up
			 * and return.
			 */
			PQclear(res);
			continue;
		}

		i_oid = PQfnumber(res, "oid");
		i_tableoid = PQfnumber(res, "tableoid");
		i_polname = PQfnumber(res, "polname");
		i_polcmd = PQfnumber(res, "polcmd");
		i_polpermissive = PQfnumber(res, "polpermissive");
		i_polroles = PQfnumber(res, "polroles");
		i_polqual = PQfnumber(res, "polqual");
		i_polwithcheck = PQfnumber(res, "polwithcheck");

		polinfo = pg_malloc(ntups * sizeof(PolicyInfo));

		for (j = 0; j < ntups; j++)
		{
			polinfo[j].dobj.objType = DO_POLICY;
			polinfo[j].dobj.catId.tableoid =
				atooid(PQgetvalue(res, j, i_tableoid));
			polinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
			AssignDumpId(&polinfo[j].dobj);
			polinfo[j].dobj.namespace = tbinfo->dobj.namespace;
			polinfo[j].poltable = tbinfo;
			polinfo[j].polname = pg_strdup(PQgetvalue(res, j, i_polname));
			polinfo[j].dobj.name = pg_strdup(polinfo[j].polname);

			polinfo[j].polcmd = *(PQgetvalue(res, j, i_polcmd));
			polinfo[j].polpermissive = *(PQgetvalue(res, j, i_polpermissive)) == 't';

			/* NULL polroles means the policy applies to PUBLIC */
			if (PQgetisnull(res, j, i_polroles))
				polinfo[j].polroles = NULL;
			else
				polinfo[j].polroles = pg_strdup(PQgetvalue(res, j, i_polroles));

			if (PQgetisnull(res, j, i_polqual))
				polinfo[j].polqual = NULL;
			else
				polinfo[j].polqual = pg_strdup(PQgetvalue(res, j, i_polqual));

			if (PQgetisnull(res, j, i_polwithcheck))
				polinfo[j].polwithcheck = NULL;
			else
				polinfo[j].polwithcheck
					= pg_strdup(PQgetvalue(res, j, i_polwithcheck));
		}
		PQclear(res);
	}
	destroyPQExpBuffer(query);
}
3732 
3733 /*
3734  * dumpPolicy
3735  * dump the definition of the given policy
3736  */
3737 static void
3739 {
3740  DumpOptions *dopt = fout->dopt;
3741  TableInfo *tbinfo = polinfo->poltable;
3742  PQExpBuffer query;
3743  PQExpBuffer delqry;
3744  PQExpBuffer polprefix;
3745  char *qtabname;
3746  const char *cmd;
3747  char *tag;
3748 
3749  if (dopt->dataOnly)
3750  return;
3751 
3752  /*
3753  * If polname is NULL, then this record is just indicating that ROW LEVEL
3754  * SECURITY is enabled for the table. Dump as ALTER TABLE <table> ENABLE
3755  * ROW LEVEL SECURITY.
3756  */
3757  if (polinfo->polname == NULL)
3758  {
3759  query = createPQExpBuffer();
3760 
3761  appendPQExpBuffer(query, "ALTER TABLE %s ENABLE ROW LEVEL SECURITY;",
3762  fmtQualifiedDumpable(tbinfo));
3763 
3764  /*
3765  * We must emit the ROW SECURITY object's dependency on its table
3766  * explicitly, because it will not match anything in pg_depend (unlike
3767  * the case for other PolicyInfo objects).
3768  */
3769  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3770  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3771  ARCHIVE_OPTS(.tag = polinfo->dobj.name,
3772  .namespace = polinfo->dobj.namespace->dobj.name,
3773  .owner = tbinfo->rolname,
3774  .description = "ROW SECURITY",
3775  .section = SECTION_POST_DATA,
3776  .createStmt = query->data,
3777  .deps = &(tbinfo->dobj.dumpId),
3778  .nDeps = 1));
3779 
3780  destroyPQExpBuffer(query);
3781  return;
3782  }
3783 
3784  if (polinfo->polcmd == '*')
3785  cmd = "";
3786  else if (polinfo->polcmd == 'r')
3787  cmd = " FOR SELECT";
3788  else if (polinfo->polcmd == 'a')
3789  cmd = " FOR INSERT";
3790  else if (polinfo->polcmd == 'w')
3791  cmd = " FOR UPDATE";
3792  else if (polinfo->polcmd == 'd')
3793  cmd = " FOR DELETE";
3794  else
3795  {
3796  pg_log_error("unexpected policy command type: %c",
3797  polinfo->polcmd);
3798  exit_nicely(1);
3799  }
3800 
3801  query = createPQExpBuffer();
3802  delqry = createPQExpBuffer();
3803  polprefix = createPQExpBuffer();
3804 
3805  qtabname = pg_strdup(fmtId(tbinfo->dobj.name));
3806 
3807  appendPQExpBuffer(query, "CREATE POLICY %s", fmtId(polinfo->polname));
3808 
3809  appendPQExpBuffer(query, " ON %s%s%s", fmtQualifiedDumpable(tbinfo),
3810  !polinfo->polpermissive ? " AS RESTRICTIVE" : "", cmd);
3811 
3812  if (polinfo->polroles != NULL)
3813  appendPQExpBuffer(query, " TO %s", polinfo->polroles);
3814 
3815  if (polinfo->polqual != NULL)
3816  appendPQExpBuffer(query, " USING (%s)", polinfo->polqual);
3817 
3818  if (polinfo->polwithcheck != NULL)
3819  appendPQExpBuffer(query, " WITH CHECK (%s)", polinfo->polwithcheck);
3820 
3821  appendPQExpBufferStr(query, ";\n");
3822 
3823  appendPQExpBuffer(delqry, "DROP POLICY %s", fmtId(polinfo->polname));
3824  appendPQExpBuffer(delqry, " ON %s;\n", fmtQualifiedDumpable(tbinfo));
3825 
3826  appendPQExpBuffer(polprefix, "POLICY %s ON",
3827  fmtId(polinfo->polname));
3828 
3829  tag = psprintf("%s %s", tbinfo->dobj.name, polinfo->dobj.name);
3830 
3831  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3832  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3833  ARCHIVE_OPTS(.tag = tag,
3834  .namespace = polinfo->dobj.namespace->dobj.name,
3835  .owner = tbinfo->rolname,
3836  .description = "POLICY",
3837  .section = SECTION_POST_DATA,
3838  .createStmt = query->data,
3839  .dropStmt = delqry->data));
3840 
3841  if (polinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3842  dumpComment(fout, polprefix->data, qtabname,
3843  tbinfo->dobj.namespace->dobj.name, tbinfo->rolname,
3844  polinfo->dobj.catId, 0, polinfo->dobj.dumpId);
3845 
3846  free(tag);
3847  destroyPQExpBuffer(query);
3848  destroyPQExpBuffer(delqry);
3849  destroyPQExpBuffer(polprefix);
3850  free(qtabname);
3851 }
3852 
3853 /*
3854  * getPublications
3855  * get information about publications
3856  */
3857 void
3859 {
3860  DumpOptions *dopt = fout->dopt;
3861  PQExpBuffer query;
3862  PGresult *res;
3863  PublicationInfo *pubinfo;
3864  int i_tableoid;
3865  int i_oid;
3866  int i_pubname;
3867  int i_rolname;
3868  int i_puballtables;
3869  int i_pubinsert;
3870  int i_pubupdate;
3871  int i_pubdelete;
3872  int i_pubtruncate;
3873  int i_pubviaroot;
3874  int i,
3875  ntups;
3876 
3877  if (dopt->no_publications || fout->remoteVersion < 100000)
3878  return;
3879 
3880  query = createPQExpBuffer();
3881 
3882  resetPQExpBuffer(query);
3883 
3884  /* Get the publications. */
3885  if (fout->remoteVersion >= 130000)
3886  appendPQExpBuffer(query,
3887  "SELECT p.tableoid, p.oid, p.pubname, "
3888  "(%s p.pubowner) AS rolname, "
3889  "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, p.pubtruncate, p.pubviaroot "
3890  "FROM pg_publication p",
3892  else if (fout->remoteVersion >= 110000)
3893  appendPQExpBuffer(query,
3894  "SELECT p.tableoid, p.oid, p.pubname, "
3895  "(%s p.pubowner) AS rolname, "
3896  "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, p.pubtruncate, false AS pubviaroot "
3897  "FROM pg_publication p",
3899  else
3900  appendPQExpBuffer(query,
3901  "SELECT p.tableoid, p.oid, p.pubname, "
3902  "(%s p.pubowner) AS rolname, "
3903  "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, false AS pubtruncate, false AS pubviaroot "
3904  "FROM pg_publication p",
3906 
3907  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3908 
3909  ntups = PQntuples(res);
3910 
3911  i_tableoid = PQfnumber(res, "tableoid");
3912  i_oid = PQfnumber(res, "oid");
3913  i_pubname = PQfnumber(res, "pubname");
3914  i_rolname = PQfnumber(res, "rolname");
3915  i_puballtables = PQfnumber(res, "puballtables");
3916  i_pubinsert = PQfnumber(res, "pubinsert");
3917  i_pubupdate = PQfnumber(res, "pubupdate");
3918  i_pubdelete = PQfnumber(res, "pubdelete");
3919  i_pubtruncate = PQfnumber(res, "pubtruncate");
3920  i_pubviaroot = PQfnumber(res, "pubviaroot");
3921 
3922  pubinfo = pg_malloc(ntups * sizeof(PublicationInfo));
3923 
3924  for (i = 0; i < ntups; i++)
3925  {
3926  pubinfo[i].dobj.objType = DO_PUBLICATION;
3927  pubinfo[i].dobj.catId.tableoid =
3928  atooid(PQgetvalue(res, i, i_tableoid));
3929  pubinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
3930  AssignDumpId(&pubinfo[i].dobj);
3931  pubinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_pubname));
3932  pubinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
3933  pubinfo[i].puballtables =
3934  (strcmp(PQgetvalue(res, i, i_puballtables), "t") == 0);
3935  pubinfo[i].pubinsert =
3936  (strcmp(PQgetvalue(res, i, i_pubinsert), "t") == 0);
3937  pubinfo[i].pubupdate =
3938  (strcmp(PQgetvalue(res, i, i_pubupdate), "t") == 0);
3939  pubinfo[i].pubdelete =
3940  (strcmp(PQgetvalue(res, i, i_pubdelete), "t") == 0);
3941  pubinfo[i].pubtruncate =
3942  (strcmp(PQgetvalue(res, i, i_pubtruncate), "t") == 0);
3943  pubinfo[i].pubviaroot =
3944  (strcmp(PQgetvalue(res, i, i_pubviaroot), "t") == 0);
3945 
3946  if (strlen(pubinfo[i].rolname) == 0)
3947  pg_log_warning("owner of publication \"%s\" appears to be invalid",
3948  pubinfo[i].dobj.name);
3949 
3950  /* Decide whether we want to dump it */
3951  selectDumpableObject(&(pubinfo[i].dobj), fout);
3952  }
3953  PQclear(res);
3954 
3955  destroyPQExpBuffer(query);
3956 }
3957 
3958 /*
3959  * dumpPublication
3960  * dump the definition of the given publication
3961  */
3962 static void
3964 {
3965  PQExpBuffer delq;
3966  PQExpBuffer query;
3967  char *qpubname;
3968  bool first = true;
3969 
3970  if (!(pubinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3971  return;
3972 
3973  delq = createPQExpBuffer();
3974  query = createPQExpBuffer();
3975 
3976  qpubname = pg_strdup(fmtId(pubinfo->dobj.name));
3977 
3978  appendPQExpBuffer(delq, "DROP PUBLICATION %s;\n",
3979  qpubname);
3980 
3981  appendPQExpBuffer(query, "CREATE PUBLICATION %s",
3982  qpubname);
3983 
3984  if (pubinfo->puballtables)
3985  appendPQExpBufferStr(query, " FOR ALL TABLES");
3986 
3987  appendPQExpBufferStr(query, " WITH (publish = '");
3988  if (pubinfo->pubinsert)
3989  {
3990  appendPQExpBufferStr(query, "insert");
3991  first = false;
3992  }
3993 
3994  if (pubinfo->pubupdate)
3995  {
3996  if (!first)
3997  appendPQExpBufferStr(query, ", ");
3998 
3999  appendPQExpBufferStr(query, "update");
4000  first = false;
4001  }
4002 
4003  if (pubinfo->pubdelete)
4004  {
4005  if (!first)
4006  appendPQExpBufferStr(query, ", ");
4007 
4008  appendPQExpBufferStr(query, "delete");
4009  first = false;
4010  }
4011 
4012  if (pubinfo->pubtruncate)
4013  {
4014  if (!first)
4015  appendPQExpBufferStr(query, ", ");
4016 
4017  appendPQExpBufferStr(query, "truncate");
4018  first = false;
4019  }
4020 
4021  appendPQExpBufferStr(query, "'");
4022 
4023  if (pubinfo->pubviaroot)
4024  appendPQExpBufferStr(query, ", publish_via_partition_root = true");
4025 
4026  appendPQExpBufferStr(query, ");\n");
4027 
4028  ArchiveEntry(fout, pubinfo->dobj.catId, pubinfo->dobj.dumpId,
4029  ARCHIVE_OPTS(.tag = pubinfo->dobj.name,
4030  .owner = pubinfo->rolname,
4031  .description = "PUBLICATION",
4032  .section = SECTION_POST_DATA,
4033  .createStmt = query->data,
4034  .dropStmt = delq->data));
4035 
4036  if (pubinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
4037  dumpComment(fout, "PUBLICATION", qpubname,
4038  NULL, pubinfo->rolname,
4039  pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
4040 
4041  if (pubinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
4042  dumpSecLabel(fout, "PUBLICATION", qpubname,
4043  NULL, pubinfo->rolname,
4044  pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
4045 
4046  destroyPQExpBuffer(delq);
4047  destroyPQExpBuffer(query);
4048  free(qpubname);
4049 }
4050 
4051 /*
4052  * getPublicationTables
4053  * get information about publication membership for dumpable tables.
4054  */
4055 void
4057 {
4058  PQExpBuffer query;
4059  PGresult *res;
4060  PublicationRelInfo *pubrinfo;
4061  DumpOptions *dopt = fout->dopt;
4062  int i_tableoid;
4063  int i_oid;
4064  int i_pubname;
4065  int i,
4066  j,
4067  ntups;
4068 
4069  if (dopt->no_publications || fout->remoteVersion < 100000)
4070  return;
4071 
4072  query = createPQExpBuffer();
4073 
4074  for (i = 0; i < numTables; i++)
4075  {
4076  TableInfo *tbinfo = &tblinfo[i];
4077 
4078  /*
4079  * Only regular and partitioned tables can be added to publications.
4080  */
4081  if (tbinfo->relkind != RELKIND_RELATION &&
4082  tbinfo->relkind != RELKIND_PARTITIONED_TABLE)
4083  continue;
4084 
4085  /*
4086  * Ignore publication membership of tables whose definitions are not
4087  * to be dumped.
4088  */
4089  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
4090  continue;
4091 
4092  pg_log_info("reading publication membership for table \"%s.%s\"",
4093  tbinfo->dobj.namespace->dobj.name,
4094  tbinfo->dobj.name);
4095 
4096  resetPQExpBuffer(query);
4097 
4098  /* Get the publication membership for the table. */
4099  appendPQExpBuffer(query,
4100  "SELECT pr.tableoid, pr.oid, p.pubname "
4101  "FROM pg_publication_rel pr, pg_publication p "
4102  "WHERE pr.prrelid = '%u'"
4103  " AND p.oid = pr.prpubid",
4104  tbinfo->dobj.catId.oid);
4105  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4106 
4107  ntups = PQntuples(res);
4108 
4109  if (ntups == 0)
4110  {
4111  /*
4112  * Table is not member of any publications. Clean up and return.
4113  */
4114  PQclear(res);
4115  continue;
4116  }
4117 
4118  i_tableoid = PQfnumber(res, "tableoid");
4119  i_oid = PQfnumber(res, "oid");
4120  i_pubname = PQfnumber(res, "pubname");
4121 
4122  pubrinfo = pg_malloc(ntups * sizeof(PublicationRelInfo));
4123 
4124  for (j = 0; j < ntups; j++)
4125  {
4126  pubrinfo[j].dobj.objType = DO_PUBLICATION_REL;
4127  pubrinfo[j].dobj.catId.tableoid =
4128  atooid(PQgetvalue(res, j, i_tableoid));
4129  pubrinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
4130  AssignDumpId(&pubrinfo[j].dobj);
4131  pubrinfo[j].dobj.namespace = tbinfo->dobj.namespace;
4132  pubrinfo[j].dobj.name = tbinfo->dobj.name;
4133  pubrinfo[j].pubname = pg_strdup(PQgetvalue(res, j, i_pubname));
4134  pubrinfo[j].pubtable = tbinfo;
4135 
4136  /* Decide whether we want to dump it */
4137  selectDumpablePublicationTable(&(pubrinfo[j].dobj), fout);
4138  }
4139  PQclear(res);
4140  }
4141  destroyPQExpBuffer(query);
4142 }
4143 
4144 /*
4145  * dumpPublicationTable
4146  * dump the definition of the given publication table mapping
4147  */
4148 static void
4150 {
4151  TableInfo *tbinfo = pubrinfo->pubtable;
4152  PQExpBuffer query;
4153  char *tag;
4154 
4155  if (!(pubrinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
4156  return;
4157 
4158  tag = psprintf("%s %s", pubrinfo->pubname, tbinfo->dobj.name);
4159 
4160  query = createPQExpBuffer();
4161 
4162  appendPQExpBuffer(query, "ALTER PUBLICATION %s ADD TABLE ONLY",
4163  fmtId(pubrinfo->pubname));
4164  appendPQExpBuffer(query, " %s;\n",
4165  fmtQualifiedDumpable(tbinfo));
4166 
4167  /*
4168  * There is no point in creating drop query as the drop is done by table
4169  * drop.
4170  */
4171  ArchiveEntry(fout, pubrinfo->dobj.catId, pubrinfo->dobj.dumpId,
4172  ARCHIVE_OPTS(.tag = tag,
4173  .namespace = tbinfo->dobj.namespace->dobj.name,
4174  .description = "PUBLICATION TABLE",
4175  .section = SECTION_POST_DATA,
4176  .createStmt = query->data));
4177 
4178  free(tag);
4179  destroyPQExpBuffer(query);
4180 }
4181 
4182 /*
4183  * Is the currently connected user a superuser?
4184  */
4185 static bool
4187 {
4188  ArchiveHandle *AH = (ArchiveHandle *) fout;
4189  const char *val;
4190 
4191  val = PQparameterStatus(AH->connection, "is_superuser");
4192 
4193  if (val && strcmp(val, "on") == 0)
4194  return true;
4195 
4196  return false;
4197 }
4198 
4199 /*
4200  * getSubscriptions
4201  * get information about subscriptions
4202  */
4203 void
4205 {
4206  DumpOptions *dopt = fout->dopt;
4207  PQExpBuffer query;
4208  PGresult *res;
4209  SubscriptionInfo *subinfo;
4210  int i_tableoid;
4211  int i_oid;
4212  int i_subname;
4213  int i_rolname;
4214  int i_substream;
4215  int i_subconninfo;
4216  int i_subslotname;
4217  int i_subsynccommit;
4218  int i_subpublications;
4219  int i_subbinary;
4220  int i,
4221  ntups;
4222 
4223  if (dopt->no_subscriptions || fout->remoteVersion < 100000)
4224  return;
4225 
4226  if (!is_superuser(fout))
4227  {
4228  int n;
4229 
4230  res = ExecuteSqlQuery(fout,
4231  "SELECT count(*) FROM pg_subscription "
4232  "WHERE subdbid = (SELECT oid FROM pg_database"
4233  " WHERE datname = current_database())",
4234  PGRES_TUPLES_OK);
4235  n = atoi(PQgetvalue(res, 0, 0));
4236  if (n > 0)
4237  pg_log_warning("subscriptions not dumped because current user is not a superuser");
4238  PQclear(res);
4239  return;
4240  }
4241 
4242  query = createPQExpBuffer();
4243 
4244  /* Get the subscriptions in current database. */
4245  appendPQExpBuffer(query,
4246  "SELECT s.tableoid, s.oid, s.subname,\n"
4247  " (%s s.subowner) AS rolname,\n"
4248  " s.subconninfo, s.subslotname, s.subsynccommit,\n"
4249  " s.subpublications,\n",
4251 
4252  if (fout->remoteVersion >= 140000)
4253  appendPQExpBufferStr(query, " s.subbinary,\n");
4254  else
4255  appendPQExpBufferStr(query, " false AS subbinary,\n");
4256 
4257  if (fout->remoteVersion >= 140000)
4258  appendPQExpBufferStr(query, " s.substream\n");
4259  else
4260  appendPQExpBufferStr(query, " false AS substream\n");
4261 
4262  appendPQExpBufferStr(query,
4263  "FROM pg_subscription s\n"
4264  "WHERE s.subdbid = (SELECT oid FROM pg_database\n"
4265  " WHERE datname = current_database())");
4266 
4267  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4268 
4269  ntups = PQntuples(res);
4270 
4271  i_tableoid = PQfnumber(res, "tableoid");
4272  i_oid = PQfnumber(res, "oid");
4273  i_subname = PQfnumber(res, "subname");
4274  i_rolname = PQfnumber(res, "rolname");
4275  i_subconninfo = PQfnumber(res, "subconninfo");
4276  i_subslotname = PQfnumber(res, "subslotname");
4277  i_subsynccommit = PQfnumber(res, "subsynccommit");
4278  i_subpublications = PQfnumber(res, "subpublications");
4279  i_subbinary = PQfnumber(res, "subbinary");
4280  i_substream = PQfnumber(res, "substream");
4281 
4282  subinfo = pg_malloc(ntups * sizeof(SubscriptionInfo));
4283 
4284  for (i = 0; i < ntups; i++)
4285  {
4286  subinfo[i].dobj.objType = DO_SUBSCRIPTION;
4287  subinfo[i].dobj.catId.tableoid =
4288  atooid(PQgetvalue(res, i, i_tableoid));
4289  subinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4290  AssignDumpId(&subinfo[i].dobj);
4291  subinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_subname));
4292  subinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4293  subinfo[i].subconninfo = pg_strdup(PQgetvalue(res, i, i_subconninfo));
4294  if (PQgetisnull(res, i, i_subslotname))
4295  subinfo[i].subslotname = NULL;
4296  else
4297  subinfo[i].subslotname = pg_strdup(PQgetvalue(res, i, i_subslotname));
4298  subinfo[i].subsynccommit =
4299  pg_strdup(PQgetvalue(res, i, i_subsynccommit));
4300  subinfo[i].subpublications =
4301  pg_strdup(PQgetvalue(res, i, i_subpublications));
4302  subinfo[i].subbinary =
4303  pg_strdup(PQgetvalue(res, i, i_subbinary));
4304  subinfo[i].substream =
4305  pg_strdup(PQgetvalue(res, i, i_substream));
4306 
4307  if (strlen(subinfo[i].rolname) == 0)
4308  pg_log_warning("owner of subscription \"%s\" appears to be invalid",
4309  subinfo[i].dobj.name);
4310 
4311  /* Decide whether we want to dump it */
4312  selectDumpableObject(&(subinfo[i].dobj), fout);
4313  }
4314  PQclear(res);
4315 
4316  destroyPQExpBuffer(query);
4317 }
4318 
4319 /*
4320  * dumpSubscription
4321  * dump the definition of the given subscription
4322  */
4323 static void
4325 {
4326  PQExpBuffer delq;
4327  PQExpBuffer query;
4328  PQExpBuffer publications;
4329  char *qsubname;
4330  char **pubnames = NULL;
4331  int npubnames = 0;
4332  int i;
4333 
4334  if (!(subinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
4335  return;
4336 
4337  delq = createPQExpBuffer();
4338  query = createPQExpBuffer();
4339 
4340  qsubname = pg_strdup(fmtId(subinfo->dobj.name));
4341 
4342  appendPQExpBuffer(delq, "DROP SUBSCRIPTION %s;\n",
4343  qsubname);
4344 
4345  appendPQExpBuffer(query, "CREATE SUBSCRIPTION %s CONNECTION ",
4346  qsubname);
4347  appendStringLiteralAH(query, subinfo->subconninfo, fout);
4348 
4349  /* Build list of quoted publications and append them to query. */
4350  if (!parsePGArray(subinfo->subpublications, &pubnames, &npubnames))
4351  {
4352  pg_log_warning("could not parse subpublications array");
4353  if (pubnames)
4354  free(pubnames);
4355  pubnames = NULL;
4356  npubnames = 0;
4357  }
4358 
4359  publications = createPQExpBuffer();
4360  for (i = 0; i < npubnames; i++)
4361  {
4362  if (i > 0)
4363  appendPQExpBufferStr(publications, ", ");
4364 
4365  appendPQExpBufferStr(publications, fmtId(pubnames[i]));
4366  }
4367 
4368  appendPQExpBuffer(query, " PUBLICATION %s WITH (connect = false, slot_name = ", publications->data);
4369  if (subinfo->subslotname)
4370  appendStringLiteralAH(query, subinfo->subslotname, fout);
4371  else
4372  appendPQExpBufferStr(query, "NONE");
4373 
4374  if (strcmp(subinfo->subbinary, "t") == 0)
4375  appendPQExpBufferStr(query, ", binary = true");
4376 
4377  if (strcmp(subinfo->substream, "f") != 0)
4378  appendPQExpBufferStr(query, ", streaming = on");
4379 
4380  if (strcmp(subinfo->subsynccommit, "off") != 0)
4381  appendPQExpBuffer(query, ", synchronous_commit = %s", fmtId(subinfo->subsynccommit));
4382 
4383  appendPQExpBufferStr(query, ");\n");
4384 
4385  ArchiveEntry(fout, subinfo->dobj.catId, subinfo->dobj.dumpId,
4386  ARCHIVE_OPTS(.tag = subinfo->dobj.name,
4387  .owner = subinfo->rolname,
4388  .description = "SUBSCRIPTION",
4389  .section = SECTION_POST_DATA,
4390  .createStmt = query->data,
4391  .dropStmt = delq->data));
4392 
4393  if (subinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
4394  dumpComment(fout, "SUBSCRIPTION", qsubname,
4395  NULL, subinfo->rolname,
4396  subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
4397 
4398  if (subinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
4399  dumpSecLabel(fout, "SUBSCRIPTION", qsubname,
4400  NULL, subinfo->rolname,
4401  subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
4402 
4403  destroyPQExpBuffer(publications);
4404  if (pubnames)
4405  free(pubnames);
4406 
4407  destroyPQExpBuffer(delq);
4408  destroyPQExpBuffer(query);
4409  free(qsubname);
4410 }
4411 
4412 /*
4413  * Given a "create query", append as many ALTER ... DEPENDS ON EXTENSION as
4414  * the object needs.
4415  */
4416 static void
4418  PQExpBuffer create,
4419  DumpableObject *dobj,
4420  const char *catalog,
4421  const char *keyword,
4422  const char *objname)
4423 {
4424  if (dobj->depends_on_ext)
4425  {
4426  char *nm;
4427  PGresult *res;
4428  PQExpBuffer query;
4429  int ntups;
4430  int i_extname;
4431  int i;
4432 
4433  /* dodge fmtId() non-reentrancy */
4434  nm = pg_strdup(objname);
4435 
4436  query = createPQExpBuffer();
4437  appendPQExpBuffer(query,
4438  "SELECT e.extname "
4439  "FROM pg_catalog.pg_depend d, pg_catalog.pg_extension e "
4440  "WHERE d.refobjid = e.oid AND classid = '%s'::pg_catalog.regclass "
4441  "AND objid = '%u'::pg_catalog.oid AND deptype = 'x' "
4442  "AND refclassid = 'pg_catalog.pg_extension'::pg_catalog.regclass",
4443  catalog,
4444  dobj->catId.oid);
4445  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4446  ntups = PQntuples(res);
4447  i_extname = PQfnumber(res, "extname");
4448  for (i = 0; i < ntups; i++)
4449  {
4450  appendPQExpBuffer(create, "ALTER %s %s DEPENDS ON EXTENSION %s;\n",
4451  keyword, nm,
4452  fmtId(PQgetvalue(res, i, i_extname)));
4453  }
4454 
4455  PQclear(res);
4456  destroyPQExpBuffer(query);
4457  pg_free(nm);
4458  }
4459 }
4460 
4461 
4462 static void
4464  PQExpBuffer upgrade_buffer,
4465  Oid pg_type_oid,
4466  bool force_array_type)
4467 {
4468  PQExpBuffer upgrade_query = createPQExpBuffer();
4469  PGresult *res;
4470  Oid pg_type_array_oid;
4471 
4472  appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type oid\n");
4473  appendPQExpBuffer(upgrade_buffer,
4474  "SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4475  pg_type_oid);
4476 
4477  /* we only support old >= 8.3 for binary upgrades */
4478  appendPQExpBuffer(upgrade_query,
4479  "SELECT typarray "
4480  "FROM pg_catalog.pg_type "
4481  "WHERE oid = '%u'::pg_catalog.oid;",
4482  pg_type_oid);
4483 
4484  res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4485 
4486  pg_type_array_oid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typarray")));
4487 
4488  PQclear(res);
4489 
4490  if (!OidIsValid(pg_type_array_oid) && force_array_type)
4491  {
4492  /*
4493  * If the old version didn't assign an array type, but the new version
4494  * does, we must select an unused type OID to assign. This currently
4495  * only happens for domains, when upgrading pre-v11 to v11 and up.
4496  *
4497  * Note: local state here is kind of ugly, but we must have some,
4498  * since we mustn't choose the same unused OID more than once.
4499  */
4500  static Oid next_possible_free_oid = FirstNormalObjectId;
4501  bool is_dup;
4502 
4503  do
4504  {
4505  ++next_possible_free_oid;
4506  printfPQExpBuffer(upgrade_query,
4507  "SELECT EXISTS(SELECT 1 "
4508  "FROM pg_catalog.pg_type "
4509  "WHERE oid = '%u'::pg_catalog.oid);",
4510  next_possible_free_oid);
4511  res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4512  is_dup = (PQgetvalue(res, 0, 0)[0] == 't');
4513  PQclear(res);
4514  } while (is_dup);
4515 
4516  pg_type_array_oid = next_possible_free_oid;
4517  }
4518 
4519  if (OidIsValid(pg_type_array_oid))
4520  {
4521  appendPQExpBufferStr(upgrade_buffer,
4522  "\n-- For binary upgrade, must preserve pg_type array oid\n");
4523  appendPQExpBuffer(upgrade_buffer,
4524  "SELECT pg_catalog.binary_upgrade_set_next_array_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4525  pg_type_array_oid);
4526  }
4527 
4528  destroyPQExpBuffer(upgrade_query);
4529 }
4530 
4531 static void
4533  PQExpBuffer upgrade_buffer,
4534  Oid pg_rel_oid)
4535 {
4536  PQExpBuffer upgrade_query = createPQExpBuffer();
4537  PGresult *upgrade_res;
4538  Oid pg_type_oid;
4539 
4540  appendPQExpBuffer(upgrade_query,
4541  "SELECT c.reltype AS crel "
4542  "FROM pg_catalog.pg_class c "
4543  "WHERE c.oid = '%u'::pg_catalog.oid;",
4544  pg_rel_oid);
4545 
4546  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4547 
4548  pg_type_oid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "crel")));
4549 
4550  if (OidIsValid(pg_type_oid))
4551  binary_upgrade_set_type_oids_by_type_oid(fout, upgrade_buffer,
4552  pg_type_oid, false);
4553 
4554  PQclear(upgrade_res);
4555  destroyPQExpBuffer(upgrade_query);
4556 }
4557 
4558 static void
4560  PQExpBuffer upgrade_buffer, Oid pg_class_oid,
4561  bool is_index)
4562 {
4563  appendPQExpBufferStr(upgrade_buffer,
4564  "\n-- For binary upgrade, must preserve pg_class oids\n");
4565 
4566  if (!is_index)
4567  {
4568  PQExpBuffer upgrade_query = createPQExpBuffer();
4569  PGresult *upgrade_res;
4570  Oid pg_class_reltoastrelid;
4571  char pg_class_relkind;
4572  Oid pg_index_indexrelid;
4573 
4574  appendPQExpBuffer(upgrade_buffer,
4575  "SELECT pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('%u'::pg_catalog.oid);\n",
4576  pg_class_oid);
4577 
4578  /*
4579  * Preserve the OIDs of the table's toast table and index, if any.
4580  * Indexes cannot have toast tables, so we need not make this probe in
4581  * the index code path.
4582  *
4583  * One complexity is that the current table definition might not
4584  * require the creation of a TOAST table, but the old database might
4585  * have a TOAST table that was created earlier, before some wide
4586  * columns were dropped. By setting the TOAST oid we force creation
4587  * of the TOAST heap and index by the new backend, so we can copy the
4588  * files during binary upgrade without worrying about this case.
4589  */
4590  appendPQExpBuffer(upgrade_query,
4591  "SELECT c.reltoastrelid, c.relkind, i.indexrelid "
4592  "FROM pg_catalog.pg_class c LEFT JOIN "
4593  "pg_catalog.pg_index i ON (c.reltoastrelid = i.indrelid AND i.indisvalid) "
4594  "WHERE c.oid = '%u'::pg_catalog.oid;",
4595  pg_class_oid);
4596 
4597  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4598 
4599  pg_class_reltoastrelid = atooid(PQgetvalue(upgrade_res, 0,
4600  PQfnumber(upgrade_res, "reltoastrelid")));
4601  pg_class_relkind = *PQgetvalue(upgrade_res, 0,
4602  PQfnumber(upgrade_res, "relkind"));
4603  pg_index_indexrelid = atooid(PQgetvalue(upgrade_res, 0,
4604  PQfnumber(upgrade_res, "indexrelid")));
4605 
4606  /*
4607  * In a pre-v12 database, partitioned tables might be marked as having
4608  * toast tables, but we should ignore them if so.
4609  */
4610  if (OidIsValid(pg_class_reltoastrelid) &&
4611  pg_class_relkind != RELKIND_PARTITIONED_TABLE)
4612  {
4613  appendPQExpBuffer(upgrade_buffer,
4614  "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('%u'::pg_catalog.oid);\n",
4615  pg_class_reltoastrelid);
4616 
4617  /* every toast table has an index */
4618  appendPQExpBuffer(upgrade_buffer,
4619  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
4620  pg_index_indexrelid);
4621  }
4622 
4623  PQclear(upgrade_res);
4624  destroyPQExpBuffer(upgrade_query);
4625  }
4626  else
4627  appendPQExpBuffer(upgrade_buffer,
4628  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
4629  pg_class_oid);
4630 
4631  appendPQExpBufferChar(upgrade_buffer, '\n');
4632 }
4633 
4634 /*
4635  * If the DumpableObject is a member of an extension, add a suitable
4636  * ALTER EXTENSION ADD command to the creation commands in upgrade_buffer.
4637  *
4638  * For somewhat historical reasons, objname should already be quoted,
4639  * but not objnamespace (if any).
4640  */
4641 static void
4643  DumpableObject *dobj,
4644  const char *objtype,
4645  const char *objname,
4646  const char *objnamespace)
4647 {
4648  DumpableObject *extobj = NULL;
4649  int i;
4650 
4651  if (!dobj->ext_member)
4652  return;
4653 
4654  /*
4655  * Find the parent extension. We could avoid this search if we wanted to
4656  * add a link field to DumpableObject, but the space costs of that would
4657  * be considerable. We assume that member objects could only have a
4658  * direct dependency on their own extension, not any others.
4659  */
4660  for (i = 0; i < dobj->nDeps; i++)
4661  {
4662  extobj = findObjectByDumpId(dobj->dependencies[i]);
4663  if (extobj && extobj->objType == DO_EXTENSION)
4664  break;
4665  extobj = NULL;
4666  }
4667  if (extobj == NULL)
4668  fatal("could not find parent extension for %s %s",
4669  objtype, objname);
4670 
4671  appendPQExpBufferStr(upgrade_buffer,
4672  "\n-- For binary upgrade, handle extension membership the hard way\n");
4673  appendPQExpBuffer(upgrade_buffer, "ALTER EXTENSION %s ADD %s ",
4674  fmtId(extobj->name),
4675  objtype);
4676  if (objnamespace && *objnamespace)
4677  appendPQExpBuffer(upgrade_buffer, "%s.", fmtId(objnamespace));
4678  appendPQExpBuffer(upgrade_buffer, "%s;\n", objname);
4679 }
4680 
4681 /*
4682  * getNamespaces:
4683  * read all namespaces in the system catalogs and return them in the
4684  * NamespaceInfo* structure
4685  *
4686  * numNamespaces is set to the number of namespaces read in
4687  */
4688 NamespaceInfo *
4690 {
4691  DumpOptions *dopt = fout->dopt;
4692  PGresult *res;
4693  int ntups;
4694  int i;
4695  PQExpBuffer query;
4696  NamespaceInfo *nsinfo;
4697  int i_tableoid;
4698  int i_oid;
4699  int i_nspname;
4700  int i_rolname;
4701  int i_nspacl;
4702  int i_rnspacl;
4703  int i_initnspacl;
4704  int i_initrnspacl;
4705 
4706  query = createPQExpBuffer();
4707 
4708  /*
4709  * we fetch all namespaces including system ones, so that every object we
4710  * read in can be linked to a containing namespace.
4711  */
4712  if (fout->remoteVersion >= 90600)
4713  {
4714  PQExpBuffer acl_subquery = createPQExpBuffer();
4715  PQExpBuffer racl_subquery = createPQExpBuffer();
4716  PQExpBuffer init_acl_subquery = createPQExpBuffer();
4717  PQExpBuffer init_racl_subquery = createPQExpBuffer();
4718 
4719  buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
4720  init_racl_subquery, "n.nspacl", "n.nspowner", "'n'",
4721  dopt->binary_upgrade);
4722 
4723  appendPQExpBuffer(query, "SELECT n.tableoid, n.oid, n.nspname, "
4724  "(%s nspowner) AS rolname, "
4725  "%s as nspacl, "
4726  "%s as rnspacl, "
4727  "%s as initnspacl, "
4728  "%s as initrnspacl "
4729  "FROM pg_namespace n "
4730  "LEFT JOIN pg_init_privs pip "
4731  "ON (n.oid = pip.objoid "
4732  "AND pip.classoid = 'pg_namespace'::regclass "
4733  "AND pip.objsubid = 0",
4735  acl_subquery->data,
4736  racl_subquery->data,
4737  init_acl_subquery->data,
4738  init_racl_subquery->data);
4739 
4740  appendPQExpBufferStr(query, ") ");
4741 
4742  destroyPQExpBuffer(acl_subquery);
4743  destroyPQExpBuffer(racl_subquery);
4744  destroyPQExpBuffer(init_acl_subquery);
4745  destroyPQExpBuffer(init_racl_subquery);
4746  }
4747  else
4748  appendPQExpBuffer(query, "SELECT tableoid, oid, nspname, "
4749  "(%s nspowner) AS rolname, "
4750  "nspacl, NULL as rnspacl, "
4751  "NULL AS initnspacl, NULL as initrnspacl "
4752  "FROM pg_namespace",
4754 
4755  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4756 
4757  ntups = PQntuples(res);
4758 
4759  nsinfo = (NamespaceInfo *) pg_malloc(ntups * sizeof(NamespaceInfo));
4760 
4761  i_tableoid = PQfnumber(res, "tableoid");
4762  i_oid = PQfnumber(res, "oid");
4763  i_nspname = PQfnumber(res, "nspname");
4764  i_rolname = PQfnumber(res, "rolname");
4765  i_nspacl = PQfnumber(res, "nspacl");
4766  i_rnspacl = PQfnumber(res, "rnspacl");
4767  i_initnspacl = PQfnumber(res, "initnspacl");
4768  i_initrnspacl = PQfnumber(res, "initrnspacl");
4769 
4770  for (i = 0; i < ntups; i++)
4771  {
4772  nsinfo[i].dobj.objType = DO_NAMESPACE;
4773  nsinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4774  nsinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4775  AssignDumpId(&nsinfo[i].dobj);
4776  nsinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_nspname));
4777  nsinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4778  nsinfo[i].nspacl = pg_strdup(PQgetvalue(res, i, i_nspacl));
4779  nsinfo[i].rnspacl = pg_strdup(PQgetvalue(res, i, i_rnspacl));
4780  nsinfo[i].initnspacl = pg_strdup(PQgetvalue(res, i, i_initnspacl));
4781  nsinfo[i].initrnspacl = pg_strdup(PQgetvalue(res, i, i_initrnspacl));
4782 
4783  /* Decide whether to dump this namespace */
4784  selectDumpableNamespace(&nsinfo[i], fout);
4785 
4786  /*
4787  * Do not try to dump ACL if the ACL is empty or the default.
4788  *
4789  * This is useful because, for some schemas/objects, the only
4790  * component we are going to try and dump is the ACL and if we can
4791  * remove that then 'dump' goes to zero/false and we don't consider
4792  * this object for dumping at all later on.
4793  */
4794  if (PQgetisnull(res, i, i_nspacl) && PQgetisnull(res, i, i_rnspacl) &&
4795  PQgetisnull(res, i, i_initnspacl) &&
4796  PQgetisnull(res, i, i_initrnspacl))
4797  nsinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4798 
4799  if (strlen(nsinfo[i].rolname) == 0)
4800  pg_log_warning("owner of schema \"%s\" appears to be invalid",
4801  nsinfo[i].dobj.name);
4802  }
4803 
4804  PQclear(res);
4805  destroyPQExpBuffer(query);
4806 
4807  *numNamespaces = ntups;
4808 
4809  return nsinfo;
4810 }
4811 
4812 /*
4813  * findNamespace:
4814  * given a namespace OID, look up the info read by getNamespaces
4815  */
4816 static NamespaceInfo *
4818 {
4819  NamespaceInfo *nsinfo;
4820 
4821  nsinfo = findNamespaceByOid(nsoid);
4822  if (nsinfo == NULL)
4823  fatal("schema with OID %u does not exist", nsoid);
4824  return nsinfo;
4825 }
4826 
4827 /*
4828  * getExtensions:
4829  * read all extensions in the system catalogs and return them in the
4830  * ExtensionInfo* structure
4831  *
4832  * numExtensions is set to the number of extensions read in
4833  */
4834 ExtensionInfo *
4836 {
4837  DumpOptions *dopt = fout->dopt;
4838  PGresult *res;
4839  int ntups;
4840  int i;
4841  PQExpBuffer query;
4842  ExtensionInfo *extinfo;
4843  int i_tableoid;
4844  int i_oid;
4845  int i_extname;
4846  int i_nspname;
4847  int i_extrelocatable;
4848  int i_extversion;
4849  int i_extconfig;
4850  int i_extcondition;
4851 
4852  /*
4853  * Before 9.1, there are no extensions.
4854  */
4855  if (fout->remoteVersion < 90100)
4856  {
4857  *numExtensions = 0;
4858  return NULL;
4859  }
4860 
4861  query = createPQExpBuffer();
4862 
4863  appendPQExpBufferStr(query, "SELECT x.tableoid, x.oid, "
4864  "x.extname, n.nspname, x.extrelocatable, x.extversion, x.extconfig, x.extcondition "
4865  "FROM pg_extension x "
4866  "JOIN pg_namespace n ON n.oid = x.extnamespace");
4867 
4868  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4869 
4870  ntups = PQntuples(res);
4871 
4872  extinfo = (ExtensionInfo *) pg_malloc(ntups * sizeof(ExtensionInfo));
4873 
4874  i_tableoid = PQfnumber(res, "tableoid");
4875  i_oid = PQfnumber(res, "oid");
4876  i_extname = PQfnumber(res, "extname");
4877  i_nspname = PQfnumber(res, "nspname");
4878  i_extrelocatable = PQfnumber(res, "extrelocatable");
4879  i_extversion = PQfnumber(res, "extversion");
4880  i_extconfig = PQfnumber(res, "extconfig");
4881  i_extcondition = PQfnumber(res, "extcondition");
4882 
4883  for (i = 0; i < ntups; i++)
4884  {
4885  extinfo[i].dobj.objType = DO_EXTENSION;
4886  extinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4887  extinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4888  AssignDumpId(&extinfo[i].dobj);
4889  extinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_extname));
4890  extinfo[i].namespace = pg_strdup(PQgetvalue(res, i, i_nspname));
4891  extinfo[i].relocatable = *(PQgetvalue(res, i, i_extrelocatable)) == 't';
4892  extinfo[i].extversion = pg_strdup(PQgetvalue(res, i, i_extversion));
4893  extinfo[i].extconfig = pg_strdup(PQgetvalue(res, i, i_extconfig));
4894  extinfo[i].extcondition = pg_strdup(PQgetvalue(res, i, i_extcondition));
4895 
4896  /* Decide whether we want to dump it */
4897  selectDumpableExtension(&(extinfo[i]), dopt);
4898  }
4899 
4900  PQclear(res);
4901  destroyPQExpBuffer(query);
4902 
4903  *numExtensions = ntups;
4904 
4905  return extinfo;
4906 }
4907 
4908 /*
4909  * getTypes:
4910  * read all types in the system catalogs and return them in the
4911  * TypeInfo* structure
4912  *
4913  * numTypes is set to the number of types read in
4914  *
4915  * NB: this must run after getFuncs() because we assume we can do
4916  * findFuncByOid().
4917  */
4918 TypeInfo *
4920 {
4921  DumpOptions *dopt = fout->dopt;
4922  PGresult *res;
4923  int ntups;
4924  int i;
4925  PQExpBuffer query = createPQExpBuffer();
4926  TypeInfo *tyinfo;
4927  ShellTypeInfo *stinfo;
4928  int i_tableoid;
4929  int i_oid;
4930  int i_typname;
4931  int i_typnamespace;
4932  int i_typacl;
4933  int i_rtypacl;
4934  int i_inittypacl;
4935  int i_initrtypacl;
4936  int i_rolname;
4937  int i_typelem;
4938  int i_typrelid;
4939  int i_typrelkind;
4940  int i_typtype;
4941  int i_typisdefined;
4942  int i_isarray;
4943 
4944  /*
4945  * we include even the built-in types because those may be used as array
4946  * elements by user-defined types
4947  *
4948  * we filter out the built-in types when we dump out the types
4949  *
4950  * same approach for undefined (shell) types and array types
4951  *
4952  * Note: as of 8.3 we can reliably detect whether a type is an
4953  * auto-generated array type by checking the element type's typarray.
4954  * (Before that the test is capable of generating false positives.) We
4955  * still check for name beginning with '_', though, so as to avoid the
4956  * cost of the subselect probe for all standard types. This would have to
4957  * be revisited if the backend ever allows renaming of array types.
4958  */
4959 
4960  if (fout->remoteVersion >= 90600)
4961  {
4962  PQExpBuffer acl_subquery = createPQExpBuffer();
4963  PQExpBuffer racl_subquery = createPQExpBuffer();
4964  PQExpBuffer initacl_subquery = createPQExpBuffer();
4965  PQExpBuffer initracl_subquery = createPQExpBuffer();
4966 
4967  buildACLQueries(acl_subquery, racl_subquery, initacl_subquery,
4968  initracl_subquery, "t.typacl", "t.typowner", "'T'",
4969  dopt->binary_upgrade);
4970 
4971  appendPQExpBuffer(query, "SELECT t.tableoid, t.oid, t.typname, "
4972  "t.typnamespace, "
4973  "%s AS typacl, "
4974  "%s AS rtypacl, "
4975  "%s AS inittypacl, "
4976  "%s AS initrtypacl, "
4977  "(%s t.typowner) AS rolname, "
4978  "t.typelem, t.typrelid, "
4979  "CASE WHEN t.typrelid = 0 THEN ' '::\"char\" "
4980  "ELSE (SELECT relkind FROM pg_class WHERE oid = t.typrelid) END AS typrelkind, "
4981  "t.typtype, t.typisdefined, "
4982  "t.typname[0] = '_' AND t.typelem != 0 AND "
4983  "(SELECT typarray FROM pg_type te WHERE oid = t.typelem) = t.oid AS isarray "
4984  "FROM pg_type t "
4985  "LEFT JOIN pg_init_privs pip ON "
4986  "(t.oid = pip.objoid "
4987  "AND pip.classoid = 'pg_type'::regclass "
4988  "AND pip.objsubid = 0) ",
4989  acl_subquery->data,
4990  racl_subquery->data,
4991  initacl_subquery->data,
4992  initracl_subquery->data,
4994 
4995  destroyPQExpBuffer(acl_subquery);
4996  destroyPQExpBuffer(racl_subquery);
4997  destroyPQExpBuffer(initacl_subquery);
4998  destroyPQExpBuffer(initracl_subquery);
4999  }
5000  else if (fout->remoteVersion >= 90200)
5001  {
5002  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
5003  "typnamespace, typacl, NULL as rtypacl, "
5004  "NULL AS inittypacl, NULL AS initrtypacl, "
5005  "(%s typowner) AS rolname, "
5006  "typelem, typrelid, "
5007  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
5008  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
5009  "typtype, typisdefined, "
5010  "typname[0] = '_' AND typelem != 0 AND "
5011  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
5012  "FROM pg_type",
5014  }
5015  else if (fout->remoteVersion >= 80300)
5016  {
5017  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
5018  "typnamespace, NULL AS typacl, NULL as rtypacl, "
5019  "NULL AS inittypacl, NULL AS initrtypacl, "
5020  "(%s typowner) AS rolname, "
5021  "typelem, typrelid, "
5022  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
5023  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
5024  "typtype, typisdefined, "
5025  "typname[0] = '_' AND typelem != 0 AND "
5026  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
5027  "FROM pg_type",
5029  }
5030  else
5031  {
5032  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
5033  "typnamespace, NULL AS typacl, NULL as rtypacl, "
5034  "NULL AS inittypacl, NULL AS initrtypacl, "
5035  "(%s typowner) AS rolname, "
5036  "typelem, typrelid, "
5037  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
5038  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
5039  "typtype, typisdefined, "
5040  "typname[0] = '_' AND typelem != 0 AS isarray "
5041  "FROM pg_type",
5043  }
5044 
5045  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5046 
5047  ntups = PQntuples(res);
5048 
5049  tyinfo = (TypeInfo *) pg_malloc(ntups * sizeof(TypeInfo));
5050 
5051  i_tableoid = PQfnumber(res, "tableoid");
5052  i_oid = PQfnumber(res, "oid");
5053  i_typname = PQfnumber(res, "typname");
5054  i_typnamespace = PQfnumber(res, "typnamespace");
5055  i_typacl = PQfnumber(res, "typacl");
5056  i_rtypacl = PQfnumber(res, "rtypacl");
5057  i_inittypacl = PQfnumber(res, "inittypacl");
5058  i_initrtypacl = PQfnumber(res, "initrtypacl");
5059  i_rolname = PQfnumber(res, "rolname");
5060  i_typelem = PQfnumber(res, "typelem");
5061  i_typrelid = PQfnumber(res, "typrelid");
5062  i_typrelkind = PQfnumber(res, "typrelkind");
5063  i_typtype = PQfnumber(res, "typtype");
5064  i_typisdefined = PQfnumber(res, "typisdefined");
5065  i_isarray = PQfnumber(res, "isarray");
5066 
5067  for (i = 0; i < ntups; i++)
5068  {
5069  tyinfo[i].dobj.objType = DO_TYPE;
5070  tyinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5071  tyinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5072  AssignDumpId(&tyinfo[i].dobj);
5073  tyinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_typname));
5074  tyinfo[i].dobj.namespace =
5075  findNamespace(atooid(PQgetvalue(res, i, i_typnamespace)));
5076  tyinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5077  tyinfo[i].typacl = pg_strdup(PQgetvalue(res, i, i_typacl));
5078  tyinfo[i].rtypacl = pg_strdup(PQgetvalue(res, i, i_rtypacl));
5079  tyinfo[i].inittypacl = pg_strdup(PQgetvalue(res, i, i_inittypacl));
5080  tyinfo[i].initrtypacl = pg_strdup(PQgetvalue(res, i, i_initrtypacl));
5081  tyinfo[i].typelem = atooid(PQgetvalue(res, i, i_typelem));
5082  tyinfo[i].typrelid = atooid(PQgetvalue(res, i, i_typrelid));
5083  tyinfo[i].typrelkind = *PQgetvalue(res, i, i_typrelkind);
5084  tyinfo[i].typtype = *PQgetvalue(res, i, i_typtype);
5085  tyinfo[i].shellType = NULL;
5086 
5087  if (strcmp(PQgetvalue(res, i, i_typisdefined), "t") == 0)
5088  tyinfo[i].isDefined = true;
5089  else
5090  tyinfo[i].isDefined = false;
5091 
5092  if (strcmp(PQgetvalue(res, i, i_isarray), "t") == 0)
5093  tyinfo[i].isArray = true;
5094  else
5095  tyinfo[i].isArray = false;
5096 
5097  /* Decide whether we want to dump it */
5098  selectDumpableType(&tyinfo[i], fout);
5099 
5100  /* Do not try to dump ACL if no ACL exists. */
5101  if (PQgetisnull(res, i, i_typacl) && PQgetisnull(res, i, i_rtypacl) &&
5102  PQgetisnull(res, i, i_inittypacl) &&
5103  PQgetisnull(res, i, i_initrtypacl))
5104  tyinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5105 
5106  /*
5107  * If it's a domain, fetch info about its constraints, if any
5108  */
5109  tyinfo[i].nDomChecks = 0;
5110  tyinfo[i].domChecks = NULL;
5111  if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
5112  tyinfo[i].typtype == TYPTYPE_DOMAIN)
5113  getDomainConstraints(fout, &(tyinfo[i]));
5114 
5115  /*
5116  * If it's a base type, make a DumpableObject representing a shell
5117  * definition of the type. We will need to dump that ahead of the I/O
5118  * functions for the type. Similarly, range types need a shell
5119  * definition in case they have a canonicalize function.
5120  *
5121  * Note: the shell type doesn't have a catId. You might think it
5122  * should copy the base type's catId, but then it might capture the
5123  * pg_depend entries for the type, which we don't want.
5124  */
5125  if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
5126  (tyinfo[i].typtype == TYPTYPE_BASE ||
5127  tyinfo[i].typtype == TYPTYPE_RANGE))
5128  {
5129  stinfo = (ShellTypeInfo *) pg_malloc(sizeof(ShellTypeInfo));
5130  stinfo->dobj.objType = DO_SHELL_TYPE;
5131  stinfo->dobj.catId = nilCatalogId;
5132  AssignDumpId(&stinfo->dobj);
5133  stinfo->dobj.name = pg_strdup(tyinfo[i].dobj.name);
5134  stinfo->dobj.namespace = tyinfo[i].dobj.namespace;
5135  stinfo->baseType = &(tyinfo[i]);
5136  tyinfo[i].shellType = stinfo;
5137 
5138  /*
5139  * Initially mark the shell type as not to be dumped. We'll only
5140  * dump it if the I/O or canonicalize functions need to be dumped;
5141  * this is taken care of while sorting dependencies.
5142  */
5143  stinfo->dobj.dump = DUMP_COMPONENT_NONE;
5144  }
5145 
5146  if (strlen(tyinfo[i].rolname) == 0)
5147  pg_log_warning("owner of data type \"%s\" appears to be invalid",
5148  tyinfo[i].dobj.name);
5149  }
5150 
5151  *numTypes = ntups;
5152 
5153  PQclear(res);
5154 
5155  destroyPQExpBuffer(query);
5156 
5157  return tyinfo;
5158 }
5159 
5160 /*
5161  * getOperators:
5162  * read all operators in the system catalogs and return them in the
5163  * OprInfo* structure
5164  *
5165  * numOprs is set to the number of operators read in
5166  */
5167 OprInfo *
5168 getOperators(Archive *fout, int *numOprs)
5169 {
5170  PGresult *res;
5171  int ntups;
5172  int i;
5173  PQExpBuffer query = createPQExpBuffer();
5174  OprInfo *oprinfo;
5175  int i_tableoid;
5176  int i_oid;
5177  int i_oprname;
5178  int i_oprnamespace;
5179  int i_rolname;
5180  int i_oprkind;
5181  int i_oprcode;
5182 
5183  /*
5184  * find all operators, including builtin operators; we filter out
5185  * system-defined operators at dump-out time.
5186  */
5187 
5188  appendPQExpBuffer(query, "SELECT tableoid, oid, oprname, "
5189  "oprnamespace, "
5190  "(%s oprowner) AS rolname, "
5191  "oprkind, "
5192  "oprcode::oid AS oprcode "
5193  "FROM pg_operator",
5195 
5196  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5197 
5198  ntups = PQntuples(res);
5199  *numOprs = ntups;
5200 
5201  oprinfo = (OprInfo *) pg_malloc(ntups * sizeof(OprInfo));
5202 
5203  i_tableoid = PQfnumber(res, "tableoid");
5204  i_oid = PQfnumber(res, "oid");
5205  i_oprname = PQfnumber(res, "oprname");
5206  i_oprnamespace = PQfnumber(res, "oprnamespace");
5207  i_rolname = PQfnumber(res, "rolname");
5208  i_oprkind = PQfnumber(res, "oprkind");
5209  i_oprcode = PQfnumber(res, "oprcode");
5210 
5211  for (i = 0; i < ntups; i++)
5212  {
5213  oprinfo[i].dobj.objType = DO_OPERATOR;
5214  oprinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5215  oprinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5216  AssignDumpId(&oprinfo[i].dobj);
5217  oprinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oprname));
5218  oprinfo[i].dobj.namespace =
5219  findNamespace(atooid(PQgetvalue(res, i, i_oprnamespace)));
5220  oprinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5221  oprinfo[i].oprkind = (PQgetvalue(res, i, i_oprkind))[0];
5222  oprinfo[i].oprcode = atooid(PQgetvalue(res, i, i_oprcode));
5223 
5224  /* Decide whether we want to dump it */
5225  selectDumpableObject(&(oprinfo[i].dobj), fout);
5226 
5227  /* Operators do not currently have ACLs. */
5228  oprinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5229 
5230  if (strlen(oprinfo[i].rolname) == 0)
5231  pg_log_warning("owner of operator \"%s\" appears to be invalid",
5232  oprinfo[i].dobj.name);
5233  }
5234 
5235  PQclear(res);
5236 
5237  destroyPQExpBuffer(query);
5238 
5239  return oprinfo;
5240 }
5241 
5242 /*
5243  * getCollations:
5244  * read all collations in the system catalogs and return them in the
5245  * CollInfo* structure
5246  *
5247  * numCollations is set to the number of collations read in
5248  */
5249 CollInfo *
5251 {
5252  PGresult *res;
5253  int ntups;
5254  int i;
5255  PQExpBuffer query;
5256  CollInfo *collinfo;
5257  int i_tableoid;
5258  int i_oid;
5259  int i_collname;
5260  int i_collnamespace;
5261  int i_rolname;
5262 
5263  /* Collations didn't exist pre-9.1 */
5264  if (fout->remoteVersion < 90100)
5265  {
5266  *numCollations = 0;
5267  return NULL;
5268  }
5269 
5270  query = createPQExpBuffer();
5271 
5272  /*
5273  * find all collations, including builtin collations; we filter out
5274  * system-defined collations at dump-out time.
5275  */
5276 
5277  appendPQExpBuffer(query, "SELECT tableoid, oid, collname, "
5278  "collnamespace, "
5279  "(%s collowner) AS rolname "
5280  "FROM pg_collation",
5282 
5283  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5284 
5285  ntups = PQntuples(res);
5286  *numCollations = ntups;
5287 
5288  collinfo = (CollInfo *) pg_malloc(ntups * sizeof(CollInfo));
5289 
5290  i_tableoid = PQfnumber(res, "tableoid");
5291  i_oid = PQfnumber(res, "oid");
5292  i_collname = PQfnumber(res, "collname");
5293  i_collnamespace = PQfnumber(res, "collnamespace");
5294  i_rolname =