PostgreSQL Source Code  git master
pg_dump.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * pg_dump.c
4  * pg_dump is a utility for dumping out a postgres database
5  * into a script file.
6  *
7  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994, Regents of the University of California
9  *
10  * pg_dump will read the system catalogs in a database and dump out a
11  * script that reproduces the schema in terms of SQL that is understood
12  * by PostgreSQL
13  *
14  * Note that pg_dump runs in a transaction-snapshot mode transaction,
15  * so it sees a consistent snapshot of the database including system
16  * catalogs. However, it relies in part on various specialized backend
17  * functions like pg_get_indexdef(), and those things tend to look at
18  * the currently committed state. So it is possible to get 'cache
19  * lookup failed' error if someone performs DDL changes while a dump is
20  * happening. The window for this sort of thing is from the acquisition
21  * of the transaction snapshot to getSchemaData() (when pg_dump acquires
22  * AccessShareLock on every table it intends to dump). It isn't very large,
23  * but it can happen.
24  *
25  * http://archives.postgresql.org/pgsql-bugs/2010-02/msg00187.php
26  *
27  * IDENTIFICATION
28  * src/bin/pg_dump/pg_dump.c
29  *
30  *-------------------------------------------------------------------------
31  */
32 #include "postgres_fe.h"
33 
34 #include <unistd.h>
35 #include <ctype.h>
36 #include <limits.h>
37 #ifdef HAVE_TERMIOS_H
38 #include <termios.h>
39 #endif
40 
41 #include "access/attnum.h"
42 #include "access/sysattr.h"
43 #include "access/transam.h"
44 #include "catalog/pg_aggregate_d.h"
45 #include "catalog/pg_am_d.h"
46 #include "catalog/pg_attribute_d.h"
47 #include "catalog/pg_cast_d.h"
48 #include "catalog/pg_class_d.h"
49 #include "catalog/pg_default_acl_d.h"
50 #include "catalog/pg_largeobject_d.h"
51 #include "catalog/pg_largeobject_metadata_d.h"
52 #include "catalog/pg_proc_d.h"
53 #include "catalog/pg_trigger_d.h"
54 #include "catalog/pg_type_d.h"
55 #include "common/connect.h"
56 #include "dumputils.h"
57 #include "fe_utils/string_utils.h"
58 #include "getopt_long.h"
59 #include "libpq/libpq-fs.h"
60 #include "parallel.h"
61 #include "pg_backup_db.h"
62 #include "pg_backup_utils.h"
63 #include "pg_dump.h"
64 #include "storage/block.h"
65 
/*
 * CommentItem - in-memory representation of one object comment.
 *
 * Instances are gathered in bulk by collectComments() and searched by
 * findComments() (see static declarations below) when emitting
 * COMMENT ON archive entries.
 */
66 typedef struct
67 {
68  const char *descr; /* comment for an object */
69  Oid classoid; /* object class (catalog OID) */
70  Oid objoid; /* object OID */
71  int objsubid; /* subobject (table column #), 0 for the object itself */
72 } CommentItem;
73 
/*
 * SecLabelItem - in-memory representation of one security label.
 *
 * Instances are gathered in bulk by collectSecLabels() and searched by
 * findSecLabels() (see static declarations below) when emitting
 * SECURITY LABEL archive entries.
 */
74 typedef struct
75 {
76  const char *provider; /* label provider of this security label */
77  const char *label; /* security label for an object */
78  Oid classoid; /* object class (catalog OID) */
79  Oid objoid; /* object OID */
80  int objsubid; /* subobject (table column #), 0 for the object itself */
81 } SecLabelItem;
82 
/*
 * OidOptions - flags controlling getFormattedTypeName()'s treatment of
 * an InvalidOid input (see its declaration below).
 *
 * NOTE(review): the enumerator list (original source lines 85-87) is
 * missing from this extracted view -- consult upstream pg_dump.c for
 * the actual members before relying on this definition.
 */
83 typedef enum OidOptions
84 {
88 } OidOptions;
89 
90 /* global decls */
91 static bool dosync = true; /* Issue fsync() to make dump durable on disk. */
92 
93 /* subquery used to convert user ID (eg, datdba) to user name */
94 static const char *username_subquery;
95 
96 /*
97  * For 8.0 and earlier servers, pulled from pg_database, for 8.1+ we use
98  * FirstNormalObjectId - 1.
99  */
100 static Oid g_last_builtin_oid; /* value of the last builtin oid */
101 
102 /* The specified names/patterns should match at least one entity */
103 static int strict_names = 0;
104 
105 /*
106  * Object inclusion/exclusion lists
107  *
108  * The string lists record the patterns given by command-line switches,
109  * which we then convert to lists of OIDs of matching objects.
110  *
 * NOTE(review): the SimpleStringList *_patterns declarations that pair
 * with these OID lists (schema_include_patterns, table_exclude_patterns,
 * foreign_servers_include_patterns, etc. -- all referenced from main())
 * are missing from this extracted view.
110  */
112 static SimpleOidList schema_include_oids = {NULL, NULL};
114 static SimpleOidList schema_exclude_oids = {NULL, NULL};
115 
117 static SimpleOidList table_include_oids = {NULL, NULL};
119 static SimpleOidList table_exclude_oids = {NULL, NULL};
121 static SimpleOidList tabledata_exclude_oids = {NULL, NULL};
124 
126 static SimpleOidList extension_include_oids = {NULL, NULL};
127 
/* "null" catalog ID, used where no real catalog object is involved */
128 static const CatalogId nilCatalogId = {0, 0};
129 
130 /* override for standard extra_float_digits setting */
131 static bool have_extra_float_digits = false;
133 
134 /*
135  * The default number of rows per INSERT when
136  * --inserts is specified without --rows-per-insert
 * (i.e., one INSERT statement is emitted per table row).
137  */
138 #define DUMP_DEFAULT_ROWS_PER_INSERT 1
139 
140 /*
141  * Macro for producing quoted, schema-qualified name of a dumpable object.
 *
 * Note: the argument is expanded twice, so avoid passing an expression
 * with side effects.  NOTE(review): the result presumably points to
 * transient storage owned by fmtQualifiedId() -- confirm before keeping
 * the returned pointer across another call.
142  */
143 #define fmtQualifiedDumpable(obj) \
144  fmtQualifiedId((obj)->dobj.namespace->dobj.name, \
145  (obj)->dobj.name)
146 
147 static void help(const char *progname);
148 static void setup_connection(Archive *AH,
149  const char *dumpencoding, const char *dumpsnapshot,
150  char *use_role);
152 static void expand_schema_name_patterns(Archive *fout,
153  SimpleStringList *patterns,
154  SimpleOidList *oids,
155  bool strict_names);
156 static void expand_extension_name_patterns(Archive *fout,
157  SimpleStringList *patterns,
158  SimpleOidList *oids,
159  bool strict_names);
161  SimpleStringList *patterns,
162  SimpleOidList *oids);
163 static void expand_table_name_patterns(Archive *fout,
164  SimpleStringList *patterns,
165  SimpleOidList *oids,
166  bool strict_names);
167 static NamespaceInfo *findNamespace(Oid nsoid);
168 static void dumpTableData(Archive *fout, const TableDataInfo *tdinfo);
169 static void refreshMatViewData(Archive *fout, const TableDataInfo *tdinfo);
170 static void guessConstraintInheritance(TableInfo *tblinfo, int numTables);
171 static void dumpComment(Archive *fout, const char *type, const char *name,
172  const char *namespace, const char *owner,
173  CatalogId catalogId, int subid, DumpId dumpId);
174 static int findComments(Archive *fout, Oid classoid, Oid objoid,
175  CommentItem **items);
176 static int collectComments(Archive *fout, CommentItem **items);
177 static void dumpSecLabel(Archive *fout, const char *type, const char *name,
178  const char *namespace, const char *owner,
179  CatalogId catalogId, int subid, DumpId dumpId);
180 static int findSecLabels(Archive *fout, Oid classoid, Oid objoid,
181  SecLabelItem **items);
182 static int collectSecLabels(Archive *fout, SecLabelItem **items);
183 static void dumpDumpableObject(Archive *fout, const DumpableObject *dobj);
184 static void dumpNamespace(Archive *fout, const NamespaceInfo *nspinfo);
185 static void dumpExtension(Archive *fout, const ExtensionInfo *extinfo);
186 static void dumpType(Archive *fout, const TypeInfo *tyinfo);
187 static void dumpBaseType(Archive *fout, const TypeInfo *tyinfo);
188 static void dumpEnumType(Archive *fout, const TypeInfo *tyinfo);
189 static void dumpRangeType(Archive *fout, const TypeInfo *tyinfo);
190 static void dumpUndefinedType(Archive *fout, const TypeInfo *tyinfo);
191 static void dumpDomain(Archive *fout, const TypeInfo *tyinfo);
192 static void dumpCompositeType(Archive *fout, const TypeInfo *tyinfo);
193 static void dumpCompositeTypeColComments(Archive *fout, const TypeInfo *tyinfo);
194 static void dumpShellType(Archive *fout, const ShellTypeInfo *stinfo);
195 static void dumpProcLang(Archive *fout, const ProcLangInfo *plang);
196 static void dumpFunc(Archive *fout, const FuncInfo *finfo);
197 static void dumpCast(Archive *fout, const CastInfo *cast);
198 static void dumpTransform(Archive *fout, const TransformInfo *transform);
199 static void dumpOpr(Archive *fout, const OprInfo *oprinfo);
200 static void dumpAccessMethod(Archive *fout, const AccessMethodInfo *oprinfo);
201 static void dumpOpclass(Archive *fout, const OpclassInfo *opcinfo);
202 static void dumpOpfamily(Archive *fout, const OpfamilyInfo *opfinfo);
203 static void dumpCollation(Archive *fout, const CollInfo *collinfo);
204 static void dumpConversion(Archive *fout, const ConvInfo *convinfo);
205 static void dumpRule(Archive *fout, const RuleInfo *rinfo);
206 static void dumpAgg(Archive *fout, const AggInfo *agginfo);
207 static void dumpTrigger(Archive *fout, const TriggerInfo *tginfo);
208 static void dumpEventTrigger(Archive *fout, const EventTriggerInfo *evtinfo);
209 static void dumpTable(Archive *fout, const TableInfo *tbinfo);
210 static void dumpTableSchema(Archive *fout, const TableInfo *tbinfo);
211 static void dumpTableAttach(Archive *fout, const TableAttachInfo *tbinfo);
212 static void dumpAttrDef(Archive *fout, const AttrDefInfo *adinfo);
213 static void dumpSequence(Archive *fout, const TableInfo *tbinfo);
214 static void dumpSequenceData(Archive *fout, const TableDataInfo *tdinfo);
215 static void dumpIndex(Archive *fout, const IndxInfo *indxinfo);
216 static void dumpIndexAttach(Archive *fout, const IndexAttachInfo *attachinfo);
217 static void dumpStatisticsExt(Archive *fout, const StatsExtInfo *statsextinfo);
218 static void dumpConstraint(Archive *fout, const ConstraintInfo *coninfo);
219 static void dumpTableConstraintComment(Archive *fout, const ConstraintInfo *coninfo);
220 static void dumpTSParser(Archive *fout, const TSParserInfo *prsinfo);
221 static void dumpTSDictionary(Archive *fout, const TSDictInfo *dictinfo);
222 static void dumpTSTemplate(Archive *fout, const TSTemplateInfo *tmplinfo);
223 static void dumpTSConfig(Archive *fout, const TSConfigInfo *cfginfo);
224 static void dumpForeignDataWrapper(Archive *fout, const FdwInfo *fdwinfo);
225 static void dumpForeignServer(Archive *fout, const ForeignServerInfo *srvinfo);
226 static void dumpUserMappings(Archive *fout,
227  const char *servername, const char *namespace,
228  const char *owner, CatalogId catalogId, DumpId dumpId);
229 static void dumpDefaultACL(Archive *fout, const DefaultACLInfo *daclinfo);
230 
231 static DumpId dumpACL(Archive *fout, DumpId objDumpId, DumpId altDumpId,
232  const char *type, const char *name, const char *subname,
233  const char *nspname, const char *owner,
234  const char *acls, const char *racls,
235  const char *initacls, const char *initracls);
236 
237 static void getDependencies(Archive *fout);
238 static void BuildArchiveDependencies(Archive *fout);
239 static void findDumpableDependencies(ArchiveHandle *AH, const DumpableObject *dobj,
240  DumpId **dependencies, int *nDeps, int *allocDeps);
241 
243 static void addBoundaryDependencies(DumpableObject **dobjs, int numObjs,
244  DumpableObject *boundaryObjs);
245 
246 static void addConstrChildIdxDeps(DumpableObject *dobj, const IndxInfo *refidx);
247 static void getDomainConstraints(Archive *fout, TypeInfo *tyinfo);
248 static void getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, char relkind);
249 static void makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo);
250 static void buildMatViewRefreshDependencies(Archive *fout);
251 static void getTableDataFKConstraints(void);
252 static char *format_function_arguments(const FuncInfo *finfo, const char *funcargs,
253  bool is_agg);
254 static char *format_function_arguments_old(Archive *fout,
255  const FuncInfo *finfo, int nallargs,
256  char **allargtypes,
257  char **argmodes,
258  char **argnames);
259 static char *format_function_signature(Archive *fout,
260  const FuncInfo *finfo, bool honor_quotes);
261 static char *convertRegProcReference(const char *proc);
262 static char *getFormattedOperatorName(const char *oproid);
263 static char *convertTSFunction(Archive *fout, Oid funcOid);
264 static Oid findLastBuiltinOid_V71(Archive *fout);
265 static char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts);
266 static void getBlobs(Archive *fout);
267 static void dumpBlob(Archive *fout, const BlobInfo *binfo);
268 static int dumpBlobs(Archive *fout, const void *arg);
269 static void dumpPolicy(Archive *fout, const PolicyInfo *polinfo);
270 static void dumpPublication(Archive *fout, const PublicationInfo *pubinfo);
271 static void dumpPublicationTable(Archive *fout, const PublicationRelInfo *pubrinfo);
272 static void dumpSubscription(Archive *fout, const SubscriptionInfo *subinfo);
273 static void dumpDatabase(Archive *AH);
274 static void dumpDatabaseConfig(Archive *AH, PQExpBuffer outbuf,
275  const char *dbname, Oid dboid);
276 static void dumpEncoding(Archive *AH);
277 static void dumpStdStrings(Archive *AH);
278 static void dumpSearchPath(Archive *AH);
279 static void dumpToastCompression(Archive *AH);
281  PQExpBuffer upgrade_buffer,
282  Oid pg_type_oid,
283  bool force_array_type,
284  bool include_multirange_type);
286  PQExpBuffer upgrade_buffer, Oid pg_rel_oid);
287 static void binary_upgrade_set_pg_class_oids(Archive *fout,
288  PQExpBuffer upgrade_buffer,
289  Oid pg_class_oid, bool is_index);
290 static void binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
291  const DumpableObject *dobj,
292  const char *objtype,
293  const char *objname,
294  const char *objnamespace);
295 static const char *getAttrName(int attrnum, const TableInfo *tblInfo);
296 static const char *fmtCopyColumnList(const TableInfo *ti, PQExpBuffer buffer);
297 static bool nonemptyReloptions(const char *reloptions);
298 static void appendReloptionsArrayAH(PQExpBuffer buffer, const char *reloptions,
299  const char *prefix, Archive *fout);
300 static char *get_synchronized_snapshot(Archive *fout);
301 static void setupDumpWorker(Archive *AHX);
302 static TableInfo *getRootTableInfo(const TableInfo *tbinfo);
303 
304 
305 int
306 main(int argc, char **argv)
307 {
308  int c;
309  const char *filename = NULL;
310  const char *format = "p";
311  TableInfo *tblinfo;
312  int numTables;
313  DumpableObject **dobjs;
314  int numObjs;
315  DumpableObject *boundaryObjs;
316  int i;
317  int optindex;
318  char *endptr;
319  RestoreOptions *ropt;
320  Archive *fout; /* the script file */
321  bool g_verbose = false;
322  const char *dumpencoding = NULL;
323  const char *dumpsnapshot = NULL;
324  char *use_role = NULL;
325  long rowsPerInsert;
326  int numWorkers = 1;
327  int compressLevel = -1;
328  int plainText = 0;
329  ArchiveFormat archiveFormat = archUnknown;
330  ArchiveMode archiveMode;
331 
332  static DumpOptions dopt;
333 
334  static struct option long_options[] = {
335  {"data-only", no_argument, NULL, 'a'},
336  {"blobs", no_argument, NULL, 'b'},
337  {"no-blobs", no_argument, NULL, 'B'},
338  {"clean", no_argument, NULL, 'c'},
339  {"create", no_argument, NULL, 'C'},
340  {"dbname", required_argument, NULL, 'd'},
341  {"extension", required_argument, NULL, 'e'},
342  {"file", required_argument, NULL, 'f'},
343  {"format", required_argument, NULL, 'F'},
344  {"host", required_argument, NULL, 'h'},
345  {"jobs", 1, NULL, 'j'},
346  {"no-reconnect", no_argument, NULL, 'R'},
347  {"no-owner", no_argument, NULL, 'O'},
348  {"port", required_argument, NULL, 'p'},
349  {"schema", required_argument, NULL, 'n'},
350  {"exclude-schema", required_argument, NULL, 'N'},
351  {"schema-only", no_argument, NULL, 's'},
352  {"superuser", required_argument, NULL, 'S'},
353  {"table", required_argument, NULL, 't'},
354  {"exclude-table", required_argument, NULL, 'T'},
355  {"no-password", no_argument, NULL, 'w'},
356  {"password", no_argument, NULL, 'W'},
357  {"username", required_argument, NULL, 'U'},
358  {"verbose", no_argument, NULL, 'v'},
359  {"no-privileges", no_argument, NULL, 'x'},
360  {"no-acl", no_argument, NULL, 'x'},
361  {"compress", required_argument, NULL, 'Z'},
362  {"encoding", required_argument, NULL, 'E'},
363  {"help", no_argument, NULL, '?'},
364  {"version", no_argument, NULL, 'V'},
365 
366  /*
367  * the following options don't have an equivalent short option letter
368  */
369  {"attribute-inserts", no_argument, &dopt.column_inserts, 1},
370  {"binary-upgrade", no_argument, &dopt.binary_upgrade, 1},
371  {"column-inserts", no_argument, &dopt.column_inserts, 1},
372  {"disable-dollar-quoting", no_argument, &dopt.disable_dollar_quoting, 1},
373  {"disable-triggers", no_argument, &dopt.disable_triggers, 1},
374  {"enable-row-security", no_argument, &dopt.enable_row_security, 1},
375  {"exclude-table-data", required_argument, NULL, 4},
376  {"extra-float-digits", required_argument, NULL, 8},
377  {"if-exists", no_argument, &dopt.if_exists, 1},
378  {"inserts", no_argument, NULL, 9},
379  {"lock-wait-timeout", required_argument, NULL, 2},
380  {"no-tablespaces", no_argument, &dopt.outputNoTablespaces, 1},
381  {"quote-all-identifiers", no_argument, &quote_all_identifiers, 1},
382  {"load-via-partition-root", no_argument, &dopt.load_via_partition_root, 1},
383  {"role", required_argument, NULL, 3},
384  {"section", required_argument, NULL, 5},
385  {"serializable-deferrable", no_argument, &dopt.serializable_deferrable, 1},
386  {"snapshot", required_argument, NULL, 6},
387  {"strict-names", no_argument, &strict_names, 1},
388  {"use-set-session-authorization", no_argument, &dopt.use_setsessauth, 1},
389  {"no-comments", no_argument, &dopt.no_comments, 1},
390  {"no-publications", no_argument, &dopt.no_publications, 1},
391  {"no-security-labels", no_argument, &dopt.no_security_labels, 1},
392  {"no-subscriptions", no_argument, &dopt.no_subscriptions, 1},
393  {"no-synchronized-snapshots", no_argument, &dopt.no_synchronized_snapshots, 1},
394  {"no-toast-compression", no_argument, &dopt.no_toast_compression, 1},
395  {"no-unlogged-table-data", no_argument, &dopt.no_unlogged_table_data, 1},
396  {"no-sync", no_argument, NULL, 7},
397  {"on-conflict-do-nothing", no_argument, &dopt.do_nothing, 1},
398  {"rows-per-insert", required_argument, NULL, 10},
399  {"include-foreign-data", required_argument, NULL, 11},
400 
401  {NULL, 0, NULL, 0}
402  };
403 
404  pg_logging_init(argv[0]);
406  set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_dump"));
407 
408  /*
409  * Initialize what we need for parallel execution, especially for thread
410  * support on Windows.
411  */
413 
414  progname = get_progname(argv[0]);
415 
416  if (argc > 1)
417  {
418  if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
419  {
420  help(progname);
421  exit_nicely(0);
422  }
423  if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
424  {
425  puts("pg_dump (PostgreSQL) " PG_VERSION);
426  exit_nicely(0);
427  }
428  }
429 
430  InitDumpOptions(&dopt);
431 
432  while ((c = getopt_long(argc, argv, "abBcCd:e:E:f:F:h:j:n:N:Op:RsS:t:T:U:vwWxZ:",
433  long_options, &optindex)) != -1)
434  {
435  switch (c)
436  {
437  case 'a': /* Dump data only */
438  dopt.dataOnly = true;
439  break;
440 
441  case 'b': /* Dump blobs */
442  dopt.outputBlobs = true;
443  break;
444 
445  case 'B': /* Don't dump blobs */
446  dopt.dontOutputBlobs = true;
447  break;
448 
449  case 'c': /* clean (i.e., drop) schema prior to create */
450  dopt.outputClean = 1;
451  break;
452 
453  case 'C': /* Create DB */
454  dopt.outputCreateDB = 1;
455  break;
456 
457  case 'd': /* database name */
458  dopt.cparams.dbname = pg_strdup(optarg);
459  break;
460 
461  case 'e': /* include extension(s) */
462  simple_string_list_append(&extension_include_patterns, optarg);
463  dopt.include_everything = false;
464  break;
465 
466  case 'E': /* Dump encoding */
467  dumpencoding = pg_strdup(optarg);
468  break;
469 
470  case 'f':
471  filename = pg_strdup(optarg);
472  break;
473 
474  case 'F':
475  format = pg_strdup(optarg);
476  break;
477 
478  case 'h': /* server host */
479  dopt.cparams.pghost = pg_strdup(optarg);
480  break;
481 
482  case 'j': /* number of dump jobs */
483  numWorkers = atoi(optarg);
484  break;
485 
486  case 'n': /* include schema(s) */
487  simple_string_list_append(&schema_include_patterns, optarg);
488  dopt.include_everything = false;
489  break;
490 
491  case 'N': /* exclude schema(s) */
492  simple_string_list_append(&schema_exclude_patterns, optarg);
493  break;
494 
495  case 'O': /* Don't reconnect to match owner */
496  dopt.outputNoOwner = 1;
497  break;
498 
499  case 'p': /* server port */
500  dopt.cparams.pgport = pg_strdup(optarg);
501  break;
502 
503  case 'R':
504  /* no-op, still accepted for backwards compatibility */
505  break;
506 
507  case 's': /* dump schema only */
508  dopt.schemaOnly = true;
509  break;
510 
511  case 'S': /* Username for superuser in plain text output */
513  break;
514 
515  case 't': /* include table(s) */
516  simple_string_list_append(&table_include_patterns, optarg);
517  dopt.include_everything = false;
518  break;
519 
520  case 'T': /* exclude table(s) */
521  simple_string_list_append(&table_exclude_patterns, optarg);
522  break;
523 
524  case 'U':
526  break;
527 
528  case 'v': /* verbose */
529  g_verbose = true;
531  break;
532 
533  case 'w':
535  break;
536 
537  case 'W':
539  break;
540 
541  case 'x': /* skip ACL dump */
542  dopt.aclsSkip = true;
543  break;
544 
545  case 'Z': /* Compression Level */
546  compressLevel = atoi(optarg);
547  if (compressLevel < 0 || compressLevel > 9)
548  {
549  pg_log_error("compression level must be in range 0..9");
550  exit_nicely(1);
551  }
552  break;
553 
554  case 0:
555  /* This covers the long options. */
556  break;
557 
558  case 2: /* lock-wait-timeout */
560  break;
561 
562  case 3: /* SET ROLE */
563  use_role = pg_strdup(optarg);
564  break;
565 
566  case 4: /* exclude table(s) data */
567  simple_string_list_append(&tabledata_exclude_patterns, optarg);
568  break;
569 
570  case 5: /* section */
572  break;
573 
574  case 6: /* snapshot */
575  dumpsnapshot = pg_strdup(optarg);
576  break;
577 
578  case 7: /* no-sync */
579  dosync = false;
580  break;
581 
582  case 8:
584  extra_float_digits = atoi(optarg);
585  if (extra_float_digits < -15 || extra_float_digits > 3)
586  {
587  pg_log_error("extra_float_digits must be in range -15..3");
588  exit_nicely(1);
589  }
590  break;
591 
592  case 9: /* inserts */
593 
594  /*
595  * dump_inserts also stores --rows-per-insert, careful not to
596  * overwrite that.
597  */
598  if (dopt.dump_inserts == 0)
600  break;
601 
602  case 10: /* rows per insert */
603  errno = 0;
604  rowsPerInsert = strtol(optarg, &endptr, 10);
605 
606  if (endptr == optarg || *endptr != '\0' ||
607  rowsPerInsert <= 0 || rowsPerInsert > INT_MAX ||
608  errno == ERANGE)
609  {
610  pg_log_error("rows-per-insert must be in range %d..%d",
611  1, INT_MAX);
612  exit_nicely(1);
613  }
614  dopt.dump_inserts = (int) rowsPerInsert;
615  break;
616 
617  case 11: /* include foreign data */
618  simple_string_list_append(&foreign_servers_include_patterns,
619  optarg);
620  break;
621 
622  default:
623  fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
624  exit_nicely(1);
625  }
626  }
627 
628  /*
629  * Non-option argument specifies database name as long as it wasn't
630  * already specified with -d / --dbname
631  */
632  if (optind < argc && dopt.cparams.dbname == NULL)
633  dopt.cparams.dbname = argv[optind++];
634 
635  /* Complain if any arguments remain */
636  if (optind < argc)
637  {
638  pg_log_error("too many command-line arguments (first is \"%s\")",
639  argv[optind]);
640  fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
641  progname);
642  exit_nicely(1);
643  }
644 
645  /* --column-inserts implies --inserts */
646  if (dopt.column_inserts && dopt.dump_inserts == 0)
648 
649  /*
650  * Binary upgrade mode implies dumping sequence data even in schema-only
651  * mode. This is not exposed as a separate option, but kept separate
652  * internally for clarity.
653  */
654  if (dopt.binary_upgrade)
655  dopt.sequence_data = 1;
656 
657  if (dopt.dataOnly && dopt.schemaOnly)
658  {
659  pg_log_error("options -s/--schema-only and -a/--data-only cannot be used together");
660  exit_nicely(1);
661  }
662 
663  if (dopt.schemaOnly && foreign_servers_include_patterns.head != NULL)
664  fatal("options -s/--schema-only and --include-foreign-data cannot be used together");
665 
666  if (numWorkers > 1 && foreign_servers_include_patterns.head != NULL)
667  fatal("option --include-foreign-data is not supported with parallel backup");
668 
669  if (dopt.dataOnly && dopt.outputClean)
670  {
671  pg_log_error("options -c/--clean and -a/--data-only cannot be used together");
672  exit_nicely(1);
673  }
674 
675  if (dopt.if_exists && !dopt.outputClean)
676  fatal("option --if-exists requires option -c/--clean");
677 
678  /*
679  * --inserts are already implied above if --column-inserts or
680  * --rows-per-insert were specified.
681  */
682  if (dopt.do_nothing && dopt.dump_inserts == 0)
683  fatal("option --on-conflict-do-nothing requires option --inserts, --rows-per-insert, or --column-inserts");
684 
685  /* Identify archive format to emit */
686  archiveFormat = parseArchiveFormat(format, &archiveMode);
687 
688  /* archiveFormat specific setup */
689  if (archiveFormat == archNull)
690  plainText = 1;
691 
692  /* Custom and directory formats are compressed by default, others not */
693  if (compressLevel == -1)
694  {
695 #ifdef HAVE_LIBZ
696  if (archiveFormat == archCustom || archiveFormat == archDirectory)
697  compressLevel = Z_DEFAULT_COMPRESSION;
698  else
699 #endif
700  compressLevel = 0;
701  }
702 
703 #ifndef HAVE_LIBZ
704  if (compressLevel != 0)
705  pg_log_warning("requested compression not available in this installation -- archive will be uncompressed");
706  compressLevel = 0;
707 #endif
708 
709  /*
710  * If emitting an archive format, we always want to emit a DATABASE item,
711  * in case --create is specified at pg_restore time.
712  */
713  if (!plainText)
714  dopt.outputCreateDB = 1;
715 
716  /*
717  * On Windows we can only have at most MAXIMUM_WAIT_OBJECTS (= 64 usually)
718  * parallel jobs because that's the maximum limit for the
719  * WaitForMultipleObjects() call.
720  */
721  if (numWorkers <= 0
722 #ifdef WIN32
723  || numWorkers > MAXIMUM_WAIT_OBJECTS
724 #endif
725  )
726  fatal("invalid number of parallel jobs");
727 
728  /* Parallel backup only in the directory archive format so far */
729  if (archiveFormat != archDirectory && numWorkers > 1)
730  fatal("parallel backup only supported by the directory format");
731 
732  /* Open the output file */
733  fout = CreateArchive(filename, archiveFormat, compressLevel, dosync,
734  archiveMode, setupDumpWorker);
735 
736  /* Make dump options accessible right away */
737  SetArchiveOptions(fout, &dopt, NULL);
738 
739  /* Register the cleanup hook */
740  on_exit_close_archive(fout);
741 
742  /* Let the archiver know how noisy to be */
743  fout->verbose = g_verbose;
744 
745 
746  /*
747  * We allow the server to be back to 8.0, and up to any minor release of
748  * our own major version. (See also version check in pg_dumpall.c.)
749  */
750  fout->minRemoteVersion = 80000;
751  fout->maxRemoteVersion = (PG_VERSION_NUM / 100) * 100 + 99;
752 
753  fout->numWorkers = numWorkers;
754 
755  /*
756  * Open the database using the Archiver, so it knows about it. Errors mean
757  * death.
758  */
759  ConnectDatabase(fout, &dopt.cparams, false);
760  setup_connection(fout, dumpencoding, dumpsnapshot, use_role);
761 
762  /*
763  * Disable security label support if server version < v9.1.x (prevents
764  * access to nonexistent pg_seclabel catalog)
765  */
766  if (fout->remoteVersion < 90100)
767  dopt.no_security_labels = 1;
768 
769  /*
770  * On hot standbys, never try to dump unlogged table data, since it will
771  * just throw an error.
772  */
773  if (fout->isStandby)
774  dopt.no_unlogged_table_data = true;
775 
776  /* Select the appropriate subquery to convert user IDs to names */
777  if (fout->remoteVersion >= 80100)
778  username_subquery = "SELECT rolname FROM pg_catalog.pg_roles WHERE oid =";
779  else
780  username_subquery = "SELECT usename FROM pg_catalog.pg_user WHERE usesysid =";
781 
782  /* check the version for the synchronized snapshots feature */
783  if (numWorkers > 1 && fout->remoteVersion < 90200
784  && !dopt.no_synchronized_snapshots)
785  fatal("Synchronized snapshots are not supported by this server version.\n"
786  "Run with --no-synchronized-snapshots instead if you do not need\n"
787  "synchronized snapshots.");
788 
789  /* check the version when a snapshot is explicitly specified by user */
790  if (dumpsnapshot && fout->remoteVersion < 90200)
791  fatal("Exported snapshots are not supported by this server version.");
792 
793  /*
794  * Find the last built-in OID, if needed (prior to 8.1)
795  *
796  * With 8.1 and above, we can just use FirstNormalObjectId - 1.
797  */
798  if (fout->remoteVersion < 80100)
800  else
802 
803  pg_log_info("last built-in OID is %u", g_last_builtin_oid);
804 
805  /* Expand schema selection patterns into OID lists */
806  if (schema_include_patterns.head != NULL)
807  {
808  expand_schema_name_patterns(fout, &schema_include_patterns,
809  &schema_include_oids,
810  strict_names);
811  if (schema_include_oids.head == NULL)
812  fatal("no matching schemas were found");
813  }
814  expand_schema_name_patterns(fout, &schema_exclude_patterns,
815  &schema_exclude_oids,
816  false);
817  /* non-matching exclusion patterns aren't an error */
818 
819  /* Expand table selection patterns into OID lists */
820  if (table_include_patterns.head != NULL)
821  {
822  expand_table_name_patterns(fout, &table_include_patterns,
823  &table_include_oids,
824  strict_names);
825  if (table_include_oids.head == NULL)
826  fatal("no matching tables were found");
827  }
828  expand_table_name_patterns(fout, &table_exclude_patterns,
829  &table_exclude_oids,
830  false);
831 
832  expand_table_name_patterns(fout, &tabledata_exclude_patterns,
833  &tabledata_exclude_oids,
834  false);
835 
836  expand_foreign_server_name_patterns(fout, &foreign_servers_include_patterns,
837  &foreign_servers_include_oids);
838 
839  /* non-matching exclusion patterns aren't an error */
840 
841  /* Expand extension selection patterns into OID lists */
842  if (extension_include_patterns.head != NULL)
843  {
844  expand_extension_name_patterns(fout, &extension_include_patterns,
845  &extension_include_oids,
846  strict_names);
847  if (extension_include_oids.head == NULL)
848  fatal("no matching extensions were found");
849  }
850 
851  /*
852  * Dumping blobs is the default for dumps where an inclusion switch is not
853  * used (an "include everything" dump). -B can be used to exclude blobs
854  * from those dumps. -b can be used to include blobs even when an
855  * inclusion switch is used.
856  *
857  * -s means "schema only" and blobs are data, not schema, so we never
858  * include blobs when -s is used.
859  */
860  if (dopt.include_everything && !dopt.schemaOnly && !dopt.dontOutputBlobs)
861  dopt.outputBlobs = true;
862 
863  /*
864  * Now scan the database and create DumpableObject structs for all the
865  * objects we intend to dump.
866  */
867  tblinfo = getSchemaData(fout, &numTables);
868 
869  if (fout->remoteVersion < 80400)
870  guessConstraintInheritance(tblinfo, numTables);
871 
872  if (!dopt.schemaOnly)
873  {
874  getTableData(&dopt, tblinfo, numTables, 0);
876  if (dopt.dataOnly)
878  }
879 
880  if (dopt.schemaOnly && dopt.sequence_data)
881  getTableData(&dopt, tblinfo, numTables, RELKIND_SEQUENCE);
882 
883  /*
884  * In binary-upgrade mode, we do not have to worry about the actual blob
885  * data or the associated metadata that resides in the pg_largeobject and
886  * pg_largeobject_metadata tables, respectively.
887  *
888  * However, we do need to collect blob information as there may be
889  * comments or other information on blobs that we do need to dump out.
890  */
891  if (dopt.outputBlobs || dopt.binary_upgrade)
892  getBlobs(fout);
893 
894  /*
895  * Collect dependency data to assist in ordering the objects.
896  */
897  getDependencies(fout);
898 
899  /* Lastly, create dummy objects to represent the section boundaries */
900  boundaryObjs = createBoundaryObjects();
901 
902  /* Get pointers to all the known DumpableObjects */
903  getDumpableObjects(&dobjs, &numObjs);
904 
905  /*
906  * Add dummy dependencies to enforce the dump section ordering.
907  */
908  addBoundaryDependencies(dobjs, numObjs, boundaryObjs);
909 
910  /*
911  * Sort the objects into a safe dump order (no forward references).
912  *
913  * We rely on dependency information to help us determine a safe order, so
914  * the initial sort is mostly for cosmetic purposes: we sort by name to
915  * ensure that logically identical schemas will dump identically.
916  */
917  sortDumpableObjectsByTypeName(dobjs, numObjs);
918 
919  sortDumpableObjects(dobjs, numObjs,
920  boundaryObjs[0].dumpId, boundaryObjs[1].dumpId);
921 
922  /*
923  * Create archive TOC entries for all the objects to be dumped, in a safe
924  * order.
925  */
926 
927  /*
928  * First the special entries for ENCODING, STDSTRINGS, SEARCHPATH and
929  * TOASTCOMPRESSION.
930  */
931  dumpEncoding(fout);
932  dumpStdStrings(fout);
933  dumpSearchPath(fout);
934  dumpToastCompression(fout);
935 
936  /* The database items are always next, unless we don't want them at all */
937  if (dopt.outputCreateDB)
938  dumpDatabase(fout);
939 
940  /* Now the rearrangeable objects. */
941  for (i = 0; i < numObjs; i++)
942  dumpDumpableObject(fout, dobjs[i]);
943 
944  /*
945  * Set up options info to ensure we dump what we want.
946  */
947  ropt = NewRestoreOptions();
948  ropt->filename = filename;
949 
950  /* if you change this list, see dumpOptionsFromRestoreOptions */
951  ropt->cparams.dbname = dopt.cparams.dbname ? pg_strdup(dopt.cparams.dbname) : NULL;
952  ropt->cparams.pgport = dopt.cparams.pgport ? pg_strdup(dopt.cparams.pgport) : NULL;
953  ropt->cparams.pghost = dopt.cparams.pghost ? pg_strdup(dopt.cparams.pghost) : NULL;
954  ropt->cparams.username = dopt.cparams.username ? pg_strdup(dopt.cparams.username) : NULL;
956  ropt->dropSchema = dopt.outputClean;
957  ropt->dataOnly = dopt.dataOnly;
958  ropt->schemaOnly = dopt.schemaOnly;
959  ropt->if_exists = dopt.if_exists;
960  ropt->column_inserts = dopt.column_inserts;
961  ropt->dumpSections = dopt.dumpSections;
962  ropt->aclsSkip = dopt.aclsSkip;
963  ropt->superuser = dopt.outputSuperuser;
964  ropt->createDB = dopt.outputCreateDB;
965  ropt->noOwner = dopt.outputNoOwner;
966  ropt->noTablespace = dopt.outputNoTablespaces;
967  ropt->disable_triggers = dopt.disable_triggers;
968  ropt->use_setsessauth = dopt.use_setsessauth;
970  ropt->dump_inserts = dopt.dump_inserts;
971  ropt->no_comments = dopt.no_comments;
972  ropt->no_publications = dopt.no_publications;
974  ropt->no_subscriptions = dopt.no_subscriptions;
975  ropt->lockWaitTimeout = dopt.lockWaitTimeout;
978  ropt->sequence_data = dopt.sequence_data;
979  ropt->binary_upgrade = dopt.binary_upgrade;
980 
981  if (compressLevel == -1)
982  ropt->compression = 0;
983  else
984  ropt->compression = compressLevel;
985 
986  ropt->suppressDumpWarnings = true; /* We've already shown them */
987 
988  SetArchiveOptions(fout, &dopt, ropt);
989 
990  /* Mark which entries should be output */
992 
993  /*
994  * The archive's TOC entries are now marked as to which ones will actually
995  * be output, so we can set up their dependency lists properly. This isn't
996  * necessary for plain-text output, though.
997  */
998  if (!plainText)
1000 
1001  /*
1002  * And finally we can do the actual output.
1003  *
1004  * Note: for non-plain-text output formats, the output file is written
1005  * inside CloseArchive(). This is, um, bizarre; but not worth changing
1006  * right now.
1007  */
1008  if (plainText)
1009  RestoreArchive(fout);
1010 
1011  CloseArchive(fout);
1012 
1013  exit_nicely(0);
1014 }
1015 
1016 
1017 static void
1018 help(const char *progname)
1019 {
1020  printf(_("%s dumps a database as a text file or to other formats.\n\n"), progname);
1021  printf(_("Usage:\n"));
1022  printf(_(" %s [OPTION]... [DBNAME]\n"), progname);
1023 
1024  printf(_("\nGeneral options:\n"));
1025  printf(_(" -f, --file=FILENAME output file or directory name\n"));
1026  printf(_(" -F, --format=c|d|t|p output file format (custom, directory, tar,\n"
1027  " plain text (default))\n"));
1028  printf(_(" -j, --jobs=NUM use this many parallel jobs to dump\n"));
1029  printf(_(" -v, --verbose verbose mode\n"));
1030  printf(_(" -V, --version output version information, then exit\n"));
1031  printf(_(" -Z, --compress=0-9 compression level for compressed formats\n"));
1032  printf(_(" --lock-wait-timeout=TIMEOUT fail after waiting TIMEOUT for a table lock\n"));
1033  printf(_(" --no-sync do not wait for changes to be written safely to disk\n"));
1034  printf(_(" -?, --help show this help, then exit\n"));
1035 
1036  printf(_("\nOptions controlling the output content:\n"));
1037  printf(_(" -a, --data-only dump only the data, not the schema\n"));
1038  printf(_(" -b, --blobs include large objects in dump\n"));
1039  printf(_(" -B, --no-blobs exclude large objects in dump\n"));
1040  printf(_(" -c, --clean clean (drop) database objects before recreating\n"));
1041  printf(_(" -C, --create include commands to create database in dump\n"));
1042  printf(_(" -e, --extension=PATTERN dump the specified extension(s) only\n"));
1043  printf(_(" -E, --encoding=ENCODING dump the data in encoding ENCODING\n"));
1044  printf(_(" -n, --schema=PATTERN dump the specified schema(s) only\n"));
1045  printf(_(" -N, --exclude-schema=PATTERN do NOT dump the specified schema(s)\n"));
1046  printf(_(" -O, --no-owner skip restoration of object ownership in\n"
1047  " plain-text format\n"));
1048  printf(_(" -s, --schema-only dump only the schema, no data\n"));
1049  printf(_(" -S, --superuser=NAME superuser user name to use in plain-text format\n"));
1050  printf(_(" -t, --table=PATTERN dump the specified table(s) only\n"));
1051  printf(_(" -T, --exclude-table=PATTERN do NOT dump the specified table(s)\n"));
1052  printf(_(" -x, --no-privileges do not dump privileges (grant/revoke)\n"));
1053  printf(_(" --binary-upgrade for use by upgrade utilities only\n"));
1054  printf(_(" --column-inserts dump data as INSERT commands with column names\n"));
1055  printf(_(" --disable-dollar-quoting disable dollar quoting, use SQL standard quoting\n"));
1056  printf(_(" --disable-triggers disable triggers during data-only restore\n"));
1057  printf(_(" --enable-row-security enable row security (dump only content user has\n"
1058  " access to)\n"));
1059  printf(_(" --exclude-table-data=PATTERN do NOT dump data for the specified table(s)\n"));
1060  printf(_(" --extra-float-digits=NUM override default setting for extra_float_digits\n"));
1061  printf(_(" --if-exists use IF EXISTS when dropping objects\n"));
1062  printf(_(" --include-foreign-data=PATTERN\n"
1063  " include data of foreign tables on foreign\n"
1064  " servers matching PATTERN\n"));
1065  printf(_(" --inserts dump data as INSERT commands, rather than COPY\n"));
1066  printf(_(" --load-via-partition-root load partitions via the root table\n"));
1067  printf(_(" --no-comments do not dump comments\n"));
1068  printf(_(" --no-publications do not dump publications\n"));
1069  printf(_(" --no-security-labels do not dump security label assignments\n"));
1070  printf(_(" --no-subscriptions do not dump subscriptions\n"));
1071  printf(_(" --no-synchronized-snapshots do not use synchronized snapshots in parallel jobs\n"));
1072  printf(_(" --no-tablespaces do not dump tablespace assignments\n"));
1073  printf(_(" --no-toast-compression do not dump TOAST compression methods\n"));
1074  printf(_(" --no-unlogged-table-data do not dump unlogged table data\n"));
1075  printf(_(" --on-conflict-do-nothing add ON CONFLICT DO NOTHING to INSERT commands\n"));
1076  printf(_(" --quote-all-identifiers quote all identifiers, even if not key words\n"));
1077  printf(_(" --rows-per-insert=NROWS number of rows per INSERT; implies --inserts\n"));
1078  printf(_(" --section=SECTION dump named section (pre-data, data, or post-data)\n"));
1079  printf(_(" --serializable-deferrable wait until the dump can run without anomalies\n"));
1080  printf(_(" --snapshot=SNAPSHOT use given snapshot for the dump\n"));
1081  printf(_(" --strict-names require table and/or schema include patterns to\n"
1082  " match at least one entity each\n"));
1083  printf(_(" --use-set-session-authorization\n"
1084  " use SET SESSION AUTHORIZATION commands instead of\n"
1085  " ALTER OWNER commands to set ownership\n"));
1086 
1087  printf(_("\nConnection options:\n"));
1088  printf(_(" -d, --dbname=DBNAME database to dump\n"));
1089  printf(_(" -h, --host=HOSTNAME database server host or socket directory\n"));
1090  printf(_(" -p, --port=PORT database server port number\n"));
1091  printf(_(" -U, --username=NAME connect as specified database user\n"));
1092  printf(_(" -w, --no-password never prompt for password\n"));
1093  printf(_(" -W, --password force password prompt (should happen automatically)\n"));
1094  printf(_(" --role=ROLENAME do SET ROLE before dump\n"));
1095 
1096  printf(_("\nIf no database name is supplied, then the PGDATABASE environment\n"
1097  "variable value is used.\n\n"));
1098  printf(_("Report bugs to <%s>.\n"), PACKAGE_BUGREPORT);
1099  printf(_("%s home page: <%s>\n"), PACKAGE_NAME, PACKAGE_URL);
1100 }
1101 
1102 static void
1103 setup_connection(Archive *AH, const char *dumpencoding,
1104  const char *dumpsnapshot, char *use_role)
1105 {
1106  DumpOptions *dopt = AH->dopt;
1107  PGconn *conn = GetConnection(AH);
1108  const char *std_strings;
1109 
1111 
1112  /*
1113  * Set the client encoding if requested.
1114  */
1115  if (dumpencoding)
1116  {
1117  if (PQsetClientEncoding(conn, dumpencoding) < 0)
1118  fatal("invalid client encoding \"%s\" specified",
1119  dumpencoding);
1120  }
1121 
1122  /*
1123  * Get the active encoding and the standard_conforming_strings setting, so
1124  * we know how to escape strings.
1125  */
1126  AH->encoding = PQclientEncoding(conn);
1127 
1128  std_strings = PQparameterStatus(conn, "standard_conforming_strings");
1129  AH->std_strings = (std_strings && strcmp(std_strings, "on") == 0);
1130 
1131  /*
1132  * Set the role if requested. In a parallel dump worker, we'll be passed
1133  * use_role == NULL, but AH->use_role is already set (if user specified it
1134  * originally) and we should use that.
1135  */
1136  if (!use_role && AH->use_role)
1137  use_role = AH->use_role;
1138 
1139  /* Set the role if requested */
1140  if (use_role && AH->remoteVersion >= 80100)
1141  {
1142  PQExpBuffer query = createPQExpBuffer();
1143 
1144  appendPQExpBuffer(query, "SET ROLE %s", fmtId(use_role));
1145  ExecuteSqlStatement(AH, query->data);
1146  destroyPQExpBuffer(query);
1147 
1148  /* save it for possible later use by parallel workers */
1149  if (!AH->use_role)
1150  AH->use_role = pg_strdup(use_role);
1151  }
1152 
1153  /* Set the datestyle to ISO to ensure the dump's portability */
1154  ExecuteSqlStatement(AH, "SET DATESTYLE = ISO");
1155 
1156  /* Likewise, avoid using sql_standard intervalstyle */
1157  if (AH->remoteVersion >= 80400)
1158  ExecuteSqlStatement(AH, "SET INTERVALSTYLE = POSTGRES");
1159 
1160  /*
1161  * Use an explicitly specified extra_float_digits if it has been provided.
1162  * Otherwise, set extra_float_digits so that we can dump float data
1163  * exactly (given correctly implemented float I/O code, anyway).
1164  */
1166  {
1168 
1169  appendPQExpBuffer(q, "SET extra_float_digits TO %d",
1171  ExecuteSqlStatement(AH, q->data);
1172  destroyPQExpBuffer(q);
1173  }
1174  else if (AH->remoteVersion >= 90000)
1175  ExecuteSqlStatement(AH, "SET extra_float_digits TO 3");
1176  else
1177  ExecuteSqlStatement(AH, "SET extra_float_digits TO 2");
1178 
1179  /*
1180  * If synchronized scanning is supported, disable it, to prevent
1181  * unpredictable changes in row ordering across a dump and reload.
1182  */
1183  if (AH->remoteVersion >= 80300)
1184  ExecuteSqlStatement(AH, "SET synchronize_seqscans TO off");
1185 
1186  /*
1187  * Disable timeouts if supported.
1188  */
1189  ExecuteSqlStatement(AH, "SET statement_timeout = 0");
1190  if (AH->remoteVersion >= 90300)
1191  ExecuteSqlStatement(AH, "SET lock_timeout = 0");
1192  if (AH->remoteVersion >= 90600)
1193  ExecuteSqlStatement(AH, "SET idle_in_transaction_session_timeout = 0");
1194 
1195  /*
1196  * Quote all identifiers, if requested.
1197  */
1198  if (quote_all_identifiers && AH->remoteVersion >= 90100)
1199  ExecuteSqlStatement(AH, "SET quote_all_identifiers = true");
1200 
1201  /*
1202  * Adjust row-security mode, if supported.
1203  */
1204  if (AH->remoteVersion >= 90500)
1205  {
1206  if (dopt->enable_row_security)
1207  ExecuteSqlStatement(AH, "SET row_security = on");
1208  else
1209  ExecuteSqlStatement(AH, "SET row_security = off");
1210  }
1211 
1212  /*
1213  * Start transaction-snapshot mode transaction to dump consistent data.
1214  */
1215  ExecuteSqlStatement(AH, "BEGIN");
1216  if (AH->remoteVersion >= 90100)
1217  {
1218  /*
1219  * To support the combination of serializable_deferrable with the jobs
1220  * option we use REPEATABLE READ for the worker connections that are
1221  * passed a snapshot. As long as the snapshot is acquired in a
1222  * SERIALIZABLE, READ ONLY, DEFERRABLE transaction, its use within a
1223  * REPEATABLE READ transaction provides the appropriate integrity
1224  * guarantees. This is a kluge, but safe for back-patching.
1225  */
1226  if (dopt->serializable_deferrable && AH->sync_snapshot_id == NULL)
1228  "SET TRANSACTION ISOLATION LEVEL "
1229  "SERIALIZABLE, READ ONLY, DEFERRABLE");
1230  else
1232  "SET TRANSACTION ISOLATION LEVEL "
1233  "REPEATABLE READ, READ ONLY");
1234  }
1235  else
1236  {
1238  "SET TRANSACTION ISOLATION LEVEL "
1239  "SERIALIZABLE, READ ONLY");
1240  }
1241 
1242  /*
1243  * If user specified a snapshot to use, select that. In a parallel dump
1244  * worker, we'll be passed dumpsnapshot == NULL, but AH->sync_snapshot_id
1245  * is already set (if the server can handle it) and we should use that.
1246  */
1247  if (dumpsnapshot)
1248  AH->sync_snapshot_id = pg_strdup(dumpsnapshot);
1249 
1250  if (AH->sync_snapshot_id)
1251  {
1252  PQExpBuffer query = createPQExpBuffer();
1253 
1254  appendPQExpBufferStr(query, "SET TRANSACTION SNAPSHOT ");
1255  appendStringLiteralConn(query, AH->sync_snapshot_id, conn);
1256  ExecuteSqlStatement(AH, query->data);
1257  destroyPQExpBuffer(query);
1258  }
1259  else if (AH->numWorkers > 1 &&
1260  AH->remoteVersion >= 90200 &&
1262  {
1263  if (AH->isStandby && AH->remoteVersion < 100000)
1264  fatal("Synchronized snapshots on standby servers are not supported by this server version.\n"
1265  "Run with --no-synchronized-snapshots instead if you do not need\n"
1266  "synchronized snapshots.");
1267 
1268 
1270  }
1271 }
1272 
1273 /* Set up connection for a parallel worker process */
1274 static void
1276 {
1277  /*
1278  * We want to re-select all the same values the leader connection is
1279  * using. We'll have inherited directly-usable values in
1280  * AH->sync_snapshot_id and AH->use_role, but we need to translate the
1281  * inherited encoding value back to a string to pass to setup_connection.
1282  */
1283  setup_connection(AH,
1285  NULL,
1286  NULL);
1287 }
1288 
1289 static char *
1291 {
1292  char *query = "SELECT pg_catalog.pg_export_snapshot()";
1293  char *result;
1294  PGresult *res;
1295 
1296  res = ExecuteSqlQueryForSingleRow(fout, query);
1297  result = pg_strdup(PQgetvalue(res, 0, 0));
1298  PQclear(res);
1299 
1300  return result;
1301 }
1302 
1303 static ArchiveFormat
1305 {
1306  ArchiveFormat archiveFormat;
1307 
1308  *mode = archModeWrite;
1309 
1310  if (pg_strcasecmp(format, "a") == 0 || pg_strcasecmp(format, "append") == 0)
1311  {
1312  /* This is used by pg_dumpall, and is not documented */
1313  archiveFormat = archNull;
1314  *mode = archModeAppend;
1315  }
1316  else if (pg_strcasecmp(format, "c") == 0)
1317  archiveFormat = archCustom;
1318  else if (pg_strcasecmp(format, "custom") == 0)
1319  archiveFormat = archCustom;
1320  else if (pg_strcasecmp(format, "d") == 0)
1321  archiveFormat = archDirectory;
1322  else if (pg_strcasecmp(format, "directory") == 0)
1323  archiveFormat = archDirectory;
1324  else if (pg_strcasecmp(format, "p") == 0)
1325  archiveFormat = archNull;
1326  else if (pg_strcasecmp(format, "plain") == 0)
1327  archiveFormat = archNull;
1328  else if (pg_strcasecmp(format, "t") == 0)
1329  archiveFormat = archTar;
1330  else if (pg_strcasecmp(format, "tar") == 0)
1331  archiveFormat = archTar;
1332  else
1333  fatal("invalid output format \"%s\" specified", format);
1334  return archiveFormat;
1335 }
1336 
1337 /*
1338  * Find the OIDs of all schemas matching the given list of patterns,
1339  * and append them to the given OID list.
1340  */
1341 static void
1343  SimpleStringList *patterns,
1344  SimpleOidList *oids,
1345  bool strict_names)
1346 {
1347  PQExpBuffer query;
1348  PGresult *res;
1349  SimpleStringListCell *cell;
1350  int i;
1351 
1352  if (patterns->head == NULL)
1353  return; /* nothing to do */
1354 
1355  query = createPQExpBuffer();
1356 
1357  /*
1358  * The loop below runs multiple SELECTs might sometimes result in
1359  * duplicate entries in the OID list, but we don't care.
1360  */
1361 
1362  for (cell = patterns->head; cell; cell = cell->next)
1363  {
1364  appendPQExpBufferStr(query,
1365  "SELECT oid FROM pg_catalog.pg_namespace n\n");
1366  processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1367  false, NULL, "n.nspname", NULL, NULL);
1368 
1369  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1370  if (strict_names && PQntuples(res) == 0)
1371  fatal("no matching schemas were found for pattern \"%s\"", cell->val);
1372 
1373  for (i = 0; i < PQntuples(res); i++)
1374  {
1375  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1376  }
1377 
1378  PQclear(res);
1379  resetPQExpBuffer(query);
1380  }
1381 
1382  destroyPQExpBuffer(query);
1383 }
1384 
1385 /*
1386  * Find the OIDs of all extensions matching the given list of patterns,
1387  * and append them to the given OID list.
1388  */
1389 static void
1391  SimpleStringList *patterns,
1392  SimpleOidList *oids,
1393  bool strict_names)
1394 {
1395  PQExpBuffer query;
1396  PGresult *res;
1397  SimpleStringListCell *cell;
1398  int i;
1399 
1400  if (patterns->head == NULL)
1401  return; /* nothing to do */
1402 
1403  query = createPQExpBuffer();
1404 
1405  /*
1406  * The loop below runs multiple SELECTs might sometimes result in
1407  * duplicate entries in the OID list, but we don't care.
1408  */
1409  for (cell = patterns->head; cell; cell = cell->next)
1410  {
1411  appendPQExpBufferStr(query,
1412  "SELECT oid FROM pg_catalog.pg_extension e\n");
1413  processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1414  false, NULL, "e.extname", NULL, NULL);
1415 
1416  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1417  if (strict_names && PQntuples(res) == 0)
1418  fatal("no matching extensions were found for pattern \"%s\"", cell->val);
1419 
1420  for (i = 0; i < PQntuples(res); i++)
1421  {
1422  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1423  }
1424 
1425  PQclear(res);
1426  resetPQExpBuffer(query);
1427  }
1428 
1429  destroyPQExpBuffer(query);
1430 }
1431 
1432 /*
1433  * Find the OIDs of all foreign servers matching the given list of patterns,
1434  * and append them to the given OID list.
1435  */
1436 static void
1438  SimpleStringList *patterns,
1439  SimpleOidList *oids)
1440 {
1441  PQExpBuffer query;
1442  PGresult *res;
1443  SimpleStringListCell *cell;
1444  int i;
1445 
1446  if (patterns->head == NULL)
1447  return; /* nothing to do */
1448 
1449  query = createPQExpBuffer();
1450 
1451  /*
1452  * The loop below runs multiple SELECTs might sometimes result in
1453  * duplicate entries in the OID list, but we don't care.
1454  */
1455 
1456  for (cell = patterns->head; cell; cell = cell->next)
1457  {
1458  appendPQExpBufferStr(query,
1459  "SELECT oid FROM pg_catalog.pg_foreign_server s\n");
1460  processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1461  false, NULL, "s.srvname", NULL, NULL);
1462 
1463  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1464  if (PQntuples(res) == 0)
1465  fatal("no matching foreign servers were found for pattern \"%s\"", cell->val);
1466 
1467  for (i = 0; i < PQntuples(res); i++)
1468  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1469 
1470  PQclear(res);
1471  resetPQExpBuffer(query);
1472  }
1473 
1474  destroyPQExpBuffer(query);
1475 }
1476 
1477 /*
1478  * Find the OIDs of all tables matching the given list of patterns,
1479  * and append them to the given OID list. See also expand_dbname_patterns()
1480  * in pg_dumpall.c
1481  */
1482 static void
1484  SimpleStringList *patterns, SimpleOidList *oids,
1485  bool strict_names)
1486 {
1487  PQExpBuffer query;
1488  PGresult *res;
1489  SimpleStringListCell *cell;
1490  int i;
1491 
1492  if (patterns->head == NULL)
1493  return; /* nothing to do */
1494 
1495  query = createPQExpBuffer();
1496 
1497  /*
1498  * this might sometimes result in duplicate entries in the OID list, but
1499  * we don't care.
1500  */
1501 
1502  for (cell = patterns->head; cell; cell = cell->next)
1503  {
1504  /*
1505  * Query must remain ABSOLUTELY devoid of unqualified names. This
1506  * would be unnecessary given a pg_table_is_visible() variant taking a
1507  * search_path argument.
1508  */
1509  appendPQExpBuffer(query,
1510  "SELECT c.oid"
1511  "\nFROM pg_catalog.pg_class c"
1512  "\n LEFT JOIN pg_catalog.pg_namespace n"
1513  "\n ON n.oid OPERATOR(pg_catalog.=) c.relnamespace"
1514  "\nWHERE c.relkind OPERATOR(pg_catalog.=) ANY"
1515  "\n (array['%c', '%c', '%c', '%c', '%c', '%c'])\n",
1516  RELKIND_RELATION, RELKIND_SEQUENCE, RELKIND_VIEW,
1517  RELKIND_MATVIEW, RELKIND_FOREIGN_TABLE,
1518  RELKIND_PARTITIONED_TABLE);
1519  processSQLNamePattern(GetConnection(fout), query, cell->val, true,
1520  false, "n.nspname", "c.relname", NULL,
1521  "pg_catalog.pg_table_is_visible(c.oid)");
1522 
1523  ExecuteSqlStatement(fout, "RESET search_path");
1524  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1527  if (strict_names && PQntuples(res) == 0)
1528  fatal("no matching tables were found for pattern \"%s\"", cell->val);
1529 
1530  for (i = 0; i < PQntuples(res); i++)
1531  {
1532  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1533  }
1534 
1535  PQclear(res);
1536  resetPQExpBuffer(query);
1537  }
1538 
1539  destroyPQExpBuffer(query);
1540 }
1541 
1542 /*
1543  * checkExtensionMembership
1544  * Determine whether object is an extension member, and if so,
1545  * record an appropriate dependency and set the object's dump flag.
1546  *
1547  * It's important to call this for each object that could be an extension
1548  * member. Generally, we integrate this with determining the object's
1549  * to-be-dumped-ness, since extension membership overrides other rules for that.
1550  *
1551  * Returns true if object is an extension member, else false.
1552  */
1553 static bool
1555 {
1556  ExtensionInfo *ext = findOwningExtension(dobj->catId);
1557 
1558  if (ext == NULL)
1559  return false;
1560 
1561  dobj->ext_member = true;
1562 
1563  /* Record dependency so that getDependencies needn't deal with that */
1564  addObjectDependency(dobj, ext->dobj.dumpId);
1565 
1566  /*
1567  * In 9.6 and above, mark the member object to have any non-initial ACL,
1568  * policies, and security labels dumped.
1569  *
1570  * Note that any initial ACLs (see pg_init_privs) will be removed when we
1571  * extract the information about the object. We don't provide support for
1572  * initial policies and security labels and it seems unlikely for those to
1573  * ever exist, but we may have to revisit this later.
1574  *
1575  * Prior to 9.6, we do not include any extension member components.
1576  *
1577  * In binary upgrades, we still dump all components of the members
1578  * individually, since the idea is to exactly reproduce the database
1579  * contents rather than replace the extension contents with something
1580  * different.
1581  */
1582  if (fout->dopt->binary_upgrade)
1583  dobj->dump = ext->dobj.dump;
1584  else
1585  {
1586  if (fout->remoteVersion < 90600)
1587  dobj->dump = DUMP_COMPONENT_NONE;
1588  else
1589  dobj->dump = ext->dobj.dump_contains & (DUMP_COMPONENT_ACL |
1592  }
1593 
1594  return true;
1595 }
1596 
1597 /*
1598  * selectDumpableNamespace: policy-setting subroutine
1599  * Mark a namespace as to be dumped or not
1600  */
1601 static void
1603 {
1604  /*
1605  * If specific tables are being dumped, do not dump any complete
1606  * namespaces. If specific namespaces are being dumped, dump just those
1607  * namespaces. Otherwise, dump all non-system namespaces.
1608  */
1609  if (table_include_oids.head != NULL)
1610  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1611  else if (schema_include_oids.head != NULL)
1612  nsinfo->dobj.dump_contains = nsinfo->dobj.dump =
1613  simple_oid_list_member(&schema_include_oids,
1614  nsinfo->dobj.catId.oid) ?
1616  else if (fout->remoteVersion >= 90600 &&
1617  strcmp(nsinfo->dobj.name, "pg_catalog") == 0)
1618  {
1619  /*
1620  * In 9.6 and above, we dump out any ACLs defined in pg_catalog, if
1621  * they are interesting (and not the original ACLs which were set at
1622  * initdb time, see pg_init_privs).
1623  */
1624  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
1625  }
1626  else if (strncmp(nsinfo->dobj.name, "pg_", 3) == 0 ||
1627  strcmp(nsinfo->dobj.name, "information_schema") == 0)
1628  {
1629  /* Other system schemas don't get dumped */
1630  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1631  }
1632  else if (strcmp(nsinfo->dobj.name, "public") == 0)
1633  {
1634  /*
1635  * The public schema is a strange beast that sits in a sort of
1636  * no-mans-land between being a system object and a user object. We
1637  * don't want to dump creation or comment commands for it, because
1638  * that complicates matters for non-superuser use of pg_dump. But we
1639  * should dump any ACL changes that have occurred for it, and of
1640  * course we should dump contained objects.
1641  */
1642  nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
1644  }
1645  else
1646  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ALL;
1647 
1648  /*
1649  * In any case, a namespace can be excluded by an exclusion switch
1650  */
1651  if (nsinfo->dobj.dump_contains &&
1652  simple_oid_list_member(&schema_exclude_oids,
1653  nsinfo->dobj.catId.oid))
1654  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1655 
1656  /*
1657  * If the schema belongs to an extension, allow extension membership to
1658  * override the dump decision for the schema itself. However, this does
1659  * not change dump_contains, so this won't change what we do with objects
1660  * within the schema. (If they belong to the extension, they'll get
1661  * suppressed by it, otherwise not.)
1662  */
1663  (void) checkExtensionMembership(&nsinfo->dobj, fout);
1664 }
1665 
1666 /*
1667  * selectDumpableTable: policy-setting subroutine
1668  * Mark a table as to be dumped or not
1669  */
1670 static void
1672 {
1673  if (checkExtensionMembership(&tbinfo->dobj, fout))
1674  return; /* extension membership overrides all else */
1675 
1676  /*
1677  * If specific tables are being dumped, dump just those tables; else, dump
1678  * according to the parent namespace's dump flag.
1679  */
1680  if (table_include_oids.head != NULL)
1681  tbinfo->dobj.dump = simple_oid_list_member(&table_include_oids,
1682  tbinfo->dobj.catId.oid) ?
1684  else
1685  tbinfo->dobj.dump = tbinfo->dobj.namespace->dobj.dump_contains;
1686 
1687  /*
1688  * In any case, a table can be excluded by an exclusion switch
1689  */
1690  if (tbinfo->dobj.dump &&
1691  simple_oid_list_member(&table_exclude_oids,
1692  tbinfo->dobj.catId.oid))
1693  tbinfo->dobj.dump = DUMP_COMPONENT_NONE;
1694 }
1695 
1696 /*
1697  * selectDumpableType: policy-setting subroutine
1698  * Mark a type as to be dumped or not
1699  *
1700  * If it's a table's rowtype or an autogenerated array type, we also apply a
1701  * special type code to facilitate sorting into the desired order. (We don't
1702  * want to consider those to be ordinary types because that would bring tables
1703  * up into the datatype part of the dump order.) We still set the object's
1704  * dump flag; that's not going to cause the dummy type to be dumped, but we
1705  * need it so that casts involving such types will be dumped correctly -- see
1706  * dumpCast. This means the flag should be set the same as for the underlying
1707  * object (the table or base type).
1708  */
1709 static void
1711 {
1712  /* skip complex types, except for standalone composite types */
1713  if (OidIsValid(tyinfo->typrelid) &&
1714  tyinfo->typrelkind != RELKIND_COMPOSITE_TYPE)
1715  {
1716  TableInfo *tytable = findTableByOid(tyinfo->typrelid);
1717 
1718  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1719  if (tytable != NULL)
1720  tyinfo->dobj.dump = tytable->dobj.dump;
1721  else
1722  tyinfo->dobj.dump = DUMP_COMPONENT_NONE;
1723  return;
1724  }
1725 
1726  /* skip auto-generated array types */
1727  if (tyinfo->isArray || tyinfo->isMultirange)
1728  {
1729  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1730 
1731  /*
1732  * Fall through to set the dump flag; we assume that the subsequent
1733  * rules will do the same thing as they would for the array's base
1734  * type. (We cannot reliably look up the base type here, since
1735  * getTypes may not have processed it yet.)
1736  */
1737  }
1738 
1739  if (checkExtensionMembership(&tyinfo->dobj, fout))
1740  return; /* extension membership overrides all else */
1741 
1742  /* Dump based on if the contents of the namespace are being dumped */
1743  tyinfo->dobj.dump = tyinfo->dobj.namespace->dobj.dump_contains;
1744 }
1745 
1746 /*
1747  * selectDumpableDefaultACL: policy-setting subroutine
1748  * Mark a default ACL as to be dumped or not
1749  *
1750  * For per-schema default ACLs, dump if the schema is to be dumped.
1751  * Otherwise dump if we are dumping "everything". Note that dataOnly
1752  * and aclsSkip are checked separately.
1753  */
1754 static void
1756 {
1757  /* Default ACLs can't be extension members */
1758 
1759  if (dinfo->dobj.namespace)
1760  /* default ACLs are considered part of the namespace */
1761  dinfo->dobj.dump = dinfo->dobj.namespace->dobj.dump_contains;
1762  else
1763  dinfo->dobj.dump = dopt->include_everything ?
1765 }
1766 
1767 /*
1768  * selectDumpableCast: policy-setting subroutine
1769  * Mark a cast as to be dumped or not
1770  *
1771  * Casts do not belong to any particular namespace (since they haven't got
1772  * names), nor do they have identifiable owners. To distinguish user-defined
1773  * casts from built-in ones, we must resort to checking whether the cast's
1774  * OID is in the range reserved for initdb.
1775  */
1776 static void
1778 {
1779  if (checkExtensionMembership(&cast->dobj, fout))
1780  return; /* extension membership overrides all else */
1781 
1782  /*
1783  * This would be DUMP_COMPONENT_ACL for from-initdb casts, but they do not
1784  * support ACLs currently.
1785  */
1786  if (cast->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1787  cast->dobj.dump = DUMP_COMPONENT_NONE;
1788  else
1789  cast->dobj.dump = fout->dopt->include_everything ?
1791 }
1792 
1793 /*
1794  * selectDumpableProcLang: policy-setting subroutine
1795  * Mark a procedural language as to be dumped or not
1796  *
1797  * Procedural languages do not belong to any particular namespace. To
1798  * identify built-in languages, we must resort to checking whether the
1799  * language's OID is in the range reserved for initdb.
1800  */
1801 static void
1803 {
1804  if (checkExtensionMembership(&plang->dobj, fout))
1805  return; /* extension membership overrides all else */
1806 
1807  /*
1808  * Only include procedural languages when we are dumping everything.
1809  *
1810  * For from-initdb procedural languages, only include ACLs, as we do for
1811  * the pg_catalog namespace. We need this because procedural languages do
1812  * not live in any namespace.
1813  */
1814  if (!fout->dopt->include_everything)
1815  plang->dobj.dump = DUMP_COMPONENT_NONE;
1816  else
1817  {
1818  if (plang->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1819  plang->dobj.dump = fout->remoteVersion < 90600 ?
1821  else
1822  plang->dobj.dump = DUMP_COMPONENT_ALL;
1823  }
1824 }
1825 
1826 /*
1827  * selectDumpableAccessMethod: policy-setting subroutine
1828  * Mark an access method as to be dumped or not
1829  *
1830  * Access methods do not belong to any particular namespace. To identify
1831  * built-in access methods, we must resort to checking whether the
1832  * method's OID is in the range reserved for initdb.
1833  */
1834 static void
1836 {
1837  if (checkExtensionMembership(&method->dobj, fout))
1838  return; /* extension membership overrides all else */
1839 
1840  /*
1841  * This would be DUMP_COMPONENT_ACL for from-initdb access methods, but
1842  * they do not support ACLs currently.
1843  */
1844  if (method->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1845  method->dobj.dump = DUMP_COMPONENT_NONE;
1846  else
1847  method->dobj.dump = fout->dopt->include_everything ?
1849 }
1850 
1851 /*
1852  * selectDumpableExtension: policy-setting subroutine
1853  * Mark an extension as to be dumped or not
1854  *
1855  * Built-in extensions should be skipped except for checking ACLs, since we
1856  * assume those will already be installed in the target database. We identify
1857  * such extensions by their having OIDs in the range reserved for initdb.
1858  * We dump all user-added extensions by default. No extensions are dumped
1859  * if include_everything is false (i.e., a --schema or --table switch was
1860  * given), except if --extension specifies a list of extensions to dump.
1861  */
1862 static void
1864 {
1865  /*
1866  * Use DUMP_COMPONENT_ACL for built-in extensions, to allow users to
1867  * change permissions on their member objects, if they wish to, and have
1868  * those changes preserved.
1869  */
1870  if (extinfo->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1871  extinfo->dobj.dump = extinfo->dobj.dump_contains = DUMP_COMPONENT_ACL;
1872  else
1873  {
1874  /* check if there is a list of extensions to dump */
1875  if (extension_include_oids.head != NULL)
1876  extinfo->dobj.dump = extinfo->dobj.dump_contains =
1877  simple_oid_list_member(&extension_include_oids,
1878  extinfo->dobj.catId.oid) ?
1880  else
1881  extinfo->dobj.dump = extinfo->dobj.dump_contains =
1882  dopt->include_everything ?
1884  }
1885 }
1886 
1887 /*
1888  * selectDumpablePublicationTable: policy-setting subroutine
1889  * Mark a publication table as to be dumped or not
1890  *
1891  * Publication tables have schemas, but those are ignored in decision making,
1892  * because publications are only dumped when we are dumping everything.
1893  */
1894 static void
1896 {
1897  if (checkExtensionMembership(dobj, fout))
1898  return; /* extension membership overrides all else */
1899 
1900  dobj->dump = fout->dopt->include_everything ?
1902 }
1903 
1904 /*
1905  * selectDumpableObject: policy-setting subroutine
1906  * Mark a generic dumpable object as to be dumped or not
1907  *
1908  * Use this only for object types without a special-case routine above.
1909  */
1910 static void
1912 {
1913  if (checkExtensionMembership(dobj, fout))
1914  return; /* extension membership overrides all else */
1915 
1916  /*
1917  * Default policy is to dump if parent namespace is dumpable, or for
1918  * non-namespace-associated items, dump if we're dumping "everything".
1919  */
1920  if (dobj->namespace)
1921  dobj->dump = dobj->namespace->dobj.dump_contains;
1922  else
1923  dobj->dump = fout->dopt->include_everything ?
1925 }
1926 
1927 /*
1928  * Dump a table's contents for loading using the COPY command
1929  * - this routine is called by the Archiver when it wants the table
1930  * to be dumped.
1931  */
1932 static int
1933 dumpTableData_copy(Archive *fout, const void *dcontext)
1934 {
1935  TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
1936  TableInfo *tbinfo = tdinfo->tdtable;
1937  const char *classname = tbinfo->dobj.name;
1939 
1940  /*
1941  * Note: can't use getThreadLocalPQExpBuffer() here, we're calling fmtId
1942  * which uses it already.
1943  */
1944  PQExpBuffer clistBuf = createPQExpBuffer();
1945  PGconn *conn = GetConnection(fout);
1946  PGresult *res;
1947  int ret;
1948  char *copybuf;
1949  const char *column_list;
1950 
1951  pg_log_info("dumping contents of table \"%s.%s\"",
1952  tbinfo->dobj.namespace->dobj.name, classname);
1953 
1954  /*
1955  * Specify the column list explicitly so that we have no possibility of
1956  * retrieving data in the wrong column order. (The default column
1957  * ordering of COPY will not be what we want in certain corner cases
1958  * involving ADD COLUMN and inheritance.)
1959  */
1960  column_list = fmtCopyColumnList(tbinfo, clistBuf);
1961 
1962  /*
1963  * Use COPY (SELECT ...) TO when dumping a foreign table's data, and when
1964  * a filter condition was specified. For other cases a simple COPY
1965  * suffices.
1966  */
1967  if (tdinfo->filtercond || tbinfo->relkind == RELKIND_FOREIGN_TABLE)
1968  {
1969  /* Note: this syntax is only supported in 8.2 and up */
1970  appendPQExpBufferStr(q, "COPY (SELECT ");
1971  /* klugery to get rid of parens in column list */
1972  if (strlen(column_list) > 2)
1973  {
1974  appendPQExpBufferStr(q, column_list + 1);
1975  q->data[q->len - 1] = ' ';
1976  }
1977  else
1978  appendPQExpBufferStr(q, "* ");
1979 
1980  appendPQExpBuffer(q, "FROM %s %s) TO stdout;",
1981  fmtQualifiedDumpable(tbinfo),
1982  tdinfo->filtercond ? tdinfo->filtercond : "");
1983  }
1984  else
1985  {
1986  appendPQExpBuffer(q, "COPY %s %s TO stdout;",
1987  fmtQualifiedDumpable(tbinfo),
1988  column_list);
1989  }
1990  res = ExecuteSqlQuery(fout, q->data, PGRES_COPY_OUT);
1991  PQclear(res);
1992  destroyPQExpBuffer(clistBuf);
1993 
1994  for (;;)
1995  {
1996  ret = PQgetCopyData(conn, &copybuf, 0);
1997 
1998  if (ret < 0)
1999  break; /* done or error */
2000 
2001  if (copybuf)
2002  {
2003  WriteData(fout, copybuf, ret);
2004  PQfreemem(copybuf);
2005  }
2006 
2007  /* ----------
2008  * THROTTLE:
2009  *
2010  * There was considerable discussion in late July, 2000 regarding
2011  * slowing down pg_dump when backing up large tables. Users with both
2012  * slow & fast (multi-processor) machines experienced performance
2013  * degradation when doing a backup.
2014  *
2015  * Initial attempts based on sleeping for a number of ms for each ms
2016  * of work were deemed too complex, then a simple 'sleep in each loop'
2017  * implementation was suggested. The latter failed because the loop
2018  * was too tight. Finally, the following was implemented:
2019  *
2020  * If throttle is non-zero, then
2021  * See how long since the last sleep.
2022  * Work out how long to sleep (based on ratio).
2023  * If sleep is more than 100ms, then
2024  * sleep
2025  * reset timer
2026  * EndIf
2027  * EndIf
2028  *
2029  * where the throttle value was the number of ms to sleep per ms of
2030  * work. The calculation was done in each loop.
2031  *
2032  * Most of the hard work is done in the backend, and this solution
2033  * still did not work particularly well: on slow machines, the ratio
2034  * was 50:1, and on medium paced machines, 1:1, and on fast
2035  * multi-processor machines, it had little or no effect, for reasons
2036  * that were unclear.
2037  *
2038  * Further discussion ensued, and the proposal was dropped.
2039  *
2040  * For those people who want this feature, it can be implemented using
2041  * gettimeofday in each loop, calculating the time since last sleep,
2042  * multiplying that by the sleep ratio, then if the result is more
2043  * than a preset 'minimum sleep time' (say 100ms), call the 'select'
2044  * function to sleep for a subsecond period ie.
2045  *
2046  * select(0, NULL, NULL, NULL, &tvi);
2047  *
2048  * This will return after the interval specified in the structure tvi.
2049  * Finally, call gettimeofday again to save the 'last sleep time'.
2050  * ----------
2051  */
2052  }
2053  archprintf(fout, "\\.\n\n\n");
2054 
2055  if (ret == -2)
2056  {
2057  /* copy data transfer failed */
2058  pg_log_error("Dumping the contents of table \"%s\" failed: PQgetCopyData() failed.", classname);
2059  pg_log_error("Error message from server: %s", PQerrorMessage(conn));
2060  pg_log_error("The command was: %s", q->data);
2061  exit_nicely(1);
2062  }
2063 
2064  /* Check command status and return to normal libpq state */
2065  res = PQgetResult(conn);
2066  if (PQresultStatus(res) != PGRES_COMMAND_OK)
2067  {
2068  pg_log_error("Dumping the contents of table \"%s\" failed: PQgetResult() failed.", classname);
2069  pg_log_error("Error message from server: %s", PQerrorMessage(conn));
2070  pg_log_error("The command was: %s", q->data);
2071  exit_nicely(1);
2072  }
2073  PQclear(res);
2074 
2075  /* Do this to ensure we've pumped libpq back to idle state */
2076  if (PQgetResult(conn) != NULL)
2077  pg_log_warning("unexpected extra results during COPY of table \"%s\"",
2078  classname);
2079 
2080  destroyPQExpBuffer(q);
2081  return 1;
2082 }
2083 
2084 /*
2085  * Dump table data using INSERT commands.
2086  *
2087  * Caution: when we restore from an archive file direct to database, the
2088  * INSERT commands emitted by this function have to be parsed by
2089  * pg_backup_db.c's ExecuteSimpleCommands(), which will not handle comments,
2090  * E'' strings, or dollar-quoted strings. So don't emit anything like that.
2091  */
2092 static int
2093 dumpTableData_insert(Archive *fout, const void *dcontext)
2094 {
2095  TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
2096  TableInfo *tbinfo = tdinfo->tdtable;
2097  DumpOptions *dopt = fout->dopt;
2099  PQExpBuffer insertStmt = NULL;
2100  PGresult *res;
2101  int nfields;
2102  int rows_per_statement = dopt->dump_inserts;
2103  int rows_this_statement = 0;
2104 
2105  appendPQExpBuffer(q, "DECLARE _pg_dump_cursor CURSOR FOR "
2106  "SELECT * FROM ONLY %s",
2107  fmtQualifiedDumpable(tbinfo));
2108  if (tdinfo->filtercond)
2109  appendPQExpBuffer(q, " %s", tdinfo->filtercond);
2110 
2111  ExecuteSqlStatement(fout, q->data);
2112 
2113  while (1)
2114  {
2115  res = ExecuteSqlQuery(fout, "FETCH 100 FROM _pg_dump_cursor",
2116  PGRES_TUPLES_OK);
2117  nfields = PQnfields(res);
2118 
2119  /*
2120  * First time through, we build as much of the INSERT statement as
2121  * possible in "insertStmt", which we can then just print for each
2122  * statement. If the table happens to have zero columns then this will
2123  * be a complete statement, otherwise it will end in "VALUES" and be
2124  * ready to have the row's column values printed.
2125  */
2126  if (insertStmt == NULL)
2127  {
2128  TableInfo *targettab;
2129 
2130  insertStmt = createPQExpBuffer();
2131 
2132  /*
2133  * When load-via-partition-root is set, get the root table name
2134  * for the partition table, so that we can reload data through the
2135  * root table.
2136  */
2137  if (dopt->load_via_partition_root && tbinfo->ispartition)
2138  targettab = getRootTableInfo(tbinfo);
2139  else
2140  targettab = tbinfo;
2141 
2142  appendPQExpBuffer(insertStmt, "INSERT INTO %s ",
2143  fmtQualifiedDumpable(targettab));
2144 
2145  /* corner case for zero-column table */
2146  if (nfields == 0)
2147  {
2148  appendPQExpBufferStr(insertStmt, "DEFAULT VALUES;\n");
2149  }
2150  else
2151  {
2152  /* append the list of column names if required */
2153  if (dopt->column_inserts)
2154  {
2155  appendPQExpBufferChar(insertStmt, '(');
2156  for (int field = 0; field < nfields; field++)
2157  {
2158  if (field > 0)
2159  appendPQExpBufferStr(insertStmt, ", ");
2160  appendPQExpBufferStr(insertStmt,
2161  fmtId(PQfname(res, field)));
2162  }
2163  appendPQExpBufferStr(insertStmt, ") ");
2164  }
2165 
2166  if (tbinfo->needs_override)
2167  appendPQExpBufferStr(insertStmt, "OVERRIDING SYSTEM VALUE ");
2168 
2169  appendPQExpBufferStr(insertStmt, "VALUES");
2170  }
2171  }
2172 
2173  for (int tuple = 0; tuple < PQntuples(res); tuple++)
2174  {
2175  /* Write the INSERT if not in the middle of a multi-row INSERT. */
2176  if (rows_this_statement == 0)
2177  archputs(insertStmt->data, fout);
2178 
2179  /*
2180  * If it is zero-column table then we've already written the
2181  * complete statement, which will mean we've disobeyed
2182  * --rows-per-insert when it's set greater than 1. We do support
2183  * a way to make this multi-row with: SELECT UNION ALL SELECT
2184  * UNION ALL ... but that's non-standard so we should avoid it
2185  * given that using INSERTs is mostly only ever needed for
2186  * cross-database exports.
2187  */
2188  if (nfields == 0)
2189  continue;
2190 
2191  /* Emit a row heading */
2192  if (rows_per_statement == 1)
2193  archputs(" (", fout);
2194  else if (rows_this_statement > 0)
2195  archputs(",\n\t(", fout);
2196  else
2197  archputs("\n\t(", fout);
2198 
2199  for (int field = 0; field < nfields; field++)
2200  {
2201  if (field > 0)
2202  archputs(", ", fout);
2203  if (tbinfo->attgenerated[field])
2204  {
2205  archputs("DEFAULT", fout);
2206  continue;
2207  }
2208  if (PQgetisnull(res, tuple, field))
2209  {
2210  archputs("NULL", fout);
2211  continue;
2212  }
2213 
2214  /* XXX This code is partially duplicated in ruleutils.c */
2215  switch (PQftype(res, field))
2216  {
2217  case INT2OID:
2218  case INT4OID:
2219  case INT8OID:
2220  case OIDOID:
2221  case FLOAT4OID:
2222  case FLOAT8OID:
2223  case NUMERICOID:
2224  {
2225  /*
2226  * These types are printed without quotes unless
2227  * they contain values that aren't accepted by the
2228  * scanner unquoted (e.g., 'NaN'). Note that
2229  * strtod() and friends might accept NaN, so we
2230  * can't use that to test.
2231  *
2232  * In reality we only need to defend against
2233  * infinity and NaN, so we need not get too crazy
2234  * about pattern matching here.
2235  */
2236  const char *s = PQgetvalue(res, tuple, field);
2237 
2238  if (strspn(s, "0123456789 +-eE.") == strlen(s))
2239  archputs(s, fout);
2240  else
2241  archprintf(fout, "'%s'", s);
2242  }
2243  break;
2244 
2245  case BITOID:
2246  case VARBITOID:
2247  archprintf(fout, "B'%s'",
2248  PQgetvalue(res, tuple, field));
2249  break;
2250 
2251  case BOOLOID:
2252  if (strcmp(PQgetvalue(res, tuple, field), "t") == 0)
2253  archputs("true", fout);
2254  else
2255  archputs("false", fout);
2256  break;
2257 
2258  default:
2259  /* All other types are printed as string literals. */
2260  resetPQExpBuffer(q);
2262  PQgetvalue(res, tuple, field),
2263  fout);
2264  archputs(q->data, fout);
2265  break;
2266  }
2267  }
2268 
2269  /* Terminate the row ... */
2270  archputs(")", fout);
2271 
2272  /* ... and the statement, if the target no. of rows is reached */
2273  if (++rows_this_statement >= rows_per_statement)
2274  {
2275  if (dopt->do_nothing)
2276  archputs(" ON CONFLICT DO NOTHING;\n", fout);
2277  else
2278  archputs(";\n", fout);
2279  /* Reset the row counter */
2280  rows_this_statement = 0;
2281  }
2282  }
2283 
2284  if (PQntuples(res) <= 0)
2285  {
2286  PQclear(res);
2287  break;
2288  }
2289  PQclear(res);
2290  }
2291 
2292  /* Terminate any statements that didn't make the row count. */
2293  if (rows_this_statement > 0)
2294  {
2295  if (dopt->do_nothing)
2296  archputs(" ON CONFLICT DO NOTHING;\n", fout);
2297  else
2298  archputs(";\n", fout);
2299  }
2300 
2301  archputs("\n\n", fout);
2302 
2303  ExecuteSqlStatement(fout, "CLOSE _pg_dump_cursor");
2304 
2305  destroyPQExpBuffer(q);
2306  if (insertStmt != NULL)
2307  destroyPQExpBuffer(insertStmt);
2308 
2309  return 1;
2310 }
2311 
2312 /*
2313  * getRootTableInfo:
2314  * get the root TableInfo for the given partition table.
2315  */
2316 static TableInfo *
2318 {
2319  TableInfo *parentTbinfo;
2320 
2321  Assert(tbinfo->ispartition);
2322  Assert(tbinfo->numParents == 1);
2323 
2324  parentTbinfo = tbinfo->parents[0];
2325  while (parentTbinfo->ispartition)
2326  {
2327  Assert(parentTbinfo->numParents == 1);
2328  parentTbinfo = parentTbinfo->parents[0];
2329  }
2330 
2331  return parentTbinfo;
2332 }
2333 
2334 /*
2335  * dumpTableData -
2336  * dump the contents of a single table
2337  *
2338  * Actually, this just makes an ArchiveEntry for the table contents.
2339  */
2340 static void
2341 dumpTableData(Archive *fout, const TableDataInfo *tdinfo)
2342 {
2343  DumpOptions *dopt = fout->dopt;
2344  TableInfo *tbinfo = tdinfo->tdtable;
2345  PQExpBuffer copyBuf = createPQExpBuffer();
2346  PQExpBuffer clistBuf = createPQExpBuffer();
2347  DataDumperPtr dumpFn;
2348  char *copyStmt;
2349  const char *copyFrom;
2350 
2351  /* We had better have loaded per-column details about this table */
2352  Assert(tbinfo->interesting);
2353 
2354  if (dopt->dump_inserts == 0)
2355  {
2356  /* Dump/restore using COPY */
2357  dumpFn = dumpTableData_copy;
2358 
2359  /*
2360  * When load-via-partition-root is set, get the root table name for
2361  * the partition table, so that we can reload data through the root
2362  * table.
2363  */
2364  if (dopt->load_via_partition_root && tbinfo->ispartition)
2365  {
2366  TableInfo *parentTbinfo;
2367 
2368  parentTbinfo = getRootTableInfo(tbinfo);
2369  copyFrom = fmtQualifiedDumpable(parentTbinfo);
2370  }
2371  else
2372  copyFrom = fmtQualifiedDumpable(tbinfo);
2373 
2374  /* must use 2 steps here 'cause fmtId is nonreentrant */
2375  appendPQExpBuffer(copyBuf, "COPY %s ",
2376  copyFrom);
2377  appendPQExpBuffer(copyBuf, "%s FROM stdin;\n",
2378  fmtCopyColumnList(tbinfo, clistBuf));
2379  copyStmt = copyBuf->data;
2380  }
2381  else
2382  {
2383  /* Restore using INSERT */
2384  dumpFn = dumpTableData_insert;
2385  copyStmt = NULL;
2386  }
2387 
2388  /*
2389  * Note: although the TableDataInfo is a full DumpableObject, we treat its
2390  * dependency on its table as "special" and pass it to ArchiveEntry now.
2391  * See comments for BuildArchiveDependencies.
2392  */
2393  if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2394  {
2395  TocEntry *te;
2396 
2397  te = ArchiveEntry(fout, tdinfo->dobj.catId, tdinfo->dobj.dumpId,
2398  ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
2399  .namespace = tbinfo->dobj.namespace->dobj.name,
2400  .owner = tbinfo->rolname,
2401  .description = "TABLE DATA",
2402  .section = SECTION_DATA,
2403  .copyStmt = copyStmt,
2404  .deps = &(tbinfo->dobj.dumpId),
2405  .nDeps = 1,
2406  .dumpFn = dumpFn,
2407  .dumpArg = tdinfo));
2408 
2409  /*
2410  * Set the TocEntry's dataLength in case we are doing a parallel dump
2411  * and want to order dump jobs by table size. We choose to measure
2412  * dataLength in table pages during dump, so no scaling is needed.
2413  * However, relpages is declared as "integer" in pg_class, and hence
2414  * also in TableInfo, but it's really BlockNumber a/k/a unsigned int.
2415  * Cast so that we get the right interpretation of table sizes
2416  * exceeding INT_MAX pages.
2417  */
2418  te->dataLength = (BlockNumber) tbinfo->relpages;
2419  }
2420 
2421  destroyPQExpBuffer(copyBuf);
2422  destroyPQExpBuffer(clistBuf);
2423 }
2424 
2425 /*
2426  * refreshMatViewData -
2427  * load or refresh the contents of a single materialized view
2428  *
2429  * Actually, this just makes an ArchiveEntry for the REFRESH MATERIALIZED VIEW
2430  * statement.
2431  */
2432 static void
2434 {
2435  TableInfo *tbinfo = tdinfo->tdtable;
2436  PQExpBuffer q;
2437 
2438  /* If the materialized view is not flagged as populated, skip this. */
2439  if (!tbinfo->relispopulated)
2440  return;
2441 
2442  q = createPQExpBuffer();
2443 
2444  appendPQExpBuffer(q, "REFRESH MATERIALIZED VIEW %s;\n",
2445  fmtQualifiedDumpable(tbinfo));
2446 
2447  if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2448  ArchiveEntry(fout,
2449  tdinfo->dobj.catId, /* catalog ID */
2450  tdinfo->dobj.dumpId, /* dump ID */
2451  ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
2452  .namespace = tbinfo->dobj.namespace->dobj.name,
2453  .owner = tbinfo->rolname,
2454  .description = "MATERIALIZED VIEW DATA",
2455  .section = SECTION_POST_DATA,
2456  .createStmt = q->data,
2457  .deps = tdinfo->dobj.dependencies,
2458  .nDeps = tdinfo->dobj.nDeps));
2459 
2460  destroyPQExpBuffer(q);
2461 }
2462 
2463 /*
2464  * getTableData -
2465  * set up dumpable objects representing the contents of tables
2466  */
2467 static void
2468 getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, char relkind)
2469 {
2470  int i;
2471 
2472  for (i = 0; i < numTables; i++)
2473  {
2474  if (tblinfo[i].dobj.dump & DUMP_COMPONENT_DATA &&
2475  (!relkind || tblinfo[i].relkind == relkind))
2476  makeTableDataInfo(dopt, &(tblinfo[i]));
2477  }
2478 }
2479 
2480 /*
2481  * Make a dumpable object for the data of this specific table
2482  *
2483  * Note: we make a TableDataInfo if and only if we are going to dump the
2484  * table data; the "dump" flag in such objects isn't used.
2485  */
2486 static void
2488 {
2489  TableDataInfo *tdinfo;
2490 
2491  /*
2492  * Nothing to do if we already decided to dump the table. This will
2493  * happen for "config" tables.
2494  */
2495  if (tbinfo->dataObj != NULL)
2496  return;
2497 
2498  /* Skip VIEWs (no data to dump) */
2499  if (tbinfo->relkind == RELKIND_VIEW)
2500  return;
2501  /* Skip FOREIGN TABLEs (no data to dump) unless requested explicitly */
2502  if (tbinfo->relkind == RELKIND_FOREIGN_TABLE &&
2503  (foreign_servers_include_oids.head == NULL ||
2504  !simple_oid_list_member(&foreign_servers_include_oids,
2505  tbinfo->foreign_server)))
2506  return;
2507  /* Skip partitioned tables (data in partitions) */
2508  if (tbinfo->relkind == RELKIND_PARTITIONED_TABLE)
2509  return;
2510 
2511  /* Don't dump data in unlogged tables, if so requested */
2512  if (tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED &&
2513  dopt->no_unlogged_table_data)
2514  return;
2515 
2516  /* Check that the data is not explicitly excluded */
2517  if (simple_oid_list_member(&tabledata_exclude_oids,
2518  tbinfo->dobj.catId.oid))
2519  return;
2520 
2521  /* OK, let's dump it */
2522  tdinfo = (TableDataInfo *) pg_malloc(sizeof(TableDataInfo));
2523 
2524  if (tbinfo->relkind == RELKIND_MATVIEW)
2525  tdinfo->dobj.objType = DO_REFRESH_MATVIEW;
2526  else if (tbinfo->relkind == RELKIND_SEQUENCE)
2527  tdinfo->dobj.objType = DO_SEQUENCE_SET;
2528  else
2529  tdinfo->dobj.objType = DO_TABLE_DATA;
2530 
2531  /*
2532  * Note: use tableoid 0 so that this object won't be mistaken for
2533  * something that pg_depend entries apply to.
2534  */
2535  tdinfo->dobj.catId.tableoid = 0;
2536  tdinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
2537  AssignDumpId(&tdinfo->dobj);
2538  tdinfo->dobj.name = tbinfo->dobj.name;
2539  tdinfo->dobj.namespace = tbinfo->dobj.namespace;
2540  tdinfo->tdtable = tbinfo;
2541  tdinfo->filtercond = NULL; /* might get set later */
2542  addObjectDependency(&tdinfo->dobj, tbinfo->dobj.dumpId);
2543 
2544  tbinfo->dataObj = tdinfo;
2545 
2546  /* Make sure that we'll collect per-column info for this table. */
2547  tbinfo->interesting = true;
2548 }
2549 
2550 /*
2551  * The refresh for a materialized view must be dependent on the refresh for
2552  * any materialized view that this one is dependent on.
2553  *
2554  * This must be called after all the objects are created, but before they are
2555  * sorted.
2556  */
2557 static void
2559 {
2560  PQExpBuffer query;
2561  PGresult *res;
2562  int ntups,
2563  i;
2564  int i_classid,
2565  i_objid,
2566  i_refobjid;
2567 
2568  /* No Mat Views before 9.3. */
2569  if (fout->remoteVersion < 90300)
2570  return;
2571 
2572  query = createPQExpBuffer();
2573 
2574  appendPQExpBufferStr(query, "WITH RECURSIVE w AS "
2575  "( "
2576  "SELECT d1.objid, d2.refobjid, c2.relkind AS refrelkind "
2577  "FROM pg_depend d1 "
2578  "JOIN pg_class c1 ON c1.oid = d1.objid "
2579  "AND c1.relkind = " CppAsString2(RELKIND_MATVIEW)
2580  " JOIN pg_rewrite r1 ON r1.ev_class = d1.objid "
2581  "JOIN pg_depend d2 ON d2.classid = 'pg_rewrite'::regclass "
2582  "AND d2.objid = r1.oid "
2583  "AND d2.refobjid <> d1.objid "
2584  "JOIN pg_class c2 ON c2.oid = d2.refobjid "
2585  "AND c2.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
2586  CppAsString2(RELKIND_VIEW) ") "
2587  "WHERE d1.classid = 'pg_class'::regclass "
2588  "UNION "
2589  "SELECT w.objid, d3.refobjid, c3.relkind "
2590  "FROM w "
2591  "JOIN pg_rewrite r3 ON r3.ev_class = w.refobjid "
2592  "JOIN pg_depend d3 ON d3.classid = 'pg_rewrite'::regclass "
2593  "AND d3.objid = r3.oid "
2594  "AND d3.refobjid <> w.refobjid "
2595  "JOIN pg_class c3 ON c3.oid = d3.refobjid "
2596  "AND c3.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
2597  CppAsString2(RELKIND_VIEW) ") "
2598  ") "
2599  "SELECT 'pg_class'::regclass::oid AS classid, objid, refobjid "
2600  "FROM w "
2601  "WHERE refrelkind = " CppAsString2(RELKIND_MATVIEW));
2602 
2603  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
2604 
2605  ntups = PQntuples(res);
2606 
2607  i_classid = PQfnumber(res, "classid");
2608  i_objid = PQfnumber(res, "objid");
2609  i_refobjid = PQfnumber(res, "refobjid");
2610 
2611  for (i = 0; i < ntups; i++)
2612  {
2613  CatalogId objId;
2614  CatalogId refobjId;
2615  DumpableObject *dobj;
2616  DumpableObject *refdobj;
2617  TableInfo *tbinfo;
2618  TableInfo *reftbinfo;
2619 
2620  objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
2621  objId.oid = atooid(PQgetvalue(res, i, i_objid));
2622  refobjId.tableoid = objId.tableoid;
2623  refobjId.oid = atooid(PQgetvalue(res, i, i_refobjid));
2624 
2625  dobj = findObjectByCatalogId(objId);
2626  if (dobj == NULL)
2627  continue;
2628 
2629  Assert(dobj->objType == DO_TABLE);
2630  tbinfo = (TableInfo *) dobj;
2631  Assert(tbinfo->relkind == RELKIND_MATVIEW);
2632  dobj = (DumpableObject *) tbinfo->dataObj;
2633  if (dobj == NULL)
2634  continue;
2635  Assert(dobj->objType == DO_REFRESH_MATVIEW);
2636 
2637  refdobj = findObjectByCatalogId(refobjId);
2638  if (refdobj == NULL)
2639  continue;
2640 
2641  Assert(refdobj->objType == DO_TABLE);
2642  reftbinfo = (TableInfo *) refdobj;
2643  Assert(reftbinfo->relkind == RELKIND_MATVIEW);
2644  refdobj = (DumpableObject *) reftbinfo->dataObj;
2645  if (refdobj == NULL)
2646  continue;
2647  Assert(refdobj->objType == DO_REFRESH_MATVIEW);
2648 
2649  addObjectDependency(dobj, refdobj->dumpId);
2650 
2651  if (!reftbinfo->relispopulated)
2652  tbinfo->relispopulated = false;
2653  }
2654 
2655  PQclear(res);
2656 
2657  destroyPQExpBuffer(query);
2658 }
2659 
2660 /*
2661  * getTableDataFKConstraints -
2662  * add dump-order dependencies reflecting foreign key constraints
2663  *
2664  * This code is executed only in a data-only dump --- in schema+data dumps
2665  * we handle foreign key issues by not creating the FK constraints until
2666  * after the data is loaded. In a data-only dump, however, we want to
2667  * order the table data objects in such a way that a table's referenced
2668  * tables are restored first. (In the presence of circular references or
2669  * self-references this may be impossible; we'll detect and complain about
2670  * that during the dependency sorting step.)
2671  */
2672 static void
2674 {
2675  DumpableObject **dobjs;
2676  int numObjs;
2677  int i;
2678 
2679  /* Search through all the dumpable objects for FK constraints */
2680  getDumpableObjects(&dobjs, &numObjs);
2681  for (i = 0; i < numObjs; i++)
2682  {
2683  if (dobjs[i]->objType == DO_FK_CONSTRAINT)
2684  {
2685  ConstraintInfo *cinfo = (ConstraintInfo *) dobjs[i];
2686  TableInfo *ftable;
2687 
2688  /* Not interesting unless both tables are to be dumped */
2689  if (cinfo->contable == NULL ||
2690  cinfo->contable->dataObj == NULL)
2691  continue;
2692  ftable = findTableByOid(cinfo->confrelid);
2693  if (ftable == NULL ||
2694  ftable->dataObj == NULL)
2695  continue;
2696 
2697  /*
2698  * Okay, make referencing table's TABLE_DATA object depend on the
2699  * referenced table's TABLE_DATA object.
2700  */
2702  ftable->dataObj->dobj.dumpId);
2703  }
2704  }
2705  free(dobjs);
2706 }
2707 
2708 
2709 /*
2710  * guessConstraintInheritance:
2711  * In pre-8.4 databases, we can't tell for certain which constraints
2712  * are inherited. We assume a CHECK constraint is inherited if its name
2713  * matches the name of any constraint in the parent. Originally this code
2714  * tried to compare the expression texts, but that can fail for various
2715  * reasons --- for example, if the parent and child tables are in different
2716  * schemas, reverse-listing of function calls may produce different text
2717  * (schema-qualified or not) depending on search path.
2718  *
2719  * In 8.4 and up we can rely on the conislocal field to decide which
2720  * constraints must be dumped; much safer.
2721  *
2722  * This function assumes all conislocal flags were initialized to true.
2723  * It clears the flag on anything that seems to be inherited.
2724  */
2725 static void
2727 {
2728  int i,
2729  j,
2730  k;
2731 
2732  for (i = 0; i < numTables; i++)
2733  {
2734  TableInfo *tbinfo = &(tblinfo[i]);
2735  int numParents;
2736  TableInfo **parents;
2737  TableInfo *parent;
2738 
2739  /* Sequences and views never have parents */
2740  if (tbinfo->relkind == RELKIND_SEQUENCE ||
2741  tbinfo->relkind == RELKIND_VIEW)
2742  continue;
2743 
2744  /* Don't bother computing anything for non-target tables, either */
2745  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
2746  continue;
2747 
2748  numParents = tbinfo->numParents;
2749  parents = tbinfo->parents;
2750 
2751  if (numParents == 0)
2752  continue; /* nothing to see here, move along */
2753 
2754  /* scan for inherited CHECK constraints */
2755  for (j = 0; j < tbinfo->ncheck; j++)
2756  {
2757  ConstraintInfo *constr;
2758 
2759  constr = &(tbinfo->checkexprs[j]);
2760 
2761  for (k = 0; k < numParents; k++)
2762  {
2763  int l;
2764 
2765  parent = parents[k];
2766  for (l = 0; l < parent->ncheck; l++)
2767  {
2768  ConstraintInfo *pconstr = &(parent->checkexprs[l]);
2769 
2770  if (strcmp(pconstr->dobj.name, constr->dobj.name) == 0)
2771  {
2772  constr->conislocal = false;
2773  break;
2774  }
2775  }
2776  if (!constr->conislocal)
2777  break;
2778  }
2779  }
2780  }
2781 }
2782 
2783 
2784 /*
2785  * dumpDatabase:
2786  * dump the database definition
2787  */
2788 static void
2790 {
2791  DumpOptions *dopt = fout->dopt;
2792  PQExpBuffer dbQry = createPQExpBuffer();
2793  PQExpBuffer delQry = createPQExpBuffer();
2794  PQExpBuffer creaQry = createPQExpBuffer();
2795  PQExpBuffer labelq = createPQExpBuffer();
2796  PGconn *conn = GetConnection(fout);
2797  PGresult *res;
2798  int i_tableoid,
2799  i_oid,
2800  i_datname,
2801  i_dba,
2802  i_encoding,
2803  i_collate,
2804  i_ctype,
2805  i_frozenxid,
2806  i_minmxid,
2807  i_datacl,
2808  i_rdatacl,
2809  i_datistemplate,
2810  i_datconnlimit,
2811  i_tablespace;
2812  CatalogId dbCatId;
2813  DumpId dbDumpId;
2814  const char *datname,
2815  *dba,
2816  *encoding,
2817  *collate,
2818  *ctype,
2819  *datacl,
2820  *rdatacl,
2821  *datistemplate,
2822  *datconnlimit,
2823  *tablespace;
2824  uint32 frozenxid,
2825  minmxid;
2826  char *qdatname;
2827 
2828  pg_log_info("saving database definition");
2829 
2830  /*
2831  * Fetch the database-level properties for this database.
2832  *
2833  * The order in which privileges are in the ACL string (the order they
2834  * have been GRANT'd in, which the backend maintains) must be preserved to
2835  * ensure that GRANTs WITH GRANT OPTION and subsequent GRANTs based on
2836  * those are dumped in the correct order. Note that initial privileges
2837  * (pg_init_privs) are not supported on databases, so this logic cannot
2838  * make use of buildACLQueries().
2839  */
2840  if (fout->remoteVersion >= 90600)
2841  {
2842  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2843  "(%s datdba) AS dba, "
2844  "pg_encoding_to_char(encoding) AS encoding, "
2845  "datcollate, datctype, datfrozenxid, datminmxid, "
2846  "(SELECT array_agg(acl ORDER BY row_n) FROM "
2847  " (SELECT acl, row_n FROM "
2848  " unnest(coalesce(datacl,acldefault('d',datdba))) "
2849  " WITH ORDINALITY AS perm(acl,row_n) "
2850  " WHERE NOT EXISTS ( "
2851  " SELECT 1 "
2852  " FROM unnest(acldefault('d',datdba)) "
2853  " AS init(init_acl) "
2854  " WHERE acl = init_acl)) AS datacls) "
2855  " AS datacl, "
2856  "(SELECT array_agg(acl ORDER BY row_n) FROM "
2857  " (SELECT acl, row_n FROM "
2858  " unnest(acldefault('d',datdba)) "
2859  " WITH ORDINALITY AS initp(acl,row_n) "
2860  " WHERE NOT EXISTS ( "
2861  " SELECT 1 "
2862  " FROM unnest(coalesce(datacl,acldefault('d',datdba))) "
2863  " AS permp(orig_acl) "
2864  " WHERE acl = orig_acl)) AS rdatacls) "
2865  " AS rdatacl, "
2866  "datistemplate, datconnlimit, "
2867  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2868  "shobj_description(oid, 'pg_database') AS description "
2869 
2870  "FROM pg_database "
2871  "WHERE datname = current_database()",
2873  }
2874  else if (fout->remoteVersion >= 90300)
2875  {
2876  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2877  "(%s datdba) AS dba, "
2878  "pg_encoding_to_char(encoding) AS encoding, "
2879  "datcollate, datctype, datfrozenxid, datminmxid, "
2880  "datacl, '' as rdatacl, datistemplate, datconnlimit, "
2881  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2882  "shobj_description(oid, 'pg_database') AS description "
2883 
2884  "FROM pg_database "
2885  "WHERE datname = current_database()",
2887  }
2888  else if (fout->remoteVersion >= 80400)
2889  {
2890  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2891  "(%s datdba) AS dba, "
2892  "pg_encoding_to_char(encoding) AS encoding, "
2893  "datcollate, datctype, datfrozenxid, 0 AS datminmxid, "
2894  "datacl, '' as rdatacl, datistemplate, datconnlimit, "
2895  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2896  "shobj_description(oid, 'pg_database') AS description "
2897 
2898  "FROM pg_database "
2899  "WHERE datname = current_database()",
2901  }
2902  else if (fout->remoteVersion >= 80200)
2903  {
2904  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2905  "(%s datdba) AS dba, "
2906  "pg_encoding_to_char(encoding) AS encoding, "
2907  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2908  "datacl, '' as rdatacl, datistemplate, datconnlimit, "
2909  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2910  "shobj_description(oid, 'pg_database') AS description "
2911 
2912  "FROM pg_database "
2913  "WHERE datname = current_database()",
2915  }
2916  else
2917  {
2918  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2919  "(%s datdba) AS dba, "
2920  "pg_encoding_to_char(encoding) AS encoding, "
2921  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2922  "datacl, '' as rdatacl, datistemplate, "
2923  "-1 as datconnlimit, "
2924  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace "
2925  "FROM pg_database "
2926  "WHERE datname = current_database()",
2928  }
2929 
2930  res = ExecuteSqlQueryForSingleRow(fout, dbQry->data);
2931 
2932  i_tableoid = PQfnumber(res, "tableoid");
2933  i_oid = PQfnumber(res, "oid");
2934  i_datname = PQfnumber(res, "datname");
2935  i_dba = PQfnumber(res, "dba");
2936  i_encoding = PQfnumber(res, "encoding");
2937  i_collate = PQfnumber(res, "datcollate");
2938  i_ctype = PQfnumber(res, "datctype");
2939  i_frozenxid = PQfnumber(res, "datfrozenxid");
2940  i_minmxid = PQfnumber(res, "datminmxid");
2941  i_datacl = PQfnumber(res, "datacl");
2942  i_rdatacl = PQfnumber(res, "rdatacl");
2943  i_datistemplate = PQfnumber(res, "datistemplate");
2944  i_datconnlimit = PQfnumber(res, "datconnlimit");
2945  i_tablespace = PQfnumber(res, "tablespace");
2946 
2947  dbCatId.tableoid = atooid(PQgetvalue(res, 0, i_tableoid));
2948  dbCatId.oid = atooid(PQgetvalue(res, 0, i_oid));
2949  datname = PQgetvalue(res, 0, i_datname);
2950  dba = PQgetvalue(res, 0, i_dba);
2951  encoding = PQgetvalue(res, 0, i_encoding);
2952  collate = PQgetvalue(res, 0, i_collate);
2953  ctype = PQgetvalue(res, 0, i_ctype);
2954  frozenxid = atooid(PQgetvalue(res, 0, i_frozenxid));
2955  minmxid = atooid(PQgetvalue(res, 0, i_minmxid));
2956  datacl = PQgetvalue(res, 0, i_datacl);
2957  rdatacl = PQgetvalue(res, 0, i_rdatacl);
2958  datistemplate = PQgetvalue(res, 0, i_datistemplate);
2959  datconnlimit = PQgetvalue(res, 0, i_datconnlimit);
2960  tablespace = PQgetvalue(res, 0, i_tablespace);
2961 
2962  qdatname = pg_strdup(fmtId(datname));
2963 
2964  /*
2965  * Prepare the CREATE DATABASE command. We must specify encoding, locale,
2966  * and tablespace since those can't be altered later. Other DB properties
2967  * are left to the DATABASE PROPERTIES entry, so that they can be applied
2968  * after reconnecting to the target DB.
2969  */
2970  appendPQExpBuffer(creaQry, "CREATE DATABASE %s WITH TEMPLATE = template0",
2971  qdatname);
2972  if (strlen(encoding) > 0)
2973  {
2974  appendPQExpBufferStr(creaQry, " ENCODING = ");
2975  appendStringLiteralAH(creaQry, encoding, fout);
2976  }
2977  if (strlen(collate) > 0 && strcmp(collate, ctype) == 0)
2978  {
2979  appendPQExpBufferStr(creaQry, " LOCALE = ");
2980  appendStringLiteralAH(creaQry, collate, fout);
2981  }
2982  else
2983  {
2984  if (strlen(collate) > 0)
2985  {
2986  appendPQExpBufferStr(creaQry, " LC_COLLATE = ");
2987  appendStringLiteralAH(creaQry, collate, fout);
2988  }
2989  if (strlen(ctype) > 0)
2990  {
2991  appendPQExpBufferStr(creaQry, " LC_CTYPE = ");
2992  appendStringLiteralAH(creaQry, ctype, fout);
2993  }
2994  }
2995 
2996  /*
2997  * Note: looking at dopt->outputNoTablespaces here is completely the wrong
2998  * thing; the decision whether to specify a tablespace should be left till
2999  * pg_restore, so that pg_restore --no-tablespaces applies. Ideally we'd
3000  * label the DATABASE entry with the tablespace and let the normal
3001  * tablespace selection logic work ... but CREATE DATABASE doesn't pay
3002  * attention to default_tablespace, so that won't work.
3003  */
3004  if (strlen(tablespace) > 0 && strcmp(tablespace, "pg_default") != 0 &&
3005  !dopt->outputNoTablespaces)
3006  appendPQExpBuffer(creaQry, " TABLESPACE = %s",
3007  fmtId(tablespace));
3008  appendPQExpBufferStr(creaQry, ";\n");
3009 
3010  appendPQExpBuffer(delQry, "DROP DATABASE %s;\n",
3011  qdatname);
3012 
3013  dbDumpId = createDumpId();
3014 
3015  ArchiveEntry(fout,
3016  dbCatId, /* catalog ID */
3017  dbDumpId, /* dump ID */
3018  ARCHIVE_OPTS(.tag = datname,
3019  .owner = dba,
3020  .description = "DATABASE",
3021  .section = SECTION_PRE_DATA,
3022  .createStmt = creaQry->data,
3023  .dropStmt = delQry->data));
3024 
3025  /* Compute correct tag for archive entry */
3026  appendPQExpBuffer(labelq, "DATABASE %s", qdatname);
3027 
3028  /* Dump DB comment if any */
3029  if (fout->remoteVersion >= 80200)
3030  {
3031  /*
3032  * 8.2 and up keep comments on shared objects in a shared table, so we
3033  * cannot use the dumpComment() code used for other database objects.
3034  * Be careful that the ArchiveEntry parameters match that function.
3035  */
3036  char *comment = PQgetvalue(res, 0, PQfnumber(res, "description"));
3037 
3038  if (comment && *comment && !dopt->no_comments)
3039  {
3040  resetPQExpBuffer(dbQry);
3041 
3042  /*
3043  * Generates warning when loaded into a differently-named
3044  * database.
3045  */
3046  appendPQExpBuffer(dbQry, "COMMENT ON DATABASE %s IS ", qdatname);
3047  appendStringLiteralAH(dbQry, comment, fout);
3048  appendPQExpBufferStr(dbQry, ";\n");
3049 
3050  ArchiveEntry(fout, nilCatalogId, createDumpId(),
3051  ARCHIVE_OPTS(.tag = labelq->data,
3052  .owner = dba,
3053  .description = "COMMENT",
3054  .section = SECTION_NONE,
3055  .createStmt = dbQry->data,
3056  .deps = &dbDumpId,
3057  .nDeps = 1));
3058  }
3059  }
3060  else
3061  {
3062  dumpComment(fout, "DATABASE", qdatname, NULL, dba,
3063  dbCatId, 0, dbDumpId);
3064  }
3065 
3066  /* Dump DB security label, if enabled */
3067  if (!dopt->no_security_labels && fout->remoteVersion >= 90200)
3068  {
3069  PGresult *shres;
3070  PQExpBuffer seclabelQry;
3071 
3072  seclabelQry = createPQExpBuffer();
3073 
3074  buildShSecLabelQuery("pg_database", dbCatId.oid, seclabelQry);
3075  shres = ExecuteSqlQuery(fout, seclabelQry->data, PGRES_TUPLES_OK);
3076  resetPQExpBuffer(seclabelQry);
3077  emitShSecLabels(conn, shres, seclabelQry, "DATABASE", datname);
3078  if (seclabelQry->len > 0)
3079  ArchiveEntry(fout, nilCatalogId, createDumpId(),
3080  ARCHIVE_OPTS(.tag = labelq->data,
3081  .owner = dba,
3082  .description = "SECURITY LABEL",
3083  .section = SECTION_NONE,
3084  .createStmt = seclabelQry->data,
3085  .deps = &dbDumpId,
3086  .nDeps = 1));
3087  destroyPQExpBuffer(seclabelQry);
3088  PQclear(shres);
3089  }
3090 
3091  /*
3092  * Dump ACL if any. Note that we do not support initial privileges
3093  * (pg_init_privs) on databases.
3094  */
3095  dumpACL(fout, dbDumpId, InvalidDumpId, "DATABASE",
3096  qdatname, NULL, NULL,
3097  dba, datacl, rdatacl, "", "");
3098 
3099  /*
3100  * Now construct a DATABASE PROPERTIES archive entry to restore any
3101  * non-default database-level properties. (The reason this must be
3102  * separate is that we cannot put any additional commands into the TOC
3103  * entry that has CREATE DATABASE. pg_restore would execute such a group
3104  * in an implicit transaction block, and the backend won't allow CREATE
3105  * DATABASE in that context.)
3106  */
3107  resetPQExpBuffer(creaQry);
3108  resetPQExpBuffer(delQry);
3109 
3110  if (strlen(datconnlimit) > 0 && strcmp(datconnlimit, "-1") != 0)
3111  appendPQExpBuffer(creaQry, "ALTER DATABASE %s CONNECTION LIMIT = %s;\n",
3112  qdatname, datconnlimit);
3113 
3114  if (strcmp(datistemplate, "t") == 0)
3115  {
3116  appendPQExpBuffer(creaQry, "ALTER DATABASE %s IS_TEMPLATE = true;\n",
3117  qdatname);
3118 
3119  /*
3120  * The backend won't accept DROP DATABASE on a template database. We
3121  * can deal with that by removing the template marking before the DROP
3122  * gets issued. We'd prefer to use ALTER DATABASE IF EXISTS here, but
3123  * since no such command is currently supported, fake it with a direct
3124  * UPDATE on pg_database.
3125  */
3126  appendPQExpBufferStr(delQry, "UPDATE pg_catalog.pg_database "
3127  "SET datistemplate = false WHERE datname = ");
3128  appendStringLiteralAH(delQry, datname, fout);
3129  appendPQExpBufferStr(delQry, ";\n");
3130  }
3131 
3132  /* Add database-specific SET options */
3133  dumpDatabaseConfig(fout, creaQry, datname, dbCatId.oid);
3134 
3135  /*
3136  * We stick this binary-upgrade query into the DATABASE PROPERTIES archive
3137  * entry, too, for lack of a better place.
3138  */
3139  if (dopt->binary_upgrade)
3140  {
3141  appendPQExpBufferStr(creaQry, "\n-- For binary upgrade, set datfrozenxid and datminmxid.\n");
3142  appendPQExpBuffer(creaQry, "UPDATE pg_catalog.pg_database\n"
3143  "SET datfrozenxid = '%u', datminmxid = '%u'\n"
3144  "WHERE datname = ",
3145  frozenxid, minmxid);
3146  appendStringLiteralAH(creaQry, datname, fout);
3147  appendPQExpBufferStr(creaQry, ";\n");
3148  }
3149 
3150  if (creaQry->len > 0)
3151  ArchiveEntry(fout, nilCatalogId, createDumpId(),
3152  ARCHIVE_OPTS(.tag = datname,
3153  .owner = dba,
3154  .description = "DATABASE PROPERTIES",
3155  .section = SECTION_PRE_DATA,
3156  .createStmt = creaQry->data,
3157  .dropStmt = delQry->data,
3158  .deps = &dbDumpId));
3159 
3160  /*
3161  * pg_largeobject comes from the old system intact, so set its
3162  * relfrozenxids and relminmxids.
3163  */
3164  if (dopt->binary_upgrade)
3165  {
3166  PGresult *lo_res;
3167  PQExpBuffer loFrozenQry = createPQExpBuffer();
3168  PQExpBuffer loOutQry = createPQExpBuffer();
3169  int i_relfrozenxid,
3170  i_relminmxid;
3171 
3172  /*
3173  * pg_largeobject
3174  */
3175  if (fout->remoteVersion >= 90300)
3176  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid\n"
3177  "FROM pg_catalog.pg_class\n"
3178  "WHERE oid = %u;\n",
3179  LargeObjectRelationId);
3180  else
3181  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid\n"
3182  "FROM pg_catalog.pg_class\n"
3183  "WHERE oid = %u;\n",
3184  LargeObjectRelationId);
3185 
3186  lo_res = ExecuteSqlQueryForSingleRow(fout, loFrozenQry->data);
3187 
3188  i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
3189  i_relminmxid = PQfnumber(lo_res, "relminmxid");
3190 
3191  appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, set pg_largeobject relfrozenxid and relminmxid\n");
3192  appendPQExpBuffer(loOutQry, "UPDATE pg_catalog.pg_class\n"
3193  "SET relfrozenxid = '%u', relminmxid = '%u'\n"
3194  "WHERE oid = %u;\n",
3195  atooid(PQgetvalue(lo_res, 0, i_relfrozenxid)),
3196  atooid(PQgetvalue(lo_res, 0, i_relminmxid)),
3197  LargeObjectRelationId);
3198  ArchiveEntry(fout, nilCatalogId, createDumpId(),
3199  ARCHIVE_OPTS(.tag = "pg_largeobject",
3200  .description = "pg_largeobject",
3201  .section = SECTION_PRE_DATA,
3202  .createStmt = loOutQry->data));
3203 
3204  PQclear(lo_res);
3205 
3206  destroyPQExpBuffer(loFrozenQry);
3207  destroyPQExpBuffer(loOutQry);
3208  }
3209 
3210  PQclear(res);
3211 
3212  free(qdatname);
3213  destroyPQExpBuffer(dbQry);
3214  destroyPQExpBuffer(delQry);
3215  destroyPQExpBuffer(creaQry);
3216  destroyPQExpBuffer(labelq);
3217 }
3218 
3219 /*
3220  * Collect any database-specific or role-and-database-specific SET options
3221  * for this database, and append them to outbuf.
3222  */
3223 static void
3225  const char *dbname, Oid dboid)
3226 {
3227  PGconn *conn = GetConnection(AH);
3229  PGresult *res;
3230  int count = 1;
3231 
3232  /*
3233  * First collect database-specific options. Pre-8.4 server versions lack
3234  * unnest(), so we do this the hard way by querying once per subscript.
3235  */
3236  for (;;)
3237  {
3238  if (AH->remoteVersion >= 90000)
3239  printfPQExpBuffer(buf, "SELECT setconfig[%d] FROM pg_db_role_setting "
3240  "WHERE setrole = 0 AND setdatabase = '%u'::oid",
3241  count, dboid);
3242  else
3243  printfPQExpBuffer(buf, "SELECT datconfig[%d] FROM pg_database WHERE oid = '%u'::oid", count, dboid);
3244 
3245  res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);
3246 
3247  if (PQntuples(res) == 1 &&
3248  !PQgetisnull(res, 0, 0))
3249  {
3250  makeAlterConfigCommand(conn, PQgetvalue(res, 0, 0),
3251  "DATABASE", dbname, NULL, NULL,
3252  outbuf);
3253  PQclear(res);
3254  count++;
3255  }
3256  else
3257  {
3258  PQclear(res);
3259  break;
3260  }
3261  }
3262 
3263  /* Now look for role-and-database-specific options */
3264  if (AH->remoteVersion >= 90000)
3265  {
3266  /* Here we can assume we have unnest() */
3267  printfPQExpBuffer(buf, "SELECT rolname, unnest(setconfig) "
3268  "FROM pg_db_role_setting s, pg_roles r "
3269  "WHERE setrole = r.oid AND setdatabase = '%u'::oid",
3270  dboid);
3271 
3272  res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);
3273 
3274  if (PQntuples(res) > 0)
3275  {
3276  int i;
3277 
3278  for (i = 0; i < PQntuples(res); i++)
3279  makeAlterConfigCommand(conn, PQgetvalue(res, i, 1),
3280  "ROLE", PQgetvalue(res, i, 0),
3281  "DATABASE", dbname,
3282  outbuf);
3283  }
3284 
3285  PQclear(res);
3286  }
3287 
3288  destroyPQExpBuffer(buf);
3289 }
3290 
3291 /*
3292  * dumpEncoding: put the correct encoding into the archive
3293  */
3294 static void
3296 {
3297  const char *encname = pg_encoding_to_char(AH->encoding);
3299 
3300  pg_log_info("saving encoding = %s", encname);
3301 
3302  appendPQExpBufferStr(qry, "SET client_encoding = ");
3303  appendStringLiteralAH(qry, encname, AH);
3304  appendPQExpBufferStr(qry, ";\n");
3305 
3306  ArchiveEntry(AH, nilCatalogId, createDumpId(),
3307  ARCHIVE_OPTS(.tag = "ENCODING",
3308  .description = "ENCODING",
3309  .section = SECTION_PRE_DATA,
3310  .createStmt = qry->data));
3311 
3312  destroyPQExpBuffer(qry);
3313 }
3314 
3315 
3316 /*
3317  * dumpStdStrings: put the correct escape string behavior into the archive
3318  */
3319 static void
3321 {
3322  const char *stdstrings = AH->std_strings ? "on" : "off";
3324 
3325  pg_log_info("saving standard_conforming_strings = %s",
3326  stdstrings);
3327 
3328  appendPQExpBuffer(qry, "SET standard_conforming_strings = '%s';\n",
3329  stdstrings);
3330 
3331  ArchiveEntry(AH, nilCatalogId, createDumpId(),
3332  ARCHIVE_OPTS(.tag = "STDSTRINGS",
3333  .description = "STDSTRINGS",
3334  .section = SECTION_PRE_DATA,
3335  .createStmt = qry->data));
3336 
3337  destroyPQExpBuffer(qry);
3338 }
3339 
3340 /*
3341  * dumpSearchPath: record the active search_path in the archive
3342  */
3343 static void
3345 {
3347  PQExpBuffer path = createPQExpBuffer();
3348  PGresult *res;
3349  char **schemanames = NULL;
3350  int nschemanames = 0;
3351  int i;
3352 
3353  /*
3354  * We use the result of current_schemas(), not the search_path GUC,
3355  * because that might contain wildcards such as "$user", which won't
3356  * necessarily have the same value during restore. Also, this way avoids
3357  * listing schemas that may appear in search_path but not actually exist,
3358  * which seems like a prudent exclusion.
3359  */
3360  res = ExecuteSqlQueryForSingleRow(AH,
3361  "SELECT pg_catalog.current_schemas(false)");
3362 
3363  if (!parsePGArray(PQgetvalue(res, 0, 0), &schemanames, &nschemanames))
3364  fatal("could not parse result of current_schemas()");
3365 
3366  /*
3367  * We use set_config(), not a simple "SET search_path" command, because
3368  * the latter has less-clean behavior if the search path is empty. While
3369  * that's likely to get fixed at some point, it seems like a good idea to
3370  * be as backwards-compatible as possible in what we put into archives.
3371  */
3372  for (i = 0; i < nschemanames; i++)
3373  {
3374  if (i > 0)
3375  appendPQExpBufferStr(path, ", ");
3376  appendPQExpBufferStr(path, fmtId(schemanames[i]));
3377  }
3378 
3379  appendPQExpBufferStr(qry, "SELECT pg_catalog.set_config('search_path', ");
3380  appendStringLiteralAH(qry, path->data, AH);
3381  appendPQExpBufferStr(qry, ", false);\n");
3382 
3383  pg_log_info("saving search_path = %s", path->data);
3384 
3385  ArchiveEntry(AH, nilCatalogId, createDumpId(),
3386  ARCHIVE_OPTS(.tag = "SEARCHPATH",
3387  .description = "SEARCHPATH",
3388  .section = SECTION_PRE_DATA,
3389  .createStmt = qry->data));
3390 
3391  /* Also save it in AH->searchpath, in case we're doing plain text dump */
3392  AH->searchpath = pg_strdup(qry->data);
3393 
3394  if (schemanames)
3395  free(schemanames);
3396  PQclear(res);
3397  destroyPQExpBuffer(qry);
3398  destroyPQExpBuffer(path);
3399 }
3400 
3401 /*
3402  * dumpToastCompression: save the dump-time default TOAST compression in the
3403  * archive
3404  */
3405 static void
3407 {
3408  char *toast_compression;
3409  PQExpBuffer qry;
3410 
3411  if (AH->dopt->no_toast_compression)
3412  {
3413  /* we don't intend to dump the info, so no need to fetch it either */
3414  return;
3415  }
3416 
3417  if (AH->remoteVersion < 140000)
3418  {
3419  /* pre-v14, the only method was pglz */
3420  toast_compression = pg_strdup("pglz");
3421  }
3422  else
3423  {
3424  PGresult *res;
3425 
3426  res = ExecuteSqlQueryForSingleRow(AH, "SHOW default_toast_compression");
3427  toast_compression = pg_strdup(PQgetvalue(res, 0, 0));
3428  PQclear(res);
3429  }
3430 
3431  qry = createPQExpBuffer();
3432  appendPQExpBufferStr(qry, "SET default_toast_compression = ");
3433  appendStringLiteralAH(qry, toast_compression, AH);
3434  appendPQExpBufferStr(qry, ";\n");
3435 
3436  pg_log_info("saving default_toast_compression = %s", toast_compression);
3437 
3438  ArchiveEntry(AH, nilCatalogId, createDumpId(),
3439  ARCHIVE_OPTS(.tag = "TOASTCOMPRESSION",
3440  .description = "TOASTCOMPRESSION",
3441  .section = SECTION_PRE_DATA,
3442  .createStmt = qry->data));
3443 
3444  /*
3445  * Also save it in AH->default_toast_compression, in case we're doing
3446  * plain text dump.
3447  */
3448  AH->default_toast_compression = toast_compression;
3449 
3450  destroyPQExpBuffer(qry);
3451 }
3452 
3453 
3454 /*
3455  * getBlobs:
3456  * Collect schema-level data about large objects
3457  */
3458 static void
3460 {
3461  DumpOptions *dopt = fout->dopt;
3462  PQExpBuffer blobQry = createPQExpBuffer();
3463  BlobInfo *binfo;
3464  DumpableObject *bdata;
3465  PGresult *res;
3466  int ntups;
3467  int i;
3468  int i_oid;
3469  int i_lomowner;
3470  int i_lomacl;
3471  int i_rlomacl;
3472  int i_initlomacl;
3473  int i_initrlomacl;
3474 
3475  pg_log_info("reading large objects");
3476 
3477  /* Fetch BLOB OIDs, and owner/ACL data if >= 9.0 */
3478  if (fout->remoteVersion >= 90600)
3479  {
3480  PQExpBuffer acl_subquery = createPQExpBuffer();
3481  PQExpBuffer racl_subquery = createPQExpBuffer();
3482  PQExpBuffer init_acl_subquery = createPQExpBuffer();
3483  PQExpBuffer init_racl_subquery = createPQExpBuffer();
3484 
3485  buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
3486  init_racl_subquery, "l.lomacl", "l.lomowner", "'L'",
3487  dopt->binary_upgrade);
3488 
3489  appendPQExpBuffer(blobQry,
3490  "SELECT l.oid, (%s l.lomowner) AS rolname, "
3491  "%s AS lomacl, "
3492  "%s AS rlomacl, "
3493  "%s AS initlomacl, "
3494  "%s AS initrlomacl "
3495  "FROM pg_largeobject_metadata l "
3496  "LEFT JOIN pg_init_privs pip ON "
3497  "(l.oid = pip.objoid "
3498  "AND pip.classoid = 'pg_largeobject'::regclass "
3499  "AND pip.objsubid = 0) ",
3501  acl_subquery->data,
3502  racl_subquery->data,
3503  init_acl_subquery->data,
3504  init_racl_subquery->data);
3505 
3506  destroyPQExpBuffer(acl_subquery);
3507  destroyPQExpBuffer(racl_subquery);
3508  destroyPQExpBuffer(init_acl_subquery);
3509  destroyPQExpBuffer(init_racl_subquery);
3510  }
3511  else if (fout->remoteVersion >= 90000)
3512  appendPQExpBuffer(blobQry,
3513  "SELECT oid, (%s lomowner) AS rolname, lomacl, "
3514  "NULL AS rlomacl, NULL AS initlomacl, "
3515  "NULL AS initrlomacl "
3516  " FROM pg_largeobject_metadata",
3518  else
3519  appendPQExpBufferStr(blobQry,
3520  "SELECT DISTINCT loid AS oid, "
3521  "NULL::name AS rolname, NULL::oid AS lomacl, "
3522  "NULL::oid AS rlomacl, NULL::oid AS initlomacl, "
3523  "NULL::oid AS initrlomacl "
3524  " FROM pg_largeobject");
3525 
3526  res = ExecuteSqlQuery(fout, blobQry->data, PGRES_TUPLES_OK);
3527 
3528  i_oid = PQfnumber(res, "oid");
3529  i_lomowner = PQfnumber(res, "rolname");
3530  i_lomacl = PQfnumber(res, "lomacl");
3531  i_rlomacl = PQfnumber(res, "rlomacl");
3532  i_initlomacl = PQfnumber(res, "initlomacl");
3533  i_initrlomacl = PQfnumber(res, "initrlomacl");
3534 
3535  ntups = PQntuples(res);
3536 
3537  /*
3538  * Each large object has its own BLOB archive entry.
3539  */
3540  binfo = (BlobInfo *) pg_malloc(ntups * sizeof(BlobInfo));
3541 
3542  for (i = 0; i < ntups; i++)
3543  {
3544  binfo[i].dobj.objType = DO_BLOB;
3545  binfo[i].dobj.catId.tableoid = LargeObjectRelationId;
3546  binfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
3547  AssignDumpId(&binfo[i].dobj);
3548 
3549  binfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oid));
3550  binfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_lomowner));
3551  binfo[i].blobacl = pg_strdup(PQgetvalue(res, i, i_lomacl));
3552  binfo[i].rblobacl = pg_strdup(PQgetvalue(res, i, i_rlomacl));
3553  binfo[i].initblobacl = pg_strdup(PQgetvalue(res, i, i_initlomacl));
3554  binfo[i].initrblobacl = pg_strdup(PQgetvalue(res, i, i_initrlomacl));
3555 
3556  if (PQgetisnull(res, i, i_lomacl) &&
3557  PQgetisnull(res, i, i_rlomacl) &&
3558  PQgetisnull(res, i, i_initlomacl) &&
3559  PQgetisnull(res, i, i_initrlomacl))
3560  binfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
3561 
3562  /*
3563  * In binary-upgrade mode for blobs, we do *not* dump out the blob
3564  * data, as it will be copied by pg_upgrade, which simply copies the
3565  * pg_largeobject table. We *do* however dump out anything but the
3566  * data, as pg_upgrade copies just pg_largeobject, but not
3567  * pg_largeobject_metadata, after the dump is restored.
3568  */
3569  if (dopt->binary_upgrade)
3570  binfo[i].dobj.dump &= ~DUMP_COMPONENT_DATA;
3571  }
3572 
3573  /*
3574  * If we have any large objects, a "BLOBS" archive entry is needed. This
3575  * is just a placeholder for sorting; it carries no data now.
3576  */
3577  if (ntups > 0)
3578  {
3579  bdata = (DumpableObject *) pg_malloc(sizeof(DumpableObject));
3580  bdata->objType = DO_BLOB_DATA;
3581  bdata->catId = nilCatalogId;
3582  AssignDumpId(bdata);
3583  bdata->name = pg_strdup("BLOBS");
3584  }
3585 
3586  PQclear(res);
3587  destroyPQExpBuffer(blobQry);
3588 }
3589 
3590 /*
3591  * dumpBlob
3592  *
3593  * dump the definition (metadata) of the given large object
3594  */
3595 static void
3596 dumpBlob(Archive *fout, const BlobInfo *binfo)
3597 {
3598  PQExpBuffer cquery = createPQExpBuffer();
3599  PQExpBuffer dquery = createPQExpBuffer();
3600 
3601  appendPQExpBuffer(cquery,
3602  "SELECT pg_catalog.lo_create('%s');\n",
3603  binfo->dobj.name);
3604 
3605  appendPQExpBuffer(dquery,
3606  "SELECT pg_catalog.lo_unlink('%s');\n",
3607  binfo->dobj.name);
3608 
3609  if (binfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
3610  ArchiveEntry(fout, binfo->dobj.catId, binfo->dobj.dumpId,
3611  ARCHIVE_OPTS(.tag = binfo->dobj.name,
3612  .owner = binfo->rolname,
3613  .description = "BLOB",
3614  .section = SECTION_PRE_DATA,
3615  .createStmt = cquery->data,
3616  .dropStmt = dquery->data));
3617 
3618  /* Dump comment if any */
3619  if (binfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3620  dumpComment(fout, "LARGE OBJECT", binfo->dobj.name,
3621  NULL, binfo->rolname,
3622  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3623 
3624  /* Dump security label if any */
3625  if (binfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3626  dumpSecLabel(fout, "LARGE OBJECT", binfo->dobj.name,
3627  NULL, binfo->rolname,
3628  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3629 
3630  /* Dump ACL if any */
3631  if (binfo->blobacl && (binfo->dobj.dump & DUMP_COMPONENT_ACL))
3632  dumpACL(fout, binfo->dobj.dumpId, InvalidDumpId, "LARGE OBJECT",
3633  binfo->dobj.name, NULL,
3634  NULL, binfo->rolname, binfo->blobacl, binfo->rblobacl,
3635  binfo->initblobacl, binfo->initrblobacl);
3636 
3637  destroyPQExpBuffer(cquery);
3638  destroyPQExpBuffer(dquery);
3639 }
3640 
3641 /*
3642  * dumpBlobs:
3643  * dump the data contents of all large objects
3644  */
3645 static int
3646 dumpBlobs(Archive *fout, const void *arg)
3647 {
3648  const char *blobQry;
3649  const char *blobFetchQry;
3650  PGconn *conn = GetConnection(fout);
3651  PGresult *res;
3652  char buf[LOBBUFSIZE];
3653  int ntups;
3654  int i;
3655  int cnt;
3656 
3657  pg_log_info("saving large objects");
3658 
3659  /*
3660  * Currently, we re-fetch all BLOB OIDs using a cursor. Consider scanning
3661  * the already-in-memory dumpable objects instead...
3662  */
3663  if (fout->remoteVersion >= 90000)
3664  blobQry =
3665  "DECLARE bloboid CURSOR FOR "
3666  "SELECT oid FROM pg_largeobject_metadata ORDER BY 1";
3667  else
3668  blobQry =
3669  "DECLARE bloboid CURSOR FOR "
3670  "SELECT DISTINCT loid FROM pg_largeobject ORDER BY 1";
3671 
3672  ExecuteSqlStatement(fout, blobQry);
3673 
3674  /* Command to fetch from cursor */
3675  blobFetchQry = "FETCH 1000 IN bloboid";
3676 
3677  do
3678  {
3679  /* Do a fetch */
3680  res = ExecuteSqlQuery(fout, blobFetchQry, PGRES_TUPLES_OK);
3681 
3682  /* Process the tuples, if any */
3683  ntups = PQntuples(res);
3684  for (i = 0; i < ntups; i++)
3685  {
3686  Oid blobOid;
3687  int loFd;
3688 
3689  blobOid = atooid(PQgetvalue(res, i, 0));
3690  /* Open the BLOB */
3691  loFd = lo_open(conn, blobOid, INV_READ);
3692  if (loFd == -1)
3693  fatal("could not open large object %u: %s",
3694  blobOid, PQerrorMessage(conn));
3695 
3696  StartBlob(fout, blobOid);
3697 
3698  /* Now read it in chunks, sending data to archive */
3699  do
3700  {
3701  cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
3702  if (cnt < 0)
3703  fatal("error reading large object %u: %s",
3704  blobOid, PQerrorMessage(conn));
3705 
3706  WriteData(fout, buf, cnt);
3707  } while (cnt > 0);
3708 
3709  lo_close(conn, loFd);
3710 
3711  EndBlob(fout, blobOid);
3712  }
3713 
3714  PQclear(res);
3715  } while (ntups > 0);
3716 
3717  return 1;
3718 }
3719 
3720 /*
3721  * getPolicies
3722  * get information about policies on a dumpable table.
3723  */
3724 void
3725 getPolicies(Archive *fout, TableInfo tblinfo[], int numTables)
3726 {
3727  PQExpBuffer query;
3728  PGresult *res;
3729  PolicyInfo *polinfo;
3730  int i_oid;
3731  int i_tableoid;
3732  int i_polname;
3733  int i_polcmd;
3734  int i_polpermissive;
3735  int i_polroles;
3736  int i_polqual;
3737  int i_polwithcheck;
3738  int i,
3739  j,
3740  ntups;
3741 
3742  if (fout->remoteVersion < 90500)
3743  return;
3744 
3745  query = createPQExpBuffer();
3746 
3747  for (i = 0; i < numTables; i++)
3748  {
3749  TableInfo *tbinfo = &tblinfo[i];
3750 
3751  /* Ignore row security on tables not to be dumped */
3752  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_POLICY))
3753  continue;
3754 
3755  pg_log_info("reading row security enabled for table \"%s.%s\"",
3756  tbinfo->dobj.namespace->dobj.name,
3757  tbinfo->dobj.name);
3758 
3759  /*
3760  * Get row security enabled information for the table. We represent
3761  * RLS being enabled on a table by creating a PolicyInfo object with
3762  * null polname.
3763  */
3764  if (tbinfo->rowsec)
3765  {
3766  /*
3767  * Note: use tableoid 0 so that this object won't be mistaken for
3768  * something that pg_depend entries apply to.
3769  */
3770  polinfo = pg_malloc(sizeof(PolicyInfo));
3771  polinfo->dobj.objType = DO_POLICY;
3772  polinfo->dobj.catId.tableoid = 0;
3773  polinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
3774  AssignDumpId(&polinfo->dobj);
3775  polinfo->dobj.namespace = tbinfo->dobj.namespace;
3776  polinfo->dobj.name = pg_strdup(tbinfo->dobj.name);
3777  polinfo->poltable = tbinfo;
3778  polinfo->polname = NULL;
3779  polinfo->polcmd = '\0';
3780  polinfo->polpermissive = 0;
3781  polinfo->polroles = NULL;
3782  polinfo->polqual = NULL;
3783  polinfo->polwithcheck = NULL;
3784  }
3785 
3786  pg_log_info("reading policies for table \"%s.%s\"",
3787  tbinfo->dobj.namespace->dobj.name,
3788  tbinfo->dobj.name);
3789 
3790  resetPQExpBuffer(query);
3791 
3792  /* Get the policies for the table. */
3793  if (fout->remoteVersion >= 100000)
3794  appendPQExpBuffer(query,
3795  "SELECT oid, tableoid, pol.polname, pol.polcmd, pol.polpermissive, "
3796  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
3797  " pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
3798  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
3799  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
3800  "FROM pg_catalog.pg_policy pol "
3801  "WHERE polrelid = '%u'",
3802  tbinfo->dobj.catId.oid);
3803  else
3804  appendPQExpBuffer(query,
3805  "SELECT oid, tableoid, pol.polname, pol.polcmd, 't' as polpermissive, "
3806  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
3807  " pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
3808  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
3809  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
3810  "FROM pg_catalog.pg_policy pol "
3811  "WHERE polrelid = '%u'",
3812  tbinfo->dobj.catId.oid);
3813  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3814 
3815  ntups = PQntuples(res);
3816 
3817  if (ntups == 0)
3818  {
3819  /*
3820  * No explicit policies to handle (only the default-deny policy,
3821  * which is handled as part of the table definition). Clean up
3822  * and return.
3823  */
3824  PQclear(res);
3825  continue;
3826  }
3827 
3828  i_oid = PQfnumber(res, "oid");
3829  i_tableoid = PQfnumber(res, "tableoid");
3830  i_polname = PQfnumber(res, "polname");
3831  i_polcmd = PQfnumber(res, "polcmd");
3832  i_polpermissive = PQfnumber(res, "polpermissive");
3833  i_polroles = PQfnumber(res, "polroles");
3834  i_polqual = PQfnumber(res, "polqual");
3835  i_polwithcheck = PQfnumber(res, "polwithcheck");
3836 
3837  polinfo = pg_malloc(ntups * sizeof(PolicyInfo));
3838 
3839  for (j = 0; j < ntups; j++)
3840  {
3841  polinfo[j].dobj.objType = DO_POLICY;
3842  polinfo[j].dobj.catId.tableoid =
3843  atooid(PQgetvalue(res, j, i_tableoid));
3844  polinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
3845  AssignDumpId(&polinfo[j].dobj);
3846  polinfo[j].dobj.namespace = tbinfo->dobj.namespace;
3847  polinfo[j].poltable = tbinfo;
3848  polinfo[j].polname = pg_strdup(PQgetvalue(res, j, i_polname));
3849  polinfo[j].dobj.name = pg_strdup(polinfo[j].polname);
3850 
3851  polinfo[j].polcmd = *(PQgetvalue(res, j, i_polcmd));
3852  polinfo[j].polpermissive = *(PQgetvalue(res, j, i_polpermissive)) == 't';
3853 
3854  if (PQgetisnull(res, j, i_polroles))
3855  polinfo[j].polroles = NULL;
3856  else
3857  polinfo[j].polroles = pg_strdup(PQgetvalue(res, j, i_polroles));
3858 
3859  if (PQgetisnull(res, j, i_polqual))
3860  polinfo[j].polqual = NULL;
3861  else
3862  polinfo[j].polqual = pg_strdup(PQgetvalue(res, j, i_polqual));
3863 
3864  if (PQgetisnull(res, j, i_polwithcheck))
3865  polinfo[j].polwithcheck = NULL;
3866  else
3867  polinfo[j].polwithcheck
3868  = pg_strdup(PQgetvalue(res, j, i_polwithcheck));
3869  }
3870  PQclear(res);
3871  }
3872  destroyPQExpBuffer(query);
3873 }
3874 
3875 /*
3876  * dumpPolicy
3877  * dump the definition of the given policy
3878  */
3879 static void
3880 dumpPolicy(Archive *fout, const PolicyInfo *polinfo)
3881 {
3882  DumpOptions *dopt = fout->dopt;
3883  TableInfo *tbinfo = polinfo->poltable;
3884  PQExpBuffer query;
3885  PQExpBuffer delqry;
3886  PQExpBuffer polprefix;
3887  char *qtabname;
3888  const char *cmd;
3889  char *tag;
3890 
3891  if (dopt->dataOnly)
3892  return;
3893 
3894  /*
3895  * If polname is NULL, then this record is just indicating that ROW LEVEL
3896  * SECURITY is enabled for the table. Dump as ALTER TABLE <table> ENABLE
3897  * ROW LEVEL SECURITY.
3898  */
3899  if (polinfo->polname == NULL)
3900  {
3901  query = createPQExpBuffer();
3902 
3903  appendPQExpBuffer(query, "ALTER TABLE %s ENABLE ROW LEVEL SECURITY;",
3904  fmtQualifiedDumpable(tbinfo));
3905 
3906  /*
3907  * We must emit the ROW SECURITY object's dependency on its table
3908  * explicitly, because it will not match anything in pg_depend (unlike
3909  * the case for other PolicyInfo objects).
3910  */
3911  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3912  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3913  ARCHIVE_OPTS(.tag = polinfo->dobj.name,
3914  .namespace = polinfo->dobj.namespace->dobj.name,
3915  .owner = tbinfo->rolname,
3916  .description = "ROW SECURITY",
3917  .section = SECTION_POST_DATA,
3918  .createStmt = query->data,
3919  .deps = &(tbinfo->dobj.dumpId),
3920  .nDeps = 1));
3921 
3922  destroyPQExpBuffer(query);
3923  return;
3924  }
3925 
3926  if (polinfo->polcmd == '*')
3927  cmd = "";
3928  else if (polinfo->polcmd == 'r')
3929  cmd = " FOR SELECT";
3930  else if (polinfo->polcmd == 'a')
3931  cmd = " FOR INSERT";
3932  else if (polinfo->polcmd == 'w')
3933  cmd = " FOR UPDATE";
3934  else if (polinfo->polcmd == 'd')
3935  cmd = " FOR DELETE";
3936  else
3937  {
3938  pg_log_error("unexpected policy command type: %c",
3939  polinfo->polcmd);
3940  exit_nicely(1);
3941  }
3942 
3943  query = createPQExpBuffer();
3944  delqry = createPQExpBuffer();
3945  polprefix = createPQExpBuffer();
3946 
3947  qtabname = pg_strdup(fmtId(tbinfo->dobj.name));
3948 
3949  appendPQExpBuffer(query, "CREATE POLICY %s", fmtId(polinfo->polname));
3950 
3951  appendPQExpBuffer(query, " ON %s%s%s", fmtQualifiedDumpable(tbinfo),
3952  !polinfo->polpermissive ? " AS RESTRICTIVE" : "", cmd);
3953 
3954  if (polinfo->polroles != NULL)
3955  appendPQExpBuffer(query, " TO %s", polinfo->polroles);
3956 
3957  if (polinfo->polqual != NULL)
3958  appendPQExpBuffer(query, " USING (%s)", polinfo->polqual);
3959 
3960  if (polinfo->polwithcheck != NULL)
3961  appendPQExpBuffer(query, " WITH CHECK (%s)", polinfo->polwithcheck);
3962 
3963  appendPQExpBufferStr(query, ";\n");
3964 
3965  appendPQExpBuffer(delqry, "DROP POLICY %s", fmtId(polinfo->polname));
3966  appendPQExpBuffer(delqry, " ON %s;\n", fmtQualifiedDumpable(tbinfo));
3967 
3968  appendPQExpBuffer(polprefix, "POLICY %s ON",
3969  fmtId(polinfo->polname));
3970 
3971  tag = psprintf("%s %s", tbinfo->dobj.name, polinfo->dobj.name);
3972 
3973  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3974  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3975  ARCHIVE_OPTS(.tag = tag,
3976  .namespace = polinfo->dobj.namespace->dobj.name,
3977  .owner = tbinfo->rolname,
3978  .description = "POLICY",
3979  .section = SECTION_POST_DATA,
3980  .createStmt = query->data,
3981  .dropStmt = delqry->data));
3982 
3983  if (polinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3984  dumpComment(fout, polprefix->data, qtabname,
3985  tbinfo->dobj.namespace->dobj.name, tbinfo->rolname,
3986  polinfo->dobj.catId, 0, polinfo->dobj.dumpId);
3987 
3988  free(tag);
3989  destroyPQExpBuffer(query);
3990  destroyPQExpBuffer(delqry);
3991  destroyPQExpBuffer(polprefix);
3992  free(qtabname);
3993 }
3994 
3995 /*
3996  * getPublications
3997  * get information about publications
3998  */
4001 {
4002  DumpOptions *dopt = fout->dopt;
4003  PQExpBuffer query;
4004  PGresult *res;
4005  PublicationInfo *pubinfo;
4006  int i_tableoid;
4007  int i_oid;
4008  int i_pubname;
4009  int i_rolname;
4010  int i_puballtables;
4011  int i_pubinsert;
4012  int i_pubupdate;
4013  int i_pubdelete;
4014  int i_pubtruncate;
4015  int i_pubviaroot;
4016  int i,
4017  ntups;
4018 
4019  if (dopt->no_publications || fout->remoteVersion < 100000)
4020  {
4021  *numPublications = 0;
4022  return NULL;
4023  }
4024 
4025  query = createPQExpBuffer();
4026 
4027  resetPQExpBuffer(query);
4028 
4029  /* Get the publications. */
4030  if (fout->remoteVersion >= 130000)
4031  appendPQExpBuffer(query,
4032  "SELECT p.tableoid, p.oid, p.pubname, "
4033  "(%s p.pubowner) AS rolname, "
4034  "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, p.pubtruncate, p.pubviaroot "
4035  "FROM pg_publication p",
4037  else if (fout->remoteVersion >= 110000)
4038  appendPQExpBuffer(query,
4039  "SELECT p.tableoid, p.oid, p.pubname, "
4040  "(%s p.pubowner) AS rolname, "
4041  "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, p.pubtruncate, false AS pubviaroot "
4042  "FROM pg_publication p",
4044  else
4045  appendPQExpBuffer(query,
4046  "SELECT p.tableoid, p.oid, p.pubname, "
4047  "(%s p.pubowner) AS rolname, "
4048  "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, false AS pubtruncate, false AS pubviaroot "
4049  "FROM pg_publication p",
4051 
4052  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4053 
4054  ntups = PQntuples(res);
4055 
4056  i_tableoid = PQfnumber(res, "tableoid");
4057  i_oid = PQfnumber(res, "oid");
4058  i_pubname = PQfnumber(res, "pubname");
4059  i_rolname = PQfnumber(res, "rolname");
4060  i_puballtables = PQfnumber(res, "puballtables");
4061  i_pubinsert = PQfnumber(res, "pubinsert");
4062  i_pubupdate = PQfnumber(res, "pubupdate");
4063  i_pubdelete = PQfnumber(res, "pubdelete");
4064  i_pubtruncate = PQfnumber(res, "pubtruncate");
4065  i_pubviaroot = PQfnumber(res, "pubviaroot");
4066 
4067  pubinfo = pg_malloc(ntups * sizeof(PublicationInfo));
4068 
4069  for (i = 0; i < ntups; i++)
4070  {
4071  pubinfo[i].dobj.objType = DO_PUBLICATION;
4072  pubinfo[i].dobj.catId.tableoid =
4073  atooid(PQgetvalue(res, i, i_tableoid));
4074  pubinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4075  AssignDumpId(&pubinfo[i].dobj);
4076  pubinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_pubname));
4077  pubinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4078  pubinfo[i].puballtables =
4079  (strcmp(PQgetvalue(res, i, i_puballtables), "t") == 0);
4080  pubinfo[i].pubinsert =
4081  (strcmp(PQgetvalue(res, i, i_pubinsert), "t") == 0);
4082  pubinfo[i].pubupdate =
4083  (strcmp(PQgetvalue(res, i, i_pubupdate), "t") == 0);
4084  pubinfo[i].pubdelete =
4085  (strcmp(PQgetvalue(res, i, i_pubdelete), "t") == 0);
4086  pubinfo[i].pubtruncate =
4087  (strcmp(PQgetvalue(res, i, i_pubtruncate), "t") == 0);
4088  pubinfo[i].pubviaroot =
4089  (strcmp(PQgetvalue(res, i, i_pubviaroot), "t") == 0);
4090 
4091  if (strlen(pubinfo[i].rolname) == 0)
4092  pg_log_warning("owner of publication \"%s\" appears to be invalid",
4093  pubinfo[i].dobj.name);
4094 
4095  /* Decide whether we want to dump it */
4096  selectDumpableObject(&(pubinfo[i].dobj), fout);
4097  }
4098  PQclear(res);
4099 
4100  destroyPQExpBuffer(query);
4101 
4102  *numPublications = ntups;
4103  return pubinfo;
4104 }
4105 
4106 /*
4107  * dumpPublication
4108  * dump the definition of the given publication
4109  */
4110 static void
4112 {
4113  PQExpBuffer delq;
4114  PQExpBuffer query;
4115  char *qpubname;
4116  bool first = true;
4117 
4118  if (!(pubinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
4119  return;
4120 
4121  delq = createPQExpBuffer();
4122  query = createPQExpBuffer();
4123 
4124  qpubname = pg_strdup(fmtId(pubinfo->dobj.name));
4125 
4126  appendPQExpBuffer(delq, "DROP PUBLICATION %s;\n",
4127  qpubname);
4128 
4129  appendPQExpBuffer(query, "CREATE PUBLICATION %s",
4130  qpubname);
4131 
4132  if (pubinfo->puballtables)
4133  appendPQExpBufferStr(query, " FOR ALL TABLES");
4134 
4135  appendPQExpBufferStr(query, " WITH (publish = '");
4136  if (pubinfo->pubinsert)
4137  {
4138  appendPQExpBufferStr(query, "insert");
4139  first = false;
4140  }
4141 
4142  if (pubinfo->pubupdate)
4143  {
4144  if (!first)
4145  appendPQExpBufferStr(query, ", ");
4146 
4147  appendPQExpBufferStr(query, "update");
4148  first = false;
4149  }
4150 
4151  if (pubinfo->pubdelete)
4152  {
4153  if (!first)
4154  appendPQExpBufferStr(query, ", ");
4155 
4156  appendPQExpBufferStr(query, "delete");
4157  first = false;
4158  }
4159 
4160  if (pubinfo->pubtruncate)
4161  {
4162  if (!first)
4163  appendPQExpBufferStr(query, ", ");
4164 
4165  appendPQExpBufferStr(query, "truncate");
4166  first = false;
4167  }
4168 
4169  appendPQExpBufferStr(query, "'");
4170 
4171  if (pubinfo->pubviaroot)
4172  appendPQExpBufferStr(query, ", publish_via_partition_root = true");
4173 
4174  appendPQExpBufferStr(query, ");\n");
4175 
4176  ArchiveEntry(fout, pubinfo->dobj.catId, pubinfo->dobj.dumpId,
4177  ARCHIVE_OPTS(.tag = pubinfo->dobj.name,
4178  .owner = pubinfo->rolname,
4179  .description = "PUBLICATION",
4180  .section = SECTION_POST_DATA,
4181  .createStmt = query->data,
4182  .dropStmt = delq->data));
4183 
4184  if (pubinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
4185  dumpComment(fout, "PUBLICATION", qpubname,
4186  NULL, pubinfo->rolname,
4187  pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
4188 
4189  if (pubinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
4190  dumpSecLabel(fout, "PUBLICATION", qpubname,
4191  NULL, pubinfo->rolname,
4192  pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
4193 
4194  destroyPQExpBuffer(delq);
4195  destroyPQExpBuffer(query);
4196  free(qpubname);
4197 }
4198 
4199 /*
4200  * getPublicationTables
4201  * get information about publication membership for dumpable tables.
4202  */
4203 void
4205 {
4206  PQExpBuffer query;
4207  PGresult *res;
4208  PublicationRelInfo *pubrinfo;
4209  DumpOptions *dopt = fout->dopt;
4210  int i_tableoid;
4211  int i_oid;
4212  int i_prpubid;
4213  int i_prrelid;
4214  int i,
4215  j,
4216  ntups;
4217 
4218  if (dopt->no_publications || fout->remoteVersion < 100000)
4219  return;
4220 
4221  query = createPQExpBuffer();
4222 
4223  /* Collect all publication membership info. */
4224  appendPQExpBufferStr(query,
4225  "SELECT tableoid, oid, prpubid, prrelid "
4226  "FROM pg_catalog.pg_publication_rel");
4227  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4228 
4229  ntups = PQntuples(res);
4230 
4231  i_tableoid = PQfnumber(res, "tableoid");
4232  i_oid = PQfnumber(res, "oid");
4233  i_prpubid = PQfnumber(res, "prpubid");
4234  i_prrelid = PQfnumber(res, "prrelid");
4235 
4236  /* this allocation may be more than we need */
4237  pubrinfo = pg_malloc(ntups * sizeof(PublicationRelInfo));
4238  j = 0;
4239 
4240  for (i = 0; i < ntups; i++)
4241  {
4242  Oid prpubid = atooid(PQgetvalue(res, i, i_prpubid));
4243  Oid prrelid = atooid(PQgetvalue(res, i, i_prrelid));
4244  PublicationInfo *pubinfo;
4245  TableInfo *tbinfo;
4246 
4247  /*
4248  * Ignore any entries for which we aren't interested in either the
4249  * publication or the rel.
4250  */
4251  pubinfo = findPublicationByOid(prpubid);
4252  if (pubinfo == NULL)
4253  continue;
4254  tbinfo = findTableByOid(prrelid);
4255  if (tbinfo == NULL)
4256  continue;
4257 
4258  /*
4259  * Ignore publication membership of tables whose definitions are not
4260  * to be dumped.
4261  */
4262  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
4263  continue;
4264 
4265  /* OK, make a DumpableObject for this relationship */
4266  pubrinfo[j].dobj.objType = DO_PUBLICATION_REL;
4267  pubrinfo[j].dobj.catId.tableoid =
4268  atooid(PQgetvalue(res, i, i_tableoid));
4269  pubrinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4270  AssignDumpId(&pubrinfo[j].dobj);
4271  pubrinfo[j].dobj.namespace = tbinfo->dobj.namespace;
4272  pubrinfo[j].dobj.name = tbinfo->dobj.name;
4273  pubrinfo[j].publication = pubinfo;
4274  pubrinfo[j].pubtable = tbinfo;
4275 
4276  /* Decide whether we want to dump it */
4277  selectDumpablePublicationTable(&(pubrinfo[j].dobj), fout);
4278 
4279  j++;
4280  }
4281 
4282  PQclear(res);
4283  destroyPQExpBuffer(query);
4284 }
4285 
4286 /*
4287  * dumpPublicationTable
4288  * dump the definition of the given publication table mapping
4289  */
4290 static void
4292 {
4293  PublicationInfo *pubinfo = pubrinfo->publication;
4294  TableInfo *tbinfo = pubrinfo->pubtable;
4295  PQExpBuffer query;
4296  char *tag;
4297 
4298  if (!(pubrinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
4299  return;
4300 
4301  tag = psprintf("%s %s", pubinfo->dobj.name, tbinfo->dobj.name);
4302 
4303  query = createPQExpBuffer();
4304 
4305  appendPQExpBuffer(query, "ALTER PUBLICATION %s ADD TABLE ONLY",
4306  fmtId(pubinfo->dobj.name));
4307  appendPQExpBuffer(query, " %s;\n",
4308  fmtQualifiedDumpable(tbinfo));
4309 
4310  /*
4311  * There is no point in creating a drop query as the drop is done by table
4312  * drop. (If you think to change this, see also _printTocEntry().)
4313  * Although this object doesn't really have ownership as such, set the
4314  * owner field anyway to ensure that the command is run by the correct
4315  * role at restore time.
4316  */
4317  ArchiveEntry(fout, pubrinfo->dobj.catId, pubrinfo->dobj.dumpId,
4318  ARCHIVE_OPTS(.tag = tag,
4319  .namespace = tbinfo->dobj.namespace->dobj.name,
4320  .owner = pubinfo->rolname,
4321  .description = "PUBLICATION TABLE",
4322  .section = SECTION_POST_DATA,
4323  .createStmt = query->data));
4324 
4325  free(tag);
4326  destroyPQExpBuffer(query);
4327 }
4328 
4329 /*
4330  * Is the currently connected user a superuser?
4331  */
4332 static bool
4334 {
4335  ArchiveHandle *AH = (ArchiveHandle *) fout;
4336  const char *val;
4337 
4338  val = PQparameterStatus(AH->connection, "is_superuser");
4339 
4340  if (val && strcmp(val, "on") == 0)
4341  return true;
4342 
4343  return false;
4344 }
4345 
4346 /*
4347  * getSubscriptions
4348  * get information about subscriptions
4349  */
4350 void
4352 {
4353  DumpOptions *dopt = fout->dopt;
4354  PQExpBuffer query;
4355  PGresult *res;
4356  SubscriptionInfo *subinfo;
4357  int i_tableoid;
4358  int i_oid;
4359  int i_subname;
4360  int i_rolname;
4361  int i_substream;
4362  int i_subconninfo;
4363  int i_subslotname;
4364  int i_subsynccommit;
4365  int i_subpublications;
4366  int i_subbinary;
4367  int i,
4368  ntups;
4369 
4370  if (dopt->no_subscriptions || fout->remoteVersion < 100000)
4371  return;
4372 
4373  if (!is_superuser(fout))
4374  {
4375  int n;
4376 
4377  res = ExecuteSqlQuery(fout,
4378  "SELECT count(*) FROM pg_subscription "
4379  "WHERE subdbid = (SELECT oid FROM pg_database"
4380  " WHERE datname = current_database())",
4381  PGRES_TUPLES_OK);
4382  n = atoi(PQgetvalue(res, 0, 0));
4383  if (n > 0)
4384  pg_log_warning("subscriptions not dumped because current user is not a superuser");
4385  PQclear(res);
4386  return;
4387  }
4388 
4389  query = createPQExpBuffer();
4390 
4391  /* Get the subscriptions in current database. */
4392  appendPQExpBuffer(query,
4393  "SELECT s.tableoid, s.oid, s.subname,\n"
4394  " (%s s.subowner) AS rolname,\n"
4395  " s.subconninfo, s.subslotname, s.subsynccommit,\n"
4396  " s.subpublications,\n",
4398 
4399  if (fout->remoteVersion >= 140000)
4400  appendPQExpBufferStr(query, " s.subbinary,\n");
4401  else
4402  appendPQExpBufferStr(query, " false AS subbinary,\n");
4403 
4404  if (fout->remoteVersion >= 140000)
4405  appendPQExpBufferStr(query, " s.substream\n");
4406  else
4407  appendPQExpBufferStr(query, " false AS substream\n");
4408 
4409  appendPQExpBufferStr(query,
4410  "FROM pg_subscription s\n"
4411  "WHERE s.subdbid = (SELECT oid FROM pg_database\n"
4412  " WHERE datname = current_database())");
4413 
4414  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4415 
4416  ntups = PQntuples(res);
4417 
4418  i_tableoid = PQfnumber(res, "tableoid");
4419  i_oid = PQfnumber(res, "oid");
4420  i_subname = PQfnumber(res, "subname");
4421  i_rolname = PQfnumber(res, "rolname");
4422  i_subconninfo = PQfnumber(res, "subconninfo");
4423  i_subslotname = PQfnumber(res, "subslotname");
4424  i_subsynccommit = PQfnumber(res, "subsynccommit");
4425  i_subpublications = PQfnumber(res, "subpublications");
4426  i_subbinary = PQfnumber(res, "subbinary");
4427  i_substream = PQfnumber(res, "substream");
4428 
4429  subinfo = pg_malloc(ntups * sizeof(SubscriptionInfo));
4430 
4431  for (i = 0; i < ntups; i++)
4432  {
4433  subinfo[i].dobj.objType = DO_SUBSCRIPTION;
4434  subinfo[i].dobj.catId.tableoid =
4435  atooid(PQgetvalue(res, i, i_tableoid));
4436  subinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4437  AssignDumpId(&subinfo[i].dobj);
4438  subinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_subname));
4439  subinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4440  subinfo[i].subconninfo = pg_strdup(PQgetvalue(res, i, i_subconninfo));
4441  if (PQgetisnull(res, i, i_subslotname))
4442  subinfo[i].subslotname = NULL;
4443  else
4444  subinfo[i].subslotname = pg_strdup(PQgetvalue(res, i, i_subslotname));
4445  subinfo[i].subsynccommit =
4446  pg_strdup(PQgetvalue(res, i, i_subsynccommit));
4447  subinfo[i].subpublications =
4448  pg_strdup(PQgetvalue(res, i, i_subpublications));
4449  subinfo[i].subbinary =
4450  pg_strdup(PQgetvalue(res, i, i_subbinary));
4451  subinfo[i].substream =
4452  pg_strdup(PQgetvalue(res, i, i_substream));
4453 
4454  if (strlen(subinfo[i].rolname) == 0)
4455  pg_log_warning("owner of subscription \"%s\" appears to be invalid",
4456  subinfo[i].dobj.name);
4457 
4458  /* Decide whether we want to dump it */
4459  selectDumpableObject(&(subinfo[i].dobj), fout);
4460  }
4461  PQclear(res);
4462 
4463  destroyPQExpBuffer(query);
4464 }
4465 
4466 /*
4467  * dumpSubscription
4468  * dump the definition of the given subscription
4469  */
4470 static void
4472 {
4473  PQExpBuffer delq;
4474  PQExpBuffer query;
4475  PQExpBuffer publications;
4476  char *qsubname;
4477  char **pubnames = NULL;
4478  int npubnames = 0;
4479  int i;
4480 
4481  if (!(subinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
4482  return;
4483 
4484  delq = createPQExpBuffer();
4485  query = createPQExpBuffer();
4486 
4487  qsubname = pg_strdup(fmtId(subinfo->dobj.name));
4488 
4489  appendPQExpBuffer(delq, "DROP SUBSCRIPTION %s;\n",
4490  qsubname);
4491 
4492  appendPQExpBuffer(query, "CREATE SUBSCRIPTION %s CONNECTION ",
4493  qsubname);
4494  appendStringLiteralAH(query, subinfo->subconninfo, fout);
4495 
4496  /* Build list of quoted publications and append them to query. */
4497  if (!parsePGArray(subinfo->subpublications, &pubnames, &npubnames))
4498  fatal("could not parse subpublications array");
4499 
4500  publications = createPQExpBuffer();
4501  for (i = 0; i < npubnames; i++)
4502  {
4503  if (i > 0)
4504  appendPQExpBufferStr(publications, ", ");
4505 
4506  appendPQExpBufferStr(publications, fmtId(pubnames[i]));
4507  }
4508 
4509  appendPQExpBuffer(query, " PUBLICATION %s WITH (connect = false, slot_name = ", publications->data);
4510  if (subinfo->subslotname)
4511  appendStringLiteralAH(query, subinfo->subslotname, fout);
4512  else
4513  appendPQExpBufferStr(query, "NONE");
4514 
4515  if (strcmp(subinfo->subbinary, "t") == 0)
4516  appendPQExpBufferStr(query, ", binary = true");
4517 
4518  if (strcmp(subinfo->substream, "f") != 0)
4519  appendPQExpBufferStr(query, ", streaming = on");
4520 
4521  if (strcmp(subinfo->subsynccommit, "off") != 0)
4522  appendPQExpBuffer(query, ", synchronous_commit = %s", fmtId(subinfo->subsynccommit));
4523 
4524  appendPQExpBufferStr(query, ");\n");
4525 
4526  ArchiveEntry(fout, subinfo->dobj.catId, subinfo->dobj.dumpId,
4527  ARCHIVE_OPTS(.tag = subinfo->dobj.name,
4528  .owner = subinfo->rolname,
4529  .description = "SUBSCRIPTION",
4530  .section = SECTION_POST_DATA,
4531  .createStmt = query->data,
4532  .dropStmt = delq->data));
4533 
4534  if (subinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
4535  dumpComment(fout, "SUBSCRIPTION", qsubname,
4536  NULL, subinfo->rolname,
4537  subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
4538 
4539  if (subinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
4540  dumpSecLabel(fout, "SUBSCRIPTION", qsubname,
4541  NULL, subinfo->rolname,
4542  subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
4543 
4544  destroyPQExpBuffer(publications);
4545  if (pubnames)
4546  free(pubnames);
4547 
4548  destroyPQExpBuffer(delq);
4549  destroyPQExpBuffer(query);
4550  free(qsubname);
4551 }
4552 
4553 /*
4554  * Given a "create query", append as many ALTER ... DEPENDS ON EXTENSION as
4555  * the object needs.
4556  */
4557 static void
4559  PQExpBuffer create,
4560  const DumpableObject *dobj,
4561  const char *catalog,
4562  const char *keyword,
4563  const char *objname)
4564 {
4565  if (dobj->depends_on_ext)
4566  {
4567  char *nm;
4568  PGresult *res;
4569  PQExpBuffer query;
4570  int ntups;
4571  int i_extname;
4572  int i;
4573 
4574  /* dodge fmtId() non-reentrancy */
4575  nm = pg_strdup(objname);
4576 
4577  query = createPQExpBuffer();
4578  appendPQExpBuffer(query,
4579  "SELECT e.extname "
4580  "FROM pg_catalog.pg_depend d, pg_catalog.pg_extension e "
4581  "WHERE d.refobjid = e.oid AND classid = '%s'::pg_catalog.regclass "
4582  "AND objid = '%u'::pg_catalog.oid AND deptype = 'x' "
4583  "AND refclassid = 'pg_catalog.pg_extension'::pg_catalog.regclass",
4584  catalog,
4585  dobj->catId.oid);
4586  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4587  ntups = PQntuples(res);
4588  i_extname = PQfnumber(res, "extname");
4589  for (i = 0; i < ntups; i++)
4590  {
4591  appendPQExpBuffer(create, "ALTER %s %s DEPENDS ON EXTENSION %s;\n",
4592  keyword, nm,
4593  fmtId(PQgetvalue(res, i, i_extname)));
4594  }
4595 
4596  PQclear(res);
4597  destroyPQExpBuffer(query);
4598  pg_free(nm);
4599  }
4600 }
4601 
4602 static Oid
4604 {
4605  /*
4606  * If the old version didn't assign an array type, but the new version
4607  * does, we must select an unused type OID to assign. This currently only
4608  * happens for domains, when upgrading pre-v11 to v11 and up.
4609  *
4610  * Note: local state here is kind of ugly, but we must have some, since we
4611  * mustn't choose the same unused OID more than once.
4612  */
4613  static Oid next_possible_free_oid = FirstNormalObjectId;
4614  PGresult *res;
4615  bool is_dup;
4616 
4617  do
4618  {
4619  ++next_possible_free_oid;
4620  printfPQExpBuffer(upgrade_query,
4621  "SELECT EXISTS(SELECT 1 "
4622  "FROM pg_catalog.pg_type "
4623  "WHERE oid = '%u'::pg_catalog.oid);",
4624  next_possible_free_oid);
4625  res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4626  is_dup = (PQgetvalue(res, 0, 0)[0] == 't');
4627  PQclear(res);
4628  } while (is_dup);
4629 
4630  return next_possible_free_oid;
4631 }
4632 
4633 static void
4635  PQExpBuffer upgrade_buffer,
4636  Oid pg_type_oid,
4637  bool force_array_type,
4638  bool include_multirange_type)
4639 {
4640  PQExpBuffer upgrade_query = createPQExpBuffer();
4641  PGresult *res;
4642  Oid pg_type_array_oid;
4643  Oid pg_type_multirange_oid;
4644  Oid pg_type_multirange_array_oid;
4645 
4646  appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type oid\n");
4647  appendPQExpBuffer(upgrade_buffer,
4648  "SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4649  pg_type_oid);
4650 
4651  /* we only support old >= 8.3 for binary upgrades */
4652  appendPQExpBuffer(upgrade_query,
4653  "SELECT typarray "
4654  "FROM pg_catalog.pg_type "
4655  "WHERE oid = '%u'::pg_catalog.oid;",
4656  pg_type_oid);
4657 
4658  res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4659 
4660  pg_type_array_oid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typarray")));
4661 
4662  PQclear(res);
4663 
4664  if (!OidIsValid(pg_type_array_oid) && force_array_type)
4665  pg_type_array_oid = get_next_possible_free_pg_type_oid(fout, upgrade_query);
4666 
4667  if (OidIsValid(pg_type_array_oid))
4668  {
4669  appendPQExpBufferStr(upgrade_buffer,
4670  "\n-- For binary upgrade, must preserve pg_type array oid\n");
4671  appendPQExpBuffer(upgrade_buffer,
4672  "SELECT pg_catalog.binary_upgrade_set_next_array_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4673  pg_type_array_oid);
4674  }
4675 
4676  /*
4677  * Pre-set the multirange type oid and its own array type oid.
4678  */
4679  if (include_multirange_type)
4680  {
4681  if (fout->remoteVersion >= 140000)
4682  {
4683  appendPQExpBuffer(upgrade_query,
4684  "SELECT t.oid, t.typarray "
4685  "FROM pg_catalog.pg_type t "
4686  "JOIN pg_catalog.pg_range r "
4687  "ON t.oid = r.rngmultitypid "
4688  "WHERE r.rngtypid = '%u'::pg_catalog.oid;",
4689  pg_type_oid);
4690 
4691  res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4692 
4693  pg_type_multirange_oid = atooid(PQgetvalue(res, 0, PQfnumber(res, "oid")));
4694  pg_type_multirange_array_oid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typarray")));
4695 
4696  PQclear(res);
4697  }
4698  else
4699  {
4700  pg_type_multirange_oid = get_next_possible_free_pg_type_oid(fout, upgrade_query);
4701  pg_type_multirange_array_oid = get_next_possible_free_pg_type_oid(fout, upgrade_query);
4702  }
4703 
4704  appendPQExpBufferStr(upgrade_buffer,
4705  "\n-- For binary upgrade, must preserve multirange pg_type oid\n");
4706  appendPQExpBuffer(upgrade_buffer,
4707  "SELECT pg_catalog.binary_upgrade_set_next_multirange_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4708  pg_type_multirange_oid);
4709  appendPQExpBufferStr(upgrade_buffer,
4710  "\n-- For binary upgrade, must preserve multirange pg_type array oid\n");
4711  appendPQExpBuffer(upgrade_buffer,
4712  "SELECT pg_catalog.binary_upgrade_set_next_multirange_array_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4713  pg_type_multirange_array_oid);
4714  }
4715 
4716  destroyPQExpBuffer(upgrade_query);
4717 }
4718 
4719 static void
4721  PQExpBuffer upgrade_buffer,
4722  Oid pg_rel_oid)
4723 {
4724  PQExpBuffer upgrade_query = createPQExpBuffer();
4725  PGresult *upgrade_res;
4726  Oid pg_type_oid;
4727 
4728  appendPQExpBuffer(upgrade_query,
4729  "SELECT c.reltype AS crel "
4730  "FROM pg_catalog.pg_class c "
4731  "WHERE c.oid = '%u'::pg_catalog.oid;",
4732  pg_rel_oid);
4733 
4734  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4735 
4736  pg_type_oid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "crel")));
4737 
4738  if (OidIsValid(pg_type_oid))
4739  binary_upgrade_set_type_oids_by_type_oid(fout, upgrade_buffer,
4740  pg_type_oid, false, false);
4741 
4742  PQclear(upgrade_res);
4743  destroyPQExpBuffer(upgrade_query);
4744 }
4745 
4746 static void
4748  PQExpBuffer upgrade_buffer, Oid pg_class_oid,
4749  bool is_index)
4750 {
4751  appendPQExpBufferStr(upgrade_buffer,
4752  "\n-- For binary upgrade, must preserve pg_class oids\n");
4753 
4754  if (!is_index)
4755  {
4756  PQExpBuffer upgrade_query = createPQExpBuffer();
4757  PGresult *upgrade_res;
4758  Oid pg_class_reltoastrelid;
4759  char pg_class_relkind;
4760  Oid pg_index_indexrelid;
4761 
4762  appendPQExpBuffer(upgrade_buffer,
4763  "SELECT pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('%u'::pg_catalog.oid);\n",
4764  pg_class_oid);
4765 
4766  /*
4767  * Preserve the OIDs of the table's toast table and index, if any.
4768  * Indexes cannot have toast tables, so we need not make this probe in
4769  * the index code path.
4770  *
4771  * One complexity is that the current table definition might not
4772  * require the creation of a TOAST table, but the old database might
4773  * have a TOAST table that was created earlier, before some wide
4774  * columns were dropped. By setting the TOAST oid we force creation
4775  * of the TOAST heap and index by the new backend, so we can copy the
4776  * files during binary upgrade without worrying about this case.
4777  */
4778  appendPQExpBuffer(upgrade_query,
4779  "SELECT c.reltoastrelid, c.relkind, i.indexrelid "
4780  "FROM pg_catalog.pg_class c LEFT JOIN "
4781  "pg_catalog.pg_index i ON (c.reltoastrelid = i.indrelid AND i.indisvalid) "
4782  "WHERE c.oid = '%u'::pg_catalog.oid;",
4783  pg_class_oid);
4784 
4785  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4786 
4787  pg_class_reltoastrelid = atooid(PQgetvalue(upgrade_res, 0,
4788  PQfnumber(upgrade_res, "reltoastrelid")));
4789  pg_class_relkind = *PQgetvalue(upgrade_res, 0,
4790  PQfnumber(upgrade_res, "relkind"));
4791  pg_index_indexrelid = atooid(PQgetvalue(upgrade_res, 0,
4792  PQfnumber(upgrade_res, "indexrelid")));
4793 
4794  /*
4795  * In a pre-v12 database, partitioned tables might be marked as having
4796  * toast tables, but we should ignore them if so.
4797  */
4798  if (OidIsValid(pg_class_reltoastrelid) &&
4799  pg_class_relkind != RELKIND_PARTITIONED_TABLE)
4800  {
4801  appendPQExpBuffer(upgrade_buffer,
4802  "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('%u'::pg_catalog.oid);\n",
4803  pg_class_reltoastrelid);
4804 
4805  /* every toast table has an index */
4806  appendPQExpBuffer(upgrade_buffer,
4807  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
4808  pg_index_indexrelid);
4809  }
4810 
4811  PQclear(upgrade_res);
4812  destroyPQExpBuffer(upgrade_query);
4813  }
4814  else
4815  appendPQExpBuffer(upgrade_buffer,
4816  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
4817  pg_class_oid);
4818 
4819  appendPQExpBufferChar(upgrade_buffer, '\n');
4820 }
4821 
4822 /*
4823  * If the DumpableObject is a member of an extension, add a suitable
4824  * ALTER EXTENSION ADD command to the creation commands in upgrade_buffer.
4825  *
4826  * For somewhat historical reasons, objname should already be quoted,
4827  * but not objnamespace (if any).
4828  */
4829 static void
4831  const DumpableObject *dobj,
4832  const char *objtype,
4833  const char *objname,
4834  const char *objnamespace)
4835 {
4836  DumpableObject *extobj = NULL;
4837  int i;
4838 
4839  if (!dobj->ext_member)
4840  return;
4841 
4842  /*
4843  * Find the parent extension. We could avoid this search if we wanted to
4844  * add a link field to DumpableObject, but the space costs of that would
4845  * be considerable. We assume that member objects could only have a
4846  * direct dependency on their own extension, not any others.
4847  */
4848  for (i = 0; i < dobj->nDeps; i++)
4849  {
4850  extobj = findObjectByDumpId(dobj->dependencies[i]);
4851  if (extobj && extobj->objType == DO_EXTENSION)
4852  break;
4853  extobj = NULL;
4854  }
4855  if (extobj == NULL)
4856  fatal("could not find parent extension for %s %s",
4857  objtype, objname);
4858 
4859  appendPQExpBufferStr(upgrade_buffer,
4860  "\n-- For binary upgrade, handle extension membership the hard way\n");
4861  appendPQExpBuffer(upgrade_buffer, "ALTER EXTENSION %s ADD %s ",
4862  fmtId(extobj->name),
4863  objtype);
4864  if (objnamespace && *objnamespace)
4865  appendPQExpBuffer(upgrade_buffer, "%s.", fmtId(objnamespace));
4866  appendPQExpBuffer(upgrade_buffer, "%s;\n", objname);
4867 }
4868 
4869 /*
4870  * getNamespaces:
4871  * read all namespaces in the system catalogs and return them in the
4872  * NamespaceInfo* structure
4873  *
4874  * numNamespaces is set to the number of namespaces read in
4875  */
4876 NamespaceInfo *
4878 {
4879  DumpOptions *dopt = fout->dopt;
4880  PGresult *res;
4881  int ntups;
4882  int i;
4883  PQExpBuffer query;
4884  NamespaceInfo *nsinfo;
4885  int i_tableoid;
4886  int i_oid;
4887  int i_nspname;
4888  int i_rolname;
4889  int i_nspacl;
4890  int i_rnspacl;
4891  int i_initnspacl;
4892  int i_initrnspacl;
4893 
4894  query = createPQExpBuffer();
4895 
4896  /*
4897  * we fetch all namespaces including system ones, so that every object we
4898  * read in can be linked to a containing namespace.
4899  */
4900  if (fout->remoteVersion >= 90600)
4901  {
4902  PQExpBuffer acl_subquery = createPQExpBuffer();
4903  PQExpBuffer racl_subquery = createPQExpBuffer();
4904  PQExpBuffer init_acl_subquery = createPQExpBuffer();
4905  PQExpBuffer init_racl_subquery = createPQExpBuffer();
4906 
4907  buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
4908  init_racl_subquery, "n.nspacl", "n.nspowner", "'n'",
4909  dopt->binary_upgrade);
4910 
4911  appendPQExpBuffer(query, "SELECT n.tableoid, n.oid, n.nspname, "
4912  "(%s nspowner) AS rolname, "
4913  "%s as nspacl, "
4914  "%s as rnspacl, "
4915  "%s as initnspacl, "
4916  "%s as initrnspacl "
4917  "FROM pg_namespace n "
4918  "LEFT JOIN pg_init_privs pip "
4919  "ON (n.oid = pip.objoid "
4920  "AND pip.classoid = 'pg_namespace'::regclass "
4921  "AND pip.objsubid = 0",
4923  acl_subquery->data,
4924  racl_subquery->data,
4925  init_acl_subquery->data,
4926  init_racl_subquery->data);
4927 
4928  appendPQExpBufferStr(query, ") ");
4929 
4930  destroyPQExpBuffer(acl_subquery);
4931  destroyPQExpBuffer(racl_subquery);
4932  destroyPQExpBuffer(init_acl_subquery);
4933  destroyPQExpBuffer(init_racl_subquery);
4934  }
4935  else
4936  appendPQExpBuffer(query, "SELECT tableoid, oid, nspname, "
4937  "(%s nspowner) AS rolname, "
4938  "nspacl, NULL as rnspacl, "
4939  "NULL AS initnspacl, NULL as initrnspacl "
4940  "FROM pg_namespace",
4942 
4943  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4944 
4945  ntups = PQntuples(res);
4946 
4947  nsinfo = (NamespaceInfo *) pg_malloc(ntups * sizeof(NamespaceInfo));
4948 
4949  i_tableoid = PQfnumber(res, "tableoid");
4950  i_oid = PQfnumber(res, "oid");
4951  i_nspname = PQfnumber(res, "nspname");
4952  i_rolname = PQfnumber(res, "rolname");
4953  i_nspacl = PQfnumber(res, "nspacl");
4954  i_rnspacl = PQfnumber(res, "rnspacl");
4955  i_initnspacl = PQfnumber(res, "initnspacl");
4956  i_initrnspacl = PQfnumber(res, "initrnspacl");
4957 
4958  for (i = 0; i < ntups; i++)
4959  {
4960  nsinfo[i].dobj.objType = DO_NAMESPACE;
4961  nsinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4962  nsinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4963  AssignDumpId(&nsinfo[i].dobj);
4964  nsinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_nspname));
4965  nsinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4966  nsinfo[i].nspacl = pg_strdup(PQgetvalue(res, i, i_nspacl));
4967  nsinfo[i].rnspacl = pg_strdup(PQgetvalue(res, i, i_rnspacl));
4968  nsinfo[i].initnspacl = pg_strdup(PQgetvalue(res, i, i_initnspacl));
4969  nsinfo[i].initrnspacl = pg_strdup(PQgetvalue(res, i, i_initrnspacl));
4970 
4971  /* Decide whether to dump this namespace */
4972  selectDumpableNamespace(&nsinfo[i], fout);
4973 
4974  /*
4975  * Do not try to dump ACL if the ACL is empty or the default.
4976  *
4977  * This is useful because, for some schemas/objects, the only
4978  * component we are going to try and dump is the ACL and if we can
4979  * remove that then 'dump' goes to zero/false and we don't consider
4980  * this object for dumping at all later on.
4981  */
4982  if (PQgetisnull(res, i, i_nspacl) && PQgetisnull(res, i, i_rnspacl) &&
4983  PQgetisnull(res, i, i_initnspacl) &&
4984  PQgetisnull(res, i, i_initrnspacl))
4985  nsinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4986 
4987  if (strlen(nsinfo[i].rolname) == 0)
4988  pg_log_warning("owner of schema \"%s\" appears to be invalid",
4989  nsinfo[i].dobj.name);
4990  }
4991 
4992  PQclear(res);
4993  destroyPQExpBuffer(query);
4994 
4995  *numNamespaces = ntups;
4996 
4997  return nsinfo;
4998 }
4999 
5000 /*
5001  * findNamespace:
5002  * given a namespace OID, look up the info read by getNamespaces
5003  */
5004 static NamespaceInfo *
5006 {
5007  NamespaceInfo *nsinfo;
5008 
5009  nsinfo = findNamespaceByOid(nsoid);
5010  if (nsinfo == NULL)
5011  fatal("schema with OID %u does not exist", nsoid);
5012  return nsinfo;
5013 }
5014 
5015 /*
5016  * getExtensions:
5017  * read all extensions in the system catalogs and return them in the
5018  * ExtensionInfo* structure
5019  *
5020  * numExtensions is set to the number of extensions read in
5021  */
5022 ExtensionInfo *
5024 {
5025  DumpOptions *dopt = fout->dopt;
5026  PGresult *res;
5027  int ntups;
5028  int i;
5029  PQExpBuffer query;
5030  ExtensionInfo *extinfo;
5031  int i_tableoid;
5032  int i_oid;
5033  int i_extname;
5034  int i_nspname;
5035  int i_extrelocatable;
5036  int i_extversion;
5037  int i_extconfig;
5038  int i_extcondition;
5039 
5040  /*
5041  * Before 9.1, there are no extensions.
5042  */
5043  if (fout->remoteVersion < 90100)
5044  {
5045  *numExtensions = 0;
5046  return NULL;
5047  }
5048 
5049  query = createPQExpBuffer();
5050 
5051  appendPQExpBufferStr(query, "SELECT x.tableoid, x.oid, "
5052  "x.extname, n.nspname, x.extrelocatable, x.extversion, x.extconfig, x.extcondition "
5053  "FROM pg_extension x "
5054  "JOIN pg_namespace n ON n.oid = x.extnamespace");
5055 
5056  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5057 
5058  ntups = PQntuples(res);
5059 
5060  extinfo = (ExtensionInfo *) pg_malloc(ntups * sizeof(ExtensionInfo));
5061 
5062  i_tableoid = PQfnumber(res, "tableoid");
5063  i_oid = PQfnumber(res, "oid");
5064  i_extname = PQfnumber(res, "extname");
5065  i_nspname = PQfnumber(res, "nspname");
5066  i_extrelocatable = PQfnumber(res, "extrelocatable");
5067  i_extversion = PQfnumber(res, "extversion");
5068  i_extconfig = PQfnumber(res, "extconfig");
5069  i_extcondition = PQfnumber(res, "extcondition");
5070 
5071  for (i = 0; i < ntups; i++)
5072  {
5073  extinfo[i].dobj.objType = DO_EXTENSION;
5074  extinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5075  extinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5076  AssignDumpId(&extinfo[i].dobj);
5077  extinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_extname));
5078  extinfo[i].namespace = pg_strdup(PQgetvalue(res, i, i_nspname));
5079  extinfo[i].relocatable = *(PQgetvalue(res, i, i_extrelocatable)) == 't';
5080  extinfo[i].extversion = pg_strdup(PQgetvalue(res, i, i_extversion));
5081  extinfo[i].extconfig = pg_strdup(PQgetvalue(res, i, i_extconfig));
5082  extinfo[i].extcondition = pg_strdup(PQgetvalue(res, i, i_extcondition));
5083 
5084  /* Decide whether we want to dump it */
5085  selectDumpableExtension(&(extinfo[i]), dopt);
5086  }
5087 
5088  PQclear(res);
5089  destroyPQExpBuffer(query);
5090 
5091  *numExtensions = ntups;
5092 
5093  return extinfo;
5094 }
5095 
5096 /*
5097  * getTypes:
5098  * read all types in the system catalogs and return them in the
5099  * TypeInfo* structure
5100  *
5101  * numTypes is set to the number of types read in
5102  *
5103  * NB: this must run after getFuncs() because we assume we can do
5104  * findFuncByOid().
5105  */
5106 TypeInfo *
5108 {
5109  DumpOptions *dopt = fout->dopt;
5110  PGresult *res;
5111  int ntups;
5112  int i;
5113  PQExpBuffer query = createPQExpBuffer();
5114  TypeInfo *tyinfo;
5115  ShellTypeInfo *stinfo;
5116  int i_tableoid;
5117  int i_oid;
5118  int i_typname;
5119  int i_typnamespace;
5120  int i_typacl;
5121  int i_rtypacl;
5122  int i_inittypacl;
5123  int i_initrtypacl;
5124  int i_rolname;
5125  int i_typelem;
5126  int i_typrelid;
5127  int i_typrelkind;
5128  int i_typtype;
5129  int i_typisdefined;
5130  int i_isarray;
5131 
5132  /*
5133  * we include even the built-in types because those may be used as array
5134  * elements by user-defined types
5135  *
5136  * we filter out the built-in types when we dump out the types
5137  *
5138  * same approach for undefined (shell) types and array types
5139  *
5140  * Note: as of 8.3 we can reliably detect whether a type is an
5141  * auto-generated array type by checking the element type's typarray.
5142  * (Before that the test is capable of generating false positives.) We
5143  * still check for name beginning with '_', though, so as to avoid the
5144  * cost of the subselect probe for all standard types. This would have to
5145  * be revisited if the backend ever allows renaming of array types.
5146  */
5147 
5148  if (fout->remoteVersion >= 90600)
5149  {
5150  PQExpBuffer acl_subquery = createPQExpBuffer();
5151  PQExpBuffer racl_subquery = createPQExpBuffer();
5152  PQExpBuffer initacl_subquery = createPQExpBuffer();
5153  PQExpBuffer initracl_subquery = createPQExpBuffer();
5154 
5155  buildACLQueries(acl_subquery, racl_subquery, initacl_subquery,
5156  initracl_subquery, "t.typacl", "t.typowner", "'T'",
5157  dopt->binary_upgrade);
5158 
5159  appendPQExpBuffer(query, "SELECT t.tableoid, t.oid, t.typname, "
5160  "t.typnamespace, "
5161  "%s AS typacl, "
5162  "%s AS rtypacl, "
5163  "%s AS inittypacl, "
5164  "%s AS initrtypacl, "
5165  "(%s t.typowner) AS rolname, "
5166  "t.typelem, t.typrelid, "
5167  "CASE WHEN t.typrelid = 0 THEN ' '::\"char\" "
5168  "ELSE (SELECT relkind FROM pg_class WHERE oid = t.typrelid) END AS typrelkind, "
5169  "t.typtype, t.typisdefined, "
5170  "t.typname[0] = '_' AND t.typelem != 0 AND "
5171  "(SELECT typarray FROM pg_type te WHERE oid = t.typelem) = t.oid AS isarray "
5172  "FROM pg_type t "
5173  "LEFT JOIN pg_init_privs pip ON "
5174  "(t.oid = pip.objoid "
5175  "AND pip.classoid = 'pg_type'::regclass "
5176  "AND pip.objsubid = 0) ",
5177  acl_subquery->data,
5178  racl_subquery->data,
5179  initacl_subquery->data,
5180  initracl_subquery->data,
5182 
5183  destroyPQExpBuffer(acl_subquery);
5184  destroyPQExpBuffer(racl_subquery);
5185  destroyPQExpBuffer(initacl_subquery);
5186  destroyPQExpBuffer(initracl_subquery);
5187  }
5188  else if (fout->remoteVersion >= 90200)
5189  {
5190  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
5191  "typnamespace, typacl, NULL as rtypacl, "
5192  "NULL AS inittypacl, NULL AS initrtypacl, "
5193  "(%s typowner) AS rolname, "
5194  "typelem, typrelid, "
5195  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
5196  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
5197  "typtype, typisdefined, "
5198  "typname[0] = '_' AND typelem != 0 AND "
5199  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
5200  "FROM pg_type",
5202  }
5203  else if (fout->remoteVersion >= 80300)
5204  {
5205  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
5206  "typnamespace, NULL AS typacl, NULL as rtypacl, "
5207  "NULL AS inittypacl, NULL AS initrtypacl, "
5208  "(%s typowner) AS rolname, "
5209  "typelem, typrelid, "
5210  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
5211  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
5212  "typtype, typisdefined, "
5213  "typname[0] = '_' AND typelem != 0 AND "
5214  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
5215  "FROM pg_type",
5217  }
5218  else
5219  {
5220  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
5221  "typnamespace, NULL AS typacl, NULL as rtypacl, "
5222  "NULL AS inittypacl, NULL AS initrtypacl, "
5223  "(%s typowner) AS rolname, "
5224  "typelem, typrelid, "
5225  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
5226  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
5227  "typtype, typisdefined, "
5228  "typname[0] = '_' AND typelem != 0 AS isarray "
5229  "FROM pg_type",
5231  }
5232 
5233  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5234 
5235  ntups = PQntuples(res);
5236 
5237  tyinfo = (TypeInfo *) pg_malloc(ntups * sizeof(TypeInfo));
5238 
5239  i_tableoid = PQfnumber(res, "tableoid");
5240  i_oid = PQfnumber(res, "oid");
5241  i_typname = PQfnumber(res, "typname");
5242  i_typnamespace = PQfnumber(res, "typnamespace");
5243  i_typacl = PQfnumber(res, "typacl");
5244  i_rtypacl = PQfnumber(res, "rtypacl");
5245  i_inittypacl = PQfnumber(res, "inittypacl");
5246  i_initrtypacl = PQfnumber(res, "initrtypacl");
5247  i_rolname = PQfnumber(res, "rolname");
5248  i_typelem = PQfnumber(res, "typelem");
5249  i_typrelid = PQfnumber(res, "typrelid");
5250  i_typrelkind = PQfnumber(res, "typrelkind");
5251  i_typtype = PQfnumber(res, "typtype");
5252  i_typisdefined = PQfnumber(res, "typisdefined");
5253  i_isarray = PQfnumber(res, "isarray");
5254 
5255  for (i = 0; i < ntups; i++)
5256  {
5257  tyinfo[i].dobj.objType = DO_TYPE;
5258  tyinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5259  tyinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5260  AssignDumpId(&tyinfo[i].dobj);
5261  tyinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_typname));
5262  tyinfo[i].dobj.namespace =
5263  findNamespace(atooid(PQgetvalue(res, i, i_typnamespace)));
5264  tyinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5265  tyinfo[i].typacl = pg_strdup(PQgetvalue(res, i, i_typacl));
5266  tyinfo[i].rtypacl = pg_strdup(PQgetvalue(res, i, i_rtypacl));
5267  tyinfo[i].inittypacl = pg_strdup(PQgetvalue(res, i, i_inittypacl));
5268  tyinfo[i].initrtypacl = pg_strdup(PQgetvalue(res, i, i_initrtypacl));
5269  tyinfo[i].typelem = atooid(PQgetvalue(res, i, i_typelem));
5270  tyinfo[i].typrelid = atooid(PQgetvalue(res, i, i_typrelid));
5271  tyinfo[i].typrelkind = *PQgetvalue(res, i, i_typrelkind);
5272  tyinfo[i].typtype = *PQgetvalue(res, i, i_typtype);
5273  tyinfo[i].shellType = NULL;
5274 
5275  if (strcmp(PQgetvalue(res, i, i_typisdefined), "t") == 0)
5276  tyinfo[i].isDefined = true;
5277  else
5278  tyinfo[i].isDefined = false;
5279 
5280  if (strcmp(PQgetvalue(res, i, i_isarray), "t") == 0)
5281