PostgreSQL Source Code  git master
pg_dump.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * pg_dump.c
4  * pg_dump is a utility for dumping out a postgres database
5  * into a script file.
6  *
7  * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994, Regents of the University of California
9  *
10  * pg_dump will read the system catalogs in a database and dump out a
11  * script that reproduces the schema in terms of SQL that is understood
12  * by PostgreSQL
13  *
14  * Note that pg_dump runs in a transaction-snapshot mode transaction,
15  * so it sees a consistent snapshot of the database including system
16  * catalogs. However, it relies in part on various specialized backend
17  * functions like pg_get_indexdef(), and those things tend to look at
18  * the currently committed state. So it is possible to get 'cache
19  * lookup failed' error if someone performs DDL changes while a dump is
20  * happening. The window for this sort of thing is from the acquisition
21  * of the transaction snapshot to getSchemaData() (when pg_dump acquires
22  * AccessShareLock on every table it intends to dump). It isn't very large,
23  * but it can happen.
24  *
25  * http://archives.postgresql.org/pgsql-bugs/2010-02/msg00187.php
26  *
27  * IDENTIFICATION
28  * src/bin/pg_dump/pg_dump.c
29  *
30  *-------------------------------------------------------------------------
31  */
32 #include "postgres_fe.h"
33 
34 #include <unistd.h>
35 #include <ctype.h>
36 #include <limits.h>
37 #ifdef HAVE_TERMIOS_H
38 #include <termios.h>
39 #endif
40 
41 #include "access/attnum.h"
42 #include "access/sysattr.h"
43 #include "access/transam.h"
44 #include "catalog/pg_aggregate_d.h"
45 #include "catalog/pg_am_d.h"
46 #include "catalog/pg_attribute_d.h"
47 #include "catalog/pg_cast_d.h"
48 #include "catalog/pg_class_d.h"
49 #include "catalog/pg_default_acl_d.h"
50 #include "catalog/pg_largeobject_d.h"
51 #include "catalog/pg_largeobject_metadata_d.h"
52 #include "catalog/pg_proc_d.h"
53 #include "catalog/pg_trigger_d.h"
54 #include "catalog/pg_type_d.h"
55 #include "dumputils.h"
56 #include "fe_utils/connect.h"
57 #include "fe_utils/string_utils.h"
58 #include "getopt_long.h"
59 #include "libpq/libpq-fs.h"
60 #include "parallel.h"
61 #include "pg_backup_db.h"
62 #include "pg_backup_utils.h"
63 #include "pg_dump.h"
64 #include "storage/block.h"
65 
66 typedef struct
67 {
68  const char *descr; /* comment for an object */
69  Oid classoid; /* object class (catalog OID) */
70  Oid objoid; /* object OID */
71  int objsubid; /* subobject (table column #) */
72 } CommentItem;
73 
74 typedef struct
75 {
76  const char *provider; /* label provider of this security label */
77  const char *label; /* security label for an object */
78  Oid classoid; /* object class (catalog OID) */
79  Oid objoid; /* object OID */
80  int objsubid; /* subobject (table column #) */
81 } SecLabelItem;
82 
/*
 * Flags controlling how getFormattedTypeName() renders a zero (InvalidOid)
 * type OID.  The extraction of this file dropped three enumerators, leaving
 * an invalid enum; they are restored here from the upstream source.
 */
typedef enum OidOptions
{
	zeroAsOpaque = 1,			/* render 0 as "opaque" */
	zeroAsAny = 2,				/* render 0 as "'any'" */
	zeroAsStar = 4,				/* render 0 as "*" */
	zeroAsNone = 8				/* render 0 as "NONE" */
} OidOptions;
90 
91 /* global decls */
92 static bool dosync = true; /* Issue fsync() to make dump durable on disk. */
93 
94 /* subquery used to convert user ID (eg, datdba) to user name */
95 static const char *username_subquery;
96 
97 /*
98  * For 8.0 and earlier servers, pulled from pg_database, for 8.1+ we use
99  * FirstNormalObjectId - 1.
100  */
101 static Oid g_last_builtin_oid; /* value of the last builtin oid */
102 
103 /* The specified names/patterns should to match at least one entity */
104 static int strict_names = 0;
105 
106 /*
107  * Object inclusion/exclusion lists
108  *
109  * The string lists record the patterns given by command-line switches,
110  * which we then convert to lists of OIDs of matching objects.
111  */
113 static SimpleOidList schema_include_oids = {NULL, NULL};
115 static SimpleOidList schema_exclude_oids = {NULL, NULL};
116 
118 static SimpleOidList table_include_oids = {NULL, NULL};
120 static SimpleOidList table_exclude_oids = {NULL, NULL};
122 static SimpleOidList tabledata_exclude_oids = {NULL, NULL};
123 
124 
125 char g_opaque_type[10]; /* name for the opaque type */
126 
127 /* placeholders for the delimiters for comments */
129 char g_comment_end[10];
130 
131 static const CatalogId nilCatalogId = {0, 0};
132 
133 /* override for standard extra_float_digits setting */
134 static bool have_extra_float_digits = false;
136 
/*
 * When --inserts is given without --rows-per-insert, emit this many rows
 * per INSERT statement.
 */
#define DUMP_DEFAULT_ROWS_PER_INSERT 1

/*
 * Produce the quoted, schema-qualified name of a dumpable object
 * (expands to a call of fmtQualifiedId on the object's namespace and name).
 */
#define fmtQualifiedDumpable(obj) \
	fmtQualifiedId((obj)->dobj.namespace->dobj.name, \
				   (obj)->dobj.name)

150 static void help(const char *progname);
151 static void setup_connection(Archive *AH,
152  const char *dumpencoding, const char *dumpsnapshot,
153  char *use_role);
155 static void expand_schema_name_patterns(Archive *fout,
156  SimpleStringList *patterns,
157  SimpleOidList *oids,
158  bool strict_names);
159 static void expand_table_name_patterns(Archive *fout,
160  SimpleStringList *patterns,
161  SimpleOidList *oids,
162  bool strict_names);
163 static NamespaceInfo *findNamespace(Archive *fout, Oid nsoid);
164 static void dumpTableData(Archive *fout, TableDataInfo *tdinfo);
165 static void refreshMatViewData(Archive *fout, TableDataInfo *tdinfo);
166 static void guessConstraintInheritance(TableInfo *tblinfo, int numTables);
167 static void dumpComment(Archive *fout, const char *type, const char *name,
168  const char *namespace, const char *owner,
169  CatalogId catalogId, int subid, DumpId dumpId);
170 static int findComments(Archive *fout, Oid classoid, Oid objoid,
171  CommentItem **items);
172 static int collectComments(Archive *fout, CommentItem **items);
173 static void dumpSecLabel(Archive *fout, const char *type, const char *name,
174  const char *namespace, const char *owner,
175  CatalogId catalogId, int subid, DumpId dumpId);
176 static int findSecLabels(Archive *fout, Oid classoid, Oid objoid,
177  SecLabelItem **items);
178 static int collectSecLabels(Archive *fout, SecLabelItem **items);
179 static void dumpDumpableObject(Archive *fout, DumpableObject *dobj);
180 static void dumpNamespace(Archive *fout, NamespaceInfo *nspinfo);
181 static void dumpExtension(Archive *fout, ExtensionInfo *extinfo);
182 static void dumpType(Archive *fout, TypeInfo *tyinfo);
183 static void dumpBaseType(Archive *fout, TypeInfo *tyinfo);
184 static void dumpEnumType(Archive *fout, TypeInfo *tyinfo);
185 static void dumpRangeType(Archive *fout, TypeInfo *tyinfo);
186 static void dumpUndefinedType(Archive *fout, TypeInfo *tyinfo);
187 static void dumpDomain(Archive *fout, TypeInfo *tyinfo);
188 static void dumpCompositeType(Archive *fout, TypeInfo *tyinfo);
189 static void dumpCompositeTypeColComments(Archive *fout, TypeInfo *tyinfo);
190 static void dumpShellType(Archive *fout, ShellTypeInfo *stinfo);
191 static void dumpProcLang(Archive *fout, ProcLangInfo *plang);
192 static void dumpFunc(Archive *fout, FuncInfo *finfo);
193 static void dumpCast(Archive *fout, CastInfo *cast);
194 static void dumpTransform(Archive *fout, TransformInfo *transform);
195 static void dumpOpr(Archive *fout, OprInfo *oprinfo);
196 static void dumpAccessMethod(Archive *fout, AccessMethodInfo *oprinfo);
197 static void dumpOpclass(Archive *fout, OpclassInfo *opcinfo);
198 static void dumpOpfamily(Archive *fout, OpfamilyInfo *opfinfo);
199 static void dumpCollation(Archive *fout, CollInfo *collinfo);
200 static void dumpConversion(Archive *fout, ConvInfo *convinfo);
201 static void dumpRule(Archive *fout, RuleInfo *rinfo);
202 static void dumpAgg(Archive *fout, AggInfo *agginfo);
203 static void dumpTrigger(Archive *fout, TriggerInfo *tginfo);
204 static void dumpEventTrigger(Archive *fout, EventTriggerInfo *evtinfo);
205 static void dumpTable(Archive *fout, TableInfo *tbinfo);
206 static void dumpTableSchema(Archive *fout, TableInfo *tbinfo);
207 static void dumpAttrDef(Archive *fout, AttrDefInfo *adinfo);
208 static void dumpSequence(Archive *fout, TableInfo *tbinfo);
209 static void dumpSequenceData(Archive *fout, TableDataInfo *tdinfo);
210 static void dumpIndex(Archive *fout, IndxInfo *indxinfo);
211 static void dumpIndexAttach(Archive *fout, IndexAttachInfo *attachinfo);
212 static void dumpStatisticsExt(Archive *fout, StatsExtInfo *statsextinfo);
213 static void dumpConstraint(Archive *fout, ConstraintInfo *coninfo);
214 static void dumpTableConstraintComment(Archive *fout, ConstraintInfo *coninfo);
215 static void dumpTSParser(Archive *fout, TSParserInfo *prsinfo);
216 static void dumpTSDictionary(Archive *fout, TSDictInfo *dictinfo);
217 static void dumpTSTemplate(Archive *fout, TSTemplateInfo *tmplinfo);
218 static void dumpTSConfig(Archive *fout, TSConfigInfo *cfginfo);
219 static void dumpForeignDataWrapper(Archive *fout, FdwInfo *fdwinfo);
220 static void dumpForeignServer(Archive *fout, ForeignServerInfo *srvinfo);
221 static void dumpUserMappings(Archive *fout,
222  const char *servername, const char *namespace,
223  const char *owner, CatalogId catalogId, DumpId dumpId);
224 static void dumpDefaultACL(Archive *fout, DefaultACLInfo *daclinfo);
225 
226 static void dumpACL(Archive *fout, CatalogId objCatId, DumpId objDumpId,
227  const char *type, const char *name, const char *subname,
228  const char *nspname, const char *owner,
229  const char *acls, const char *racls,
230  const char *initacls, const char *initracls);
231 
232 static void getDependencies(Archive *fout);
233 static void BuildArchiveDependencies(Archive *fout);
235  DumpId **dependencies, int *nDeps, int *allocDeps);
236 
238 static void addBoundaryDependencies(DumpableObject **dobjs, int numObjs,
239  DumpableObject *boundaryObjs);
240 
241 static void getDomainConstraints(Archive *fout, TypeInfo *tyinfo);
242 static void getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, char relkind);
243 static void makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo);
244 static void buildMatViewRefreshDependencies(Archive *fout);
245 static void getTableDataFKConstraints(void);
246 static char *format_function_arguments(FuncInfo *finfo, char *funcargs,
247  bool is_agg);
248 static char *format_function_arguments_old(Archive *fout,
249  FuncInfo *finfo, int nallargs,
250  char **allargtypes,
251  char **argmodes,
252  char **argnames);
253 static char *format_function_signature(Archive *fout,
254  FuncInfo *finfo, bool honor_quotes);
255 static char *convertRegProcReference(Archive *fout,
256  const char *proc);
257 static char *getFormattedOperatorName(Archive *fout, const char *oproid);
258 static char *convertTSFunction(Archive *fout, Oid funcOid);
259 static Oid findLastBuiltinOid_V71(Archive *fout);
260 static char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts);
261 static void getBlobs(Archive *fout);
262 static void dumpBlob(Archive *fout, BlobInfo *binfo);
263 static int dumpBlobs(Archive *fout, void *arg);
264 static void dumpPolicy(Archive *fout, PolicyInfo *polinfo);
265 static void dumpPublication(Archive *fout, PublicationInfo *pubinfo);
266 static void dumpPublicationTable(Archive *fout, PublicationRelInfo *pubrinfo);
267 static void dumpSubscription(Archive *fout, SubscriptionInfo *subinfo);
268 static void dumpDatabase(Archive *AH);
269 static void dumpDatabaseConfig(Archive *AH, PQExpBuffer outbuf,
270  const char *dbname, Oid dboid);
271 static void dumpEncoding(Archive *AH);
272 static void dumpStdStrings(Archive *AH);
273 static void dumpSearchPath(Archive *AH);
275  PQExpBuffer upgrade_buffer,
276  Oid pg_type_oid,
277  bool force_array_type);
279  PQExpBuffer upgrade_buffer, Oid pg_rel_oid);
280 static void binary_upgrade_set_pg_class_oids(Archive *fout,
281  PQExpBuffer upgrade_buffer,
282  Oid pg_class_oid, bool is_index);
283 static void binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
284  DumpableObject *dobj,
285  const char *objtype,
286  const char *objname,
287  const char *objnamespace);
288 static const char *getAttrName(int attrnum, TableInfo *tblInfo);
289 static const char *fmtCopyColumnList(const TableInfo *ti, PQExpBuffer buffer);
290 static bool nonemptyReloptions(const char *reloptions);
291 static void appendReloptionsArrayAH(PQExpBuffer buffer, const char *reloptions,
292  const char *prefix, Archive *fout);
293 static char *get_synchronized_snapshot(Archive *fout);
294 static void setupDumpWorker(Archive *AHX);
295 static TableInfo *getRootTableInfo(TableInfo *tbinfo);
296 
297 
298 int
299 main(int argc, char **argv)
300 {
301  int c;
302  const char *filename = NULL;
303  const char *format = "p";
304  TableInfo *tblinfo;
305  int numTables;
306  DumpableObject **dobjs;
307  int numObjs;
308  DumpableObject *boundaryObjs;
309  int i;
310  int optindex;
311  char *endptr;
312  RestoreOptions *ropt;
313  Archive *fout; /* the script file */
314  bool g_verbose = false;
315  const char *dumpencoding = NULL;
316  const char *dumpsnapshot = NULL;
317  char *use_role = NULL;
318  long rowsPerInsert;
319  int numWorkers = 1;
320  trivalue prompt_password = TRI_DEFAULT;
321  int compressLevel = -1;
322  int plainText = 0;
323  ArchiveFormat archiveFormat = archUnknown;
324  ArchiveMode archiveMode;
325 
326  static DumpOptions dopt;
327 
328  static struct option long_options[] = {
329  {"data-only", no_argument, NULL, 'a'},
330  {"blobs", no_argument, NULL, 'b'},
331  {"no-blobs", no_argument, NULL, 'B'},
332  {"clean", no_argument, NULL, 'c'},
333  {"create", no_argument, NULL, 'C'},
334  {"dbname", required_argument, NULL, 'd'},
335  {"file", required_argument, NULL, 'f'},
336  {"format", required_argument, NULL, 'F'},
337  {"host", required_argument, NULL, 'h'},
338  {"jobs", 1, NULL, 'j'},
339  {"no-reconnect", no_argument, NULL, 'R'},
340  {"no-owner", no_argument, NULL, 'O'},
341  {"port", required_argument, NULL, 'p'},
342  {"schema", required_argument, NULL, 'n'},
343  {"exclude-schema", required_argument, NULL, 'N'},
344  {"schema-only", no_argument, NULL, 's'},
345  {"superuser", required_argument, NULL, 'S'},
346  {"table", required_argument, NULL, 't'},
347  {"exclude-table", required_argument, NULL, 'T'},
348  {"no-password", no_argument, NULL, 'w'},
349  {"password", no_argument, NULL, 'W'},
350  {"username", required_argument, NULL, 'U'},
351  {"verbose", no_argument, NULL, 'v'},
352  {"no-privileges", no_argument, NULL, 'x'},
353  {"no-acl", no_argument, NULL, 'x'},
354  {"compress", required_argument, NULL, 'Z'},
355  {"encoding", required_argument, NULL, 'E'},
356  {"help", no_argument, NULL, '?'},
357  {"version", no_argument, NULL, 'V'},
358 
359  /*
360  * the following options don't have an equivalent short option letter
361  */
362  {"attribute-inserts", no_argument, &dopt.column_inserts, 1},
363  {"binary-upgrade", no_argument, &dopt.binary_upgrade, 1},
364  {"column-inserts", no_argument, &dopt.column_inserts, 1},
365  {"disable-dollar-quoting", no_argument, &dopt.disable_dollar_quoting, 1},
366  {"disable-triggers", no_argument, &dopt.disable_triggers, 1},
367  {"enable-row-security", no_argument, &dopt.enable_row_security, 1},
368  {"exclude-table-data", required_argument, NULL, 4},
369  {"extra-float-digits", required_argument, NULL, 8},
370  {"if-exists", no_argument, &dopt.if_exists, 1},
371  {"inserts", no_argument, NULL, 9},
372  {"lock-wait-timeout", required_argument, NULL, 2},
373  {"no-tablespaces", no_argument, &dopt.outputNoTablespaces, 1},
374  {"quote-all-identifiers", no_argument, &quote_all_identifiers, 1},
375  {"load-via-partition-root", no_argument, &dopt.load_via_partition_root, 1},
376  {"role", required_argument, NULL, 3},
377  {"section", required_argument, NULL, 5},
378  {"serializable-deferrable", no_argument, &dopt.serializable_deferrable, 1},
379  {"snapshot", required_argument, NULL, 6},
380  {"strict-names", no_argument, &strict_names, 1},
381  {"use-set-session-authorization", no_argument, &dopt.use_setsessauth, 1},
382  {"no-comments", no_argument, &dopt.no_comments, 1},
383  {"no-publications", no_argument, &dopt.no_publications, 1},
384  {"no-security-labels", no_argument, &dopt.no_security_labels, 1},
385  {"no-synchronized-snapshots", no_argument, &dopt.no_synchronized_snapshots, 1},
386  {"no-unlogged-table-data", no_argument, &dopt.no_unlogged_table_data, 1},
387  {"no-subscriptions", no_argument, &dopt.no_subscriptions, 1},
388  {"no-sync", no_argument, NULL, 7},
389  {"on-conflict-do-nothing", no_argument, &dopt.do_nothing, 1},
390  {"rows-per-insert", required_argument, NULL, 10},
391 
392  {NULL, 0, NULL, 0}
393  };
394 
395  pg_logging_init(argv[0]);
397  set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_dump"));
398 
399  /*
400  * Initialize what we need for parallel execution, especially for thread
401  * support on Windows.
402  */
404 
405  strcpy(g_comment_start, "-- ");
406  g_comment_end[0] = '\0';
407  strcpy(g_opaque_type, "opaque");
408 
409  progname = get_progname(argv[0]);
410 
411  if (argc > 1)
412  {
413  if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
414  {
415  help(progname);
416  exit_nicely(0);
417  }
418  if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
419  {
420  puts("pg_dump (PostgreSQL) " PG_VERSION);
421  exit_nicely(0);
422  }
423  }
424 
425  InitDumpOptions(&dopt);
426 
427  while ((c = getopt_long(argc, argv, "abBcCd:E:f:F:h:j:n:N:Op:RsS:t:T:U:vwWxZ:",
428  long_options, &optindex)) != -1)
429  {
430  switch (c)
431  {
432  case 'a': /* Dump data only */
433  dopt.dataOnly = true;
434  break;
435 
436  case 'b': /* Dump blobs */
437  dopt.outputBlobs = true;
438  break;
439 
440  case 'B': /* Don't dump blobs */
441  dopt.dontOutputBlobs = true;
442  break;
443 
444  case 'c': /* clean (i.e., drop) schema prior to create */
445  dopt.outputClean = 1;
446  break;
447 
448  case 'C': /* Create DB */
449  dopt.outputCreateDB = 1;
450  break;
451 
452  case 'd': /* database name */
453  dopt.dbname = pg_strdup(optarg);
454  break;
455 
456  case 'E': /* Dump encoding */
457  dumpencoding = pg_strdup(optarg);
458  break;
459 
460  case 'f':
461  filename = pg_strdup(optarg);
462  break;
463 
464  case 'F':
465  format = pg_strdup(optarg);
466  break;
467 
468  case 'h': /* server host */
469  dopt.pghost = pg_strdup(optarg);
470  break;
471 
472  case 'j': /* number of dump jobs */
473  numWorkers = atoi(optarg);
474  break;
475 
476  case 'n': /* include schema(s) */
477  simple_string_list_append(&schema_include_patterns, optarg);
478  dopt.include_everything = false;
479  break;
480 
481  case 'N': /* exclude schema(s) */
482  simple_string_list_append(&schema_exclude_patterns, optarg);
483  break;
484 
485  case 'O': /* Don't reconnect to match owner */
486  dopt.outputNoOwner = 1;
487  break;
488 
489  case 'p': /* server port */
490  dopt.pgport = pg_strdup(optarg);
491  break;
492 
493  case 'R':
494  /* no-op, still accepted for backwards compatibility */
495  break;
496 
497  case 's': /* dump schema only */
498  dopt.schemaOnly = true;
499  break;
500 
501  case 'S': /* Username for superuser in plain text output */
503  break;
504 
505  case 't': /* include table(s) */
506  simple_string_list_append(&table_include_patterns, optarg);
507  dopt.include_everything = false;
508  break;
509 
510  case 'T': /* exclude table(s) */
511  simple_string_list_append(&table_exclude_patterns, optarg);
512  break;
513 
514  case 'U':
515  dopt.username = pg_strdup(optarg);
516  break;
517 
518  case 'v': /* verbose */
519  g_verbose = true;
521  break;
522 
523  case 'w':
524  prompt_password = TRI_NO;
525  break;
526 
527  case 'W':
528  prompt_password = TRI_YES;
529  break;
530 
531  case 'x': /* skip ACL dump */
532  dopt.aclsSkip = true;
533  break;
534 
535  case 'Z': /* Compression Level */
536  compressLevel = atoi(optarg);
537  if (compressLevel < 0 || compressLevel > 9)
538  {
539  pg_log_error("compression level must be in range 0..9");
540  exit_nicely(1);
541  }
542  break;
543 
544  case 0:
545  /* This covers the long options. */
546  break;
547 
548  case 2: /* lock-wait-timeout */
550  break;
551 
552  case 3: /* SET ROLE */
553  use_role = pg_strdup(optarg);
554  break;
555 
556  case 4: /* exclude table(s) data */
557  simple_string_list_append(&tabledata_exclude_patterns, optarg);
558  break;
559 
560  case 5: /* section */
562  break;
563 
564  case 6: /* snapshot */
565  dumpsnapshot = pg_strdup(optarg);
566  break;
567 
568  case 7: /* no-sync */
569  dosync = false;
570  break;
571 
572  case 8:
574  extra_float_digits = atoi(optarg);
575  if (extra_float_digits < -15 || extra_float_digits > 3)
576  {
577  pg_log_error("extra_float_digits must be in range -15..3");
578  exit_nicely(1);
579  }
580  break;
581 
582  case 9: /* inserts */
583 
584  /*
585  * dump_inserts also stores --rows-per-insert, careful not to
586  * overwrite that.
587  */
588  if (dopt.dump_inserts == 0)
590  break;
591 
592  case 10: /* rows per insert */
593  errno = 0;
594  rowsPerInsert = strtol(optarg, &endptr, 10);
595 
596  if (endptr == optarg || *endptr != '\0' ||
597  rowsPerInsert <= 0 || rowsPerInsert > INT_MAX ||
598  errno == ERANGE)
599  {
600  pg_log_error("rows-per-insert must be in range %d..%d",
601  1, INT_MAX);
602  exit_nicely(1);
603  }
604  dopt.dump_inserts = (int) rowsPerInsert;
605  break;
606 
607  default:
608  fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
609  exit_nicely(1);
610  }
611  }
612 
613  /*
614  * Non-option argument specifies database name as long as it wasn't
615  * already specified with -d / --dbname
616  */
617  if (optind < argc && dopt.dbname == NULL)
618  dopt.dbname = argv[optind++];
619 
620  /* Complain if any arguments remain */
621  if (optind < argc)
622  {
623  pg_log_error("too many command-line arguments (first is \"%s\")",
624  argv[optind]);
625  fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
626  progname);
627  exit_nicely(1);
628  }
629 
630  /* --column-inserts implies --inserts */
631  if (dopt.column_inserts && dopt.dump_inserts == 0)
633 
634  /*
635  * Binary upgrade mode implies dumping sequence data even in schema-only
636  * mode. This is not exposed as a separate option, but kept separate
637  * internally for clarity.
638  */
639  if (dopt.binary_upgrade)
640  dopt.sequence_data = 1;
641 
642  if (dopt.dataOnly && dopt.schemaOnly)
643  {
644  pg_log_error("options -s/--schema-only and -a/--data-only cannot be used together");
645  exit_nicely(1);
646  }
647 
648  if (dopt.dataOnly && dopt.outputClean)
649  {
650  pg_log_error("options -c/--clean and -a/--data-only cannot be used together");
651  exit_nicely(1);
652  }
653 
654  if (dopt.if_exists && !dopt.outputClean)
655  fatal("option --if-exists requires option -c/--clean");
656 
657  /*
658  * --inserts are already implied above if --column-inserts or
659  * --rows-per-insert were specified.
660  */
661  if (dopt.do_nothing && dopt.dump_inserts == 0)
662  fatal("option --on-conflict-do-nothing requires option --inserts, --rows-per-insert, or --column-inserts");
663 
664  /* Identify archive format to emit */
665  archiveFormat = parseArchiveFormat(format, &archiveMode);
666 
667  /* archiveFormat specific setup */
668  if (archiveFormat == archNull)
669  plainText = 1;
670 
671  /* Custom and directory formats are compressed by default, others not */
672  if (compressLevel == -1)
673  {
674 #ifdef HAVE_LIBZ
675  if (archiveFormat == archCustom || archiveFormat == archDirectory)
676  compressLevel = Z_DEFAULT_COMPRESSION;
677  else
678 #endif
679  compressLevel = 0;
680  }
681 
682 #ifndef HAVE_LIBZ
683  if (compressLevel != 0)
684  pg_log_warning("requested compression not available in this installation -- archive will be uncompressed");
685  compressLevel = 0;
686 #endif
687 
688  /*
689  * If emitting an archive format, we always want to emit a DATABASE item,
690  * in case --create is specified at pg_restore time.
691  */
692  if (!plainText)
693  dopt.outputCreateDB = 1;
694 
695  /*
696  * On Windows we can only have at most MAXIMUM_WAIT_OBJECTS (= 64 usually)
697  * parallel jobs because that's the maximum limit for the
698  * WaitForMultipleObjects() call.
699  */
700  if (numWorkers <= 0
701 #ifdef WIN32
702  || numWorkers > MAXIMUM_WAIT_OBJECTS
703 #endif
704  )
705  fatal("invalid number of parallel jobs");
706 
707  /* Parallel backup only in the directory archive format so far */
708  if (archiveFormat != archDirectory && numWorkers > 1)
709  fatal("parallel backup only supported by the directory format");
710 
711  /* Open the output file */
712  fout = CreateArchive(filename, archiveFormat, compressLevel, dosync,
713  archiveMode, setupDumpWorker);
714 
715  /* Make dump options accessible right away */
716  SetArchiveOptions(fout, &dopt, NULL);
717 
718  /* Register the cleanup hook */
719  on_exit_close_archive(fout);
720 
721  /* Let the archiver know how noisy to be */
722  fout->verbose = g_verbose;
723 
724 
725  /*
726  * We allow the server to be back to 8.0, and up to any minor release of
727  * our own major version. (See also version check in pg_dumpall.c.)
728  */
729  fout->minRemoteVersion = 80000;
730  fout->maxRemoteVersion = (PG_VERSION_NUM / 100) * 100 + 99;
731 
732  fout->numWorkers = numWorkers;
733 
734  /*
735  * Open the database using the Archiver, so it knows about it. Errors mean
736  * death.
737  */
738  ConnectDatabase(fout, dopt.dbname, dopt.pghost, dopt.pgport, dopt.username, prompt_password);
739  setup_connection(fout, dumpencoding, dumpsnapshot, use_role);
740 
741  /*
742  * Disable security label support if server version < v9.1.x (prevents
743  * access to nonexistent pg_seclabel catalog)
744  */
745  if (fout->remoteVersion < 90100)
746  dopt.no_security_labels = 1;
747 
748  /*
749  * On hot standbys, never try to dump unlogged table data, since it will
750  * just throw an error.
751  */
752  if (fout->isStandby)
753  dopt.no_unlogged_table_data = true;
754 
755  /* Select the appropriate subquery to convert user IDs to names */
756  if (fout->remoteVersion >= 80100)
757  username_subquery = "SELECT rolname FROM pg_catalog.pg_roles WHERE oid =";
758  else
759  username_subquery = "SELECT usename FROM pg_catalog.pg_user WHERE usesysid =";
760 
761  /* check the version for the synchronized snapshots feature */
762  if (numWorkers > 1 && fout->remoteVersion < 90200
763  && !dopt.no_synchronized_snapshots)
764  fatal("Synchronized snapshots are not supported by this server version.\n"
765  "Run with --no-synchronized-snapshots instead if you do not need\n"
766  "synchronized snapshots.");
767 
768  /* check the version when a snapshot is explicitly specified by user */
769  if (dumpsnapshot && fout->remoteVersion < 90200)
770  fatal("Exported snapshots are not supported by this server version.");
771 
772  /*
773  * Find the last built-in OID, if needed (prior to 8.1)
774  *
775  * With 8.1 and above, we can just use FirstNormalObjectId - 1.
776  */
777  if (fout->remoteVersion < 80100)
779  else
781 
782  pg_log_info("last built-in OID is %u", g_last_builtin_oid);
783 
784  /* Expand schema selection patterns into OID lists */
785  if (schema_include_patterns.head != NULL)
786  {
787  expand_schema_name_patterns(fout, &schema_include_patterns,
788  &schema_include_oids,
789  strict_names);
790  if (schema_include_oids.head == NULL)
791  fatal("no matching schemas were found");
792  }
793  expand_schema_name_patterns(fout, &schema_exclude_patterns,
794  &schema_exclude_oids,
795  false);
796  /* non-matching exclusion patterns aren't an error */
797 
798  /* Expand table selection patterns into OID lists */
799  if (table_include_patterns.head != NULL)
800  {
801  expand_table_name_patterns(fout, &table_include_patterns,
802  &table_include_oids,
803  strict_names);
804  if (table_include_oids.head == NULL)
805  fatal("no matching tables were found");
806  }
807  expand_table_name_patterns(fout, &table_exclude_patterns,
808  &table_exclude_oids,
809  false);
810 
811  expand_table_name_patterns(fout, &tabledata_exclude_patterns,
812  &tabledata_exclude_oids,
813  false);
814 
815  /* non-matching exclusion patterns aren't an error */
816 
817  /*
818  * Dumping blobs is the default for dumps where an inclusion switch is not
819  * used (an "include everything" dump). -B can be used to exclude blobs
820  * from those dumps. -b can be used to include blobs even when an
821  * inclusion switch is used.
822  *
823  * -s means "schema only" and blobs are data, not schema, so we never
824  * include blobs when -s is used.
825  */
826  if (dopt.include_everything && !dopt.schemaOnly && !dopt.dontOutputBlobs)
827  dopt.outputBlobs = true;
828 
829  /*
830  * Now scan the database and create DumpableObject structs for all the
831  * objects we intend to dump.
832  */
833  tblinfo = getSchemaData(fout, &numTables);
834 
835  if (fout->remoteVersion < 80400)
836  guessConstraintInheritance(tblinfo, numTables);
837 
838  if (!dopt.schemaOnly)
839  {
840  getTableData(&dopt, tblinfo, numTables, 0);
842  if (dopt.dataOnly)
844  }
845 
846  if (dopt.schemaOnly && dopt.sequence_data)
847  getTableData(&dopt, tblinfo, numTables, RELKIND_SEQUENCE);
848 
849  /*
850  * In binary-upgrade mode, we do not have to worry about the actual blob
851  * data or the associated metadata that resides in the pg_largeobject and
852  * pg_largeobject_metadata tables, respectively.
853  *
854  * However, we do need to collect blob information as there may be
855  * comments or other information on blobs that we do need to dump out.
856  */
857  if (dopt.outputBlobs || dopt.binary_upgrade)
858  getBlobs(fout);
859 
860  /*
861  * Collect dependency data to assist in ordering the objects.
862  */
863  getDependencies(fout);
864 
865  /* Lastly, create dummy objects to represent the section boundaries */
866  boundaryObjs = createBoundaryObjects();
867 
868  /* Get pointers to all the known DumpableObjects */
869  getDumpableObjects(&dobjs, &numObjs);
870 
871  /*
872  * Add dummy dependencies to enforce the dump section ordering.
873  */
874  addBoundaryDependencies(dobjs, numObjs, boundaryObjs);
875 
876  /*
877  * Sort the objects into a safe dump order (no forward references).
878  *
879  * We rely on dependency information to help us determine a safe order, so
880  * the initial sort is mostly for cosmetic purposes: we sort by name to
881  * ensure that logically identical schemas will dump identically.
882  */
883  sortDumpableObjectsByTypeName(dobjs, numObjs);
884 
885  sortDumpableObjects(dobjs, numObjs,
886  boundaryObjs[0].dumpId, boundaryObjs[1].dumpId);
887 
888  /*
889  * Create archive TOC entries for all the objects to be dumped, in a safe
890  * order.
891  */
892 
893  /* First the special ENCODING, STDSTRINGS, and SEARCHPATH entries. */
894  dumpEncoding(fout);
895  dumpStdStrings(fout);
896  dumpSearchPath(fout);
897 
898  /* The database items are always next, unless we don't want them at all */
899  if (dopt.outputCreateDB)
900  dumpDatabase(fout);
901 
902  /* Now the rearrangeable objects. */
903  for (i = 0; i < numObjs; i++)
904  dumpDumpableObject(fout, dobjs[i]);
905 
906  /*
907  * Set up options info to ensure we dump what we want.
908  */
909  ropt = NewRestoreOptions();
910  ropt->filename = filename;
911 
912  /* if you change this list, see dumpOptionsFromRestoreOptions */
913  ropt->dropSchema = dopt.outputClean;
914  ropt->dataOnly = dopt.dataOnly;
915  ropt->schemaOnly = dopt.schemaOnly;
916  ropt->if_exists = dopt.if_exists;
917  ropt->column_inserts = dopt.column_inserts;
918  ropt->dumpSections = dopt.dumpSections;
919  ropt->aclsSkip = dopt.aclsSkip;
920  ropt->superuser = dopt.outputSuperuser;
921  ropt->createDB = dopt.outputCreateDB;
922  ropt->noOwner = dopt.outputNoOwner;
923  ropt->noTablespace = dopt.outputNoTablespaces;
924  ropt->disable_triggers = dopt.disable_triggers;
925  ropt->use_setsessauth = dopt.use_setsessauth;
927  ropt->dump_inserts = dopt.dump_inserts;
928  ropt->no_comments = dopt.no_comments;
929  ropt->no_publications = dopt.no_publications;
931  ropt->no_subscriptions = dopt.no_subscriptions;
932  ropt->lockWaitTimeout = dopt.lockWaitTimeout;
935  ropt->sequence_data = dopt.sequence_data;
936  ropt->binary_upgrade = dopt.binary_upgrade;
937 
938  if (compressLevel == -1)
939  ropt->compression = 0;
940  else
941  ropt->compression = compressLevel;
942 
943  ropt->suppressDumpWarnings = true; /* We've already shown them */
944 
945  SetArchiveOptions(fout, &dopt, ropt);
946 
947  /* Mark which entries should be output */
949 
950  /*
951  * The archive's TOC entries are now marked as to which ones will actually
952  * be output, so we can set up their dependency lists properly. This isn't
953  * necessary for plain-text output, though.
954  */
955  if (!plainText)
957 
958  /*
959  * And finally we can do the actual output.
960  *
961  * Note: for non-plain-text output formats, the output file is written
962  * inside CloseArchive(). This is, um, bizarre; but not worth changing
963  * right now.
964  */
965  if (plainText)
966  RestoreArchive(fout);
967 
968  CloseArchive(fout);
969 
970  exit_nicely(0);
971 }
972 
973 
/*
 * help
 *		Print the pg_dump usage message to stdout.
 *
 * All user-visible strings pass through _() for NLS translation, so the
 * literal text must match the message catalogs exactly.
 */
static void
help(const char *progname)
{
	printf(_("%s dumps a database as a text file or to other formats.\n\n"), progname);
	printf(_("Usage:\n"));
	printf(_("  %s [OPTION]... [DBNAME]\n"), progname);

	printf(_("\nGeneral options:\n"));
	printf(_("  -f, --file=FILENAME          output file or directory name\n"));
	printf(_("  -F, --format=c|d|t|p         output file format (custom, directory, tar,\n"
			 "                               plain text (default))\n"));
	printf(_("  -j, --jobs=NUM               use this many parallel jobs to dump\n"));
	printf(_("  -v, --verbose                verbose mode\n"));
	printf(_("  -V, --version                output version information, then exit\n"));
	printf(_("  -Z, --compress=0-9           compression level for compressed formats\n"));
	printf(_("  --lock-wait-timeout=TIMEOUT  fail after waiting TIMEOUT for a table lock\n"));
	printf(_("  --no-sync                    do not wait for changes to be written safely to disk\n"));
	printf(_("  -?, --help                   show this help, then exit\n"));

	printf(_("\nOptions controlling the output content:\n"));
	printf(_("  -a, --data-only              dump only the data, not the schema\n"));
	printf(_("  -b, --blobs                  include large objects in dump\n"));
	printf(_("  -B, --no-blobs               exclude large objects in dump\n"));
	printf(_("  -c, --clean                  clean (drop) database objects before recreating\n"));
	printf(_("  -C, --create                 include commands to create database in dump\n"));
	printf(_("  -E, --encoding=ENCODING      dump the data in encoding ENCODING\n"));
	printf(_("  -n, --schema=PATTERN         dump the specified schema(s) only\n"));
	printf(_("  -N, --exclude-schema=PATTERN do NOT dump the specified schema(s)\n"));
	printf(_("  -O, --no-owner               skip restoration of object ownership in\n"
			 "                               plain-text format\n"));
	printf(_("  -s, --schema-only            dump only the schema, no data\n"));
	printf(_("  -S, --superuser=NAME         superuser user name to use in plain-text format\n"));
	printf(_("  -t, --table=PATTERN          dump the specified table(s) only\n"));
	printf(_("  -T, --exclude-table=PATTERN  do NOT dump the specified table(s)\n"));
	printf(_("  -x, --no-privileges          do not dump privileges (grant/revoke)\n"));
	printf(_("  --binary-upgrade             for use by upgrade utilities only\n"));
	printf(_("  --column-inserts             dump data as INSERT commands with column names\n"));
	printf(_("  --disable-dollar-quoting     disable dollar quoting, use SQL standard quoting\n"));
	printf(_("  --disable-triggers           disable triggers during data-only restore\n"));
	printf(_("  --enable-row-security        enable row security (dump only content user has\n"
			 "                               access to)\n"));
	printf(_("  --exclude-table-data=PATTERN do NOT dump data for the specified table(s)\n"));
	printf(_("  --extra-float-digits=NUM     override default setting for extra_float_digits\n"));
	printf(_("  --if-exists                  use IF EXISTS when dropping objects\n"));
	printf(_("  --inserts                    dump data as INSERT commands, rather than COPY\n"));
	printf(_("  --load-via-partition-root    load partitions via the root table\n"));
	printf(_("  --no-comments                do not dump comments\n"));
	printf(_("  --no-publications            do not dump publications\n"));
	printf(_("  --no-security-labels         do not dump security label assignments\n"));
	printf(_("  --no-subscriptions           do not dump subscriptions\n"));
	printf(_("  --no-synchronized-snapshots  do not use synchronized snapshots in parallel jobs\n"));
	printf(_("  --no-tablespaces             do not dump tablespace assignments\n"));
	printf(_("  --no-unlogged-table-data     do not dump unlogged table data\n"));
	printf(_("  --on-conflict-do-nothing     add ON CONFLICT DO NOTHING to INSERT commands\n"));
	printf(_("  --quote-all-identifiers      quote all identifiers, even if not key words\n"));
	printf(_("  --rows-per-insert=NROWS      number of rows per INSERT; implies --inserts\n"));
	printf(_("  --section=SECTION            dump named section (pre-data, data, or post-data)\n"));
	printf(_("  --serializable-deferrable    wait until the dump can run without anomalies\n"));
	printf(_("  --snapshot=SNAPSHOT          use given snapshot for the dump\n"));
	printf(_("  --strict-names               require table and/or schema include patterns to\n"
			 "                               match at least one entity each\n"));
	printf(_("  --use-set-session-authorization\n"
			 "                               use SET SESSION AUTHORIZATION commands instead of\n"
			 "                               ALTER OWNER commands to set ownership\n"));

	printf(_("\nConnection options:\n"));
	printf(_("  -d, --dbname=DBNAME      database to dump\n"));
	printf(_("  -h, --host=HOSTNAME      database server host or socket directory\n"));
	printf(_("  -p, --port=PORT          database server port number\n"));
	printf(_("  -U, --username=NAME      connect as specified database user\n"));
	printf(_("  -w, --no-password        never prompt for password\n"));
	printf(_("  -W, --password           force password prompt (should happen automatically)\n"));
	printf(_("  --role=ROLENAME          do SET ROLE before dump\n"));

	printf(_("\nIf no database name is supplied, then the PGDATABASE environment\n"
			 "variable value is used.\n\n"));
	printf(_("Report bugs to <pgsql-bugs@lists.postgresql.org>.\n"));
}
1052 
1053 static void
1054 setup_connection(Archive *AH, const char *dumpencoding,
1055  const char *dumpsnapshot, char *use_role)
1056 {
1057  DumpOptions *dopt = AH->dopt;
1058  PGconn *conn = GetConnection(AH);
1059  const char *std_strings;
1060 
1062 
1063  /*
1064  * Set the client encoding if requested.
1065  */
1066  if (dumpencoding)
1067  {
1068  if (PQsetClientEncoding(conn, dumpencoding) < 0)
1069  fatal("invalid client encoding \"%s\" specified",
1070  dumpencoding);
1071  }
1072 
1073  /*
1074  * Get the active encoding and the standard_conforming_strings setting, so
1075  * we know how to escape strings.
1076  */
1077  AH->encoding = PQclientEncoding(conn);
1078 
1079  std_strings = PQparameterStatus(conn, "standard_conforming_strings");
1080  AH->std_strings = (std_strings && strcmp(std_strings, "on") == 0);
1081 
1082  /*
1083  * Set the role if requested. In a parallel dump worker, we'll be passed
1084  * use_role == NULL, but AH->use_role is already set (if user specified it
1085  * originally) and we should use that.
1086  */
1087  if (!use_role && AH->use_role)
1088  use_role = AH->use_role;
1089 
1090  /* Set the role if requested */
1091  if (use_role && AH->remoteVersion >= 80100)
1092  {
1093  PQExpBuffer query = createPQExpBuffer();
1094 
1095  appendPQExpBuffer(query, "SET ROLE %s", fmtId(use_role));
1096  ExecuteSqlStatement(AH, query->data);
1097  destroyPQExpBuffer(query);
1098 
1099  /* save it for possible later use by parallel workers */
1100  if (!AH->use_role)
1101  AH->use_role = pg_strdup(use_role);
1102  }
1103 
1104  /* Set the datestyle to ISO to ensure the dump's portability */
1105  ExecuteSqlStatement(AH, "SET DATESTYLE = ISO");
1106 
1107  /* Likewise, avoid using sql_standard intervalstyle */
1108  if (AH->remoteVersion >= 80400)
1109  ExecuteSqlStatement(AH, "SET INTERVALSTYLE = POSTGRES");
1110 
1111  /*
1112  * Use an explicitly specified extra_float_digits if it has been provided.
1113  * Otherwise, set extra_float_digits so that we can dump float data
1114  * exactly (given correctly implemented float I/O code, anyway).
1115  */
1117  {
1119 
1120  appendPQExpBuffer(q, "SET extra_float_digits TO %d",
1122  ExecuteSqlStatement(AH, q->data);
1123  destroyPQExpBuffer(q);
1124  }
1125  else if (AH->remoteVersion >= 90000)
1126  ExecuteSqlStatement(AH, "SET extra_float_digits TO 3");
1127  else
1128  ExecuteSqlStatement(AH, "SET extra_float_digits TO 2");
1129 
1130  /*
1131  * If synchronized scanning is supported, disable it, to prevent
1132  * unpredictable changes in row ordering across a dump and reload.
1133  */
1134  if (AH->remoteVersion >= 80300)
1135  ExecuteSqlStatement(AH, "SET synchronize_seqscans TO off");
1136 
1137  /*
1138  * Disable timeouts if supported.
1139  */
1140  ExecuteSqlStatement(AH, "SET statement_timeout = 0");
1141  if (AH->remoteVersion >= 90300)
1142  ExecuteSqlStatement(AH, "SET lock_timeout = 0");
1143  if (AH->remoteVersion >= 90600)
1144  ExecuteSqlStatement(AH, "SET idle_in_transaction_session_timeout = 0");
1145 
1146  /*
1147  * Quote all identifiers, if requested.
1148  */
1149  if (quote_all_identifiers && AH->remoteVersion >= 90100)
1150  ExecuteSqlStatement(AH, "SET quote_all_identifiers = true");
1151 
1152  /*
1153  * Adjust row-security mode, if supported.
1154  */
1155  if (AH->remoteVersion >= 90500)
1156  {
1157  if (dopt->enable_row_security)
1158  ExecuteSqlStatement(AH, "SET row_security = on");
1159  else
1160  ExecuteSqlStatement(AH, "SET row_security = off");
1161  }
1162 
1163  /*
1164  * Start transaction-snapshot mode transaction to dump consistent data.
1165  */
1166  ExecuteSqlStatement(AH, "BEGIN");
1167  if (AH->remoteVersion >= 90100)
1168  {
1169  /*
1170  * To support the combination of serializable_deferrable with the jobs
1171  * option we use REPEATABLE READ for the worker connections that are
1172  * passed a snapshot. As long as the snapshot is acquired in a
1173  * SERIALIZABLE, READ ONLY, DEFERRABLE transaction, its use within a
1174  * REPEATABLE READ transaction provides the appropriate integrity
1175  * guarantees. This is a kluge, but safe for back-patching.
1176  */
1177  if (dopt->serializable_deferrable && AH->sync_snapshot_id == NULL)
1179  "SET TRANSACTION ISOLATION LEVEL "
1180  "SERIALIZABLE, READ ONLY, DEFERRABLE");
1181  else
1183  "SET TRANSACTION ISOLATION LEVEL "
1184  "REPEATABLE READ, READ ONLY");
1185  }
1186  else
1187  {
1189  "SET TRANSACTION ISOLATION LEVEL "
1190  "SERIALIZABLE, READ ONLY");
1191  }
1192 
1193  /*
1194  * If user specified a snapshot to use, select that. In a parallel dump
1195  * worker, we'll be passed dumpsnapshot == NULL, but AH->sync_snapshot_id
1196  * is already set (if the server can handle it) and we should use that.
1197  */
1198  if (dumpsnapshot)
1199  AH->sync_snapshot_id = pg_strdup(dumpsnapshot);
1200 
1201  if (AH->sync_snapshot_id)
1202  {
1203  PQExpBuffer query = createPQExpBuffer();
1204 
1205  appendPQExpBufferStr(query, "SET TRANSACTION SNAPSHOT ");
1206  appendStringLiteralConn(query, AH->sync_snapshot_id, conn);
1207  ExecuteSqlStatement(AH, query->data);
1208  destroyPQExpBuffer(query);
1209  }
1210  else if (AH->numWorkers > 1 &&
1211  AH->remoteVersion >= 90200 &&
1213  {
1214  if (AH->isStandby && AH->remoteVersion < 100000)
1215  fatal("Synchronized snapshots on standby servers are not supported by this server version.\n"
1216  "Run with --no-synchronized-snapshots instead if you do not need\n"
1217  "synchronized snapshots.");
1218 
1219 
1221  }
1222 }
1223 
1224 /* Set up connection for a parallel worker process */
1225 static void
1227 {
1228  /*
1229  * We want to re-select all the same values the master connection is
1230  * using. We'll have inherited directly-usable values in
1231  * AH->sync_snapshot_id and AH->use_role, but we need to translate the
1232  * inherited encoding value back to a string to pass to setup_connection.
1233  */
1234  setup_connection(AH,
1236  NULL,
1237  NULL);
1238 }
1239 
1240 static char *
1242 {
1243  char *query = "SELECT pg_catalog.pg_export_snapshot()";
1244  char *result;
1245  PGresult *res;
1246 
1247  res = ExecuteSqlQueryForSingleRow(fout, query);
1248  result = pg_strdup(PQgetvalue(res, 0, 0));
1249  PQclear(res);
1250 
1251  return result;
1252 }
1253 
1254 static ArchiveFormat
1256 {
1257  ArchiveFormat archiveFormat;
1258 
1259  *mode = archModeWrite;
1260 
1261  if (pg_strcasecmp(format, "a") == 0 || pg_strcasecmp(format, "append") == 0)
1262  {
1263  /* This is used by pg_dumpall, and is not documented */
1264  archiveFormat = archNull;
1265  *mode = archModeAppend;
1266  }
1267  else if (pg_strcasecmp(format, "c") == 0)
1268  archiveFormat = archCustom;
1269  else if (pg_strcasecmp(format, "custom") == 0)
1270  archiveFormat = archCustom;
1271  else if (pg_strcasecmp(format, "d") == 0)
1272  archiveFormat = archDirectory;
1273  else if (pg_strcasecmp(format, "directory") == 0)
1274  archiveFormat = archDirectory;
1275  else if (pg_strcasecmp(format, "p") == 0)
1276  archiveFormat = archNull;
1277  else if (pg_strcasecmp(format, "plain") == 0)
1278  archiveFormat = archNull;
1279  else if (pg_strcasecmp(format, "t") == 0)
1280  archiveFormat = archTar;
1281  else if (pg_strcasecmp(format, "tar") == 0)
1282  archiveFormat = archTar;
1283  else
1284  fatal("invalid output format \"%s\" specified", format);
1285  return archiveFormat;
1286 }
1287 
1288 /*
1289  * Find the OIDs of all schemas matching the given list of patterns,
1290  * and append them to the given OID list.
1291  */
1292 static void
1294  SimpleStringList *patterns,
1295  SimpleOidList *oids,
1296  bool strict_names)
1297 {
1298  PQExpBuffer query;
1299  PGresult *res;
1300  SimpleStringListCell *cell;
1301  int i;
1302 
1303  if (patterns->head == NULL)
1304  return; /* nothing to do */
1305 
1306  query = createPQExpBuffer();
1307 
1308  /*
1309  * The loop below runs multiple SELECTs might sometimes result in
1310  * duplicate entries in the OID list, but we don't care.
1311  */
1312 
1313  for (cell = patterns->head; cell; cell = cell->next)
1314  {
1315  appendPQExpBufferStr(query,
1316  "SELECT oid FROM pg_catalog.pg_namespace n\n");
1317  processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1318  false, NULL, "n.nspname", NULL, NULL);
1319 
1320  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1321  if (strict_names && PQntuples(res) == 0)
1322  fatal("no matching schemas were found for pattern \"%s\"", cell->val);
1323 
1324  for (i = 0; i < PQntuples(res); i++)
1325  {
1326  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1327  }
1328 
1329  PQclear(res);
1330  resetPQExpBuffer(query);
1331  }
1332 
1333  destroyPQExpBuffer(query);
1334 }
1335 
1336 /*
1337  * Find the OIDs of all tables matching the given list of patterns,
1338  * and append them to the given OID list. See also expand_dbname_patterns()
1339  * in pg_dumpall.c
1340  */
1341 static void
1343  SimpleStringList *patterns, SimpleOidList *oids,
1344  bool strict_names)
1345 {
1346  PQExpBuffer query;
1347  PGresult *res;
1348  SimpleStringListCell *cell;
1349  int i;
1350 
1351  if (patterns->head == NULL)
1352  return; /* nothing to do */
1353 
1354  query = createPQExpBuffer();
1355 
1356  /*
1357  * this might sometimes result in duplicate entries in the OID list, but
1358  * we don't care.
1359  */
1360 
1361  for (cell = patterns->head; cell; cell = cell->next)
1362  {
1363  /*
1364  * Query must remain ABSOLUTELY devoid of unqualified names. This
1365  * would be unnecessary given a pg_table_is_visible() variant taking a
1366  * search_path argument.
1367  */
1368  appendPQExpBuffer(query,
1369  "SELECT c.oid"
1370  "\nFROM pg_catalog.pg_class c"
1371  "\n LEFT JOIN pg_catalog.pg_namespace n"
1372  "\n ON n.oid OPERATOR(pg_catalog.=) c.relnamespace"
1373  "\nWHERE c.relkind OPERATOR(pg_catalog.=) ANY"
1374  "\n (array['%c', '%c', '%c', '%c', '%c', '%c'])\n",
1375  RELKIND_RELATION, RELKIND_SEQUENCE, RELKIND_VIEW,
1376  RELKIND_MATVIEW, RELKIND_FOREIGN_TABLE,
1377  RELKIND_PARTITIONED_TABLE);
1378  processSQLNamePattern(GetConnection(fout), query, cell->val, true,
1379  false, "n.nspname", "c.relname", NULL,
1380  "pg_catalog.pg_table_is_visible(c.oid)");
1381 
1382  ExecuteSqlStatement(fout, "RESET search_path");
1383  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1386  if (strict_names && PQntuples(res) == 0)
1387  fatal("no matching tables were found for pattern \"%s\"", cell->val);
1388 
1389  for (i = 0; i < PQntuples(res); i++)
1390  {
1391  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1392  }
1393 
1394  PQclear(res);
1395  resetPQExpBuffer(query);
1396  }
1397 
1398  destroyPQExpBuffer(query);
1399 }
1400 
1401 /*
1402  * checkExtensionMembership
1403  * Determine whether object is an extension member, and if so,
1404  * record an appropriate dependency and set the object's dump flag.
1405  *
1406  * It's important to call this for each object that could be an extension
1407  * member. Generally, we integrate this with determining the object's
1408  * to-be-dumped-ness, since extension membership overrides other rules for that.
1409  *
1410  * Returns true if object is an extension member, else false.
1411  */
1412 static bool
1414 {
1415  ExtensionInfo *ext = findOwningExtension(dobj->catId);
1416 
1417  if (ext == NULL)
1418  return false;
1419 
1420  dobj->ext_member = true;
1421 
1422  /* Record dependency so that getDependencies needn't deal with that */
1423  addObjectDependency(dobj, ext->dobj.dumpId);
1424 
1425  /*
1426  * In 9.6 and above, mark the member object to have any non-initial ACL,
1427  * policies, and security labels dumped.
1428  *
1429  * Note that any initial ACLs (see pg_init_privs) will be removed when we
1430  * extract the information about the object. We don't provide support for
1431  * initial policies and security labels and it seems unlikely for those to
1432  * ever exist, but we may have to revisit this later.
1433  *
1434  * Prior to 9.6, we do not include any extension member components.
1435  *
1436  * In binary upgrades, we still dump all components of the members
1437  * individually, since the idea is to exactly reproduce the database
1438  * contents rather than replace the extension contents with something
1439  * different.
1440  */
1441  if (fout->dopt->binary_upgrade)
1442  dobj->dump = ext->dobj.dump;
1443  else
1444  {
1445  if (fout->remoteVersion < 90600)
1446  dobj->dump = DUMP_COMPONENT_NONE;
1447  else
1448  dobj->dump = ext->dobj.dump_contains & (DUMP_COMPONENT_ACL |
1451  }
1452 
1453  return true;
1454 }
1455 
1456 /*
1457  * selectDumpableNamespace: policy-setting subroutine
1458  * Mark a namespace as to be dumped or not
1459  */
1460 static void
1462 {
1463  /*
1464  * If specific tables are being dumped, do not dump any complete
1465  * namespaces. If specific namespaces are being dumped, dump just those
1466  * namespaces. Otherwise, dump all non-system namespaces.
1467  */
1468  if (table_include_oids.head != NULL)
1469  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1470  else if (schema_include_oids.head != NULL)
1471  nsinfo->dobj.dump_contains = nsinfo->dobj.dump =
1472  simple_oid_list_member(&schema_include_oids,
1473  nsinfo->dobj.catId.oid) ?
1475  else if (fout->remoteVersion >= 90600 &&
1476  strcmp(nsinfo->dobj.name, "pg_catalog") == 0)
1477  {
1478  /*
1479  * In 9.6 and above, we dump out any ACLs defined in pg_catalog, if
1480  * they are interesting (and not the original ACLs which were set at
1481  * initdb time, see pg_init_privs).
1482  */
1483  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
1484  }
1485  else if (strncmp(nsinfo->dobj.name, "pg_", 3) == 0 ||
1486  strcmp(nsinfo->dobj.name, "information_schema") == 0)
1487  {
1488  /* Other system schemas don't get dumped */
1489  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1490  }
1491  else if (strcmp(nsinfo->dobj.name, "public") == 0)
1492  {
1493  /*
1494  * The public schema is a strange beast that sits in a sort of
1495  * no-mans-land between being a system object and a user object. We
1496  * don't want to dump creation or comment commands for it, because
1497  * that complicates matters for non-superuser use of pg_dump. But we
1498  * should dump any ACL changes that have occurred for it, and of
1499  * course we should dump contained objects.
1500  */
1501  nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
1503  }
1504  else
1505  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ALL;
1506 
1507  /*
1508  * In any case, a namespace can be excluded by an exclusion switch
1509  */
1510  if (nsinfo->dobj.dump_contains &&
1511  simple_oid_list_member(&schema_exclude_oids,
1512  nsinfo->dobj.catId.oid))
1513  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1514 
1515  /*
1516  * If the schema belongs to an extension, allow extension membership to
1517  * override the dump decision for the schema itself. However, this does
1518  * not change dump_contains, so this won't change what we do with objects
1519  * within the schema. (If they belong to the extension, they'll get
1520  * suppressed by it, otherwise not.)
1521  */
1522  (void) checkExtensionMembership(&nsinfo->dobj, fout);
1523 }
1524 
1525 /*
1526  * selectDumpableTable: policy-setting subroutine
1527  * Mark a table as to be dumped or not
1528  */
1529 static void
1531 {
1532  if (checkExtensionMembership(&tbinfo->dobj, fout))
1533  return; /* extension membership overrides all else */
1534 
1535  /*
1536  * If specific tables are being dumped, dump just those tables; else, dump
1537  * according to the parent namespace's dump flag.
1538  */
1539  if (table_include_oids.head != NULL)
1540  tbinfo->dobj.dump = simple_oid_list_member(&table_include_oids,
1541  tbinfo->dobj.catId.oid) ?
1543  else
1544  tbinfo->dobj.dump = tbinfo->dobj.namespace->dobj.dump_contains;
1545 
1546  /*
1547  * In any case, a table can be excluded by an exclusion switch
1548  */
1549  if (tbinfo->dobj.dump &&
1550  simple_oid_list_member(&table_exclude_oids,
1551  tbinfo->dobj.catId.oid))
1552  tbinfo->dobj.dump = DUMP_COMPONENT_NONE;
1553 }
1554 
1555 /*
1556  * selectDumpableType: policy-setting subroutine
1557  * Mark a type as to be dumped or not
1558  *
1559  * If it's a table's rowtype or an autogenerated array type, we also apply a
1560  * special type code to facilitate sorting into the desired order. (We don't
1561  * want to consider those to be ordinary types because that would bring tables
1562  * up into the datatype part of the dump order.) We still set the object's
1563  * dump flag; that's not going to cause the dummy type to be dumped, but we
1564  * need it so that casts involving such types will be dumped correctly -- see
1565  * dumpCast. This means the flag should be set the same as for the underlying
1566  * object (the table or base type).
1567  */
1568 static void
1570 {
1571  /* skip complex types, except for standalone composite types */
1572  if (OidIsValid(tyinfo->typrelid) &&
1573  tyinfo->typrelkind != RELKIND_COMPOSITE_TYPE)
1574  {
1575  TableInfo *tytable = findTableByOid(tyinfo->typrelid);
1576 
1577  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1578  if (tytable != NULL)
1579  tyinfo->dobj.dump = tytable->dobj.dump;
1580  else
1581  tyinfo->dobj.dump = DUMP_COMPONENT_NONE;
1582  return;
1583  }
1584 
1585  /* skip auto-generated array types */
1586  if (tyinfo->isArray)
1587  {
1588  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1589 
1590  /*
1591  * Fall through to set the dump flag; we assume that the subsequent
1592  * rules will do the same thing as they would for the array's base
1593  * type. (We cannot reliably look up the base type here, since
1594  * getTypes may not have processed it yet.)
1595  */
1596  }
1597 
1598  if (checkExtensionMembership(&tyinfo->dobj, fout))
1599  return; /* extension membership overrides all else */
1600 
1601  /* Dump based on if the contents of the namespace are being dumped */
1602  tyinfo->dobj.dump = tyinfo->dobj.namespace->dobj.dump_contains;
1603 }
1604 
1605 /*
1606  * selectDumpableDefaultACL: policy-setting subroutine
1607  * Mark a default ACL as to be dumped or not
1608  *
1609  * For per-schema default ACLs, dump if the schema is to be dumped.
1610  * Otherwise dump if we are dumping "everything". Note that dataOnly
1611  * and aclsSkip are checked separately.
1612  */
1613 static void
1615 {
1616  /* Default ACLs can't be extension members */
1617 
1618  if (dinfo->dobj.namespace)
1619  /* default ACLs are considered part of the namespace */
1620  dinfo->dobj.dump = dinfo->dobj.namespace->dobj.dump_contains;
1621  else
1622  dinfo->dobj.dump = dopt->include_everything ?
1624 }
1625 
1626 /*
1627  * selectDumpableCast: policy-setting subroutine
1628  * Mark a cast as to be dumped or not
1629  *
1630  * Casts do not belong to any particular namespace (since they haven't got
1631  * names), nor do they have identifiable owners. To distinguish user-defined
1632  * casts from built-in ones, we must resort to checking whether the cast's
1633  * OID is in the range reserved for initdb.
1634  */
1635 static void
1637 {
1638  if (checkExtensionMembership(&cast->dobj, fout))
1639  return; /* extension membership overrides all else */
1640 
1641  /*
1642  * This would be DUMP_COMPONENT_ACL for from-initdb casts, but they do not
1643  * support ACLs currently.
1644  */
1645  if (cast->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1646  cast->dobj.dump = DUMP_COMPONENT_NONE;
1647  else
1648  cast->dobj.dump = fout->dopt->include_everything ?
1650 }
1651 
1652 /*
1653  * selectDumpableProcLang: policy-setting subroutine
1654  * Mark a procedural language as to be dumped or not
1655  *
1656  * Procedural languages do not belong to any particular namespace. To
1657  * identify built-in languages, we must resort to checking whether the
1658  * language's OID is in the range reserved for initdb.
1659  */
1660 static void
1662 {
1663  if (checkExtensionMembership(&plang->dobj, fout))
1664  return; /* extension membership overrides all else */
1665 
1666  /*
1667  * Only include procedural languages when we are dumping everything.
1668  *
1669  * For from-initdb procedural languages, only include ACLs, as we do for
1670  * the pg_catalog namespace. We need this because procedural languages do
1671  * not live in any namespace.
1672  */
1673  if (!fout->dopt->include_everything)
1674  plang->dobj.dump = DUMP_COMPONENT_NONE;
1675  else
1676  {
1677  if (plang->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1678  plang->dobj.dump = fout->remoteVersion < 90600 ?
1680  else
1681  plang->dobj.dump = DUMP_COMPONENT_ALL;
1682  }
1683 }
1684 
1685 /*
1686  * selectDumpableAccessMethod: policy-setting subroutine
1687  * Mark an access method as to be dumped or not
1688  *
1689  * Access methods do not belong to any particular namespace. To identify
1690  * built-in access methods, we must resort to checking whether the
1691  * method's OID is in the range reserved for initdb.
1692  */
1693 static void
1695 {
1696  if (checkExtensionMembership(&method->dobj, fout))
1697  return; /* extension membership overrides all else */
1698 
1699  /*
1700  * This would be DUMP_COMPONENT_ACL for from-initdb access methods, but
1701  * they do not support ACLs currently.
1702  */
1703  if (method->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1704  method->dobj.dump = DUMP_COMPONENT_NONE;
1705  else
1706  method->dobj.dump = fout->dopt->include_everything ?
1708 }
1709 
1710 /*
1711  * selectDumpableExtension: policy-setting subroutine
1712  * Mark an extension as to be dumped or not
1713  *
1714  * Built-in extensions should be skipped except for checking ACLs, since we
1715  * assume those will already be installed in the target database. We identify
1716  * such extensions by their having OIDs in the range reserved for initdb.
1717  * We dump all user-added extensions by default, or none of them if
1718  * include_everything is false (i.e., a --schema or --table switch was given).
1719  */
1720 static void
1722 {
1723  /*
1724  * Use DUMP_COMPONENT_ACL for built-in extensions, to allow users to
1725  * change permissions on their member objects, if they wish to, and have
1726  * those changes preserved.
1727  */
1728  if (extinfo->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1729  extinfo->dobj.dump = extinfo->dobj.dump_contains = DUMP_COMPONENT_ACL;
1730  else
1731  extinfo->dobj.dump = extinfo->dobj.dump_contains =
1734 }
1735 
1736 /*
1737  * selectDumpablePublicationTable: policy-setting subroutine
1738  * Mark a publication table as to be dumped or not
1739  *
1740  * Publication tables have schemas, but those are ignored in decision making,
1741  * because publications are only dumped when we are dumping everything.
1742  */
1743 static void
1745 {
1746  if (checkExtensionMembership(dobj, fout))
1747  return; /* extension membership overrides all else */
1748 
1749  dobj->dump = fout->dopt->include_everything ?
1751 }
1752 
1753 /*
1754  * selectDumpableObject: policy-setting subroutine
1755  * Mark a generic dumpable object as to be dumped or not
1756  *
1757  * Use this only for object types without a special-case routine above.
1758  */
1759 static void
1761 {
1762  if (checkExtensionMembership(dobj, fout))
1763  return; /* extension membership overrides all else */
1764 
1765  /*
1766  * Default policy is to dump if parent namespace is dumpable, or for
1767  * non-namespace-associated items, dump if we're dumping "everything".
1768  */
1769  if (dobj->namespace)
1770  dobj->dump = dobj->namespace->dobj.dump_contains;
1771  else
1772  dobj->dump = fout->dopt->include_everything ?
1774 }
1775 
1776 /*
1777  * Dump a table's contents for loading using the COPY command
1778  * - this routine is called by the Archiver when it wants the table
1779  * to be dumped.
1780  */
1781 
1782 static int
1783 dumpTableData_copy(Archive *fout, void *dcontext)
1784 {
1785  TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
1786  TableInfo *tbinfo = tdinfo->tdtable;
1787  const char *classname = tbinfo->dobj.name;
1789 
1790  /*
1791  * Note: can't use getThreadLocalPQExpBuffer() here, we're calling fmtId
1792  * which uses it already.
1793  */
1794  PQExpBuffer clistBuf = createPQExpBuffer();
1795  PGconn *conn = GetConnection(fout);
1796  PGresult *res;
1797  int ret;
1798  char *copybuf;
1799  const char *column_list;
1800 
1801  pg_log_info("dumping contents of table \"%s.%s\"",
1802  tbinfo->dobj.namespace->dobj.name, classname);
1803 
1804  /*
1805  * Specify the column list explicitly so that we have no possibility of
1806  * retrieving data in the wrong column order. (The default column
1807  * ordering of COPY will not be what we want in certain corner cases
1808  * involving ADD COLUMN and inheritance.)
1809  */
1810  column_list = fmtCopyColumnList(tbinfo, clistBuf);
1811 
1812  if (tdinfo->filtercond)
1813  {
1814  /* Note: this syntax is only supported in 8.2 and up */
1815  appendPQExpBufferStr(q, "COPY (SELECT ");
1816  /* klugery to get rid of parens in column list */
1817  if (strlen(column_list) > 2)
1818  {
1819  appendPQExpBufferStr(q, column_list + 1);
1820  q->data[q->len - 1] = ' ';
1821  }
1822  else
1823  appendPQExpBufferStr(q, "* ");
1824  appendPQExpBuffer(q, "FROM %s %s) TO stdout;",
1825  fmtQualifiedDumpable(tbinfo),
1826  tdinfo->filtercond);
1827  }
1828  else
1829  {
1830  appendPQExpBuffer(q, "COPY %s %s TO stdout;",
1831  fmtQualifiedDumpable(tbinfo),
1832  column_list);
1833  }
1834  res = ExecuteSqlQuery(fout, q->data, PGRES_COPY_OUT);
1835  PQclear(res);
1836  destroyPQExpBuffer(clistBuf);
1837 
1838  for (;;)
1839  {
1840  ret = PQgetCopyData(conn, &copybuf, 0);
1841 
1842  if (ret < 0)
1843  break; /* done or error */
1844 
1845  if (copybuf)
1846  {
1847  WriteData(fout, copybuf, ret);
1848  PQfreemem(copybuf);
1849  }
1850 
1851  /* ----------
1852  * THROTTLE:
1853  *
1854  * There was considerable discussion in late July, 2000 regarding
1855  * slowing down pg_dump when backing up large tables. Users with both
1856  * slow & fast (multi-processor) machines experienced performance
1857  * degradation when doing a backup.
1858  *
1859  * Initial attempts based on sleeping for a number of ms for each ms
1860  * of work were deemed too complex, then a simple 'sleep in each loop'
1861  * implementation was suggested. The latter failed because the loop
1862  * was too tight. Finally, the following was implemented:
1863  *
1864  * If throttle is non-zero, then
1865  * See how long since the last sleep.
1866  * Work out how long to sleep (based on ratio).
1867  * If sleep is more than 100ms, then
1868  * sleep
1869  * reset timer
1870  * EndIf
1871  * EndIf
1872  *
1873  * where the throttle value was the number of ms to sleep per ms of
1874  * work. The calculation was done in each loop.
1875  *
1876  * Most of the hard work is done in the backend, and this solution
1877  * still did not work particularly well: on slow machines, the ratio
1878  * was 50:1, and on medium paced machines, 1:1, and on fast
1879  * multi-processor machines, it had little or no effect, for reasons
1880  * that were unclear.
1881  *
1882  * Further discussion ensued, and the proposal was dropped.
1883  *
1884  * For those people who want this feature, it can be implemented using
1885  * gettimeofday in each loop, calculating the time since last sleep,
1886  * multiplying that by the sleep ratio, then if the result is more
1887  * than a preset 'minimum sleep time' (say 100ms), call the 'select'
1888  * function to sleep for a subsecond period ie.
1889  *
1890  * select(0, NULL, NULL, NULL, &tvi);
1891  *
1892  * This will return after the interval specified in the structure tvi.
1893  * Finally, call gettimeofday again to save the 'last sleep time'.
1894  * ----------
1895  */
1896  }
1897  archprintf(fout, "\\.\n\n\n");
1898 
1899  if (ret == -2)
1900  {
1901  /* copy data transfer failed */
1902  pg_log_error("Dumping the contents of table \"%s\" failed: PQgetCopyData() failed.", classname);
1903  pg_log_error("Error message from server: %s", PQerrorMessage(conn));
1904  pg_log_error("The command was: %s", q->data);
1905  exit_nicely(1);
1906  }
1907 
1908  /* Check command status and return to normal libpq state */
1909  res = PQgetResult(conn);
1910  if (PQresultStatus(res) != PGRES_COMMAND_OK)
1911  {
1912  pg_log_error("Dumping the contents of table \"%s\" failed: PQgetResult() failed.", classname);
1913  pg_log_error("Error message from server: %s", PQerrorMessage(conn));
1914  pg_log_error("The command was: %s", q->data);
1915  exit_nicely(1);
1916  }
1917  PQclear(res);
1918 
1919  /* Do this to ensure we've pumped libpq back to idle state */
1920  if (PQgetResult(conn) != NULL)
1921  pg_log_warning("unexpected extra results during COPY of table \"%s\"",
1922  classname);
1923 
1924  destroyPQExpBuffer(q);
1925  return 1;
1926 }
1927 
1928 /*
1929  * Dump table data using INSERT commands.
1930  *
1931  * Caution: when we restore from an archive file direct to database, the
1932  * INSERT commands emitted by this function have to be parsed by
1933  * pg_backup_db.c's ExecuteSimpleCommands(), which will not handle comments,
1934  * E'' strings, or dollar-quoted strings. So don't emit anything like that.
1935  */
1936 static int
1937 dumpTableData_insert(Archive *fout, void *dcontext)
1938 {
1939  TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
1940  TableInfo *tbinfo = tdinfo->tdtable;
1941  DumpOptions *dopt = fout->dopt;
1943  PQExpBuffer insertStmt = NULL;
1944  PGresult *res;
1945  int nfields;
1946  int rows_per_statement = dopt->dump_inserts;
1947  int rows_this_statement = 0;
1948 
1949  appendPQExpBuffer(q, "DECLARE _pg_dump_cursor CURSOR FOR "
1950  "SELECT * FROM ONLY %s",
1951  fmtQualifiedDumpable(tbinfo));
1952  if (tdinfo->filtercond)
1953  appendPQExpBuffer(q, " %s", tdinfo->filtercond);
1954 
1955  ExecuteSqlStatement(fout, q->data);
1956 
1957  while (1)
1958  {
1959  res = ExecuteSqlQuery(fout, "FETCH 100 FROM _pg_dump_cursor",
1960  PGRES_TUPLES_OK);
1961  nfields = PQnfields(res);
1962 
1963  /*
1964  * First time through, we build as much of the INSERT statement as
1965  * possible in "insertStmt", which we can then just print for each
1966  * statement. If the table happens to have zero columns then this will
1967  * be a complete statement, otherwise it will end in "VALUES" and be
1968  * ready to have the row's column values printed.
1969  */
1970  if (insertStmt == NULL)
1971  {
1972  TableInfo *targettab;
1973 
1974  insertStmt = createPQExpBuffer();
1975 
1976  /*
1977  * When load-via-partition-root is set, get the root table name
1978  * for the partition table, so that we can reload data through the
1979  * root table.
1980  */
1981  if (dopt->load_via_partition_root && tbinfo->ispartition)
1982  targettab = getRootTableInfo(tbinfo);
1983  else
1984  targettab = tbinfo;
1985 
1986  appendPQExpBuffer(insertStmt, "INSERT INTO %s ",
1987  fmtQualifiedDumpable(targettab));
1988 
1989  /* corner case for zero-column table */
1990  if (nfields == 0)
1991  {
1992  appendPQExpBufferStr(insertStmt, "DEFAULT VALUES;\n");
1993  }
1994  else
1995  {
1996  /* append the list of column names if required */
1997  if (dopt->column_inserts)
1998  {
1999  appendPQExpBufferChar(insertStmt, '(');
2000  for (int field = 0; field < nfields; field++)
2001  {
2002  if (field > 0)
2003  appendPQExpBufferStr(insertStmt, ", ");
2004  appendPQExpBufferStr(insertStmt,
2005  fmtId(PQfname(res, field)));
2006  }
2007  appendPQExpBufferStr(insertStmt, ") ");
2008  }
2009 
2010  if (tbinfo->needs_override)
2011  appendPQExpBufferStr(insertStmt, "OVERRIDING SYSTEM VALUE ");
2012 
2013  appendPQExpBufferStr(insertStmt, "VALUES");
2014  }
2015  }
2016 
2017  for (int tuple = 0; tuple < PQntuples(res); tuple++)
2018  {
2019  /* Write the INSERT if not in the middle of a multi-row INSERT. */
2020  if (rows_this_statement == 0)
2021  archputs(insertStmt->data, fout);
2022 
2023  /*
2024  * If it is zero-column table then we've already written the
2025  * complete statement, which will mean we've disobeyed
2026  * --rows-per-insert when it's set greater than 1. We do support
2027  * a way to make this multi-row with: SELECT UNION ALL SELECT
2028  * UNION ALL ... but that's non-standard so we should avoid it
2029  * given that using INSERTs is mostly only ever needed for
2030  * cross-database exports.
2031  */
2032  if (nfields == 0)
2033  continue;
2034 
2035  /* Emit a row heading */
2036  if (rows_per_statement == 1)
2037  archputs(" (", fout);
2038  else if (rows_this_statement > 0)
2039  archputs(",\n\t(", fout);
2040  else
2041  archputs("\n\t(", fout);
2042 
2043  for (int field = 0; field < nfields; field++)
2044  {
2045  if (field > 0)
2046  archputs(", ", fout);
2047  if (tbinfo->attgenerated[field])
2048  {
2049  archputs("DEFAULT", fout);
2050  continue;
2051  }
2052  if (PQgetisnull(res, tuple, field))
2053  {
2054  archputs("NULL", fout);
2055  continue;
2056  }
2057 
2058  /* XXX This code is partially duplicated in ruleutils.c */
2059  switch (PQftype(res, field))
2060  {
2061  case INT2OID:
2062  case INT4OID:
2063  case INT8OID:
2064  case OIDOID:
2065  case FLOAT4OID:
2066  case FLOAT8OID:
2067  case NUMERICOID:
2068  {
2069  /*
2070  * These types are printed without quotes unless
2071  * they contain values that aren't accepted by the
2072  * scanner unquoted (e.g., 'NaN'). Note that
2073  * strtod() and friends might accept NaN, so we
2074  * can't use that to test.
2075  *
2076  * In reality we only need to defend against
2077  * infinity and NaN, so we need not get too crazy
2078  * about pattern matching here.
2079  */
2080  const char *s = PQgetvalue(res, tuple, field);
2081 
2082  if (strspn(s, "0123456789 +-eE.") == strlen(s))
2083  archputs(s, fout);
2084  else
2085  archprintf(fout, "'%s'", s);
2086  }
2087  break;
2088 
2089  case BITOID:
2090  case VARBITOID:
2091  archprintf(fout, "B'%s'",
2092  PQgetvalue(res, tuple, field));
2093  break;
2094 
2095  case BOOLOID:
2096  if (strcmp(PQgetvalue(res, tuple, field), "t") == 0)
2097  archputs("true", fout);
2098  else
2099  archputs("false", fout);
2100  break;
2101 
2102  default:
2103  /* All other types are printed as string literals. */
2104  resetPQExpBuffer(q);
2106  PQgetvalue(res, tuple, field),
2107  fout);
2108  archputs(q->data, fout);
2109  break;
2110  }
2111  }
2112 
2113  /* Terminate the row ... */
2114  archputs(")", fout);
2115 
2116  /* ... and the statement, if the target no. of rows is reached */
2117  if (++rows_this_statement >= rows_per_statement)
2118  {
2119  if (dopt->do_nothing)
2120  archputs(" ON CONFLICT DO NOTHING;\n", fout);
2121  else
2122  archputs(";\n", fout);
2123  /* Reset the row counter */
2124  rows_this_statement = 0;
2125  }
2126  }
2127 
2128  if (PQntuples(res) <= 0)
2129  {
2130  PQclear(res);
2131  break;
2132  }
2133  PQclear(res);
2134  }
2135 
2136  /* Terminate any statements that didn't make the row count. */
2137  if (rows_this_statement > 0)
2138  {
2139  if (dopt->do_nothing)
2140  archputs(" ON CONFLICT DO NOTHING;\n", fout);
2141  else
2142  archputs(";\n", fout);
2143  }
2144 
2145  archputs("\n\n", fout);
2146 
2147  ExecuteSqlStatement(fout, "CLOSE _pg_dump_cursor");
2148 
2149  destroyPQExpBuffer(q);
2150  if (insertStmt != NULL)
2151  destroyPQExpBuffer(insertStmt);
2152 
2153  return 1;
2154 }
2155 
2156 /*
2157  * getRootTableInfo:
2158  * get the root TableInfo for the given partition table.
2159  */
2160 static TableInfo *
2162 {
2163  TableInfo *parentTbinfo;
2164 
2165  Assert(tbinfo->ispartition);
2166  Assert(tbinfo->numParents == 1);
2167 
2168  parentTbinfo = tbinfo->parents[0];
2169  while (parentTbinfo->ispartition)
2170  {
2171  Assert(parentTbinfo->numParents == 1);
2172  parentTbinfo = parentTbinfo->parents[0];
2173  }
2174 
2175  return parentTbinfo;
2176 }
2177 
2178 /*
2179  * dumpTableData -
2180  * dump the contents of a single table
2181  *
2182  * Actually, this just makes an ArchiveEntry for the table contents.
2183  */
2184 static void
2186 {
2187  DumpOptions *dopt = fout->dopt;
2188  TableInfo *tbinfo = tdinfo->tdtable;
2189  PQExpBuffer copyBuf = createPQExpBuffer();
2190  PQExpBuffer clistBuf = createPQExpBuffer();
2191  DataDumperPtr dumpFn;
2192  char *copyStmt;
2193  const char *copyFrom;
2194 
2195  if (!dopt->dump_inserts)
2196  {
2197  /* Dump/restore using COPY */
2198  dumpFn = dumpTableData_copy;
2199 
2200  /*
2201  * When load-via-partition-root is set, get the root table name for
2202  * the partition table, so that we can reload data through the root
2203  * table.
2204  */
2205  if (dopt->load_via_partition_root && tbinfo->ispartition)
2206  {
2207  TableInfo *parentTbinfo;
2208 
2209  parentTbinfo = getRootTableInfo(tbinfo);
2210  copyFrom = fmtQualifiedDumpable(parentTbinfo);
2211  }
2212  else
2213  copyFrom = fmtQualifiedDumpable(tbinfo);
2214 
2215  /* must use 2 steps here 'cause fmtId is nonreentrant */
2216  appendPQExpBuffer(copyBuf, "COPY %s ",
2217  copyFrom);
2218  appendPQExpBuffer(copyBuf, "%s FROM stdin;\n",
2219  fmtCopyColumnList(tbinfo, clistBuf));
2220  copyStmt = copyBuf->data;
2221  }
2222  else
2223  {
2224  /* Restore using INSERT */
2225  dumpFn = dumpTableData_insert;
2226  copyStmt = NULL;
2227  }
2228 
2229  /*
2230  * Note: although the TableDataInfo is a full DumpableObject, we treat its
2231  * dependency on its table as "special" and pass it to ArchiveEntry now.
2232  * See comments for BuildArchiveDependencies.
2233  */
2234  if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2235  {
2236  TocEntry *te;
2237 
2238  te = ArchiveEntry(fout, tdinfo->dobj.catId, tdinfo->dobj.dumpId,
2239  ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
2240  .namespace = tbinfo->dobj.namespace->dobj.name,
2241  .owner = tbinfo->rolname,
2242  .description = "TABLE DATA",
2243  .section = SECTION_DATA,
2244  .copyStmt = copyStmt,
2245  .deps = &(tbinfo->dobj.dumpId),
2246  .nDeps = 1,
2247  .dumpFn = dumpFn,
2248  .dumpArg = tdinfo));
2249 
2250  /*
2251  * Set the TocEntry's dataLength in case we are doing a parallel dump
2252  * and want to order dump jobs by table size. We choose to measure
2253  * dataLength in table pages during dump, so no scaling is needed.
2254  * However, relpages is declared as "integer" in pg_class, and hence
2255  * also in TableInfo, but it's really BlockNumber a/k/a unsigned int.
2256  * Cast so that we get the right interpretation of table sizes
2257  * exceeding INT_MAX pages.
2258  */
2259  te->dataLength = (BlockNumber) tbinfo->relpages;
2260  }
2261 
2262  destroyPQExpBuffer(copyBuf);
2263  destroyPQExpBuffer(clistBuf);
2264 }
2265 
2266 /*
2267  * refreshMatViewData -
2268  * load or refresh the contents of a single materialized view
2269  *
2270  * Actually, this just makes an ArchiveEntry for the REFRESH MATERIALIZED VIEW
2271  * statement.
2272  */
2273 static void
2275 {
2276  TableInfo *tbinfo = tdinfo->tdtable;
2277  PQExpBuffer q;
2278 
2279  /* If the materialized view is not flagged as populated, skip this. */
2280  if (!tbinfo->relispopulated)
2281  return;
2282 
2283  q = createPQExpBuffer();
2284 
2285  appendPQExpBuffer(q, "REFRESH MATERIALIZED VIEW %s;\n",
2286  fmtQualifiedDumpable(tbinfo));
2287 
2288  if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2289  ArchiveEntry(fout,
2290  tdinfo->dobj.catId, /* catalog ID */
2291  tdinfo->dobj.dumpId, /* dump ID */
2292  ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
2293  .namespace = tbinfo->dobj.namespace->dobj.name,
2294  .owner = tbinfo->rolname,
2295  .description = "MATERIALIZED VIEW DATA",
2296  .section = SECTION_POST_DATA,
2297  .createStmt = q->data,
2298  .deps = tdinfo->dobj.dependencies,
2299  .nDeps = tdinfo->dobj.nDeps));
2300 
2301  destroyPQExpBuffer(q);
2302 }
2303 
2304 /*
2305  * getTableData -
2306  * set up dumpable objects representing the contents of tables
2307  */
2308 static void
2310 {
2311  int i;
2312 
2313  for (i = 0; i < numTables; i++)
2314  {
2315  if (tblinfo[i].dobj.dump & DUMP_COMPONENT_DATA &&
2316  (!relkind || tblinfo[i].relkind == relkind))
2317  makeTableDataInfo(dopt, &(tblinfo[i]));
2318  }
2319 }
2320 
2321 /*
2322  * Make a dumpable object for the data of this specific table
2323  *
2324  * Note: we make a TableDataInfo if and only if we are going to dump the
2325  * table data; the "dump" flag in such objects isn't used.
2326  */
2327 static void
2329 {
2330  TableDataInfo *tdinfo;
2331 
2332  /*
2333  * Nothing to do if we already decided to dump the table. This will
2334  * happen for "config" tables.
2335  */
2336  if (tbinfo->dataObj != NULL)
2337  return;
2338 
2339  /* Skip VIEWs (no data to dump) */
2340  if (tbinfo->relkind == RELKIND_VIEW)
2341  return;
2342  /* Skip FOREIGN TABLEs (no data to dump) */
2343  if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2344  return;
2345  /* Skip partitioned tables (data in partitions) */
2346  if (tbinfo->relkind == RELKIND_PARTITIONED_TABLE)
2347  return;
2348 
2349  /* Don't dump data in unlogged tables, if so requested */
2350  if (tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED &&
2351  dopt->no_unlogged_table_data)
2352  return;
2353 
2354  /* Check that the data is not explicitly excluded */
2355  if (simple_oid_list_member(&tabledata_exclude_oids,
2356  tbinfo->dobj.catId.oid))
2357  return;
2358 
2359  /* OK, let's dump it */
2360  tdinfo = (TableDataInfo *) pg_malloc(sizeof(TableDataInfo));
2361 
2362  if (tbinfo->relkind == RELKIND_MATVIEW)
2363  tdinfo->dobj.objType = DO_REFRESH_MATVIEW;
2364  else if (tbinfo->relkind == RELKIND_SEQUENCE)
2365  tdinfo->dobj.objType = DO_SEQUENCE_SET;
2366  else
2367  tdinfo->dobj.objType = DO_TABLE_DATA;
2368 
2369  /*
2370  * Note: use tableoid 0 so that this object won't be mistaken for
2371  * something that pg_depend entries apply to.
2372  */
2373  tdinfo->dobj.catId.tableoid = 0;
2374  tdinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
2375  AssignDumpId(&tdinfo->dobj);
2376  tdinfo->dobj.name = tbinfo->dobj.name;
2377  tdinfo->dobj.namespace = tbinfo->dobj.namespace;
2378  tdinfo->tdtable = tbinfo;
2379  tdinfo->filtercond = NULL; /* might get set later */
2380  addObjectDependency(&tdinfo->dobj, tbinfo->dobj.dumpId);
2381 
2382  tbinfo->dataObj = tdinfo;
2383 }
2384 
2385 /*
2386  * The refresh for a materialized view must be dependent on the refresh for
2387  * any materialized view that this one is dependent on.
2388  *
2389  * This must be called after all the objects are created, but before they are
2390  * sorted.
2391  */
2392 static void
2394 {
2395  PQExpBuffer query;
2396  PGresult *res;
2397  int ntups,
2398  i;
2399  int i_classid,
2400  i_objid,
2401  i_refobjid;
2402 
2403  /* No Mat Views before 9.3. */
2404  if (fout->remoteVersion < 90300)
2405  return;
2406 
2407  query = createPQExpBuffer();
2408 
2409  appendPQExpBufferStr(query, "WITH RECURSIVE w AS "
2410  "( "
2411  "SELECT d1.objid, d2.refobjid, c2.relkind AS refrelkind "
2412  "FROM pg_depend d1 "
2413  "JOIN pg_class c1 ON c1.oid = d1.objid "
2414  "AND c1.relkind = " CppAsString2(RELKIND_MATVIEW)
2415  " JOIN pg_rewrite r1 ON r1.ev_class = d1.objid "
2416  "JOIN pg_depend d2 ON d2.classid = 'pg_rewrite'::regclass "
2417  "AND d2.objid = r1.oid "
2418  "AND d2.refobjid <> d1.objid "
2419  "JOIN pg_class c2 ON c2.oid = d2.refobjid "
2420  "AND c2.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
2421  CppAsString2(RELKIND_VIEW) ") "
2422  "WHERE d1.classid = 'pg_class'::regclass "
2423  "UNION "
2424  "SELECT w.objid, d3.refobjid, c3.relkind "
2425  "FROM w "
2426  "JOIN pg_rewrite r3 ON r3.ev_class = w.refobjid "
2427  "JOIN pg_depend d3 ON d3.classid = 'pg_rewrite'::regclass "
2428  "AND d3.objid = r3.oid "
2429  "AND d3.refobjid <> w.refobjid "
2430  "JOIN pg_class c3 ON c3.oid = d3.refobjid "
2431  "AND c3.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
2432  CppAsString2(RELKIND_VIEW) ") "
2433  ") "
2434  "SELECT 'pg_class'::regclass::oid AS classid, objid, refobjid "
2435  "FROM w "
2436  "WHERE refrelkind = " CppAsString2(RELKIND_MATVIEW));
2437 
2438  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
2439 
2440  ntups = PQntuples(res);
2441 
2442  i_classid = PQfnumber(res, "classid");
2443  i_objid = PQfnumber(res, "objid");
2444  i_refobjid = PQfnumber(res, "refobjid");
2445 
2446  for (i = 0; i < ntups; i++)
2447  {
2448  CatalogId objId;
2449  CatalogId refobjId;
2450  DumpableObject *dobj;
2451  DumpableObject *refdobj;
2452  TableInfo *tbinfo;
2453  TableInfo *reftbinfo;
2454 
2455  objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
2456  objId.oid = atooid(PQgetvalue(res, i, i_objid));
2457  refobjId.tableoid = objId.tableoid;
2458  refobjId.oid = atooid(PQgetvalue(res, i, i_refobjid));
2459 
2460  dobj = findObjectByCatalogId(objId);
2461  if (dobj == NULL)
2462  continue;
2463 
2464  Assert(dobj->objType == DO_TABLE);
2465  tbinfo = (TableInfo *) dobj;
2466  Assert(tbinfo->relkind == RELKIND_MATVIEW);
2467  dobj = (DumpableObject *) tbinfo->dataObj;
2468  if (dobj == NULL)
2469  continue;
2470  Assert(dobj->objType == DO_REFRESH_MATVIEW);
2471 
2472  refdobj = findObjectByCatalogId(refobjId);
2473  if (refdobj == NULL)
2474  continue;
2475 
2476  Assert(refdobj->objType == DO_TABLE);
2477  reftbinfo = (TableInfo *) refdobj;
2478  Assert(reftbinfo->relkind == RELKIND_MATVIEW);
2479  refdobj = (DumpableObject *) reftbinfo->dataObj;
2480  if (refdobj == NULL)
2481  continue;
2482  Assert(refdobj->objType == DO_REFRESH_MATVIEW);
2483 
2484  addObjectDependency(dobj, refdobj->dumpId);
2485 
2486  if (!reftbinfo->relispopulated)
2487  tbinfo->relispopulated = false;
2488  }
2489 
2490  PQclear(res);
2491 
2492  destroyPQExpBuffer(query);
2493 }
2494 
2495 /*
2496  * getTableDataFKConstraints -
2497  * add dump-order dependencies reflecting foreign key constraints
2498  *
2499  * This code is executed only in a data-only dump --- in schema+data dumps
2500  * we handle foreign key issues by not creating the FK constraints until
2501  * after the data is loaded. In a data-only dump, however, we want to
2502  * order the table data objects in such a way that a table's referenced
2503  * tables are restored first. (In the presence of circular references or
2504  * self-references this may be impossible; we'll detect and complain about
2505  * that during the dependency sorting step.)
2506  */
2507 static void
2509 {
2510  DumpableObject **dobjs;
2511  int numObjs;
2512  int i;
2513 
2514  /* Search through all the dumpable objects for FK constraints */
2515  getDumpableObjects(&dobjs, &numObjs);
2516  for (i = 0; i < numObjs; i++)
2517  {
2518  if (dobjs[i]->objType == DO_FK_CONSTRAINT)
2519  {
2520  ConstraintInfo *cinfo = (ConstraintInfo *) dobjs[i];
2521  TableInfo *ftable;
2522 
2523  /* Not interesting unless both tables are to be dumped */
2524  if (cinfo->contable == NULL ||
2525  cinfo->contable->dataObj == NULL)
2526  continue;
2527  ftable = findTableByOid(cinfo->confrelid);
2528  if (ftable == NULL ||
2529  ftable->dataObj == NULL)
2530  continue;
2531 
2532  /*
2533  * Okay, make referencing table's TABLE_DATA object depend on the
2534  * referenced table's TABLE_DATA object.
2535  */
2537  ftable->dataObj->dobj.dumpId);
2538  }
2539  }
2540  free(dobjs);
2541 }
2542 
2543 
2544 /*
2545  * guessConstraintInheritance:
2546  * In pre-8.4 databases, we can't tell for certain which constraints
2547  * are inherited. We assume a CHECK constraint is inherited if its name
2548  * matches the name of any constraint in the parent. Originally this code
2549  * tried to compare the expression texts, but that can fail for various
2550  * reasons --- for example, if the parent and child tables are in different
2551  * schemas, reverse-listing of function calls may produce different text
2552  * (schema-qualified or not) depending on search path.
2553  *
2554  * In 8.4 and up we can rely on the conislocal field to decide which
2555  * constraints must be dumped; much safer.
2556  *
2557  * This function assumes all conislocal flags were initialized to true.
2558  * It clears the flag on anything that seems to be inherited.
2559  */
2560 static void
2562 {
2563  int i,
2564  j,
2565  k;
2566 
2567  for (i = 0; i < numTables; i++)
2568  {
2569  TableInfo *tbinfo = &(tblinfo[i]);
2570  int numParents;
2571  TableInfo **parents;
2572  TableInfo *parent;
2573 
2574  /* Sequences and views never have parents */
2575  if (tbinfo->relkind == RELKIND_SEQUENCE ||
2576  tbinfo->relkind == RELKIND_VIEW)
2577  continue;
2578 
2579  /* Don't bother computing anything for non-target tables, either */
2580  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
2581  continue;
2582 
2583  numParents = tbinfo->numParents;
2584  parents = tbinfo->parents;
2585 
2586  if (numParents == 0)
2587  continue; /* nothing to see here, move along */
2588 
2589  /* scan for inherited CHECK constraints */
2590  for (j = 0; j < tbinfo->ncheck; j++)
2591  {
2592  ConstraintInfo *constr;
2593 
2594  constr = &(tbinfo->checkexprs[j]);
2595 
2596  for (k = 0; k < numParents; k++)
2597  {
2598  int l;
2599 
2600  parent = parents[k];
2601  for (l = 0; l < parent->ncheck; l++)
2602  {
2603  ConstraintInfo *pconstr = &(parent->checkexprs[l]);
2604 
2605  if (strcmp(pconstr->dobj.name, constr->dobj.name) == 0)
2606  {
2607  constr->conislocal = false;
2608  break;
2609  }
2610  }
2611  if (!constr->conislocal)
2612  break;
2613  }
2614  }
2615  }
2616 }
2617 
2618 
2619 /*
2620  * dumpDatabase:
2621  * dump the database definition
2622  */
2623 static void
2625 {
2626  DumpOptions *dopt = fout->dopt;
2627  PQExpBuffer dbQry = createPQExpBuffer();
2628  PQExpBuffer delQry = createPQExpBuffer();
2629  PQExpBuffer creaQry = createPQExpBuffer();
2630  PQExpBuffer labelq = createPQExpBuffer();
2631  PGconn *conn = GetConnection(fout);
2632  PGresult *res;
2633  int i_tableoid,
2634  i_oid,
2635  i_datname,
2636  i_dba,
2637  i_encoding,
2638  i_collate,
2639  i_ctype,
2640  i_frozenxid,
2641  i_minmxid,
2642  i_datacl,
2643  i_rdatacl,
2644  i_datistemplate,
2645  i_datconnlimit,
2646  i_tablespace;
2647  CatalogId dbCatId;
2648  DumpId dbDumpId;
2649  const char *datname,
2650  *dba,
2651  *encoding,
2652  *collate,
2653  *ctype,
2654  *datacl,
2655  *rdatacl,
2656  *datistemplate,
2657  *datconnlimit,
2658  *tablespace;
2659  uint32 frozenxid,
2660  minmxid;
2661  char *qdatname;
2662 
2663  pg_log_info("saving database definition");
2664 
2665  /*
2666  * Fetch the database-level properties for this database.
2667  *
2668  * The order in which privileges are in the ACL string (the order they
2669  * have been GRANT'd in, which the backend maintains) must be preserved to
2670  * ensure that GRANTs WITH GRANT OPTION and subsequent GRANTs based on
2671  * those are dumped in the correct order. Note that initial privileges
2672  * (pg_init_privs) are not supported on databases, so this logic cannot
2673  * make use of buildACLQueries().
2674  */
2675  if (fout->remoteVersion >= 90600)
2676  {
2677  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2678  "(%s datdba) AS dba, "
2679  "pg_encoding_to_char(encoding) AS encoding, "
2680  "datcollate, datctype, datfrozenxid, datminmxid, "
2681  "(SELECT array_agg(acl ORDER BY row_n) FROM "
2682  " (SELECT acl, row_n FROM "
2683  " unnest(coalesce(datacl,acldefault('d',datdba))) "
2684  " WITH ORDINALITY AS perm(acl,row_n) "
2685  " WHERE NOT EXISTS ( "
2686  " SELECT 1 "
2687  " FROM unnest(acldefault('d',datdba)) "
2688  " AS init(init_acl) "
2689  " WHERE acl = init_acl)) AS datacls) "
2690  " AS datacl, "
2691  "(SELECT array_agg(acl ORDER BY row_n) FROM "
2692  " (SELECT acl, row_n FROM "
2693  " unnest(acldefault('d',datdba)) "
2694  " WITH ORDINALITY AS initp(acl,row_n) "
2695  " WHERE NOT EXISTS ( "
2696  " SELECT 1 "
2697  " FROM unnest(coalesce(datacl,acldefault('d',datdba))) "
2698  " AS permp(orig_acl) "
2699  " WHERE acl = orig_acl)) AS rdatacls) "
2700  " AS rdatacl, "
2701  "datistemplate, datconnlimit, "
2702  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2703  "shobj_description(oid, 'pg_database') AS description "
2704 
2705  "FROM pg_database "
2706  "WHERE datname = current_database()",
2708  }
2709  else if (fout->remoteVersion >= 90300)
2710  {
2711  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2712  "(%s datdba) AS dba, "
2713  "pg_encoding_to_char(encoding) AS encoding, "
2714  "datcollate, datctype, datfrozenxid, datminmxid, "
2715  "datacl, '' as rdatacl, datistemplate, datconnlimit, "
2716  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2717  "shobj_description(oid, 'pg_database') AS description "
2718 
2719  "FROM pg_database "
2720  "WHERE datname = current_database()",
2722  }
2723  else if (fout->remoteVersion >= 80400)
2724  {
2725  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2726  "(%s datdba) AS dba, "
2727  "pg_encoding_to_char(encoding) AS encoding, "
2728  "datcollate, datctype, datfrozenxid, 0 AS datminmxid, "
2729  "datacl, '' as rdatacl, datistemplate, datconnlimit, "
2730  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2731  "shobj_description(oid, 'pg_database') AS description "
2732 
2733  "FROM pg_database "
2734  "WHERE datname = current_database()",
2736  }
2737  else if (fout->remoteVersion >= 80200)
2738  {
2739  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2740  "(%s datdba) AS dba, "
2741  "pg_encoding_to_char(encoding) AS encoding, "
2742  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2743  "datacl, '' as rdatacl, datistemplate, datconnlimit, "
2744  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2745  "shobj_description(oid, 'pg_database') AS description "
2746 
2747  "FROM pg_database "
2748  "WHERE datname = current_database()",
2750  }
2751  else
2752  {
2753  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2754  "(%s datdba) AS dba, "
2755  "pg_encoding_to_char(encoding) AS encoding, "
2756  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2757  "datacl, '' as rdatacl, datistemplate, "
2758  "-1 as datconnlimit, "
2759  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace "
2760  "FROM pg_database "
2761  "WHERE datname = current_database()",
2763  }
2764 
2765  res = ExecuteSqlQueryForSingleRow(fout, dbQry->data);
2766 
2767  i_tableoid = PQfnumber(res, "tableoid");
2768  i_oid = PQfnumber(res, "oid");
2769  i_datname = PQfnumber(res, "datname");
2770  i_dba = PQfnumber(res, "dba");
2771  i_encoding = PQfnumber(res, "encoding");
2772  i_collate = PQfnumber(res, "datcollate");
2773  i_ctype = PQfnumber(res, "datctype");
2774  i_frozenxid = PQfnumber(res, "datfrozenxid");
2775  i_minmxid = PQfnumber(res, "datminmxid");
2776  i_datacl = PQfnumber(res, "datacl");
2777  i_rdatacl = PQfnumber(res, "rdatacl");
2778  i_datistemplate = PQfnumber(res, "datistemplate");
2779  i_datconnlimit = PQfnumber(res, "datconnlimit");
2780  i_tablespace = PQfnumber(res, "tablespace");
2781 
2782  dbCatId.tableoid = atooid(PQgetvalue(res, 0, i_tableoid));
2783  dbCatId.oid = atooid(PQgetvalue(res, 0, i_oid));
2784  datname = PQgetvalue(res, 0, i_datname);
2785  dba = PQgetvalue(res, 0, i_dba);
2786  encoding = PQgetvalue(res, 0, i_encoding);
2787  collate = PQgetvalue(res, 0, i_collate);
2788  ctype = PQgetvalue(res, 0, i_ctype);
2789  frozenxid = atooid(PQgetvalue(res, 0, i_frozenxid));
2790  minmxid = atooid(PQgetvalue(res, 0, i_minmxid));
2791  datacl = PQgetvalue(res, 0, i_datacl);
2792  rdatacl = PQgetvalue(res, 0, i_rdatacl);
2793  datistemplate = PQgetvalue(res, 0, i_datistemplate);
2794  datconnlimit = PQgetvalue(res, 0, i_datconnlimit);
2795  tablespace = PQgetvalue(res, 0, i_tablespace);
2796 
2797  qdatname = pg_strdup(fmtId(datname));
2798 
2799  /*
2800  * Prepare the CREATE DATABASE command. We must specify encoding, locale,
2801  * and tablespace since those can't be altered later. Other DB properties
2802  * are left to the DATABASE PROPERTIES entry, so that they can be applied
2803  * after reconnecting to the target DB.
2804  */
2805  appendPQExpBuffer(creaQry, "CREATE DATABASE %s WITH TEMPLATE = template0",
2806  qdatname);
2807  if (strlen(encoding) > 0)
2808  {
2809  appendPQExpBufferStr(creaQry, " ENCODING = ");
2810  appendStringLiteralAH(creaQry, encoding, fout);
2811  }
2812  if (strlen(collate) > 0 && strcmp(collate, ctype) == 0)
2813  {
2814  appendPQExpBufferStr(creaQry, " LOCALE = ");
2815  appendStringLiteralAH(creaQry, collate, fout);
2816  }
2817  else
2818  {
2819  if (strlen(collate) > 0)
2820  {
2821  appendPQExpBufferStr(creaQry, " LC_COLLATE = ");
2822  appendStringLiteralAH(creaQry, collate, fout);
2823  }
2824  if (strlen(ctype) > 0)
2825  {
2826  appendPQExpBufferStr(creaQry, " LC_CTYPE = ");
2827  appendStringLiteralAH(creaQry, ctype, fout);
2828  }
2829  }
2830 
2831  /*
2832  * Note: looking at dopt->outputNoTablespaces here is completely the wrong
2833  * thing; the decision whether to specify a tablespace should be left till
2834  * pg_restore, so that pg_restore --no-tablespaces applies. Ideally we'd
2835  * label the DATABASE entry with the tablespace and let the normal
2836  * tablespace selection logic work ... but CREATE DATABASE doesn't pay
2837  * attention to default_tablespace, so that won't work.
2838  */
2839  if (strlen(tablespace) > 0 && strcmp(tablespace, "pg_default") != 0 &&
2840  !dopt->outputNoTablespaces)
2841  appendPQExpBuffer(creaQry, " TABLESPACE = %s",
2842  fmtId(tablespace));
2843  appendPQExpBufferStr(creaQry, ";\n");
2844 
2845  appendPQExpBuffer(delQry, "DROP DATABASE %s;\n",
2846  qdatname);
2847 
2848  dbDumpId = createDumpId();
2849 
2850  ArchiveEntry(fout,
2851  dbCatId, /* catalog ID */
2852  dbDumpId, /* dump ID */
2853  ARCHIVE_OPTS(.tag = datname,
2854  .owner = dba,
2855  .description = "DATABASE",
2856  .section = SECTION_PRE_DATA,
2857  .createStmt = creaQry->data,
2858  .dropStmt = delQry->data));
2859 
2860  /* Compute correct tag for archive entry */
2861  appendPQExpBuffer(labelq, "DATABASE %s", qdatname);
2862 
2863  /* Dump DB comment if any */
2864  if (fout->remoteVersion >= 80200)
2865  {
2866  /*
2867  * 8.2 and up keep comments on shared objects in a shared table, so we
2868  * cannot use the dumpComment() code used for other database objects.
2869  * Be careful that the ArchiveEntry parameters match that function.
2870  */
2871  char *comment = PQgetvalue(res, 0, PQfnumber(res, "description"));
2872 
2873  if (comment && *comment && !dopt->no_comments)
2874  {
2875  resetPQExpBuffer(dbQry);
2876 
2877  /*
2878  * Generates warning when loaded into a differently-named
2879  * database.
2880  */
2881  appendPQExpBuffer(dbQry, "COMMENT ON DATABASE %s IS ", qdatname);
2882  appendStringLiteralAH(dbQry, comment, fout);
2883  appendPQExpBufferStr(dbQry, ";\n");
2884 
2885  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2886  ARCHIVE_OPTS(.tag = labelq->data,
2887  .owner = dba,
2888  .description = "COMMENT",
2889  .section = SECTION_NONE,
2890  .createStmt = dbQry->data,
2891  .deps = &dbDumpId,
2892  .nDeps = 1));
2893  }
2894  }
2895  else
2896  {
2897  dumpComment(fout, "DATABASE", qdatname, NULL, dba,
2898  dbCatId, 0, dbDumpId);
2899  }
2900 
2901  /* Dump DB security label, if enabled */
2902  if (!dopt->no_security_labels && fout->remoteVersion >= 90200)
2903  {
2904  PGresult *shres;
2905  PQExpBuffer seclabelQry;
2906 
2907  seclabelQry = createPQExpBuffer();
2908 
2909  buildShSecLabelQuery(conn, "pg_database", dbCatId.oid, seclabelQry);
2910  shres = ExecuteSqlQuery(fout, seclabelQry->data, PGRES_TUPLES_OK);
2911  resetPQExpBuffer(seclabelQry);
2912  emitShSecLabels(conn, shres, seclabelQry, "DATABASE", datname);
2913  if (seclabelQry->len > 0)
2914  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2915  ARCHIVE_OPTS(.tag = labelq->data,
2916  .owner = dba,
2917  .description = "SECURITY LABEL",
2918  .section = SECTION_NONE,
2919  .createStmt = seclabelQry->data,
2920  .deps = &dbDumpId,
2921  .nDeps = 1));
2922  destroyPQExpBuffer(seclabelQry);
2923  PQclear(shres);
2924  }
2925 
2926  /*
2927  * Dump ACL if any. Note that we do not support initial privileges
2928  * (pg_init_privs) on databases.
2929  */
2930  dumpACL(fout, dbCatId, dbDumpId, "DATABASE",
2931  qdatname, NULL, NULL,
2932  dba, datacl, rdatacl, "", "");
2933 
2934  /*
2935  * Now construct a DATABASE PROPERTIES archive entry to restore any
2936  * non-default database-level properties. (The reason this must be
2937  * separate is that we cannot put any additional commands into the TOC
2938  * entry that has CREATE DATABASE. pg_restore would execute such a group
2939  * in an implicit transaction block, and the backend won't allow CREATE
2940  * DATABASE in that context.)
2941  */
2942  resetPQExpBuffer(creaQry);
2943  resetPQExpBuffer(delQry);
2944 
2945  if (strlen(datconnlimit) > 0 && strcmp(datconnlimit, "-1") != 0)
2946  appendPQExpBuffer(creaQry, "ALTER DATABASE %s CONNECTION LIMIT = %s;\n",
2947  qdatname, datconnlimit);
2948 
2949  if (strcmp(datistemplate, "t") == 0)
2950  {
2951  appendPQExpBuffer(creaQry, "ALTER DATABASE %s IS_TEMPLATE = true;\n",
2952  qdatname);
2953 
2954  /*
2955  * The backend won't accept DROP DATABASE on a template database. We
2956  * can deal with that by removing the template marking before the DROP
2957  * gets issued. We'd prefer to use ALTER DATABASE IF EXISTS here, but
2958  * since no such command is currently supported, fake it with a direct
2959  * UPDATE on pg_database.
2960  */
2961  appendPQExpBufferStr(delQry, "UPDATE pg_catalog.pg_database "
2962  "SET datistemplate = false WHERE datname = ");
2963  appendStringLiteralAH(delQry, datname, fout);
2964  appendPQExpBufferStr(delQry, ";\n");
2965  }
2966 
2967  /* Add database-specific SET options */
2968  dumpDatabaseConfig(fout, creaQry, datname, dbCatId.oid);
2969 
2970  /*
2971  * We stick this binary-upgrade query into the DATABASE PROPERTIES archive
2972  * entry, too, for lack of a better place.
2973  */
2974  if (dopt->binary_upgrade)
2975  {
2976  appendPQExpBufferStr(creaQry, "\n-- For binary upgrade, set datfrozenxid and datminmxid.\n");
2977  appendPQExpBuffer(creaQry, "UPDATE pg_catalog.pg_database\n"
2978  "SET datfrozenxid = '%u', datminmxid = '%u'\n"
2979  "WHERE datname = ",
2980  frozenxid, minmxid);
2981  appendStringLiteralAH(creaQry, datname, fout);
2982  appendPQExpBufferStr(creaQry, ";\n");
2983  }
2984 
2985  if (creaQry->len > 0)
2986  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2987  ARCHIVE_OPTS(.tag = datname,
2988  .owner = dba,
2989  .description = "DATABASE PROPERTIES",
2990  .section = SECTION_PRE_DATA,
2991  .createStmt = creaQry->data,
2992  .dropStmt = delQry->data,
2993  .deps = &dbDumpId));
2994 
2995  /*
2996  * pg_largeobject comes from the old system intact, so set its
2997  * relfrozenxids and relminmxids.
2998  */
2999  if (dopt->binary_upgrade)
3000  {
3001  PGresult *lo_res;
3002  PQExpBuffer loFrozenQry = createPQExpBuffer();
3003  PQExpBuffer loOutQry = createPQExpBuffer();
3004  int i_relfrozenxid,
3005  i_relminmxid;
3006 
3007  /*
3008  * pg_largeobject
3009  */
3010  if (fout->remoteVersion >= 90300)
3011  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid\n"
3012  "FROM pg_catalog.pg_class\n"
3013  "WHERE oid = %u;\n",
3014  LargeObjectRelationId);
3015  else
3016  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid\n"
3017  "FROM pg_catalog.pg_class\n"
3018  "WHERE oid = %u;\n",
3019  LargeObjectRelationId);
3020 
3021  lo_res = ExecuteSqlQueryForSingleRow(fout, loFrozenQry->data);
3022 
3023  i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
3024  i_relminmxid = PQfnumber(lo_res, "relminmxid");
3025 
3026  appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, set pg_largeobject relfrozenxid and relminmxid\n");
3027  appendPQExpBuffer(loOutQry, "UPDATE pg_catalog.pg_class\n"
3028  "SET relfrozenxid = '%u', relminmxid = '%u'\n"
3029  "WHERE oid = %u;\n",
3030  atooid(PQgetvalue(lo_res, 0, i_relfrozenxid)),
3031  atooid(PQgetvalue(lo_res, 0, i_relminmxid)),
3032  LargeObjectRelationId);
3033  ArchiveEntry(fout, nilCatalogId, createDumpId(),
3034  ARCHIVE_OPTS(.tag = "pg_largeobject",
3035  .description = "pg_largeobject",
3036  .section = SECTION_PRE_DATA,
3037  .createStmt = loOutQry->data));
3038 
3039  PQclear(lo_res);
3040 
3041  destroyPQExpBuffer(loFrozenQry);
3042  destroyPQExpBuffer(loOutQry);
3043  }
3044 
3045  PQclear(res);
3046 
3047  free(qdatname);
3048  destroyPQExpBuffer(dbQry);
3049  destroyPQExpBuffer(delQry);
3050  destroyPQExpBuffer(creaQry);
3051  destroyPQExpBuffer(labelq);
3052 }
3053 
3054 /*
3055  * Collect any database-specific or role-and-database-specific SET options
3056  * for this database, and append them to outbuf.
3057  */
3058 static void
3060  const char *dbname, Oid dboid)
3061 {
3062  PGconn *conn = GetConnection(AH);
3064  PGresult *res;
3065  int count = 1;
3066 
3067  /*
3068  * First collect database-specific options. Pre-8.4 server versions lack
3069  * unnest(), so we do this the hard way by querying once per subscript.
3070  */
3071  for (;;)
3072  {
3073  if (AH->remoteVersion >= 90000)
3074  printfPQExpBuffer(buf, "SELECT setconfig[%d] FROM pg_db_role_setting "
3075  "WHERE setrole = 0 AND setdatabase = '%u'::oid",
3076  count, dboid);
3077  else
3078  printfPQExpBuffer(buf, "SELECT datconfig[%d] FROM pg_database WHERE oid = '%u'::oid", count, dboid);
3079 
3080  res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);
3081 
3082  if (PQntuples(res) == 1 &&
3083  !PQgetisnull(res, 0, 0))
3084  {
3085  makeAlterConfigCommand(conn, PQgetvalue(res, 0, 0),
3086  "DATABASE", dbname, NULL, NULL,
3087  outbuf);
3088  PQclear(res);
3089  count++;
3090  }
3091  else
3092  {
3093  PQclear(res);
3094  break;
3095  }
3096  }
3097 
3098  /* Now look for role-and-database-specific options */
3099  if (AH->remoteVersion >= 90000)
3100  {
3101  /* Here we can assume we have unnest() */
3102  printfPQExpBuffer(buf, "SELECT rolname, unnest(setconfig) "
3103  "FROM pg_db_role_setting s, pg_roles r "
3104  "WHERE setrole = r.oid AND setdatabase = '%u'::oid",
3105  dboid);
3106 
3107  res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);
3108 
3109  if (PQntuples(res) > 0)
3110  {
3111  int i;
3112 
3113  for (i = 0; i < PQntuples(res); i++)
3114  makeAlterConfigCommand(conn, PQgetvalue(res, i, 1),
3115  "ROLE", PQgetvalue(res, i, 0),
3116  "DATABASE", dbname,
3117  outbuf);
3118  }
3119 
3120  PQclear(res);
3121  }
3122 
3123  destroyPQExpBuffer(buf);
3124 }
3125 
3126 /*
3127  * dumpEncoding: put the correct encoding into the archive
3128  */
3129 static void
3131 {
3132  const char *encname = pg_encoding_to_char(AH->encoding);
3134 
3135  pg_log_info("saving encoding = %s", encname);
3136 
3137  appendPQExpBufferStr(qry, "SET client_encoding = ");
3138  appendStringLiteralAH(qry, encname, AH);
3139  appendPQExpBufferStr(qry, ";\n");
3140 
3141  ArchiveEntry(AH, nilCatalogId, createDumpId(),
3142  ARCHIVE_OPTS(.tag = "ENCODING",
3143  .description = "ENCODING",
3144  .section = SECTION_PRE_DATA,
3145  .createStmt = qry->data));
3146 
3147  destroyPQExpBuffer(qry);
3148 }
3149 
3150 
3151 /*
3152  * dumpStdStrings: put the correct escape string behavior into the archive
3153  */
3154 static void
3156 {
3157  const char *stdstrings = AH->std_strings ? "on" : "off";
3159 
3160  pg_log_info("saving standard_conforming_strings = %s",
3161  stdstrings);
3162 
3163  appendPQExpBuffer(qry, "SET standard_conforming_strings = '%s';\n",
3164  stdstrings);
3165 
3166  ArchiveEntry(AH, nilCatalogId, createDumpId(),
3167  ARCHIVE_OPTS(.tag = "STDSTRINGS",
3168  .description = "STDSTRINGS",
3169  .section = SECTION_PRE_DATA,
3170  .createStmt = qry->data));
3171 
3172  destroyPQExpBuffer(qry);
3173 }
3174 
3175 /*
3176  * dumpSearchPath: record the active search_path in the archive
3177  */
3178 static void
3180 {
3182  PQExpBuffer path = createPQExpBuffer();
3183  PGresult *res;
3184  char **schemanames = NULL;
3185  int nschemanames = 0;
3186  int i;
3187 
3188  /*
3189  * We use the result of current_schemas(), not the search_path GUC,
3190  * because that might contain wildcards such as "$user", which won't
3191  * necessarily have the same value during restore. Also, this way avoids
3192  * listing schemas that may appear in search_path but not actually exist,
3193  * which seems like a prudent exclusion.
3194  */
3195  res = ExecuteSqlQueryForSingleRow(AH,
3196  "SELECT pg_catalog.current_schemas(false)");
3197 
3198  if (!parsePGArray(PQgetvalue(res, 0, 0), &schemanames, &nschemanames))
3199  fatal("could not parse result of current_schemas()");
3200 
3201  /*
3202  * We use set_config(), not a simple "SET search_path" command, because
3203  * the latter has less-clean behavior if the search path is empty. While
3204  * that's likely to get fixed at some point, it seems like a good idea to
3205  * be as backwards-compatible as possible in what we put into archives.
3206  */
3207  for (i = 0; i < nschemanames; i++)
3208  {
3209  if (i > 0)
3210  appendPQExpBufferStr(path, ", ");
3211  appendPQExpBufferStr(path, fmtId(schemanames[i]));
3212  }
3213 
3214  appendPQExpBufferStr(qry, "SELECT pg_catalog.set_config('search_path', ");
3215  appendStringLiteralAH(qry, path->data, AH);
3216  appendPQExpBufferStr(qry, ", false);\n");
3217 
3218  pg_log_info("saving search_path = %s", path->data);
3219 
3220  ArchiveEntry(AH, nilCatalogId, createDumpId(),
3221  ARCHIVE_OPTS(.tag = "SEARCHPATH",
3222  .description = "SEARCHPATH",
3223  .section = SECTION_PRE_DATA,
3224  .createStmt = qry->data));
3225 
3226  /* Also save it in AH->searchpath, in case we're doing plain text dump */
3227  AH->searchpath = pg_strdup(qry->data);
3228 
3229  if (schemanames)
3230  free(schemanames);
3231  PQclear(res);
3232  destroyPQExpBuffer(qry);
3233  destroyPQExpBuffer(path);
3234 }
3235 
3236 
3237 /*
3238  * getBlobs:
3239  * Collect schema-level data about large objects
3240  */
3241 static void
3243 {
3244  DumpOptions *dopt = fout->dopt;
3245  PQExpBuffer blobQry = createPQExpBuffer();
3246  BlobInfo *binfo;
3247  DumpableObject *bdata;
3248  PGresult *res;
3249  int ntups;
3250  int i;
3251  int i_oid;
3252  int i_lomowner;
3253  int i_lomacl;
3254  int i_rlomacl;
3255  int i_initlomacl;
3256  int i_initrlomacl;
3257 
3258  pg_log_info("reading large objects");
3259 
3260  /* Fetch BLOB OIDs, and owner/ACL data if >= 9.0 */
3261  if (fout->remoteVersion >= 90600)
3262  {
3263  PQExpBuffer acl_subquery = createPQExpBuffer();
3264  PQExpBuffer racl_subquery = createPQExpBuffer();
3265  PQExpBuffer init_acl_subquery = createPQExpBuffer();
3266  PQExpBuffer init_racl_subquery = createPQExpBuffer();
3267 
3268  buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
3269  init_racl_subquery, "l.lomacl", "l.lomowner", "'L'",
3270  dopt->binary_upgrade);
3271 
3272  appendPQExpBuffer(blobQry,
3273  "SELECT l.oid, (%s l.lomowner) AS rolname, "
3274  "%s AS lomacl, "
3275  "%s AS rlomacl, "
3276  "%s AS initlomacl, "
3277  "%s AS initrlomacl "
3278  "FROM pg_largeobject_metadata l "
3279  "LEFT JOIN pg_init_privs pip ON "
3280  "(l.oid = pip.objoid "
3281  "AND pip.classoid = 'pg_largeobject'::regclass "
3282  "AND pip.objsubid = 0) ",
3284  acl_subquery->data,
3285  racl_subquery->data,
3286  init_acl_subquery->data,
3287  init_racl_subquery->data);
3288 
3289  destroyPQExpBuffer(acl_subquery);
3290  destroyPQExpBuffer(racl_subquery);
3291  destroyPQExpBuffer(init_acl_subquery);
3292  destroyPQExpBuffer(init_racl_subquery);
3293  }
3294  else if (fout->remoteVersion >= 90000)
3295  appendPQExpBuffer(blobQry,
3296  "SELECT oid, (%s lomowner) AS rolname, lomacl, "
3297  "NULL AS rlomacl, NULL AS initlomacl, "
3298  "NULL AS initrlomacl "
3299  " FROM pg_largeobject_metadata",
3301  else
3302  appendPQExpBufferStr(blobQry,
3303  "SELECT DISTINCT loid AS oid, "
3304  "NULL::name AS rolname, NULL::oid AS lomacl, "
3305  "NULL::oid AS rlomacl, NULL::oid AS initlomacl, "
3306  "NULL::oid AS initrlomacl "
3307  " FROM pg_largeobject");
3308 
3309  res = ExecuteSqlQuery(fout, blobQry->data, PGRES_TUPLES_OK);
3310 
3311  i_oid = PQfnumber(res, "oid");
3312  i_lomowner = PQfnumber(res, "rolname");
3313  i_lomacl = PQfnumber(res, "lomacl");
3314  i_rlomacl = PQfnumber(res, "rlomacl");
3315  i_initlomacl = PQfnumber(res, "initlomacl");
3316  i_initrlomacl = PQfnumber(res, "initrlomacl");
3317 
3318  ntups = PQntuples(res);
3319 
3320  /*
3321  * Each large object has its own BLOB archive entry.
3322  */
3323  binfo = (BlobInfo *) pg_malloc(ntups * sizeof(BlobInfo));
3324 
3325  for (i = 0; i < ntups; i++)
3326  {
3327  binfo[i].dobj.objType = DO_BLOB;
3328  binfo[i].dobj.catId.tableoid = LargeObjectRelationId;
3329  binfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
3330  AssignDumpId(&binfo[i].dobj);
3331 
3332  binfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oid));
3333  binfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_lomowner));
3334  binfo[i].blobacl = pg_strdup(PQgetvalue(res, i, i_lomacl));
3335  binfo[i].rblobacl = pg_strdup(PQgetvalue(res, i, i_rlomacl));
3336  binfo[i].initblobacl = pg_strdup(PQgetvalue(res, i, i_initlomacl));
3337  binfo[i].initrblobacl = pg_strdup(PQgetvalue(res, i, i_initrlomacl));
3338 
3339  if (PQgetisnull(res, i, i_lomacl) &&
3340  PQgetisnull(res, i, i_rlomacl) &&
3341  PQgetisnull(res, i, i_initlomacl) &&
3342  PQgetisnull(res, i, i_initrlomacl))
3343  binfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
3344 
3345  /*
3346  * In binary-upgrade mode for blobs, we do *not* dump out the blob
3347  * data, as it will be copied by pg_upgrade, which simply copies the
3348  * pg_largeobject table. We *do* however dump out anything but the
3349  * data, as pg_upgrade copies just pg_largeobject, but not
3350  * pg_largeobject_metadata, after the dump is restored.
3351  */
3352  if (dopt->binary_upgrade)
3353  binfo[i].dobj.dump &= ~DUMP_COMPONENT_DATA;
3354  }
3355 
3356  /*
3357  * If we have any large objects, a "BLOBS" archive entry is needed. This
3358  * is just a placeholder for sorting; it carries no data now.
3359  */
3360  if (ntups > 0)
3361  {
3362  bdata = (DumpableObject *) pg_malloc(sizeof(DumpableObject));
3363  bdata->objType = DO_BLOB_DATA;
3364  bdata->catId = nilCatalogId;
3365  AssignDumpId(bdata);
3366  bdata->name = pg_strdup("BLOBS");
3367  }
3368 
3369  PQclear(res);
3370  destroyPQExpBuffer(blobQry);
3371 }
3372 
3373 /*
3374  * dumpBlob
3375  *
3376  * dump the definition (metadata) of the given large object
3377  */
3378 static void
3379 dumpBlob(Archive *fout, BlobInfo *binfo)
3380 {
3381  PQExpBuffer cquery = createPQExpBuffer();
3382  PQExpBuffer dquery = createPQExpBuffer();
3383 
3384  appendPQExpBuffer(cquery,
3385  "SELECT pg_catalog.lo_create('%s');\n",
3386  binfo->dobj.name);
3387 
3388  appendPQExpBuffer(dquery,
3389  "SELECT pg_catalog.lo_unlink('%s');\n",
3390  binfo->dobj.name);
3391 
3392  if (binfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
3393  ArchiveEntry(fout, binfo->dobj.catId, binfo->dobj.dumpId,
3394  ARCHIVE_OPTS(.tag = binfo->dobj.name,
3395  .owner = binfo->rolname,
3396  .description = "BLOB",
3397  .section = SECTION_PRE_DATA,
3398  .createStmt = cquery->data,
3399  .dropStmt = dquery->data));
3400 
3401  /* Dump comment if any */
3402  if (binfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3403  dumpComment(fout, "LARGE OBJECT", binfo->dobj.name,
3404  NULL, binfo->rolname,
3405  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3406 
3407  /* Dump security label if any */
3408  if (binfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3409  dumpSecLabel(fout, "LARGE OBJECT", binfo->dobj.name,
3410  NULL, binfo->rolname,
3411  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3412 
3413  /* Dump ACL if any */
3414  if (binfo->blobacl && (binfo->dobj.dump & DUMP_COMPONENT_ACL))
3415  dumpACL(fout, binfo->dobj.catId, binfo->dobj.dumpId, "LARGE OBJECT",
3416  binfo->dobj.name, NULL,
3417  NULL, binfo->rolname, binfo->blobacl, binfo->rblobacl,
3418  binfo->initblobacl, binfo->initrblobacl);
3419 
3420  destroyPQExpBuffer(cquery);
3421  destroyPQExpBuffer(dquery);
3422 }
3423 
/*
 * dumpBlobs:
 *	dump the data contents of all large objects
 *
 * Re-fetches all blob OIDs via a server-side cursor, then streams each
 * blob's bytes into the archive between StartBlob/EndBlob markers.
 * Returns 1 (the return value is not examined by callers of data dumper
 * functions; "arg" is likewise unused here).
 */
static int
dumpBlobs(Archive *fout, void *arg)
{
	const char *blobQry;
	const char *blobFetchQry;
	PGconn	   *conn = GetConnection(fout);
	PGresult   *res;
	char		buf[LOBBUFSIZE];
	int			ntups;
	int			i;
	int			cnt;

	pg_log_info("saving large objects");

	/*
	 * Currently, we re-fetch all BLOB OIDs using a cursor.  Consider scanning
	 * the already-in-memory dumpable objects instead...
	 */
	/* pg_largeobject_metadata only exists on 9.0+ servers */
	if (fout->remoteVersion >= 90000)
		blobQry =
			"DECLARE bloboid CURSOR FOR "
			"SELECT oid FROM pg_largeobject_metadata ORDER BY 1";
	else
		blobQry =
			"DECLARE bloboid CURSOR FOR "
			"SELECT DISTINCT loid FROM pg_largeobject ORDER BY 1";

	ExecuteSqlStatement(fout, blobQry);

	/* Command to fetch from cursor; batches of 1000 bound client memory */
	blobFetchQry = "FETCH 1000 IN bloboid";

	do
	{
		/* Do a fetch */
		res = ExecuteSqlQuery(fout, blobFetchQry, PGRES_TUPLES_OK);

		/* Process the tuples, if any */
		ntups = PQntuples(res);
		for (i = 0; i < ntups; i++)
		{
			Oid			blobOid;
			int			loFd;

			blobOid = atooid(PQgetvalue(res, i, 0));
			/* Open the BLOB */
			loFd = lo_open(conn, blobOid, INV_READ);
			if (loFd == -1)
				fatal("could not open large object %u: %s",
					  blobOid, PQerrorMessage(conn));

			StartBlob(fout, blobOid);

			/* Now read it in chunks, sending data to archive */
			do
			{
				cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
				if (cnt < 0)
					fatal("error reading large object %u: %s",
						  blobOid, PQerrorMessage(conn));

				WriteData(fout, buf, cnt);
			} while (cnt > 0);	/* cnt == 0 means EOF on this blob */

			lo_close(conn, loFd);

			EndBlob(fout, blobOid);
		}

		PQclear(res);
	} while (ntups > 0);		/* empty fetch means cursor exhausted */

	return 1;
}
3502 
/*
 * getPolicies
 *	  get information about policies on a dumpable table.
 *
 * For each table with the POLICY dump component, this registers (a) a
 * PolicyInfo with a NULL polname when row-level security is enabled on the
 * table, and (b) one PolicyInfo per explicit pg_policy row.  Policies did
 * not exist before 9.5, so older servers are skipped entirely.
 */
void
getPolicies(Archive *fout, TableInfo tblinfo[], int numTables)
{
	PQExpBuffer query;
	PGresult   *res;
	PolicyInfo *polinfo;
	int			i_oid;
	int			i_tableoid;
	int			i_polname;
	int			i_polcmd;
	int			i_polpermissive;
	int			i_polroles;
	int			i_polqual;
	int			i_polwithcheck;
	int			i,
				j,
				ntups;

	/* No policies before 9.5 */
	if (fout->remoteVersion < 90500)
		return;

	query = createPQExpBuffer();

	for (i = 0; i < numTables; i++)
	{
		TableInfo  *tbinfo = &tblinfo[i];

		/* Ignore row security on tables not to be dumped */
		if (!(tbinfo->dobj.dump & DUMP_COMPONENT_POLICY))
			continue;

		pg_log_info("reading row security enabled for table \"%s.%s\"",
					tbinfo->dobj.namespace->dobj.name,
					tbinfo->dobj.name);

		/*
		 * Get row security enabled information for the table. We represent
		 * RLS being enabled on a table by creating a PolicyInfo object with
		 * null polname.
		 */
		if (tbinfo->rowsec)
		{
			/*
			 * Note: use tableoid 0 so that this object won't be mistaken for
			 * something that pg_depend entries apply to.
			 */
			polinfo = pg_malloc(sizeof(PolicyInfo));
			polinfo->dobj.objType = DO_POLICY;
			polinfo->dobj.catId.tableoid = 0;
			polinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
			AssignDumpId(&polinfo->dobj);
			polinfo->dobj.namespace = tbinfo->dobj.namespace;
			polinfo->dobj.name = pg_strdup(tbinfo->dobj.name);
			polinfo->poltable = tbinfo;
			polinfo->polname = NULL;
			polinfo->polcmd = '\0';
			polinfo->polpermissive = 0;
			polinfo->polroles = NULL;
			polinfo->polqual = NULL;
			polinfo->polwithcheck = NULL;
		}

		pg_log_info("reading policies for table \"%s.%s\"",
					tbinfo->dobj.namespace->dobj.name,
					tbinfo->dobj.name);

		resetPQExpBuffer(query);

		/*
		 * Get the policies for the table.  polpermissive was added in v10;
		 * for older servers every policy is permissive, so fake a 't'.
		 */
		if (fout->remoteVersion >= 100000)
			appendPQExpBuffer(query,
							  "SELECT oid, tableoid, pol.polname, pol.polcmd, pol.polpermissive, "
							  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
							  "   pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
							  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
							  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
							  "FROM pg_catalog.pg_policy pol "
							  "WHERE polrelid = '%u'",
							  tbinfo->dobj.catId.oid);
		else
			appendPQExpBuffer(query,
							  "SELECT oid, tableoid, pol.polname, pol.polcmd, 't' as polpermissive, "
							  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
							  "   pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
							  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
							  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
							  "FROM pg_catalog.pg_policy pol "
							  "WHERE polrelid = '%u'",
							  tbinfo->dobj.catId.oid);
		res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);

		ntups = PQntuples(res);

		if (ntups == 0)
		{
			/*
			 * No explicit policies to handle (only the default-deny policy,
			 * which is handled as part of the table definition).  Clean up
			 * and return.
			 */
			PQclear(res);
			continue;
		}

		i_oid = PQfnumber(res, "oid");
		i_tableoid = PQfnumber(res, "tableoid");
		i_polname = PQfnumber(res, "polname");
		i_polcmd = PQfnumber(res, "polcmd");
		i_polpermissive = PQfnumber(res, "polpermissive");
		i_polroles = PQfnumber(res, "polroles");
		i_polqual = PQfnumber(res, "polqual");
		i_polwithcheck = PQfnumber(res, "polwithcheck");

		/* One PolicyInfo per pg_policy row; ownership passes to the registry */
		polinfo = pg_malloc(ntups * sizeof(PolicyInfo));

		for (j = 0; j < ntups; j++)
		{
			polinfo[j].dobj.objType = DO_POLICY;
			polinfo[j].dobj.catId.tableoid =
				atooid(PQgetvalue(res, j, i_tableoid));
			polinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
			AssignDumpId(&polinfo[j].dobj);
			polinfo[j].dobj.namespace = tbinfo->dobj.namespace;
			polinfo[j].poltable = tbinfo;
			polinfo[j].polname = pg_strdup(PQgetvalue(res, j, i_polname));
			polinfo[j].dobj.name = pg_strdup(polinfo[j].polname);

			polinfo[j].polcmd = *(PQgetvalue(res, j, i_polcmd));
			polinfo[j].polpermissive = *(PQgetvalue(res, j, i_polpermissive)) == 't';

			/* NULL polroles means the policy applies to PUBLIC */
			if (PQgetisnull(res, j, i_polroles))
				polinfo[j].polroles = NULL;
			else
				polinfo[j].polroles = pg_strdup(PQgetvalue(res, j, i_polroles));

			if (PQgetisnull(res, j, i_polqual))
				polinfo[j].polqual = NULL;
			else
				polinfo[j].polqual = pg_strdup(PQgetvalue(res, j, i_polqual));

			if (PQgetisnull(res, j, i_polwithcheck))
				polinfo[j].polwithcheck = NULL;
			else
				polinfo[j].polwithcheck
					= pg_strdup(PQgetvalue(res, j, i_polwithcheck));
		}
		PQclear(res);
	}
	destroyPQExpBuffer(query);
}
3657 
3658 /*
3659  * dumpPolicy
3660  * dump the definition of the given policy
3661  */
3662 static void
3664 {
3665  DumpOptions *dopt = fout->dopt;
3666  TableInfo *tbinfo = polinfo->poltable;
3667  PQExpBuffer query;
3668  PQExpBuffer delqry;
3669  const char *cmd;
3670  char *tag;
3671 
3672  if (dopt->dataOnly)
3673  return;
3674 
3675  /*
3676  * If polname is NULL, then this record is just indicating that ROW LEVEL
3677  * SECURITY is enabled for the table. Dump as ALTER TABLE <table> ENABLE
3678  * ROW LEVEL SECURITY.
3679  */
3680  if (polinfo->polname == NULL)
3681  {
3682  query = createPQExpBuffer();
3683 
3684  appendPQExpBuffer(query, "ALTER TABLE %s ENABLE ROW LEVEL SECURITY;",
3685  fmtQualifiedDumpable(tbinfo));
3686 
3687  /*
3688  * We must emit the ROW SECURITY object's dependency on its table
3689  * explicitly, because it will not match anything in pg_depend (unlike
3690  * the case for other PolicyInfo objects).
3691  */
3692  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3693  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3694  ARCHIVE_OPTS(.tag = polinfo->dobj.name,
3695  .namespace = polinfo->dobj.namespace->dobj.name,
3696  .owner = tbinfo->rolname,
3697  .description = "ROW SECURITY",
3698  .section = SECTION_POST_DATA,
3699  .createStmt = query->data,
3700  .deps = &(tbinfo->dobj.dumpId),
3701  .nDeps = 1));
3702 
3703  destroyPQExpBuffer(query);
3704  return;
3705  }
3706 
3707  if (polinfo->polcmd == '*')
3708  cmd = "";
3709  else if (polinfo->polcmd == 'r')
3710  cmd = " FOR SELECT";
3711  else if (polinfo->polcmd == 'a')
3712  cmd = " FOR INSERT";
3713  else if (polinfo->polcmd == 'w')
3714  cmd = " FOR UPDATE";
3715  else if (polinfo->polcmd == 'd')
3716  cmd = " FOR DELETE";
3717  else
3718  {
3719  pg_log_error("unexpected policy command type: %c",
3720  polinfo->polcmd);
3721  exit_nicely(1);
3722  }
3723 
3724  query = createPQExpBuffer();
3725  delqry = createPQExpBuffer();
3726 
3727  appendPQExpBuffer(query, "CREATE POLICY %s", fmtId(polinfo->polname));
3728 
3729  appendPQExpBuffer(query, " ON %s%s%s", fmtQualifiedDumpable(tbinfo),
3730  !polinfo->polpermissive ? " AS RESTRICTIVE" : "", cmd);
3731 
3732  if (polinfo->polroles != NULL)
3733  appendPQExpBuffer(query, " TO %s", polinfo->polroles);
3734 
3735  if (polinfo->polqual != NULL)
3736  appendPQExpBuffer(query, " USING (%s)", polinfo->polqual);
3737 
3738  if (polinfo->polwithcheck != NULL)
3739  appendPQExpBuffer(query, " WITH CHECK (%s)", polinfo->polwithcheck);
3740 
3741  appendPQExpBufferStr(query, ";\n");
3742 
3743  appendPQExpBuffer(delqry, "DROP POLICY %s", fmtId(polinfo->polname));
3744  appendPQExpBuffer(delqry, " ON %s;\n", fmtQualifiedDumpable(tbinfo));
3745 
3746  tag = psprintf("%s %s", tbinfo->dobj.name, polinfo->dobj.name);
3747 
3748  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3749  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3750  ARCHIVE_OPTS(.tag = tag,
3751  .namespace = polinfo->dobj.namespace->dobj.name,
3752  .owner = tbinfo->rolname,
3753  .description = "POLICY",
3754  .section = SECTION_POST_DATA,
3755  .createStmt = query->data,
3756  .dropStmt = delqry->data));
3757 
3758  free(tag);
3759  destroyPQExpBuffer(query);
3760  destroyPQExpBuffer(delqry);
3761 }
3762 
3763 /*
3764  * getPublications
3765  * get information about publications
3766  */
3767 void
3769 {
3770  DumpOptions *dopt = fout->dopt;
3771  PQExpBuffer query;
3772  PGresult *res;
3773  PublicationInfo *pubinfo;
3774  int i_tableoid;
3775  int i_oid;
3776  int i_pubname;
3777  int i_rolname;
3778  int i_puballtables;
3779  int i_pubinsert;
3780  int i_pubupdate;
3781  int i_pubdelete;
3782  int i_pubtruncate;
3783  int i,
3784  ntups;
3785 
3786  if (dopt->no_publications || fout->remoteVersion < 100000)
3787  return;
3788 
3789  query = createPQExpBuffer();
3790 
3791  resetPQExpBuffer(query);
3792 
3793  /* Get the publications. */
3794  if (fout->remoteVersion >= 110000)
3795  appendPQExpBuffer(query,
3796  "SELECT p.tableoid, p.oid, p.pubname, "
3797  "(%s p.pubowner) AS rolname, "
3798  "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, p.pubtruncate "
3799  "FROM pg_publication p",
3801  else
3802  appendPQExpBuffer(query,
3803  "SELECT p.tableoid, p.oid, p.pubname, "
3804  "(%s p.pubowner) AS rolname, "
3805  "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, false AS pubtruncate "
3806  "FROM pg_publication p",
3808 
3809  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3810 
3811  ntups = PQntuples(res);
3812 
3813  i_tableoid = PQfnumber(res, "tableoid");
3814  i_oid = PQfnumber(res, "oid");
3815  i_pubname = PQfnumber(res, "pubname");
3816  i_rolname = PQfnumber(res, "rolname");
3817  i_puballtables = PQfnumber(res, "puballtables");
3818  i_pubinsert = PQfnumber(res, "pubinsert");
3819  i_pubupdate = PQfnumber(res, "pubupdate");
3820  i_pubdelete = PQfnumber(res, "pubdelete");
3821  i_pubtruncate = PQfnumber(res, "pubtruncate");
3822 
3823  pubinfo = pg_malloc(ntups * sizeof(PublicationInfo));
3824 
3825  for (i = 0; i < ntups; i++)
3826  {
3827  pubinfo[i].dobj.objType = DO_PUBLICATION;
3828  pubinfo[i].dobj.catId.tableoid =
3829  atooid(PQgetvalue(res, i, i_tableoid));
3830  pubinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
3831  AssignDumpId(&pubinfo[i].dobj);
3832  pubinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_pubname));
3833  pubinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
3834  pubinfo[i].puballtables =
3835  (strcmp(PQgetvalue(res, i, i_puballtables), "t") == 0);
3836  pubinfo[i].pubinsert =
3837  (strcmp(PQgetvalue(res, i, i_pubinsert), "t") == 0);
3838  pubinfo[i].pubupdate =
3839  (strcmp(PQgetvalue(res, i, i_pubupdate), "t") == 0);
3840  pubinfo[i].pubdelete =
3841  (strcmp(PQgetvalue(res, i, i_pubdelete), "t") == 0);
3842  pubinfo[i].pubtruncate =
3843  (strcmp(PQgetvalue(res, i, i_pubtruncate), "t") == 0);
3844 
3845  if (strlen(pubinfo[i].rolname) == 0)
3846  pg_log_warning("owner of publication \"%s\" appears to be invalid",
3847  pubinfo[i].dobj.name);
3848 
3849  /* Decide whether we want to dump it */
3850  selectDumpableObject(&(pubinfo[i].dobj), fout);
3851  }
3852  PQclear(res);
3853 
3854  destroyPQExpBuffer(query);
3855 }
3856 
3857 /*
3858  * dumpPublication
3859  * dump the definition of the given publication
3860  */
3861 static void
3863 {
3864  PQExpBuffer delq;
3865  PQExpBuffer query;
3866  char *qpubname;
3867  bool first = true;
3868 
3869  if (!(pubinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3870  return;
3871 
3872  delq = createPQExpBuffer();
3873  query = createPQExpBuffer();
3874 
3875  qpubname = pg_strdup(fmtId(pubinfo->dobj.name));
3876 
3877  appendPQExpBuffer(delq, "DROP PUBLICATION %s;\n",
3878  qpubname);
3879 
3880  appendPQExpBuffer(query, "CREATE PUBLICATION %s",
3881  qpubname);
3882 
3883  if (pubinfo->puballtables)
3884  appendPQExpBufferStr(query, " FOR ALL TABLES");
3885 
3886  appendPQExpBufferStr(query, " WITH (publish = '");
3887  if (pubinfo->pubinsert)
3888  {
3889  appendPQExpBufferStr(query, "insert");
3890  first = false;
3891  }
3892 
3893  if (pubinfo->pubupdate)
3894  {
3895  if (!first)
3896  appendPQExpBufferStr(query, ", ");
3897 
3898  appendPQExpBufferStr(query, "update");
3899  first = false;
3900  }
3901 
3902  if (pubinfo->pubdelete)
3903  {
3904  if (!first)
3905  appendPQExpBufferStr(query, ", ");
3906 
3907  appendPQExpBufferStr(query, "delete");
3908  first = false;
3909  }
3910 
3911  if (pubinfo->pubtruncate)
3912  {
3913  if (!first)
3914  appendPQExpBufferStr(query, ", ");
3915 
3916  appendPQExpBufferStr(query, "truncate");
3917  first = false;
3918  }
3919 
3920  appendPQExpBufferStr(query, "');\n");
3921 
3922  ArchiveEntry(fout, pubinfo->dobj.catId, pubinfo->dobj.dumpId,
3923  ARCHIVE_OPTS(.tag = pubinfo->dobj.name,
3924  .owner = pubinfo->rolname,
3925  .description = "PUBLICATION",
3926  .section = SECTION_POST_DATA,
3927  .createStmt = query->data,
3928  .dropStmt = delq->data));
3929 
3930  if (pubinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3931  dumpComment(fout, "PUBLICATION", qpubname,
3932  NULL, pubinfo->rolname,
3933  pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
3934 
3935  if (pubinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3936  dumpSecLabel(fout, "PUBLICATION", qpubname,
3937  NULL, pubinfo->rolname,
3938  pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
3939 
3940  destroyPQExpBuffer(delq);
3941  destroyPQExpBuffer(query);
3942  free(qpubname);
3943 }
3944 
3945 /*
3946  * getPublicationTables
3947  * get information about publication membership for dumpable tables.
3948  */
3949 void
3951 {
3952  PQExpBuffer query;
3953  PGresult *res;
3954  PublicationRelInfo *pubrinfo;
3955  DumpOptions *dopt = fout->dopt;
3956  int i_tableoid;
3957  int i_oid;
3958  int i_pubname;
3959  int i,
3960  j,
3961  ntups;
3962 
3963  if (dopt->no_publications || fout->remoteVersion < 100000)
3964  return;
3965 
3966  query = createPQExpBuffer();
3967 
3968  for (i = 0; i < numTables; i++)
3969  {
3970  TableInfo *tbinfo = &tblinfo[i];
3971 
3972  /* Only plain tables can be aded to publications. */
3973  if (tbinfo->relkind != RELKIND_RELATION)
3974  continue;
3975 
3976  /*
3977  * Ignore publication membership of tables whose definitions are not
3978  * to be dumped.
3979  */
3980  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3981  continue;
3982 
3983  pg_log_info("reading publication membership for table \"%s.%s\"",
3984  tbinfo->dobj.namespace->dobj.name,
3985  tbinfo->dobj.name);
3986 
3987  resetPQExpBuffer(query);
3988 
3989  /* Get the publication membership for the table. */
3990  appendPQExpBuffer(query,
3991  "SELECT pr.tableoid, pr.oid, p.pubname "
3992  "FROM pg_publication_rel pr, pg_publication p "
3993  "WHERE pr.prrelid = '%u'"
3994  " AND p.oid = pr.prpubid",
3995  tbinfo->dobj.catId.oid);
3996  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3997 
3998  ntups = PQntuples(res);
3999 
4000  if (ntups == 0)
4001  {
4002  /*
4003  * Table is not member of any publications. Clean up and return.
4004  */
4005  PQclear(res);
4006  continue;
4007  }
4008 
4009  i_tableoid = PQfnumber(res, "tableoid");
4010  i_oid = PQfnumber(res, "oid");
4011  i_pubname = PQfnumber(res, "pubname");
4012 
4013  pubrinfo = pg_malloc(ntups * sizeof(PublicationRelInfo));
4014 
4015  for (j = 0; j < ntups; j++)
4016  {
4017  pubrinfo[j].dobj.objType = DO_PUBLICATION_REL;
4018  pubrinfo[j].dobj.catId.tableoid =
4019  atooid(PQgetvalue(res, j, i_tableoid));
4020  pubrinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
4021  AssignDumpId(&pubrinfo[j].dobj);
4022  pubrinfo[j].dobj.namespace = tbinfo->dobj.namespace;
4023  pubrinfo[j].dobj.name = tbinfo->dobj.name;
4024  pubrinfo[j].pubname = pg_strdup(PQgetvalue(res, j, i_pubname));
4025  pubrinfo[j].pubtable = tbinfo;
4026 
4027  /* Decide whether we want to dump it */
4028  selectDumpablePublicationTable(&(pubrinfo[j].dobj), fout);
4029  }
4030  PQclear(res);
4031  }
4032  destroyPQExpBuffer(query);
4033 }
4034 
4035 /*
4036  * dumpPublicationTable
4037  * dump the definition of the given publication table mapping
4038  */
4039 static void
4041 {
4042  TableInfo *tbinfo = pubrinfo->pubtable;
4043  PQExpBuffer query;
4044  char *tag;
4045 
4046  if (!(pubrinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
4047  return;
4048 
4049  tag = psprintf("%s %s", pubrinfo->pubname, tbinfo->dobj.name);
4050 
4051  query = createPQExpBuffer();
4052 
4053  appendPQExpBuffer(query, "ALTER PUBLICATION %s ADD TABLE ONLY",
4054  fmtId(pubrinfo->pubname));
4055  appendPQExpBuffer(query, " %s;\n",
4056  fmtQualifiedDumpable(tbinfo));
4057 
4058  /*
4059  * There is no point in creating drop query as the drop is done by table
4060  * drop.
4061  */
4062  ArchiveEntry(fout, pubrinfo->dobj.catId, pubrinfo->dobj.dumpId,
4063  ARCHIVE_OPTS(.tag = tag,
4064  .namespace = tbinfo->dobj.namespace->dobj.name,
4065  .description = "PUBLICATION TABLE",
4066  .section = SECTION_POST_DATA,
4067  .createStmt = query->data));
4068 
4069  free(tag);
4070  destroyPQExpBuffer(query);
4071 }
4072 
4073 /*
4074  * Is the currently connected user a superuser?
4075  */
4076 static bool
4078 {
4079  ArchiveHandle *AH = (ArchiveHandle *) fout;
4080  const char *val;
4081 
4082  val = PQparameterStatus(AH->connection, "is_superuser");
4083 
4084  if (val && strcmp(val, "on") == 0)
4085  return true;
4086 
4087  return false;
4088 }
4089 
4090 /*
4091  * getSubscriptions
4092  * get information about subscriptions
4093  */
4094 void
4096 {
4097  DumpOptions *dopt = fout->dopt;
4098  PQExpBuffer query;
4099  PGresult *res;
4100  SubscriptionInfo *subinfo;
4101  int i_tableoid;
4102  int i_oid;
4103  int i_subname;
4104  int i_rolname;
4105  int i_subconninfo;
4106  int i_subslotname;
4107  int i_subsynccommit;
4108  int i_subpublications;
4109  int i,
4110  ntups;
4111 
4112  if (dopt->no_subscriptions || fout->remoteVersion < 100000)
4113  return;
4114 
4115  if (!is_superuser(fout))
4116  {
4117  int n;
4118 
4119  res = ExecuteSqlQuery(fout,
4120  "SELECT count(*) FROM pg_subscription "
4121  "WHERE subdbid = (SELECT oid FROM pg_database"
4122  " WHERE datname = current_database())",
4123  PGRES_TUPLES_OK);
4124  n = atoi(PQgetvalue(res, 0, 0));
4125  if (n > 0)
4126  pg_log_warning("subscriptions not dumped because current user is not a superuser");
4127  PQclear(res);
4128  return;
4129  }
4130 
4131  query = createPQExpBuffer();
4132 
4133  resetPQExpBuffer(query);
4134 
4135  /* Get the subscriptions in current database. */
4136  appendPQExpBuffer(query,
4137  "SELECT s.tableoid, s.oid, s.subname,"
4138  "(%s s.subowner) AS rolname, "
4139  " s.subconninfo, s.subslotname, s.subsynccommit, "
4140  " s.subpublications "
4141  "FROM pg_subscription s "
4142  "WHERE s.subdbid = (SELECT oid FROM pg_database"
4143  " WHERE datname = current_database())",
4145  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4146 
4147  ntups = PQntuples(res);
4148 
4149  i_tableoid = PQfnumber(res, "tableoid");
4150  i_oid = PQfnumber(res, "oid");
4151  i_subname = PQfnumber(res, "subname");
4152  i_rolname = PQfnumber(res, "rolname");
4153  i_subconninfo = PQfnumber(res, "subconninfo");
4154  i_subslotname = PQfnumber(res, "subslotname");
4155  i_subsynccommit = PQfnumber(res, "subsynccommit");
4156  i_subpublications = PQfnumber(res, "subpublications");
4157 
4158  subinfo = pg_malloc(ntups * sizeof(SubscriptionInfo));
4159 
4160  for (i = 0; i < ntups; i++)
4161  {
4162  subinfo[i].dobj.objType = DO_SUBSCRIPTION;
4163  subinfo[i].dobj.catId.tableoid =
4164  atooid(PQgetvalue(res, i, i_tableoid));
4165  subinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4166  AssignDumpId(&subinfo[i].dobj);
4167  subinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_subname));
4168  subinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4169  subinfo[i].subconninfo = pg_strdup(PQgetvalue(res, i, i_subconninfo));
4170  if (PQgetisnull(res, i, i_subslotname))
4171  subinfo[i].subslotname = NULL;
4172  else
4173  subinfo[i].subslotname = pg_strdup(PQgetvalue(res, i, i_subslotname));
4174  subinfo[i].subsynccommit =
4175  pg_strdup(PQgetvalue(res, i, i_subsynccommit));
4176  subinfo[i].subpublications =
4177  pg_strdup(PQgetvalue(res, i, i_subpublications));
4178 
4179  if (strlen(subinfo[i].rolname) == 0)
4180  pg_log_warning("owner of subscription \"%s\" appears to be invalid",
4181  subinfo[i].dobj.name);
4182 
4183  /* Decide whether we want to dump it */
4184  selectDumpableObject(&(subinfo[i].dobj), fout);
4185  }
4186  PQclear(res);
4187 
4188  destroyPQExpBuffer(query);
4189 }
4190 
4191 /*
4192  * dumpSubscription
4193  * dump the definition of the given subscription
4194  */
4195 static void
4197 {
4198  PQExpBuffer delq;
4199  PQExpBuffer query;
4200  PQExpBuffer publications;
4201  char *qsubname;
4202  char **pubnames = NULL;
4203  int npubnames = 0;
4204  int i;
4205 
4206  if (!(subinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
4207  return;
4208 
4209  delq = createPQExpBuffer();
4210  query = createPQExpBuffer();
4211 
4212  qsubname = pg_strdup(fmtId(subinfo->dobj.name));
4213 
4214  appendPQExpBuffer(delq, "DROP SUBSCRIPTION %s;\n",
4215  qsubname);
4216 
4217  appendPQExpBuffer(query, "CREATE SUBSCRIPTION %s CONNECTION ",
4218  qsubname);
4219  appendStringLiteralAH(query, subinfo->subconninfo, fout);
4220 
4221  /* Build list of quoted publications and append them to query. */
4222  if (!parsePGArray(subinfo->subpublications, &pubnames, &npubnames))
4223  {
4224  pg_log_warning("could not parse subpublications array");
4225  if (pubnames)
4226  free(pubnames);
4227  pubnames = NULL;
4228  npubnames = 0;
4229  }
4230 
4231  publications = createPQExpBuffer();
4232  for (i = 0; i < npubnames; i++)
4233  {
4234  if (i > 0)
4235  appendPQExpBufferStr(publications, ", ");
4236 
4237  appendPQExpBufferStr(publications, fmtId(pubnames[i]));
4238  }
4239 
4240  appendPQExpBuffer(query, " PUBLICATION %s WITH (connect = false, slot_name = ", publications->data);
4241  if (subinfo->subslotname)
4242  appendStringLiteralAH(query, subinfo->subslotname, fout);
4243  else
4244  appendPQExpBufferStr(query, "NONE");
4245 
4246  if (strcmp(subinfo->subsynccommit, "off") != 0)
4247  appendPQExpBuffer(query, ", synchronous_commit = %s", fmtId(subinfo->subsynccommit));
4248 
4249  appendPQExpBufferStr(query, ");\n");
4250 
4251  ArchiveEntry(fout, subinfo->dobj.catId, subinfo->dobj.dumpId,
4252  ARCHIVE_OPTS(.tag = subinfo->dobj.name,
4253  .owner = subinfo->rolname,
4254  .description = "SUBSCRIPTION",
4255  .section = SECTION_POST_DATA,
4256  .createStmt = query->data,
4257  .dropStmt = delq->data));
4258 
4259  if (subinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
4260  dumpComment(fout, "SUBSCRIPTION", qsubname,
4261  NULL, subinfo->rolname,
4262  subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
4263 
4264  if (subinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
4265  dumpSecLabel(fout, "SUBSCRIPTION", qsubname,
4266  NULL, subinfo->rolname,
4267  subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
4268 
4269  destroyPQExpBuffer(publications);
4270  if (pubnames)
4271  free(pubnames);
4272 
4273  destroyPQExpBuffer(delq);
4274  destroyPQExpBuffer(query);
4275  free(qsubname);
4276 }
4277 
4278 static void
4280  PQExpBuffer upgrade_buffer,
4281  Oid pg_type_oid,
4282  bool force_array_type)
4283 {
4284  PQExpBuffer upgrade_query = createPQExpBuffer();
4285  PGresult *res;
4286  Oid pg_type_array_oid;
4287 
4288  appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type oid\n");
4289  appendPQExpBuffer(upgrade_buffer,
4290  "SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4291  pg_type_oid);
4292 
4293  /* we only support old >= 8.3 for binary upgrades */
4294  appendPQExpBuffer(upgrade_query,
4295  "SELECT typarray "
4296  "FROM pg_catalog.pg_type "
4297  "WHERE oid = '%u'::pg_catalog.oid;",
4298  pg_type_oid);
4299 
4300  res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4301 
4302  pg_type_array_oid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typarray")));
4303 
4304  PQclear(res);
4305 
4306  if (!OidIsValid(pg_type_array_oid) && force_array_type)
4307  {
4308  /*
4309  * If the old version didn't assign an array type, but the new version
4310  * does, we must select an unused type OID to assign. This currently
4311  * only happens for domains, when upgrading pre-v11 to v11 and up.
4312  *
4313  * Note: local state here is kind of ugly, but we must have some,
4314  * since we mustn't choose the same unused OID more than once.
4315  */
4316  static Oid next_possible_free_oid = FirstNormalObjectId;
4317  bool is_dup;
4318 
4319  do
4320  {
4321  ++next_possible_free_oid;
4322  printfPQExpBuffer(upgrade_query,
4323  "SELECT EXISTS(SELECT 1 "
4324  "FROM pg_catalog.pg_type "
4325  "WHERE oid = '%u'::pg_catalog.oid);",
4326  next_possible_free_oid);
4327  res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4328  is_dup = (PQgetvalue(res, 0, 0)[0] == 't');
4329  PQclear(res);
4330  } while (is_dup);
4331 
4332  pg_type_array_oid = next_possible_free_oid;
4333  }
4334 
4335  if (OidIsValid(pg_type_array_oid))
4336  {
4337  appendPQExpBufferStr(upgrade_buffer,
4338  "\n-- For binary upgrade, must preserve pg_type array oid\n");
4339  appendPQExpBuffer(upgrade_buffer,
4340  "SELECT pg_catalog.binary_upgrade_set_next_array_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4341  pg_type_array_oid);
4342  }
4343 
4344  destroyPQExpBuffer(upgrade_query);
4345 }
4346 
4347 static bool
4349  PQExpBuffer upgrade_buffer,
4350  Oid pg_rel_oid)
4351 {
4352  PQExpBuffer upgrade_query = createPQExpBuffer();
4353  PGresult *upgrade_res;
4354  Oid pg_type_oid;
4355  bool toast_set = false;
4356 
4357  /*
4358  * We only support old >= 8.3 for binary upgrades.
4359  *
4360  * We purposefully ignore toast OIDs for partitioned tables; the reason is
4361  * that versions 10 and 11 have them, but 12 does not, so emitting them
4362  * causes the upgrade to fail.
4363  */
4364  appendPQExpBuffer(upgrade_query,
4365  "SELECT c.reltype AS crel, t.reltype AS trel "
4366  "FROM pg_catalog.pg_class c "
4367  "LEFT JOIN pg_catalog.pg_class t ON "
4368  " (c.reltoastrelid = t.oid AND c.relkind <> '%c') "
4369  "WHERE c.oid = '%u'::pg_catalog.oid;",
4370  RELKIND_PARTITIONED_TABLE, pg_rel_oid);
4371 
4372  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4373 
4374  pg_type_oid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "crel")));
4375 
4376  binary_upgrade_set_type_oids_by_type_oid(fout, upgrade_buffer,
4377  pg_type_oid, false);
4378 
4379  if (!PQgetisnull(upgrade_res, 0, PQfnumber(upgrade_res, "trel")))
4380  {
4381  /* Toast tables do not have pg_type array rows */
4382  Oid pg_type_toast_oid = atooid(PQgetvalue(upgrade_res, 0,
4383  PQfnumber(upgrade_res, "trel")));
4384 
4385  appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type toast oid\n");
4386  appendPQExpBuffer(upgrade_buffer,
4387  "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4388  pg_type_toast_oid);
4389 
4390  toast_set = true;
4391  }
4392 
4393  PQclear(upgrade_res);
4394  destroyPQExpBuffer(upgrade_query);
4395 
4396  return toast_set;
4397 }
4398 
4399 static void
4401  PQExpBuffer upgrade_buffer, Oid pg_class_oid,
4402  bool is_index)
4403 {
4404  PQExpBuffer upgrade_query = createPQExpBuffer();
4405  PGresult *upgrade_res;
4406  Oid pg_class_reltoastrelid;
4407  Oid pg_index_indexrelid;
4408 
4409  appendPQExpBuffer(upgrade_query,
4410  "SELECT c.reltoastrelid, i.indexrelid "
4411  "FROM pg_catalog.pg_class c LEFT JOIN "
4412  "pg_catalog.pg_index i ON (c.reltoastrelid = i.indrelid AND i.indisvalid) "
4413  "WHERE c.oid = '%u'::pg_catalog.oid;",
4414  pg_class_oid);
4415 
4416  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4417 
4418  pg_class_reltoastrelid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "reltoastrelid")));
4419  pg_index_indexrelid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "indexrelid")));
4420 
4421  appendPQExpBufferStr(upgrade_buffer,
4422  "\n-- For binary upgrade, must preserve pg_class oids\n");
4423 
4424  if (!is_index)
4425  {
4426  appendPQExpBuffer(upgrade_buffer,
4427  "SELECT pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('%u'::pg_catalog.oid);\n",
4428  pg_class_oid);
4429  /* only tables have toast tables, not indexes */
4430  if (OidIsValid(pg_class_reltoastrelid))
4431  {
4432  /*
4433  * One complexity is that the table definition might not require
4434  * the creation of a TOAST table, and the TOAST table might have
4435  * been created long after table creation, when the table was
4436  * loaded with wide data. By setting the TOAST oid we force
4437  * creation of the TOAST heap and TOAST index by the backend so we
4438  * can cleanly copy the files during binary upgrade.
4439  */
4440 
4441  appendPQExpBuffer(upgrade_buffer,
4442  "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('%u'::pg_catalog.oid);\n",
4443  pg_class_reltoastrelid);
4444 
4445  /* every toast table has an index */
4446  appendPQExpBuffer(upgrade_buffer,
4447  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
4448  pg_index_indexrelid);
4449  }
4450  }
4451  else
4452  appendPQExpBuffer(upgrade_buffer,
4453  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
4454  pg_class_oid);
4455 
4456  appendPQExpBufferChar(upgrade_buffer, '\n');
4457 
4458  PQclear(upgrade_res);
4459  destroyPQExpBuffer(upgrade_query);
4460 }
4461 
4462 /*
4463  * If the DumpableObject is a member of an extension, add a suitable
4464  * ALTER EXTENSION ADD command to the creation commands in upgrade_buffer.
4465  *
4466  * For somewhat historical reasons, objname should already be quoted,
4467  * but not objnamespace (if any).
4468  */
4469 static void
4471  DumpableObject *dobj,
4472  const char *objtype,
4473  const char *objname,
4474  const char *objnamespace)
4475 {
4476  DumpableObject *extobj = NULL;
4477  int i;
4478 
4479  if (!dobj->ext_member)
4480  return;
4481 
4482  /*
4483  * Find the parent extension. We could avoid this search if we wanted to
4484  * add a link field to DumpableObject, but the space costs of that would
4485  * be considerable. We assume that member objects could only have a
4486  * direct dependency on their own extension, not any others.
4487  */
4488  for (i = 0; i < dobj->nDeps; i++)
4489  {
4490  extobj = findObjectByDumpId(dobj->dependencies[i]);
4491  if (extobj && extobj->objType == DO_EXTENSION)
4492  break;
4493  extobj = NULL;
4494  }
4495  if (extobj == NULL)
4496  fatal("could not find parent extension for %s %s",
4497  objtype, objname);
4498 
4499  appendPQExpBufferStr(upgrade_buffer,
4500  "\n-- For binary upgrade, handle extension membership the hard way\n");
4501  appendPQExpBuffer(upgrade_buffer, "ALTER EXTENSION %s ADD %s ",
4502  fmtId(extobj->name),
4503  objtype);
4504  if (objnamespace && *objnamespace)
4505  appendPQExpBuffer(upgrade_buffer, "%s.", fmtId(objnamespace));
4506  appendPQExpBuffer(upgrade_buffer, "%s;\n", objname);
4507 }
4508 
4509 /*
4510  * getNamespaces:
4511  * read all namespaces in the system catalogs and return them in the
4512  * NamespaceInfo* structure
4513  *
4514  * numNamespaces is set to the number of namespaces read in
4515  */
4516 NamespaceInfo *
4518 {
4519  DumpOptions *dopt = fout->dopt;
4520  PGresult *res;
4521  int ntups;
4522  int i;
4523  PQExpBuffer query;
4524  NamespaceInfo *nsinfo;
4525  int i_tableoid;
4526  int i_oid;
4527  int i_nspname;
4528  int i_rolname;
4529  int i_nspacl;
4530  int i_rnspacl;
4531  int i_initnspacl;
4532  int i_initrnspacl;
4533 
4534  query = createPQExpBuffer();
4535 
4536  /*
4537  * we fetch all namespaces including system ones, so that every object we
4538  * read in can be linked to a containing namespace.
4539  */
4540  if (fout->remoteVersion >= 90600)
4541  {
4542  PQExpBuffer acl_subquery = createPQExpBuffer();
4543  PQExpBuffer racl_subquery = createPQExpBuffer();
4544  PQExpBuffer init_acl_subquery = createPQExpBuffer();
4545  PQExpBuffer init_racl_subquery = createPQExpBuffer();
4546 
4547  buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
4548  init_racl_subquery, "n.nspacl", "n.nspowner", "'n'",
4549  dopt->binary_upgrade);
4550 
4551  appendPQExpBuffer(query, "SELECT n.tableoid, n.oid, n.nspname, "
4552  "(%s nspowner) AS rolname, "
4553  "%s as nspacl, "
4554  "%s as rnspacl, "
4555  "%s as initnspacl, "
4556  "%s as initrnspacl "
4557  "FROM pg_namespace n "
4558  "LEFT JOIN pg_init_privs pip "
4559  "ON (n.oid = pip.objoid "
4560  "AND pip.classoid = 'pg_namespace'::regclass "
4561  "AND pip.objsubid = 0",
4563  acl_subquery->data,
4564  racl_subquery->data,
4565  init_acl_subquery->data,
4566  init_racl_subquery->data);
4567 
4568  appendPQExpBufferStr(query, ") ");
4569 
4570  destroyPQExpBuffer(acl_subquery);
4571  destroyPQExpBuffer(racl_subquery);
4572  destroyPQExpBuffer(init_acl_subquery);
4573  destroyPQExpBuffer(init_racl_subquery);
4574  }
4575  else
4576  appendPQExpBuffer(query, "SELECT tableoid, oid, nspname, "
4577  "(%s nspowner) AS rolname, "
4578  "nspacl, NULL as rnspacl, "
4579  "NULL AS initnspacl, NULL as initrnspacl "
4580  "FROM pg_namespace",
4582 
4583  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4584 
4585  ntups = PQntuples(res);
4586 
4587  nsinfo = (NamespaceInfo *) pg_malloc(ntups * sizeof(NamespaceInfo));
4588 
4589  i_tableoid = PQfnumber(res, "tableoid");
4590  i_oid = PQfnumber(res, "oid");
4591  i_nspname = PQfnumber(res, "nspname");
4592  i_rolname = PQfnumber(res, "rolname");
4593  i_nspacl = PQfnumber(res, "nspacl");
4594  i_rnspacl = PQfnumber(res, "rnspacl");
4595  i_initnspacl = PQfnumber(res, "initnspacl");
4596  i_initrnspacl = PQfnumber(res, "initrnspacl");
4597 
4598  for (i = 0; i < ntups; i++)
4599  {
4600  nsinfo[i].dobj.objType = DO_NAMESPACE;
4601  nsinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4602  nsinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4603  AssignDumpId(&nsinfo[i].dobj);
4604  nsinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_nspname));
4605  nsinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4606  nsinfo[i].nspacl = pg_strdup(PQgetvalue(res, i, i_nspacl));
4607  nsinfo[i].rnspacl = pg_strdup(PQgetvalue(res, i, i_rnspacl));
4608  nsinfo[i].initnspacl = pg_strdup(PQgetvalue(res, i, i_initnspacl));
4609  nsinfo[i].initrnspacl = pg_strdup(PQgetvalue(res, i, i_initrnspacl));
4610 
4611  /* Decide whether to dump this namespace */
4612  selectDumpableNamespace(&nsinfo[i], fout);
4613 
4614  /*
4615  * Do not try to dump ACL if the ACL is empty or the default.
4616  *
4617  * This is useful because, for some schemas/objects, the only
4618  * component we are going to try and dump is the ACL and if we can
4619  * remove that then 'dump' goes to zero/false and we don't consider
4620  * this object for dumping at all later on.
4621  */
4622  if (PQgetisnull(res, i, i_nspacl) && PQgetisnull(res, i, i_rnspacl) &&
4623  PQgetisnull(res, i, i_initnspacl) &&
4624  PQgetisnull(res, i, i_initrnspacl))
4625  nsinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4626 
4627  if (strlen(nsinfo[i].rolname) == 0)
4628  pg_log_warning("owner of schema \"%s\" appears to be invalid",
4629  nsinfo[i].dobj.name);
4630  }
4631 
4632  PQclear(res);
4633  destroyPQExpBuffer(query);
4634 
4635  *numNamespaces = ntups;
4636 
4637  return nsinfo;
4638 }
4639 
4640 /*
4641  * findNamespace:
4642  * given a namespace OID, look up the info read by getNamespaces
4643  */
4644 static NamespaceInfo *
4646 {
4647  NamespaceInfo *nsinfo;
4648 
4649  nsinfo = findNamespaceByOid(nsoid);
4650  if (nsinfo == NULL)
4651  fatal("schema with OID %u does not exist", nsoid);
4652  return nsinfo;
4653 }
4654 
4655 /*
4656  * getExtensions:
4657  * read all extensions in the system catalogs and return them in the
4658  * ExtensionInfo* structure
4659  *
4660  * numExtensions is set to the number of extensions read in
4661  */
4662 ExtensionInfo *
4664 {
4665  DumpOptions *dopt = fout->dopt;
4666  PGresult *res;
4667  int ntups;
4668  int i;
4669  PQExpBuffer query;
4670  ExtensionInfo *extinfo;
4671  int i_tableoid;
4672  int i_oid;
4673  int i_extname;
4674  int i_nspname;
4675  int i_extrelocatable;
4676  int i_extversion;
4677  int i_extconfig;
4678  int i_extcondition;
4679 
4680  /*
4681  * Before 9.1, there are no extensions.
4682  */
4683  if (fout->remoteVersion < 90100)
4684  {
4685  *numExtensions = 0;
4686  return NULL;
4687  }
4688 
4689  query = createPQExpBuffer();
4690 
4691  appendPQExpBufferStr(query, "SELECT x.tableoid, x.oid, "
4692  "x.extname, n.nspname, x.extrelocatable, x.extversion, x.extconfig, x.extcondition "
4693  "FROM pg_extension x "
4694  "JOIN pg_namespace n ON n.oid = x.extnamespace");
4695 
4696  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4697 
4698  ntups = PQntuples(res);
4699 
4700  extinfo = (ExtensionInfo *) pg_malloc(ntups * sizeof(ExtensionInfo));
4701 
4702  i_tableoid = PQfnumber(res, "tableoid");
4703  i_oid = PQfnumber(res, "oid");
4704  i_extname = PQfnumber(res, "extname");
4705  i_nspname = PQfnumber(res, "nspname");
4706  i_extrelocatable = PQfnumber(res, "extrelocatable");
4707  i_extversion = PQfnumber(res, "extversion");
4708  i_extconfig = PQfnumber(res, "extconfig");
4709  i_extcondition = PQfnumber(res, "extcondition");
4710 
4711  for (i = 0; i < ntups; i++)
4712  {
4713  extinfo[i].dobj.objType = DO_EXTENSION;
4714  extinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4715  extinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4716  AssignDumpId(&extinfo[i].dobj);
4717  extinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_extname));
4718  extinfo[i].namespace = pg_strdup(PQgetvalue(res, i, i_nspname));
4719  extinfo[i].relocatable = *(PQgetvalue(res, i, i_extrelocatable)) == 't';
4720  extinfo[i].extversion = pg_strdup(PQgetvalue(res, i, i_extversion));
4721  extinfo[i].extconfig = pg_strdup(PQgetvalue(res, i, i_extconfig));
4722  extinfo[i].extcondition = pg_strdup(PQgetvalue(res, i, i_extcondition));
4723 
4724  /* Decide whether we want to dump it */
4725  selectDumpableExtension(&(extinfo[i]), dopt);
4726  }
4727 
4728  PQclear(res);
4729  destroyPQExpBuffer(query);
4730 
4731  *numExtensions = ntups;
4732 
4733  return extinfo;
4734 }
4735 
4736 /*
4737  * getTypes:
4738  * read all types in the system catalogs and return them in the
4739  * TypeInfo* structure
4740  *
4741  * numTypes is set to the number of types read in
4742  *
4743  * NB: this must run after getFuncs() because we assume we can do
4744  * findFuncByOid().
4745  */
4746 TypeInfo *
4748 {
4749  DumpOptions *dopt = fout->dopt;
4750  PGresult *res;
4751  int ntups;
4752  int i;
4753  PQExpBuffer query = createPQExpBuffer();
4754  TypeInfo *tyinfo;
4755  ShellTypeInfo *stinfo;
4756  int i_tableoid;
4757  int i_oid;
4758  int i_typname;
4759  int i_typnamespace;
4760  int i_typacl;
4761  int i_rtypacl;
4762  int i_inittypacl;
4763  int i_initrtypacl;
4764  int i_rolname;
4765  int i_typelem;
4766  int i_typrelid;
4767  int i_typrelkind;
4768  int i_typtype;
4769  int i_typisdefined;
4770  int i_isarray;
4771 
4772  /*
4773  * we include even the built-in types because those may be used as array
4774  * elements by user-defined types
4775  *
4776  * we filter out the built-in types when we dump out the types
4777  *
4778  * same approach for undefined (shell) types and array types
4779  *
4780  * Note: as of 8.3 we can reliably detect whether a type is an
4781  * auto-generated array type by checking the element type's typarray.
4782  * (Before that the test is capable of generating false positives.) We
4783  * still check for name beginning with '_', though, so as to avoid the
4784  * cost of the subselect probe for all standard types. This would have to
4785  * be revisited if the backend ever allows renaming of array types.
4786  */
4787 
4788  if (fout->remoteVersion >= 90600)
4789  {
4790  PQExpBuffer acl_subquery = createPQExpBuffer();
4791  PQExpBuffer racl_subquery = createPQExpBuffer();
4792  PQExpBuffer initacl_subquery = createPQExpBuffer();
4793  PQExpBuffer initracl_subquery = createPQExpBuffer();
4794 
4795  buildACLQueries(acl_subquery, racl_subquery, initacl_subquery,
4796  initracl_subquery, "t.typacl", "t.typowner", "'T'",
4797  dopt->binary_upgrade);
4798 
4799  appendPQExpBuffer(query, "SELECT t.tableoid, t.oid, t.typname, "
4800  "t.typnamespace, "
4801  "%s AS typacl, "
4802  "%s AS rtypacl, "
4803  "%s AS inittypacl, "
4804  "%s AS initrtypacl, "
4805  "(%s t.typowner) AS rolname, "
4806  "t.typelem, t.typrelid, "
4807  "CASE WHEN t.typrelid = 0 THEN ' '::\"char\" "
4808  "ELSE (SELECT relkind FROM pg_class WHERE oid = t.typrelid) END AS typrelkind, "
4809  "t.typtype, t.typisdefined, "
4810  "t.typname[0] = '_' AND t.typelem != 0 AND "
4811  "(SELECT typarray FROM pg_type te WHERE oid = t.typelem) = t.oid AS isarray "
4812  "FROM pg_type t "
4813  "LEFT JOIN pg_init_privs pip ON "
4814  "(t.oid = pip.objoid "
4815  "AND pip.classoid = 'pg_type'::regclass "
4816  "AND pip.objsubid = 0) ",
4817  acl_subquery->data,
4818  racl_subquery->data,
4819  initacl_subquery->data,
4820  initracl_subquery->data,
4822 
4823  destroyPQExpBuffer(acl_subquery);
4824  destroyPQExpBuffer(racl_subquery);
4825  destroyPQExpBuffer(initacl_subquery);
4826  destroyPQExpBuffer(initracl_subquery);
4827  }
4828  else if (fout->remoteVersion >= 90200)
4829  {
4830  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4831  "typnamespace, typacl, NULL as rtypacl, "
4832  "NULL AS inittypacl, NULL AS initrtypacl, "
4833  "(%s typowner) AS rolname, "
4834  "typelem, typrelid, "
4835  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4836  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4837  "typtype, typisdefined, "
4838  "typname[0] = '_' AND typelem != 0 AND "
4839  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
4840  "FROM pg_type",
4842  }
4843  else if (fout->remoteVersion >= 80300)
4844  {
4845  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4846  "typnamespace, NULL AS typacl, NULL as rtypacl, "
4847  "NULL AS inittypacl, NULL AS initrtypacl, "
4848  "(%s typowner) AS rolname, "
4849  "typelem, typrelid, "
4850  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4851  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4852  "typtype, typisdefined, "
4853  "typname[0] = '_' AND typelem != 0 AND "
4854  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
4855  "FROM pg_type",
4857  }
4858  else
4859  {
4860  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4861  "typnamespace, NULL AS typacl, NULL as rtypacl, "
4862  "NULL AS inittypacl, NULL AS initrtypacl, "
4863  "(%s typowner) AS rolname, "
4864  "typelem, typrelid, "
4865  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4866  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4867  "typtype, typisdefined, "
4868  "typname[0] = '_' AND typelem != 0 AS isarray "
4869  "FROM pg_type",
4871  }
4872 
4873  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4874 
4875  ntups = PQntuples(res);
4876 
4877  tyinfo = (TypeInfo *) pg_malloc(ntups * sizeof(TypeInfo));
4878 
4879  i_tableoid = PQfnumber(res, "tableoid");
4880  i_oid = PQfnumber(res, "oid");
4881  i_typname = PQfnumber(res, "typname");
4882  i_typnamespace = PQfnumber(res, "typnamespace");
4883  i_typacl = PQfnumber(res, "typacl");
4884  i_rtypacl = PQfnumber(res, "rtypacl");
4885  i_inittypacl = PQfnumber(res, "inittypacl");
4886  i_initrtypacl = PQfnumber(res, "initrtypacl");
4887  i_rolname = PQfnumber(res, "rolname");
4888  i_typelem = PQfnumber(res, "typelem");
4889  i_typrelid = PQfnumber(res, "typrelid");
4890  i_typrelkind = PQfnumber(res, "typrelkind");
4891  i_typtype = PQfnumber(res, "typtype");
4892  i_typisdefined = PQfnumber(res, "typisdefined");
4893  i_isarray = PQfnumber(res, "isarray");
4894 
4895  for (i = 0; i < ntups; i++)
4896  {
4897  tyinfo[i].dobj.objType = DO_TYPE;
4898  tyinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4899  tyinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4900  AssignDumpId(&tyinfo[i].dobj);
4901  tyinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_typname));
4902  tyinfo[i].dobj.namespace =
4903  findNamespace(fout,
4904  atooid(PQgetvalue(res, i, i_typnamespace)));
4905  tyinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4906  tyinfo[i].typacl = pg_strdup(PQgetvalue(res, i, i_typacl));
4907  tyinfo[i].rtypacl = pg_strdup(PQgetvalue(res, i, i_rtypacl));
4908  tyinfo[i].inittypacl = pg_strdup(PQgetvalue(res, i, i_inittypacl));
4909  tyinfo[i].initrtypacl = pg_strdup(PQgetvalue(res, i, i_initrtypacl));
4910  tyinfo[i].typelem = atooid(PQgetvalue(res, i, i_typelem));
4911  tyinfo[i].typrelid = atooid(PQgetvalue(res, i, i_typrelid));
4912  tyinfo[i].typrelkind = *PQgetvalue(res, i, i_typrelkind);
4913  tyinfo[i].typtype = *PQgetvalue(res, i, i_typtype);
4914  tyinfo[i].shellType = NULL;
4915 
4916  if (strcmp(PQgetvalue(res, i, i_typisdefined), "t") == 0)
4917  tyinfo[i].isDefined = true;
4918  else
4919  tyinfo[i].isDefined = false;
4920 
4921  if (strcmp(PQgetvalue(res, i, i_isarray), "t") == 0)
4922  tyinfo[i].isArray = true;
4923  else
4924  tyinfo[i].isArray = false;
4925 
4926  /* Decide whether we want to dump it */
4927  selectDumpableType(&tyinfo[i], fout);
4928 
4929  /* Do not try to dump ACL if no ACL exists. */
4930  if (PQgetisnull(res, i, i_typacl) && PQgetisnull(res, i, i_rtypacl) &&
4931  PQgetisnull(res, i, i_inittypacl) &&
4932  PQgetisnull(res, i, i_initrtypacl))
4933  tyinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4934 
4935  /*
4936  * If it's a domain, fetch info about its constraints, if any
4937  */
4938  tyinfo[i].nDomChecks = 0;
4939  tyinfo[i].domChecks = NULL;
4940  if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
4941  tyinfo[i].typtype == TYPTYPE_DOMAIN)
4942  getDomainConstraints(fout, &(tyinfo[i]));
4943 
4944  /*
4945  * If it's a base type, make a DumpableObject representing a shell
4946  * definition of the type. We will need to dump that ahead of the I/O
4947  * functions for the type. Similarly, range types need a shell
4948  * definition in case they have a canonicalize function.
4949  *
4950  * Note: the shell type doesn't have a catId. You might think it
4951  * should copy the base type's catId, but then it might capture the
4952  * pg_depend entries for the type, which we don't want.
4953  */
4954  if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
4955  (tyinfo[i].typtype == TYPTYPE_BASE ||
4956  tyinfo[i].typtype == TYPTYPE_RANGE))
4957  {
4958  stinfo = (ShellTypeInfo *) pg_malloc(sizeof(ShellTypeInfo));
4959  stinfo->dobj.objType = DO_SHELL_TYPE;
4960  stinfo->dobj.catId = nilCatalogId;
4961  AssignDumpId(&stinfo->dobj);
4962  stinfo->dobj.name = pg_strdup(tyinfo[i].dobj.name);
4963  stinfo->dobj.namespace = tyinfo[i].dobj.namespace;
4964  stinfo->baseType = &(tyinfo[i]);
4965  tyinfo[i].shellType = stinfo;
4966 
4967  /*
4968  * Initially mark the shell type as not to be dumped. We'll only
4969  * dump it if the I/O or canonicalize functions need to be dumped;
4970  * this is taken care of while sorting dependencies.
4971  */
4972  stinfo->dobj.dump = DUMP_COMPONENT_NONE;
4973  }
4974 
4975  if (strlen(tyinfo[i].rolname) == 0)
4976  pg_log_warning("owner of data type \"%s\" appears to be invalid",
4977  tyinfo[i].dobj.name);
4978  }
4979 
4980  *numTypes = ntups;
4981 
4982  PQclear(res);
4983 
4984  destroyPQExpBuffer(query);
4985 
4986  return tyinfo;
4987 }
4988 
4989 /*
4990  * getOperators:
4991  * read all operators in the system catalogs and return them in the
4992  * OprInfo* structure
4993  *
4994  * numOprs is set to the number of operators read in
4995  */
4996 OprInfo *
4997 getOperators(Archive *fout, int *numOprs)
4998 {
4999  PGresult *res;
5000  int ntups;
5001  int i;
5002  PQExpBuffer query = createPQExpBuffer();
5003  OprInfo *oprinfo;
5004  int i_tableoid;
5005  int i_oid;
5006  int i_oprname;
5007  int i_oprnamespace;
5008  int i_rolname;
5009  int i_oprkind;
5010  int i_oprcode;
5011 
5012  /*
5013  * find all operators, including builtin operators; we filter out
5014  * system-defined operators at dump-out time.
5015  */
5016 
5017  appendPQExpBuffer(query, "SELECT tableoid, oid, oprname, "
5018  "oprnamespace, "
5019  "(%s oprowner) AS rolname, "
5020  "oprkind, "
5021  "oprcode::oid AS oprcode "
5022  "FROM pg_operator",
5024 
5025  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5026 
5027  ntups = PQntuples(res);
5028  *numOprs = ntups;
5029 
5030  oprinfo = (OprInfo *) pg_malloc(ntups * sizeof(OprInfo));
5031 
5032  i_tableoid = PQfnumber(res, "tableoid");
5033  i_oid = PQfnumber(res, "oid");
5034  i_oprname = PQfnumber(res, "oprname");
5035  i_oprnamespace = PQfnumber(res, "oprnamespace");
5036  i_rolname = PQfnumber(res, "rolname");
5037  i_oprkind = PQfnumber(res, "oprkind");
5038  i_oprcode = PQfnumber(res, "oprcode");
5039 
5040  for (i = 0; i < ntups; i++)
5041  {
5042  oprinfo[i].dobj.objType = DO_OPERATOR;
5043  oprinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5044  oprinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5045  AssignDumpId(&oprinfo[i].dobj);
5046  oprinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oprname));
5047  oprinfo[i].dobj.namespace =
5048  findNamespace(fout,
5049  atooid(PQgetvalue(res, i, i_oprnamespace)));
5050  oprinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5051  oprinfo[i].oprkind = (PQgetvalue(res, i, i_oprkind))[0];
5052  oprinfo[i].oprcode = atooid(PQgetvalue(res, i, i_oprcode));
5053 
5054  /* Decide whether we want to dump it */
5055  selectDumpableObject(&(oprinfo[i].dobj), fout);
5056 
5057  /* Operators do not currently have ACLs. */
5058  oprinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5059 
5060  if (strlen(oprinfo[i].rolname) == 0)
5061  pg_log_warning("owner of operator \"%s\" appears to be invalid",
5062  oprinfo[i].dobj.name);
5063  }
5064 
5065  PQclear(res);
5066 
5067  destroyPQExpBuffer(query);
5068 
5069  return oprinfo;
5070 }
5071 
5072 /*
5073  * getCollations:
5074  * read all collations in the system catalogs and return them in the
5075  * CollInfo* structure
5076  *
5077  * numCollations is set to the number of collations read in
5078  */
5079 CollInfo *
5081 {
5082  PGresult *res;
5083  int ntups;
5084  int i;
5085  PQExpBuffer query;
5086  CollInfo *collinfo;
5087  int i_tableoid;
5088  int i_oid;
5089  int i_collname;
5090  int i_collnamespace;
5091  int i_rolname;
5092 
5093  /* Collations didn't exist pre-9.1 */
5094  if (fout->remoteVersion < 90100)
5095  {
5096  *numCollations = 0;
5097  return NULL;
5098  }
5099 
5100  query = createPQExpBuffer();
5101 
5102  /*
5103  * find all collations, including builtin collations; we filter out
5104  * system-defined collations at dump-out time.
5105  */
5106 
5107  appendPQExpBuffer(query, "SELECT tableoid, oid, collname, "
5108  "collnamespace, "
5109  "(%s collowner) AS rolname "
5110  "FROM pg_collation",
5112 
5113  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5114 
5115  ntups = PQntuples(res);
5116  *numCollations = ntups;
5117 
5118  collinfo = (CollInfo *) pg_malloc(ntups * sizeof(CollInfo));
5119 
5120  i_tableoid = PQfnumber(res, "tableoid");
5121  i_oid = PQfnumber(res, "oid");
5122  i_collname = PQfnumber(res, "collname");
5123  i_collnamespace = PQfnumber(res, "collnamespace");
5124  i_rolname = PQfnumber(res, "rolname");
5125 
5126  for (i = 0; i < ntups; i++)
5127  {
5128  collinfo[i].dobj.objType = DO_COLLATION;
5129  collinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5130  collinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5131  AssignDumpId(&collinfo[i].dobj);
5132  collinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_collname));
5133  collinfo[i].dobj.namespace =
5134  findNamespace(fout,
5135  atooid(PQgetvalue(res, i, i_collnamespace)));
5136  collinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5137 
5138  /* Decide whether we want to dump it */
5139  selectDumpableObject(&(collinfo[i].dobj), fout);
5140 
5141  /* Collations do not currently have ACLs. */
5142  collinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5143  }
5144 
5145  PQclear(res);
5146 
5147  destroyPQExpBuffer(query);
5148 
5149  return collinfo;
5150 }
5151 
5152 /*
5153  * getConversions:
5154  * read all conversions in the system catalogs and return them in the
5155  * ConvInfo* structure
5156  *
5157  * numConversions is set to the number of conversions read in
5158  */
5159 ConvInfo *
5160 getConversions(Archive *fout, int *numConversions)
5161 {
5162  PGresult *res;
5163  int ntups;
5164  int i;
5165  PQExpBuffer query;
5166  ConvInfo *convinfo;
5167  int i_tableoid;
5168  int i_oid;
5169  int i_conname;
5170  int i_connamespace;
5171  int i_rolname;
5172 
5173  query = createPQExpBuffer();
5174 
5175  /*
5176  * find all conversions, including builtin conversions; we filter out
5177  * system-defined conversions at dump-out time.
5178  */
5179 
5180  appendPQExpBuffer(query, "SELECT tableoid, oid, conname, "
5181  "connamespace, "
5182  "(%s conowner) AS rolname "
5183  "FROM pg_conversion",
5185 
5186  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5187 
5188  ntups = PQntuples(res);
5189  *numConversions = ntups;
5190 
5191  convinfo = (ConvInfo *) pg_malloc(ntups * sizeof(ConvInfo));
5192 
5193  i_tableoid = PQfnumber(res, "tableoid");
5194  i_oid = PQfnumber(res, "oid");
5195  i_conname = PQfnumber(res, "conname");
5196  i_connamespace = PQfnumber(res, "connamespace");
5197  i_rolname = PQfnumber(res, "rolname");
5198 
5199  for (i = 0; i < ntups; i++)
5200  {
5201  convinfo[i].dobj.objType = DO_CONVERSION;
5202  convinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5203  convinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5204  AssignDumpId(&convinfo[i].dobj);
5205  convinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_conname));
5206  convinfo[i].dobj.namespace =
5207  findNamespace(fout,
5208  atooid(PQgetvalue(res, i, i_connamespace)));
5209  convinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5210 
5211  /* Decide whether we want to dump it */
5212  selectDumpableObject(&(convinfo[i].dobj), fout);
5213 
5214  /* Conversions do not currently have ACLs. */
5215  convinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5216  }
5217 
5218  PQclear(res);
5219 
5220  destroyPQExpBuffer(query);
5221 
5222  return convinfo;
5223 }
5224 
5225 /*
5226  * getAccessMethods:
5227  * read all user-defined access methods in the system catalogs and return
5228  * them in the AccessMethodInfo* structure
5229  *
5230  * numAccessMethods is set to the number of access methods read in
5231  */
5233 getAccessMethods(Archive *fout, int *numAccessMethods)
5234 {
5235  PGresult *res;
5236  int ntups;
5237  int i;
5238  PQExpBuffer query;
5239  AccessMethodInfo *aminfo;
5240  int i_tableoid;
5241  int i_oid;
5242  int i_amname;
5243  int i_amhandler;
5244  int i_amtype;
5245 
5246  /* Before 9.6, there are no user-defined access methods */
5247  if (fout->remoteVersion < 90600)
5248  {
5249  *numAccessMethods = 0;
5250  return NULL;
5251  }
5252 
5253  query = createPQExpBuffer();
5254 
5255  /* Select all access methods from pg_am table */
5256  appendPQExpBufferStr(query, "SELECT tableoid, oid, amname, amtype, "
5257  "amhandler::pg_catalog.regproc AS amhandler "
5258  "FROM pg_am");
5259 
5260  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5261 
5262  ntups = PQntuples(res);
5263  *numAccessMethods = ntups;
5264 
5265  aminfo = (AccessMethodInfo *) pg_malloc(ntups * sizeof(AccessMethodInfo));
5266 
5267  i_tableoid = PQfnumber(res, "tableoid");
5268  i_oid = PQfnumber(res, "oid");
5269  i_amname = PQfnumber(res, "amname");
5270  i_amhandler = PQfnumber(res, "amhandler");
5271  i_amtype = PQfnumber(res, "amtype");
5272 
5273  for (i = 0; i < ntups; i++)
5274  {
5275  aminfo[i].dobj.objType = DO_ACCESS_METHOD;
5276  aminfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5277  aminfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5278  AssignDumpId(&aminfo[i].dobj);
5279  aminfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_amname));
5280  aminfo[i].dobj.namespace = NULL;
5281  aminfo[i].amhandler = pg_strdup(PQgetvalue(res, i, i_amhandler));
5282  aminfo[i].amtype = *(PQgetvalue(res, i, i_amtype));
5283 
5284  /* Decide whether we want to dump it */
5285  selectDumpableAccessMethod(&(aminfo[i]), fout);
5286 
5287  /* Access methods do not currently have ACLs. */
5288  aminfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5289  }
5290 
5291  PQclear(res);
5292 
5293  destroyPQExpBuffer(query);
5294 
5295  return aminfo;
5296 }
5297 
5298 
5299 /*
5300  * getOpclasses:
5301  * read all opclasses in the system catalogs and return them in the
5302  * OpclassInfo* structure
5303  *
5304  * numOpclasses is set to the number of opclasses read in
5305  */
5306 OpclassInfo *
5307 getOpclasses(Archive *fout, int *numOpclasses)
5308 {
5309  PGresult *res;
5310  int ntups;
5311  int i;
5312