PostgreSQL Source Code  git master
pg_dump.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * pg_dump.c
4  * pg_dump is a utility for dumping out a postgres database
5  * into a script file.
6  *
7  * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994, Regents of the University of California
9  *
10  * pg_dump will read the system catalogs in a database and dump out a
11  * script that reproduces the schema in terms of SQL that is understood
12  * by PostgreSQL
13  *
14  * Note that pg_dump runs in a transaction-snapshot mode transaction,
15  * so it sees a consistent snapshot of the database including system
16  * catalogs. However, it relies in part on various specialized backend
17  * functions like pg_get_indexdef(), and those things tend to look at
18  * the currently committed state. So it is possible to get 'cache
19  * lookup failed' error if someone performs DDL changes while a dump is
20  * happening. The window for this sort of thing is from the acquisition
21  * of the transaction snapshot to getSchemaData() (when pg_dump acquires
22  * AccessShareLock on every table it intends to dump). It isn't very large,
23  * but it can happen.
24  *
25  * http://archives.postgresql.org/pgsql-bugs/2010-02/msg00187.php
26  *
27  * IDENTIFICATION
28  * src/bin/pg_dump/pg_dump.c
29  *
30  *-------------------------------------------------------------------------
31  */
32 #include "postgres_fe.h"
33 
34 #include <unistd.h>
35 #include <ctype.h>
36 #ifdef HAVE_TERMIOS_H
37 #include <termios.h>
38 #endif
39 
40 #include "getopt_long.h"
41 
42 #include "access/attnum.h"
43 #include "access/sysattr.h"
44 #include "access/transam.h"
45 #include "catalog/pg_aggregate_d.h"
46 #include "catalog/pg_am_d.h"
47 #include "catalog/pg_attribute_d.h"
48 #include "catalog/pg_cast_d.h"
49 #include "catalog/pg_class_d.h"
50 #include "catalog/pg_default_acl_d.h"
51 #include "catalog/pg_largeobject_d.h"
52 #include "catalog/pg_largeobject_metadata_d.h"
53 #include "catalog/pg_proc_d.h"
54 #include "catalog/pg_trigger_d.h"
55 #include "catalog/pg_type_d.h"
56 #include "libpq/libpq-fs.h"
57 
58 #include "dumputils.h"
59 #include "parallel.h"
60 #include "pg_backup_db.h"
61 #include "pg_backup_utils.h"
62 #include "pg_dump.h"
63 #include "fe_utils/connect.h"
64 #include "fe_utils/string_utils.h"
65 
66 
/*
 * One row fetched from pg_description: the comment text plus the catalog
 * identity (class OID, object OID, column number) of the object it
 * describes.  Collected in bulk by collectComments() and searched by
 * findComments().
 */
typedef struct
{
	const char *descr;			/* comment for an object */
	Oid			classoid;		/* object class (catalog OID) */
	Oid			objoid;			/* object OID */
	int			objsubid;		/* subobject (table column #); 0 = whole object */
} CommentItem;
74 
/*
 * One row fetched from pg_seclabel: a (provider, label) pair plus the
 * catalog identity of the labeled object.  Collected in bulk by
 * collectSecLabels() and searched by findSecLabels().
 */
typedef struct
{
	const char *provider;		/* label provider of this security label */
	const char *label;			/* security label for an object */
	Oid			classoid;		/* object class (catalog OID) */
	Oid			objoid;			/* object OID */
	int			objsubid;		/* subobject (table column #); 0 = whole object */
} SecLabelItem;
83 
/*
 * Bit flags telling getFormattedTypeName() how to render a zero (invalid)
 * type OID.
 *
 * NOTE(review): the extraction dropped three enumerators (only zeroAsAny
 * was visible); the missing members are restored from upstream pg_dump.c
 * -- verify against the repository.
 */
typedef enum OidOptions
{
	zeroAsOpaque = 1,			/* render InvalidOid as "opaque" */
	zeroAsAny = 2,				/* render InvalidOid as "'any'" */
	zeroAsStar = 4,				/* render InvalidOid as "*" */
	zeroAsNone = 8				/* render InvalidOid as "NONE" */
} OidOptions;
91 
/* global decls */
bool		g_verbose;			/* User wants verbose narration of our
								 * activities. */
static bool dosync = true;		/* Issue fsync() to make dump durable on disk. */

/* subquery used to convert user ID (eg, datdba) to user name */
static const char *username_subquery;

/*
 * For 8.0 and earlier servers, pulled from pg_database, for 8.1+ we use
 * FirstNormalObjectId - 1.
 */
static Oid	g_last_builtin_oid; /* value of the last builtin oid */

/* With --strict-names, the specified names/patterns must match at least
 * one entity or we abort the dump; set by the long option. */
static int	strict_names = 0;
108 
109 /*
110  * Object inclusion/exclusion lists
111  *
112  * The string lists record the patterns given by command-line switches,
113  * which we then convert to lists of OIDs of matching objects.
114  */
116 static SimpleOidList schema_include_oids = {NULL, NULL};
118 static SimpleOidList schema_exclude_oids = {NULL, NULL};
119 
121 static SimpleOidList table_include_oids = {NULL, NULL};
123 static SimpleOidList table_exclude_oids = {NULL, NULL};
125 static SimpleOidList tabledata_exclude_oids = {NULL, NULL};
126 
127 
128 char g_opaque_type[10]; /* name for the opaque type */
129 
130 /* placeholders for the delimiters for comments */
132 char g_comment_end[10];
133 
134 static const CatalogId nilCatalogId = {0, 0};
135 
136 /*
137  * Macro for producing quoted, schema-qualified name of a dumpable object.
138  * Note implicit dependence on "fout"; we should get rid of that argument.
139  */
/*
 * Macro for producing quoted, schema-qualified name of a dumpable object.
 * "obj" is any *Info struct whose embedded DumpableObject has a namespace
 * link (tables, types, functions, etc.).
 * Note implicit dependence on "fout"; we should get rid of that argument.
 */
#define fmtQualifiedDumpable(obj) \
	fmtQualifiedId(fout->remoteVersion, \
				   (obj)->dobj.namespace->dobj.name, \
				   (obj)->dobj.name)
144 
145 static void help(const char *progname);
146 static void setup_connection(Archive *AH,
147  const char *dumpencoding, const char *dumpsnapshot,
148  char *use_role);
149 static ArchiveFormat parseArchiveFormat(const char *format, ArchiveMode *mode);
150 static void expand_schema_name_patterns(Archive *fout,
151  SimpleStringList *patterns,
152  SimpleOidList *oids,
153  bool strict_names);
154 static void expand_table_name_patterns(Archive *fout,
155  SimpleStringList *patterns,
156  SimpleOidList *oids,
157  bool strict_names);
158 static NamespaceInfo *findNamespace(Archive *fout, Oid nsoid);
159 static void dumpTableData(Archive *fout, TableDataInfo *tdinfo);
160 static void refreshMatViewData(Archive *fout, TableDataInfo *tdinfo);
161 static void guessConstraintInheritance(TableInfo *tblinfo, int numTables);
162 static void dumpComment(Archive *fout, const char *type, const char *name,
163  const char *namespace, const char *owner,
164  CatalogId catalogId, int subid, DumpId dumpId);
165 static int findComments(Archive *fout, Oid classoid, Oid objoid,
166  CommentItem **items);
167 static int collectComments(Archive *fout, CommentItem **items);
168 static void dumpSecLabel(Archive *fout, const char *type, const char *name,
169  const char *namespace, const char *owner,
170  CatalogId catalogId, int subid, DumpId dumpId);
171 static int findSecLabels(Archive *fout, Oid classoid, Oid objoid,
172  SecLabelItem **items);
173 static int collectSecLabels(Archive *fout, SecLabelItem **items);
174 static void dumpDumpableObject(Archive *fout, DumpableObject *dobj);
175 static void dumpNamespace(Archive *fout, NamespaceInfo *nspinfo);
176 static void dumpExtension(Archive *fout, ExtensionInfo *extinfo);
177 static void dumpType(Archive *fout, TypeInfo *tyinfo);
178 static void dumpBaseType(Archive *fout, TypeInfo *tyinfo);
179 static void dumpEnumType(Archive *fout, TypeInfo *tyinfo);
180 static void dumpRangeType(Archive *fout, TypeInfo *tyinfo);
181 static void dumpUndefinedType(Archive *fout, TypeInfo *tyinfo);
182 static void dumpDomain(Archive *fout, TypeInfo *tyinfo);
183 static void dumpCompositeType(Archive *fout, TypeInfo *tyinfo);
184 static void dumpCompositeTypeColComments(Archive *fout, TypeInfo *tyinfo);
185 static void dumpShellType(Archive *fout, ShellTypeInfo *stinfo);
186 static void dumpProcLang(Archive *fout, ProcLangInfo *plang);
187 static void dumpFunc(Archive *fout, FuncInfo *finfo);
188 static void dumpCast(Archive *fout, CastInfo *cast);
189 static void dumpTransform(Archive *fout, TransformInfo *transform);
190 static void dumpOpr(Archive *fout, OprInfo *oprinfo);
191 static void dumpAccessMethod(Archive *fout, AccessMethodInfo *oprinfo);
192 static void dumpOpclass(Archive *fout, OpclassInfo *opcinfo);
193 static void dumpOpfamily(Archive *fout, OpfamilyInfo *opfinfo);
194 static void dumpCollation(Archive *fout, CollInfo *collinfo);
195 static void dumpConversion(Archive *fout, ConvInfo *convinfo);
196 static void dumpRule(Archive *fout, RuleInfo *rinfo);
197 static void dumpAgg(Archive *fout, AggInfo *agginfo);
198 static void dumpTrigger(Archive *fout, TriggerInfo *tginfo);
199 static void dumpEventTrigger(Archive *fout, EventTriggerInfo *evtinfo);
200 static void dumpTable(Archive *fout, TableInfo *tbinfo);
201 static void dumpTableSchema(Archive *fout, TableInfo *tbinfo);
202 static void dumpAttrDef(Archive *fout, AttrDefInfo *adinfo);
203 static void dumpSequence(Archive *fout, TableInfo *tbinfo);
204 static void dumpSequenceData(Archive *fout, TableDataInfo *tdinfo);
205 static void dumpIndex(Archive *fout, IndxInfo *indxinfo);
206 static void dumpIndexAttach(Archive *fout, IndexAttachInfo *attachinfo);
207 static void dumpStatisticsExt(Archive *fout, StatsExtInfo *statsextinfo);
208 static void dumpConstraint(Archive *fout, ConstraintInfo *coninfo);
209 static void dumpTableConstraintComment(Archive *fout, ConstraintInfo *coninfo);
210 static void dumpTSParser(Archive *fout, TSParserInfo *prsinfo);
211 static void dumpTSDictionary(Archive *fout, TSDictInfo *dictinfo);
212 static void dumpTSTemplate(Archive *fout, TSTemplateInfo *tmplinfo);
213 static void dumpTSConfig(Archive *fout, TSConfigInfo *cfginfo);
214 static void dumpForeignDataWrapper(Archive *fout, FdwInfo *fdwinfo);
215 static void dumpForeignServer(Archive *fout, ForeignServerInfo *srvinfo);
216 static void dumpUserMappings(Archive *fout,
217  const char *servername, const char *namespace,
218  const char *owner, CatalogId catalogId, DumpId dumpId);
219 static void dumpDefaultACL(Archive *fout, DefaultACLInfo *daclinfo);
220 
221 static void dumpACL(Archive *fout, CatalogId objCatId, DumpId objDumpId,
222  const char *type, const char *name, const char *subname,
223  const char *nspname, const char *owner,
224  const char *acls, const char *racls,
225  const char *initacls, const char *initracls);
226 
227 static void getDependencies(Archive *fout);
228 static void BuildArchiveDependencies(Archive *fout);
230  DumpId **dependencies, int *nDeps, int *allocDeps);
231 
233 static void addBoundaryDependencies(DumpableObject **dobjs, int numObjs,
234  DumpableObject *boundaryObjs);
235 
236 static void getDomainConstraints(Archive *fout, TypeInfo *tyinfo);
237 static void getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, bool oids, char relkind);
238 static void makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo, bool oids);
239 static void buildMatViewRefreshDependencies(Archive *fout);
240 static void getTableDataFKConstraints(void);
241 static char *format_function_arguments(FuncInfo *finfo, char *funcargs,
242  bool is_agg);
243 static char *format_function_arguments_old(Archive *fout,
244  FuncInfo *finfo, int nallargs,
245  char **allargtypes,
246  char **argmodes,
247  char **argnames);
248 static char *format_function_signature(Archive *fout,
249  FuncInfo *finfo, bool honor_quotes);
250 static char *convertRegProcReference(Archive *fout,
251  const char *proc);
252 static char *getFormattedOperatorName(Archive *fout, const char *oproid);
253 static char *convertTSFunction(Archive *fout, Oid funcOid);
254 static Oid findLastBuiltinOid_V71(Archive *fout);
255 static char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts);
256 static void getBlobs(Archive *fout);
257 static void dumpBlob(Archive *fout, BlobInfo *binfo);
258 static int dumpBlobs(Archive *fout, void *arg);
259 static void dumpPolicy(Archive *fout, PolicyInfo *polinfo);
260 static void dumpPublication(Archive *fout, PublicationInfo *pubinfo);
261 static void dumpPublicationTable(Archive *fout, PublicationRelInfo *pubrinfo);
262 static void dumpSubscription(Archive *fout, SubscriptionInfo *subinfo);
263 static void dumpDatabase(Archive *AH);
264 static void dumpDatabaseConfig(Archive *AH, PQExpBuffer outbuf,
265  const char *dbname, Oid dboid);
266 static void dumpEncoding(Archive *AH);
267 static void dumpStdStrings(Archive *AH);
268 static void dumpSearchPath(Archive *AH);
270  PQExpBuffer upgrade_buffer,
271  Oid pg_type_oid,
272  bool force_array_type);
274  PQExpBuffer upgrade_buffer, Oid pg_rel_oid);
275 static void binary_upgrade_set_pg_class_oids(Archive *fout,
276  PQExpBuffer upgrade_buffer,
277  Oid pg_class_oid, bool is_index);
278 static void binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
279  DumpableObject *dobj,
280  const char *objtype,
281  const char *objname,
282  const char *objnamespace);
283 static const char *getAttrName(int attrnum, TableInfo *tblInfo);
284 static const char *fmtCopyColumnList(const TableInfo *ti, PQExpBuffer buffer);
285 static bool nonemptyReloptions(const char *reloptions);
286 static void appendReloptionsArrayAH(PQExpBuffer buffer, const char *reloptions,
287  const char *prefix, Archive *fout);
288 static char *get_synchronized_snapshot(Archive *fout);
289 static void setupDumpWorker(Archive *AHX);
290 static TableInfo *getRootTableInfo(TableInfo *tbinfo);
291 
292 
293 int
294 main(int argc, char **argv)
295 {
296  int c;
297  const char *filename = NULL;
298  const char *format = "p";
299  TableInfo *tblinfo;
300  int numTables;
301  DumpableObject **dobjs;
302  int numObjs;
303  DumpableObject *boundaryObjs;
304  int i;
305  int optindex;
306  RestoreOptions *ropt;
307  Archive *fout; /* the script file */
308  const char *dumpencoding = NULL;
309  const char *dumpsnapshot = NULL;
310  char *use_role = NULL;
311  int numWorkers = 1;
312  trivalue prompt_password = TRI_DEFAULT;
313  int compressLevel = -1;
314  int plainText = 0;
315  ArchiveFormat archiveFormat = archUnknown;
316  ArchiveMode archiveMode;
317 
318  static DumpOptions dopt;
319 
320  static struct option long_options[] = {
321  {"data-only", no_argument, NULL, 'a'},
322  {"blobs", no_argument, NULL, 'b'},
323  {"no-blobs", no_argument, NULL, 'B'},
324  {"clean", no_argument, NULL, 'c'},
325  {"create", no_argument, NULL, 'C'},
326  {"dbname", required_argument, NULL, 'd'},
327  {"file", required_argument, NULL, 'f'},
328  {"format", required_argument, NULL, 'F'},
329  {"host", required_argument, NULL, 'h'},
330  {"jobs", 1, NULL, 'j'},
331  {"no-reconnect", no_argument, NULL, 'R'},
332  {"oids", no_argument, NULL, 'o'},
333  {"no-owner", no_argument, NULL, 'O'},
334  {"port", required_argument, NULL, 'p'},
335  {"schema", required_argument, NULL, 'n'},
336  {"exclude-schema", required_argument, NULL, 'N'},
337  {"schema-only", no_argument, NULL, 's'},
338  {"superuser", required_argument, NULL, 'S'},
339  {"table", required_argument, NULL, 't'},
340  {"exclude-table", required_argument, NULL, 'T'},
341  {"no-password", no_argument, NULL, 'w'},
342  {"password", no_argument, NULL, 'W'},
343  {"username", required_argument, NULL, 'U'},
344  {"verbose", no_argument, NULL, 'v'},
345  {"no-privileges", no_argument, NULL, 'x'},
346  {"no-acl", no_argument, NULL, 'x'},
347  {"compress", required_argument, NULL, 'Z'},
348  {"encoding", required_argument, NULL, 'E'},
349  {"help", no_argument, NULL, '?'},
350  {"version", no_argument, NULL, 'V'},
351 
352  /*
353  * the following options don't have an equivalent short option letter
354  */
355  {"attribute-inserts", no_argument, &dopt.column_inserts, 1},
356  {"binary-upgrade", no_argument, &dopt.binary_upgrade, 1},
357  {"column-inserts", no_argument, &dopt.column_inserts, 1},
358  {"disable-dollar-quoting", no_argument, &dopt.disable_dollar_quoting, 1},
359  {"disable-triggers", no_argument, &dopt.disable_triggers, 1},
360  {"enable-row-security", no_argument, &dopt.enable_row_security, 1},
361  {"exclude-table-data", required_argument, NULL, 4},
362  {"if-exists", no_argument, &dopt.if_exists, 1},
363  {"inserts", no_argument, &dopt.dump_inserts, 1},
364  {"lock-wait-timeout", required_argument, NULL, 2},
365  {"no-tablespaces", no_argument, &dopt.outputNoTablespaces, 1},
366  {"quote-all-identifiers", no_argument, &quote_all_identifiers, 1},
367  {"load-via-partition-root", no_argument, &dopt.load_via_partition_root, 1},
368  {"role", required_argument, NULL, 3},
369  {"section", required_argument, NULL, 5},
370  {"serializable-deferrable", no_argument, &dopt.serializable_deferrable, 1},
371  {"snapshot", required_argument, NULL, 6},
372  {"strict-names", no_argument, &strict_names, 1},
373  {"use-set-session-authorization", no_argument, &dopt.use_setsessauth, 1},
374  {"no-comments", no_argument, &dopt.no_comments, 1},
375  {"no-publications", no_argument, &dopt.no_publications, 1},
376  {"no-security-labels", no_argument, &dopt.no_security_labels, 1},
377  {"no-synchronized-snapshots", no_argument, &dopt.no_synchronized_snapshots, 1},
378  {"no-unlogged-table-data", no_argument, &dopt.no_unlogged_table_data, 1},
379  {"no-subscriptions", no_argument, &dopt.no_subscriptions, 1},
380  {"no-sync", no_argument, NULL, 7},
381  {"on-conflict-do-nothing", no_argument, &dopt.do_nothing, 1},
382 
383  {NULL, 0, NULL, 0}
384  };
385 
386  set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_dump"));
387 
388  /*
389  * Initialize what we need for parallel execution, especially for thread
390  * support on Windows.
391  */
393 
394  g_verbose = false;
395 
396  strcpy(g_comment_start, "-- ");
397  g_comment_end[0] = '\0';
398  strcpy(g_opaque_type, "opaque");
399 
400  progname = get_progname(argv[0]);
401 
402  if (argc > 1)
403  {
404  if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
405  {
406  help(progname);
407  exit_nicely(0);
408  }
409  if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
410  {
411  puts("pg_dump (PostgreSQL) " PG_VERSION);
412  exit_nicely(0);
413  }
414  }
415 
416  InitDumpOptions(&dopt);
417 
418  while ((c = getopt_long(argc, argv, "abBcCd:E:f:F:h:j:n:N:oOp:RsS:t:T:U:vwWxZ:",
419  long_options, &optindex)) != -1)
420  {
421  switch (c)
422  {
423  case 'a': /* Dump data only */
424  dopt.dataOnly = true;
425  break;
426 
427  case 'b': /* Dump blobs */
428  dopt.outputBlobs = true;
429  break;
430 
431  case 'B': /* Don't dump blobs */
432  dopt.dontOutputBlobs = true;
433  break;
434 
435  case 'c': /* clean (i.e., drop) schema prior to create */
436  dopt.outputClean = 1;
437  break;
438 
439  case 'C': /* Create DB */
440  dopt.outputCreateDB = 1;
441  break;
442 
443  case 'd': /* database name */
444  dopt.dbname = pg_strdup(optarg);
445  break;
446 
447  case 'E': /* Dump encoding */
448  dumpencoding = pg_strdup(optarg);
449  break;
450 
451  case 'f':
452  filename = pg_strdup(optarg);
453  break;
454 
455  case 'F':
456  format = pg_strdup(optarg);
457  break;
458 
459  case 'h': /* server host */
460  dopt.pghost = pg_strdup(optarg);
461  break;
462 
463  case 'j': /* number of dump jobs */
464  numWorkers = atoi(optarg);
465  break;
466 
467  case 'n': /* include schema(s) */
468  simple_string_list_append(&schema_include_patterns, optarg);
469  dopt.include_everything = false;
470  break;
471 
472  case 'N': /* exclude schema(s) */
473  simple_string_list_append(&schema_exclude_patterns, optarg);
474  break;
475 
476  case 'o': /* Dump oids */
477  dopt.oids = true;
478  break;
479 
480  case 'O': /* Don't reconnect to match owner */
481  dopt.outputNoOwner = 1;
482  break;
483 
484  case 'p': /* server port */
485  dopt.pgport = pg_strdup(optarg);
486  break;
487 
488  case 'R':
489  /* no-op, still accepted for backwards compatibility */
490  break;
491 
492  case 's': /* dump schema only */
493  dopt.schemaOnly = true;
494  break;
495 
496  case 'S': /* Username for superuser in plain text output */
498  break;
499 
500  case 't': /* include table(s) */
501  simple_string_list_append(&table_include_patterns, optarg);
502  dopt.include_everything = false;
503  break;
504 
505  case 'T': /* exclude table(s) */
506  simple_string_list_append(&table_exclude_patterns, optarg);
507  break;
508 
509  case 'U':
510  dopt.username = pg_strdup(optarg);
511  break;
512 
513  case 'v': /* verbose */
514  g_verbose = true;
515  break;
516 
517  case 'w':
518  prompt_password = TRI_NO;
519  break;
520 
521  case 'W':
522  prompt_password = TRI_YES;
523  break;
524 
525  case 'x': /* skip ACL dump */
526  dopt.aclsSkip = true;
527  break;
528 
529  case 'Z': /* Compression Level */
530  compressLevel = atoi(optarg);
531  if (compressLevel < 0 || compressLevel > 9)
532  {
533  write_msg(NULL, "compression level must be in range 0..9\n");
534  exit_nicely(1);
535  }
536  break;
537 
538  case 0:
539  /* This covers the long options. */
540  break;
541 
542  case 2: /* lock-wait-timeout */
544  break;
545 
546  case 3: /* SET ROLE */
547  use_role = pg_strdup(optarg);
548  break;
549 
550  case 4: /* exclude table(s) data */
551  simple_string_list_append(&tabledata_exclude_patterns, optarg);
552  break;
553 
554  case 5: /* section */
556  break;
557 
558  case 6: /* snapshot */
559  dumpsnapshot = pg_strdup(optarg);
560  break;
561 
562  case 7: /* no-sync */
563  dosync = false;
564  break;
565 
566  default:
567  fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
568  exit_nicely(1);
569  }
570  }
571 
572  /*
573  * Non-option argument specifies database name as long as it wasn't
574  * already specified with -d / --dbname
575  */
576  if (optind < argc && dopt.dbname == NULL)
577  dopt.dbname = argv[optind++];
578 
579  /* Complain if any arguments remain */
580  if (optind < argc)
581  {
582  fprintf(stderr, _("%s: too many command-line arguments (first is \"%s\")\n"),
583  progname, argv[optind]);
584  fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
585  progname);
586  exit_nicely(1);
587  }
588 
589  /* --column-inserts implies --inserts */
590  if (dopt.column_inserts)
591  dopt.dump_inserts = 1;
592 
593  /*
594  * Binary upgrade mode implies dumping sequence data even in schema-only
595  * mode. This is not exposed as a separate option, but kept separate
596  * internally for clarity.
597  */
598  if (dopt.binary_upgrade)
599  dopt.sequence_data = 1;
600 
601  if (dopt.dataOnly && dopt.schemaOnly)
602  {
603  write_msg(NULL, "options -s/--schema-only and -a/--data-only cannot be used together\n");
604  exit_nicely(1);
605  }
606 
607  if (dopt.dataOnly && dopt.outputClean)
608  {
609  write_msg(NULL, "options -c/--clean and -a/--data-only cannot be used together\n");
610  exit_nicely(1);
611  }
612 
613  if (dopt.dump_inserts && dopt.oids)
614  {
615  write_msg(NULL, "options --inserts/--column-inserts and -o/--oids cannot be used together\n");
616  write_msg(NULL, "(The INSERT command cannot set OIDs.)\n");
617  exit_nicely(1);
618  }
619 
620  if (dopt.if_exists && !dopt.outputClean)
621  exit_horribly(NULL, "option --if-exists requires option -c/--clean\n");
622 
623  if (dopt.do_nothing && !(dopt.dump_inserts || dopt.column_inserts))
624  exit_horribly(NULL, "option --on-conflict-do-nothing requires option --inserts or --column-inserts\n");
625 
626  /* Identify archive format to emit */
627  archiveFormat = parseArchiveFormat(format, &archiveMode);
628 
629  /* archiveFormat specific setup */
630  if (archiveFormat == archNull)
631  plainText = 1;
632 
633  /* Custom and directory formats are compressed by default, others not */
634  if (compressLevel == -1)
635  {
636 #ifdef HAVE_LIBZ
637  if (archiveFormat == archCustom || archiveFormat == archDirectory)
638  compressLevel = Z_DEFAULT_COMPRESSION;
639  else
640 #endif
641  compressLevel = 0;
642  }
643 
644 #ifndef HAVE_LIBZ
645  if (compressLevel != 0)
646  write_msg(NULL, "WARNING: requested compression not available in this "
647  "installation -- archive will be uncompressed\n");
648  compressLevel = 0;
649 #endif
650 
651  /*
652  * If emitting an archive format, we always want to emit a DATABASE item,
653  * in case --create is specified at pg_restore time.
654  */
655  if (!plainText)
656  dopt.outputCreateDB = 1;
657 
658  /*
659  * On Windows we can only have at most MAXIMUM_WAIT_OBJECTS (= 64 usually)
660  * parallel jobs because that's the maximum limit for the
661  * WaitForMultipleObjects() call.
662  */
663  if (numWorkers <= 0
664 #ifdef WIN32
665  || numWorkers > MAXIMUM_WAIT_OBJECTS
666 #endif
667  )
668  exit_horribly(NULL, "invalid number of parallel jobs\n");
669 
670  /* Parallel backup only in the directory archive format so far */
671  if (archiveFormat != archDirectory && numWorkers > 1)
672  exit_horribly(NULL, "parallel backup only supported by the directory format\n");
673 
674  /* Open the output file */
675  fout = CreateArchive(filename, archiveFormat, compressLevel, dosync,
676  archiveMode, setupDumpWorker);
677 
678  /* Make dump options accessible right away */
679  SetArchiveOptions(fout, &dopt, NULL);
680 
681  /* Register the cleanup hook */
682  on_exit_close_archive(fout);
683 
684  /* Let the archiver know how noisy to be */
685  fout->verbose = g_verbose;
686 
687  /*
688  * We allow the server to be back to 8.0, and up to any minor release of
689  * our own major version. (See also version check in pg_dumpall.c.)
690  */
691  fout->minRemoteVersion = 80000;
692  fout->maxRemoteVersion = (PG_VERSION_NUM / 100) * 100 + 99;
693 
694  fout->numWorkers = numWorkers;
695 
696  /*
697  * Open the database using the Archiver, so it knows about it. Errors mean
698  * death.
699  */
700  ConnectDatabase(fout, dopt.dbname, dopt.pghost, dopt.pgport, dopt.username, prompt_password);
701  setup_connection(fout, dumpencoding, dumpsnapshot, use_role);
702 
703  /*
704  * Disable security label support if server version < v9.1.x (prevents
705  * access to nonexistent pg_seclabel catalog)
706  */
707  if (fout->remoteVersion < 90100)
708  dopt.no_security_labels = 1;
709 
710  /*
711  * On hot standbys, never try to dump unlogged table data, since it will
712  * just throw an error.
713  */
714  if (fout->isStandby)
715  dopt.no_unlogged_table_data = true;
716 
717  /* Select the appropriate subquery to convert user IDs to names */
718  if (fout->remoteVersion >= 80100)
719  username_subquery = "SELECT rolname FROM pg_catalog.pg_roles WHERE oid =";
720  else
721  username_subquery = "SELECT usename FROM pg_catalog.pg_user WHERE usesysid =";
722 
723  /* check the version for the synchronized snapshots feature */
724  if (numWorkers > 1 && fout->remoteVersion < 90200
725  && !dopt.no_synchronized_snapshots)
726  exit_horribly(NULL,
727  "Synchronized snapshots are not supported by this server version.\n"
728  "Run with --no-synchronized-snapshots instead if you do not need\n"
729  "synchronized snapshots.\n");
730 
731  /* check the version when a snapshot is explicitly specified by user */
732  if (dumpsnapshot && fout->remoteVersion < 90200)
733  exit_horribly(NULL,
734  "Exported snapshots are not supported by this server version.\n");
735 
736  /*
737  * Find the last built-in OID, if needed (prior to 8.1)
738  *
739  * With 8.1 and above, we can just use FirstNormalObjectId - 1.
740  */
741  if (fout->remoteVersion < 80100)
743  else
745 
746  if (g_verbose)
747  write_msg(NULL, "last built-in OID is %u\n", g_last_builtin_oid);
748 
749  /* Expand schema selection patterns into OID lists */
750  if (schema_include_patterns.head != NULL)
751  {
752  expand_schema_name_patterns(fout, &schema_include_patterns,
753  &schema_include_oids,
754  strict_names);
755  if (schema_include_oids.head == NULL)
756  exit_horribly(NULL, "no matching schemas were found\n");
757  }
758  expand_schema_name_patterns(fout, &schema_exclude_patterns,
759  &schema_exclude_oids,
760  false);
761  /* non-matching exclusion patterns aren't an error */
762 
763  /* Expand table selection patterns into OID lists */
764  if (table_include_patterns.head != NULL)
765  {
766  expand_table_name_patterns(fout, &table_include_patterns,
767  &table_include_oids,
768  strict_names);
769  if (table_include_oids.head == NULL)
770  exit_horribly(NULL, "no matching tables were found\n");
771  }
772  expand_table_name_patterns(fout, &table_exclude_patterns,
773  &table_exclude_oids,
774  false);
775 
776  expand_table_name_patterns(fout, &tabledata_exclude_patterns,
777  &tabledata_exclude_oids,
778  false);
779 
780  /* non-matching exclusion patterns aren't an error */
781 
782  /*
783  * Dumping blobs is the default for dumps where an inclusion switch is not
784  * used (an "include everything" dump). -B can be used to exclude blobs
785  * from those dumps. -b can be used to include blobs even when an
786  * inclusion switch is used.
787  *
788  * -s means "schema only" and blobs are data, not schema, so we never
789  * include blobs when -s is used.
790  */
791  if (dopt.include_everything && !dopt.schemaOnly && !dopt.dontOutputBlobs)
792  dopt.outputBlobs = true;
793 
794  /*
795  * Now scan the database and create DumpableObject structs for all the
796  * objects we intend to dump.
797  */
798  tblinfo = getSchemaData(fout, &numTables);
799 
800  if (fout->remoteVersion < 80400)
801  guessConstraintInheritance(tblinfo, numTables);
802 
803  if (!dopt.schemaOnly)
804  {
805  getTableData(&dopt, tblinfo, numTables, dopt.oids, 0);
807  if (dopt.dataOnly)
809  }
810 
811  if (dopt.schemaOnly && dopt.sequence_data)
812  getTableData(&dopt, tblinfo, numTables, dopt.oids, RELKIND_SEQUENCE);
813 
814  /*
815  * In binary-upgrade mode, we do not have to worry about the actual blob
816  * data or the associated metadata that resides in the pg_largeobject and
817  * pg_largeobject_metadata tables, respectively.
818  *
819  * However, we do need to collect blob information as there may be
820  * comments or other information on blobs that we do need to dump out.
821  */
822  if (dopt.outputBlobs || dopt.binary_upgrade)
823  getBlobs(fout);
824 
825  /*
826  * Collect dependency data to assist in ordering the objects.
827  */
828  getDependencies(fout);
829 
830  /* Lastly, create dummy objects to represent the section boundaries */
831  boundaryObjs = createBoundaryObjects();
832 
833  /* Get pointers to all the known DumpableObjects */
834  getDumpableObjects(&dobjs, &numObjs);
835 
836  /*
837  * Add dummy dependencies to enforce the dump section ordering.
838  */
839  addBoundaryDependencies(dobjs, numObjs, boundaryObjs);
840 
841  /*
842  * Sort the objects into a safe dump order (no forward references).
843  *
844  * We rely on dependency information to help us determine a safe order, so
845  * the initial sort is mostly for cosmetic purposes: we sort by name to
846  * ensure that logically identical schemas will dump identically.
847  */
848  sortDumpableObjectsByTypeName(dobjs, numObjs);
849 
850  /* If we do a parallel dump, we want the largest tables to go first */
851  if (archiveFormat == archDirectory && numWorkers > 1)
852  sortDataAndIndexObjectsBySize(dobjs, numObjs);
853 
854  sortDumpableObjects(dobjs, numObjs,
855  boundaryObjs[0].dumpId, boundaryObjs[1].dumpId);
856 
857  /*
858  * Create archive TOC entries for all the objects to be dumped, in a safe
859  * order.
860  */
861 
862  /* First the special ENCODING, STDSTRINGS, and SEARCHPATH entries. */
863  dumpEncoding(fout);
864  dumpStdStrings(fout);
865  dumpSearchPath(fout);
866 
867  /* The database items are always next, unless we don't want them at all */
868  if (dopt.outputCreateDB)
869  dumpDatabase(fout);
870 
871  /* Now the rearrangeable objects. */
872  for (i = 0; i < numObjs; i++)
873  dumpDumpableObject(fout, dobjs[i]);
874 
875  /*
876  * Set up options info to ensure we dump what we want.
877  */
878  ropt = NewRestoreOptions();
879  ropt->filename = filename;
880 
881  /* if you change this list, see dumpOptionsFromRestoreOptions */
882  ropt->dropSchema = dopt.outputClean;
883  ropt->dataOnly = dopt.dataOnly;
884  ropt->schemaOnly = dopt.schemaOnly;
885  ropt->if_exists = dopt.if_exists;
886  ropt->column_inserts = dopt.column_inserts;
887  ropt->dumpSections = dopt.dumpSections;
888  ropt->aclsSkip = dopt.aclsSkip;
889  ropt->superuser = dopt.outputSuperuser;
890  ropt->createDB = dopt.outputCreateDB;
891  ropt->noOwner = dopt.outputNoOwner;
892  ropt->noTablespace = dopt.outputNoTablespaces;
893  ropt->disable_triggers = dopt.disable_triggers;
894  ropt->use_setsessauth = dopt.use_setsessauth;
896  ropt->dump_inserts = dopt.dump_inserts;
897  ropt->no_comments = dopt.no_comments;
898  ropt->no_publications = dopt.no_publications;
900  ropt->no_subscriptions = dopt.no_subscriptions;
901  ropt->lockWaitTimeout = dopt.lockWaitTimeout;
904  ropt->sequence_data = dopt.sequence_data;
905  ropt->binary_upgrade = dopt.binary_upgrade;
906 
907  if (compressLevel == -1)
908  ropt->compression = 0;
909  else
910  ropt->compression = compressLevel;
911 
912  ropt->suppressDumpWarnings = true; /* We've already shown them */
913 
914  SetArchiveOptions(fout, &dopt, ropt);
915 
916  /* Mark which entries should be output */
918 
919  /*
920  * The archive's TOC entries are now marked as to which ones will actually
921  * be output, so we can set up their dependency lists properly. This isn't
922  * necessary for plain-text output, though.
923  */
924  if (!plainText)
926 
927  /*
928  * And finally we can do the actual output.
929  *
930  * Note: for non-plain-text output formats, the output file is written
931  * inside CloseArchive(). This is, um, bizarre; but not worth changing
932  * right now.
933  */
934  if (plainText)
935  RestoreArchive(fout);
936 
937  CloseArchive(fout);
938 
939  exit_nicely(0);
940 }
941 
942 
/*
 * help
 *	  Print a usage summary for pg_dump on stdout.
 *
 * All user-facing strings are wrapped in _() for NLS translation.
 * Descriptions are column-aligned so the output reads as a table.
 */
static void
help(const char *progname)
{
	printf(_("%s dumps a database as a text file or to other formats.\n\n"), progname);
	printf(_("Usage:\n"));
	printf(_("  %s [OPTION]... [DBNAME]\n"), progname);

	printf(_("\nGeneral options:\n"));
	printf(_("  -f, --file=FILENAME          output file or directory name\n"));
	printf(_("  -F, --format=c|d|t|p         output file format (custom, directory, tar,\n"
			 "                               plain text (default))\n"));
	printf(_("  -j, --jobs=NUM               use this many parallel jobs to dump\n"));
	printf(_("  -v, --verbose                verbose mode\n"));
	printf(_("  -V, --version                output version information, then exit\n"));
	printf(_("  -Z, --compress=0-9           compression level for compressed formats\n"));
	printf(_("  --lock-wait-timeout=TIMEOUT  fail after waiting TIMEOUT for a table lock\n"));
	printf(_("  --no-sync                    do not wait for changes to be written safely to disk\n"));
	printf(_("  -?, --help                   show this help, then exit\n"));

	printf(_("\nOptions controlling the output content:\n"));
	printf(_("  -a, --data-only              dump only the data, not the schema\n"));
	printf(_("  -b, --blobs                  include large objects in dump\n"));
	printf(_("  -B, --no-blobs               exclude large objects in dump\n"));
	printf(_("  -c, --clean                  clean (drop) database objects before recreating\n"));
	printf(_("  -C, --create                 include commands to create database in dump\n"));
	printf(_("  -E, --encoding=ENCODING      dump the data in encoding ENCODING\n"));
	printf(_("  -n, --schema=SCHEMA          dump the named schema(s) only\n"));
	printf(_("  -N, --exclude-schema=SCHEMA  do NOT dump the named schema(s)\n"));
	printf(_("  -o, --oids                   include OIDs in dump\n"));
	printf(_("  -O, --no-owner               skip restoration of object ownership in\n"
			 "                               plain-text format\n"));
	printf(_("  -s, --schema-only            dump only the schema, no data\n"));
	printf(_("  -S, --superuser=NAME         superuser user name to use in plain-text format\n"));
	printf(_("  -t, --table=TABLE            dump the named table(s) only\n"));
	printf(_("  -T, --exclude-table=TABLE    do NOT dump the named table(s)\n"));
	printf(_("  -x, --no-privileges          do not dump privileges (grant/revoke)\n"));
	printf(_("  --binary-upgrade             for use by upgrade utilities only\n"));
	printf(_("  --column-inserts             dump data as INSERT commands with column names\n"));
	printf(_("  --disable-dollar-quoting     disable dollar quoting, use SQL standard quoting\n"));
	printf(_("  --disable-triggers           disable triggers during data-only restore\n"));
	printf(_("  --enable-row-security        enable row security (dump only content user has\n"
			 "                               access to)\n"));
	printf(_("  --exclude-table-data=TABLE   do NOT dump data for the named table(s)\n"));
	printf(_("  --if-exists                  use IF EXISTS when dropping objects\n"));
	printf(_("  --inserts                    dump data as INSERT commands, rather than COPY\n"));
	printf(_("  --load-via-partition-root    load partitions via the root table\n"));
	printf(_("  --no-comments                do not dump comments\n"));
	printf(_("  --no-publications            do not dump publications\n"));
	printf(_("  --no-security-labels         do not dump security label assignments\n"));
	printf(_("  --no-subscriptions           do not dump subscriptions\n"));
	printf(_("  --no-synchronized-snapshots  do not use synchronized snapshots in parallel jobs\n"));
	printf(_("  --no-tablespaces             do not dump tablespace assignments\n"));
	printf(_("  --no-unlogged-table-data     do not dump unlogged table data\n"));
	printf(_("  --on-conflict-do-nothing     add ON CONFLICT DO NOTHING to INSERT commands\n"));
	printf(_("  --quote-all-identifiers      quote all identifiers, even if not key words\n"));
	printf(_("  --section=SECTION            dump named section (pre-data, data, or post-data)\n"));
	printf(_("  --serializable-deferrable    wait until the dump can run without anomalies\n"));
	printf(_("  --snapshot=SNAPSHOT          use given snapshot for the dump\n"));
	printf(_("  --strict-names               require table and/or schema include patterns to\n"
			 "                               match at least one entity each\n"));
	printf(_("  --use-set-session-authorization\n"
			 "                               use SET SESSION AUTHORIZATION commands instead of\n"
			 "                               ALTER OWNER commands to set ownership\n"));

	printf(_("\nConnection options:\n"));
	printf(_("  -d, --dbname=DBNAME      database to dump\n"));
	printf(_("  -h, --host=HOSTNAME      database server host or socket directory\n"));
	printf(_("  -p, --port=PORT          database server port number\n"));
	printf(_("  -U, --username=NAME      connect as specified database user\n"));
	printf(_("  -w, --no-password        never prompt for password\n"));
	printf(_("  -W, --password           force password prompt (should happen automatically)\n"));
	printf(_("  --role=ROLENAME          do SET ROLE before dump\n"));

	printf(_("\nIf no database name is supplied, then the PGDATABASE environment\n"
			 "variable value is used.\n\n"));
	printf(_("Report bugs to <pgsql-bugs@postgresql.org>.\n"));
}
1020 
1021 static void
1022 setup_connection(Archive *AH, const char *dumpencoding,
1023  const char *dumpsnapshot, char *use_role)
1024 {
1025  DumpOptions *dopt = AH->dopt;
1026  PGconn *conn = GetConnection(AH);
1027  const char *std_strings;
1028 
1030 
1031  /*
1032  * Set the client encoding if requested.
1033  */
1034  if (dumpencoding)
1035  {
1036  if (PQsetClientEncoding(conn, dumpencoding) < 0)
1037  exit_horribly(NULL, "invalid client encoding \"%s\" specified\n",
1038  dumpencoding);
1039  }
1040 
1041  /*
1042  * Get the active encoding and the standard_conforming_strings setting, so
1043  * we know how to escape strings.
1044  */
1045  AH->encoding = PQclientEncoding(conn);
1046 
1047  std_strings = PQparameterStatus(conn, "standard_conforming_strings");
1048  AH->std_strings = (std_strings && strcmp(std_strings, "on") == 0);
1049 
1050  /*
1051  * Set the role if requested. In a parallel dump worker, we'll be passed
1052  * use_role == NULL, but AH->use_role is already set (if user specified it
1053  * originally) and we should use that.
1054  */
1055  if (!use_role && AH->use_role)
1056  use_role = AH->use_role;
1057 
1058  /* Set the role if requested */
1059  if (use_role && AH->remoteVersion >= 80100)
1060  {
1061  PQExpBuffer query = createPQExpBuffer();
1062 
1063  appendPQExpBuffer(query, "SET ROLE %s", fmtId(use_role));
1064  ExecuteSqlStatement(AH, query->data);
1065  destroyPQExpBuffer(query);
1066 
1067  /* save it for possible later use by parallel workers */
1068  if (!AH->use_role)
1069  AH->use_role = pg_strdup(use_role);
1070  }
1071 
1072  /* Set the datestyle to ISO to ensure the dump's portability */
1073  ExecuteSqlStatement(AH, "SET DATESTYLE = ISO");
1074 
1075  /* Likewise, avoid using sql_standard intervalstyle */
1076  if (AH->remoteVersion >= 80400)
1077  ExecuteSqlStatement(AH, "SET INTERVALSTYLE = POSTGRES");
1078 
1079  /*
1080  * Set extra_float_digits so that we can dump float data exactly (given
1081  * correctly implemented float I/O code, anyway)
1082  */
1083  if (AH->remoteVersion >= 90000)
1084  ExecuteSqlStatement(AH, "SET extra_float_digits TO 3");
1085  else
1086  ExecuteSqlStatement(AH, "SET extra_float_digits TO 2");
1087 
1088  /*
1089  * If synchronized scanning is supported, disable it, to prevent
1090  * unpredictable changes in row ordering across a dump and reload.
1091  */
1092  if (AH->remoteVersion >= 80300)
1093  ExecuteSqlStatement(AH, "SET synchronize_seqscans TO off");
1094 
1095  /*
1096  * Disable timeouts if supported.
1097  */
1098  ExecuteSqlStatement(AH, "SET statement_timeout = 0");
1099  if (AH->remoteVersion >= 90300)
1100  ExecuteSqlStatement(AH, "SET lock_timeout = 0");
1101  if (AH->remoteVersion >= 90600)
1102  ExecuteSqlStatement(AH, "SET idle_in_transaction_session_timeout = 0");
1103 
1104  /*
1105  * Quote all identifiers, if requested.
1106  */
1107  if (quote_all_identifiers && AH->remoteVersion >= 90100)
1108  ExecuteSqlStatement(AH, "SET quote_all_identifiers = true");
1109 
1110  /*
1111  * Adjust row-security mode, if supported.
1112  */
1113  if (AH->remoteVersion >= 90500)
1114  {
1115  if (dopt->enable_row_security)
1116  ExecuteSqlStatement(AH, "SET row_security = on");
1117  else
1118  ExecuteSqlStatement(AH, "SET row_security = off");
1119  }
1120 
1121  /*
1122  * Start transaction-snapshot mode transaction to dump consistent data.
1123  */
1124  ExecuteSqlStatement(AH, "BEGIN");
1125  if (AH->remoteVersion >= 90100)
1126  {
1127  /*
1128  * To support the combination of serializable_deferrable with the jobs
1129  * option we use REPEATABLE READ for the worker connections that are
1130  * passed a snapshot. As long as the snapshot is acquired in a
1131  * SERIALIZABLE, READ ONLY, DEFERRABLE transaction, its use within a
1132  * REPEATABLE READ transaction provides the appropriate integrity
1133  * guarantees. This is a kluge, but safe for back-patching.
1134  */
1135  if (dopt->serializable_deferrable && AH->sync_snapshot_id == NULL)
1137  "SET TRANSACTION ISOLATION LEVEL "
1138  "SERIALIZABLE, READ ONLY, DEFERRABLE");
1139  else
1141  "SET TRANSACTION ISOLATION LEVEL "
1142  "REPEATABLE READ, READ ONLY");
1143  }
1144  else
1145  {
1147  "SET TRANSACTION ISOLATION LEVEL "
1148  "SERIALIZABLE, READ ONLY");
1149  }
1150 
1151  /*
1152  * If user specified a snapshot to use, select that. In a parallel dump
1153  * worker, we'll be passed dumpsnapshot == NULL, but AH->sync_snapshot_id
1154  * is already set (if the server can handle it) and we should use that.
1155  */
1156  if (dumpsnapshot)
1157  AH->sync_snapshot_id = pg_strdup(dumpsnapshot);
1158 
1159  if (AH->sync_snapshot_id)
1160  {
1161  PQExpBuffer query = createPQExpBuffer();
1162 
1163  appendPQExpBuffer(query, "SET TRANSACTION SNAPSHOT ");
1164  appendStringLiteralConn(query, AH->sync_snapshot_id, conn);
1165  ExecuteSqlStatement(AH, query->data);
1166  destroyPQExpBuffer(query);
1167  }
1168  else if (AH->numWorkers > 1 &&
1169  AH->remoteVersion >= 90200 &&
1171  {
1172  if (AH->isStandby && AH->remoteVersion < 100000)
1173  exit_horribly(NULL,
1174  "Synchronized snapshots on standby servers are not supported by this server version.\n"
1175  "Run with --no-synchronized-snapshots instead if you do not need\n"
1176  "synchronized snapshots.\n");
1177 
1178 
1180  }
1181 }
1182 
1183 /* Set up connection for a parallel worker process */
1184 static void
1186 {
1187  /*
1188  * We want to re-select all the same values the master connection is
1189  * using. We'll have inherited directly-usable values in
1190  * AH->sync_snapshot_id and AH->use_role, but we need to translate the
1191  * inherited encoding value back to a string to pass to setup_connection.
1192  */
1193  setup_connection(AH,
1195  NULL,
1196  NULL);
1197 }
1198 
1199 static char *
1201 {
1202  char *query = "SELECT pg_catalog.pg_export_snapshot()";
1203  char *result;
1204  PGresult *res;
1205 
1206  res = ExecuteSqlQueryForSingleRow(fout, query);
1207  result = pg_strdup(PQgetvalue(res, 0, 0));
1208  PQclear(res);
1209 
1210  return result;
1211 }
1212 
1213 static ArchiveFormat
1215 {
1216  ArchiveFormat archiveFormat;
1217 
1218  *mode = archModeWrite;
1219 
1220  if (pg_strcasecmp(format, "a") == 0 || pg_strcasecmp(format, "append") == 0)
1221  {
1222  /* This is used by pg_dumpall, and is not documented */
1223  archiveFormat = archNull;
1224  *mode = archModeAppend;
1225  }
1226  else if (pg_strcasecmp(format, "c") == 0)
1227  archiveFormat = archCustom;
1228  else if (pg_strcasecmp(format, "custom") == 0)
1229  archiveFormat = archCustom;
1230  else if (pg_strcasecmp(format, "d") == 0)
1231  archiveFormat = archDirectory;
1232  else if (pg_strcasecmp(format, "directory") == 0)
1233  archiveFormat = archDirectory;
1234  else if (pg_strcasecmp(format, "p") == 0)
1235  archiveFormat = archNull;
1236  else if (pg_strcasecmp(format, "plain") == 0)
1237  archiveFormat = archNull;
1238  else if (pg_strcasecmp(format, "t") == 0)
1239  archiveFormat = archTar;
1240  else if (pg_strcasecmp(format, "tar") == 0)
1241  archiveFormat = archTar;
1242  else
1243  exit_horribly(NULL, "invalid output format \"%s\" specified\n", format);
1244  return archiveFormat;
1245 }
1246 
1247 /*
1248  * Find the OIDs of all schemas matching the given list of patterns,
1249  * and append them to the given OID list.
1250  */
1251 static void
1253  SimpleStringList *patterns,
1254  SimpleOidList *oids,
1255  bool strict_names)
1256 {
1257  PQExpBuffer query;
1258  PGresult *res;
1259  SimpleStringListCell *cell;
1260  int i;
1261 
1262  if (patterns->head == NULL)
1263  return; /* nothing to do */
1264 
1265  query = createPQExpBuffer();
1266 
1267  /*
1268  * The loop below runs multiple SELECTs might sometimes result in
1269  * duplicate entries in the OID list, but we don't care.
1270  */
1271 
1272  for (cell = patterns->head; cell; cell = cell->next)
1273  {
1274  appendPQExpBuffer(query,
1275  "SELECT oid FROM pg_catalog.pg_namespace n\n");
1276  processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1277  false, NULL, "n.nspname", NULL, NULL);
1278 
1279  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1280  if (strict_names && PQntuples(res) == 0)
1281  exit_horribly(NULL, "no matching schemas were found for pattern \"%s\"\n", cell->val);
1282 
1283  for (i = 0; i < PQntuples(res); i++)
1284  {
1285  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1286  }
1287 
1288  PQclear(res);
1289  resetPQExpBuffer(query);
1290  }
1291 
1292  destroyPQExpBuffer(query);
1293 }
1294 
1295 /*
1296  * Find the OIDs of all tables matching the given list of patterns,
1297  * and append them to the given OID list.
1298  */
1299 static void
1301  SimpleStringList *patterns, SimpleOidList *oids,
1302  bool strict_names)
1303 {
1304  PQExpBuffer query;
1305  PGresult *res;
1306  SimpleStringListCell *cell;
1307  int i;
1308 
1309  if (patterns->head == NULL)
1310  return; /* nothing to do */
1311 
1312  query = createPQExpBuffer();
1313 
1314  /*
1315  * this might sometimes result in duplicate entries in the OID list, but
1316  * we don't care.
1317  */
1318 
1319  for (cell = patterns->head; cell; cell = cell->next)
1320  {
1321  /*
1322  * Query must remain ABSOLUTELY devoid of unqualified names. This
1323  * would be unnecessary given a pg_table_is_visible() variant taking a
1324  * search_path argument.
1325  */
1326  appendPQExpBuffer(query,
1327  "SELECT c.oid"
1328  "\nFROM pg_catalog.pg_class c"
1329  "\n LEFT JOIN pg_catalog.pg_namespace n"
1330  "\n ON n.oid OPERATOR(pg_catalog.=) c.relnamespace"
1331  "\nWHERE c.relkind OPERATOR(pg_catalog.=) ANY"
1332  "\n (array['%c', '%c', '%c', '%c', '%c', '%c'])\n",
1333  RELKIND_RELATION, RELKIND_SEQUENCE, RELKIND_VIEW,
1334  RELKIND_MATVIEW, RELKIND_FOREIGN_TABLE,
1335  RELKIND_PARTITIONED_TABLE);
1336  processSQLNamePattern(GetConnection(fout), query, cell->val, true,
1337  false, "n.nspname", "c.relname", NULL,
1338  "pg_catalog.pg_table_is_visible(c.oid)");
1339 
1340  ExecuteSqlStatement(fout, "RESET search_path");
1341  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1344  if (strict_names && PQntuples(res) == 0)
1345  exit_horribly(NULL, "no matching tables were found for pattern \"%s\"\n", cell->val);
1346 
1347  for (i = 0; i < PQntuples(res); i++)
1348  {
1349  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1350  }
1351 
1352  PQclear(res);
1353  resetPQExpBuffer(query);
1354  }
1355 
1356  destroyPQExpBuffer(query);
1357 }
1358 
1359 /*
1360  * checkExtensionMembership
1361  * Determine whether object is an extension member, and if so,
1362  * record an appropriate dependency and set the object's dump flag.
1363  *
1364  * It's important to call this for each object that could be an extension
1365  * member. Generally, we integrate this with determining the object's
1366  * to-be-dumped-ness, since extension membership overrides other rules for that.
1367  *
1368  * Returns true if object is an extension member, else false.
1369  */
1370 static bool
1372 {
1373  ExtensionInfo *ext = findOwningExtension(dobj->catId);
1374 
1375  if (ext == NULL)
1376  return false;
1377 
1378  dobj->ext_member = true;
1379 
1380  /* Record dependency so that getDependencies needn't deal with that */
1381  addObjectDependency(dobj, ext->dobj.dumpId);
1382 
1383  /*
1384  * In 9.6 and above, mark the member object to have any non-initial ACL,
1385  * policies, and security labels dumped.
1386  *
1387  * Note that any initial ACLs (see pg_init_privs) will be removed when we
1388  * extract the information about the object. We don't provide support for
1389  * initial policies and security labels and it seems unlikely for those to
1390  * ever exist, but we may have to revisit this later.
1391  *
1392  * Prior to 9.6, we do not include any extension member components.
1393  *
1394  * In binary upgrades, we still dump all components of the members
1395  * individually, since the idea is to exactly reproduce the database
1396  * contents rather than replace the extension contents with something
1397  * different.
1398  */
1399  if (fout->dopt->binary_upgrade)
1400  dobj->dump = ext->dobj.dump;
1401  else
1402  {
1403  if (fout->remoteVersion < 90600)
1404  dobj->dump = DUMP_COMPONENT_NONE;
1405  else
1406  dobj->dump = ext->dobj.dump_contains & (DUMP_COMPONENT_ACL |
1409  }
1410 
1411  return true;
1412 }
1413 
1414 /*
1415  * selectDumpableNamespace: policy-setting subroutine
1416  * Mark a namespace as to be dumped or not
1417  */
1418 static void
1420 {
1421  /*
1422  * If specific tables are being dumped, do not dump any complete
1423  * namespaces. If specific namespaces are being dumped, dump just those
1424  * namespaces. Otherwise, dump all non-system namespaces.
1425  */
1426  if (table_include_oids.head != NULL)
1427  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1428  else if (schema_include_oids.head != NULL)
1429  nsinfo->dobj.dump_contains = nsinfo->dobj.dump =
1430  simple_oid_list_member(&schema_include_oids,
1431  nsinfo->dobj.catId.oid) ?
1433  else if (fout->remoteVersion >= 90600 &&
1434  strcmp(nsinfo->dobj.name, "pg_catalog") == 0)
1435  {
1436  /*
1437  * In 9.6 and above, we dump out any ACLs defined in pg_catalog, if
1438  * they are interesting (and not the original ACLs which were set at
1439  * initdb time, see pg_init_privs).
1440  */
1441  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
1442  }
1443  else if (strncmp(nsinfo->dobj.name, "pg_", 3) == 0 ||
1444  strcmp(nsinfo->dobj.name, "information_schema") == 0)
1445  {
1446  /* Other system schemas don't get dumped */
1447  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1448  }
1449  else if (strcmp(nsinfo->dobj.name, "public") == 0)
1450  {
1451  /*
1452  * The public schema is a strange beast that sits in a sort of
1453  * no-mans-land between being a system object and a user object. We
1454  * don't want to dump creation or comment commands for it, because
1455  * that complicates matters for non-superuser use of pg_dump. But we
1456  * should dump any ACL changes that have occurred for it, and of
1457  * course we should dump contained objects.
1458  */
1459  nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
1461  }
1462  else
1463  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ALL;
1464 
1465  /*
1466  * In any case, a namespace can be excluded by an exclusion switch
1467  */
1468  if (nsinfo->dobj.dump_contains &&
1469  simple_oid_list_member(&schema_exclude_oids,
1470  nsinfo->dobj.catId.oid))
1471  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1472 
1473  /*
1474  * If the schema belongs to an extension, allow extension membership to
1475  * override the dump decision for the schema itself. However, this does
1476  * not change dump_contains, so this won't change what we do with objects
1477  * within the schema. (If they belong to the extension, they'll get
1478  * suppressed by it, otherwise not.)
1479  */
1480  (void) checkExtensionMembership(&nsinfo->dobj, fout);
1481 }
1482 
1483 /*
1484  * selectDumpableTable: policy-setting subroutine
1485  * Mark a table as to be dumped or not
1486  */
1487 static void
1489 {
1490  if (checkExtensionMembership(&tbinfo->dobj, fout))
1491  return; /* extension membership overrides all else */
1492 
1493  /*
1494  * If specific tables are being dumped, dump just those tables; else, dump
1495  * according to the parent namespace's dump flag.
1496  */
1497  if (table_include_oids.head != NULL)
1498  tbinfo->dobj.dump = simple_oid_list_member(&table_include_oids,
1499  tbinfo->dobj.catId.oid) ?
1501  else
1502  tbinfo->dobj.dump = tbinfo->dobj.namespace->dobj.dump_contains;
1503 
1504  /*
1505  * In any case, a table can be excluded by an exclusion switch
1506  */
1507  if (tbinfo->dobj.dump &&
1508  simple_oid_list_member(&table_exclude_oids,
1509  tbinfo->dobj.catId.oid))
1510  tbinfo->dobj.dump = DUMP_COMPONENT_NONE;
1511 }
1512 
1513 /*
1514  * selectDumpableType: policy-setting subroutine
1515  * Mark a type as to be dumped or not
1516  *
1517  * If it's a table's rowtype or an autogenerated array type, we also apply a
1518  * special type code to facilitate sorting into the desired order. (We don't
1519  * want to consider those to be ordinary types because that would bring tables
1520  * up into the datatype part of the dump order.) We still set the object's
1521  * dump flag; that's not going to cause the dummy type to be dumped, but we
1522  * need it so that casts involving such types will be dumped correctly -- see
1523  * dumpCast. This means the flag should be set the same as for the underlying
1524  * object (the table or base type).
1525  */
1526 static void
1528 {
1529  /* skip complex types, except for standalone composite types */
1530  if (OidIsValid(tyinfo->typrelid) &&
1531  tyinfo->typrelkind != RELKIND_COMPOSITE_TYPE)
1532  {
1533  TableInfo *tytable = findTableByOid(tyinfo->typrelid);
1534 
1535  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1536  if (tytable != NULL)
1537  tyinfo->dobj.dump = tytable->dobj.dump;
1538  else
1539  tyinfo->dobj.dump = DUMP_COMPONENT_NONE;
1540  return;
1541  }
1542 
1543  /* skip auto-generated array types */
1544  if (tyinfo->isArray)
1545  {
1546  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1547 
1548  /*
1549  * Fall through to set the dump flag; we assume that the subsequent
1550  * rules will do the same thing as they would for the array's base
1551  * type. (We cannot reliably look up the base type here, since
1552  * getTypes may not have processed it yet.)
1553  */
1554  }
1555 
1556  if (checkExtensionMembership(&tyinfo->dobj, fout))
1557  return; /* extension membership overrides all else */
1558 
1559  /* Dump based on if the contents of the namespace are being dumped */
1560  tyinfo->dobj.dump = tyinfo->dobj.namespace->dobj.dump_contains;
1561 }
1562 
1563 /*
1564  * selectDumpableDefaultACL: policy-setting subroutine
1565  * Mark a default ACL as to be dumped or not
1566  *
1567  * For per-schema default ACLs, dump if the schema is to be dumped.
1568  * Otherwise dump if we are dumping "everything". Note that dataOnly
1569  * and aclsSkip are checked separately.
1570  */
1571 static void
1573 {
1574  /* Default ACLs can't be extension members */
1575 
1576  if (dinfo->dobj.namespace)
1577  /* default ACLs are considered part of the namespace */
1578  dinfo->dobj.dump = dinfo->dobj.namespace->dobj.dump_contains;
1579  else
1580  dinfo->dobj.dump = dopt->include_everything ?
1582 }
1583 
1584 /*
1585  * selectDumpableCast: policy-setting subroutine
1586  * Mark a cast as to be dumped or not
1587  *
1588  * Casts do not belong to any particular namespace (since they haven't got
1589  * names), nor do they have identifiable owners. To distinguish user-defined
1590  * casts from built-in ones, we must resort to checking whether the cast's
1591  * OID is in the range reserved for initdb.
1592  */
1593 static void
1595 {
1596  if (checkExtensionMembership(&cast->dobj, fout))
1597  return; /* extension membership overrides all else */
1598 
1599  /*
1600  * This would be DUMP_COMPONENT_ACL for from-initdb casts, but they do not
1601  * support ACLs currently.
1602  */
1603  if (cast->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1604  cast->dobj.dump = DUMP_COMPONENT_NONE;
1605  else
1606  cast->dobj.dump = fout->dopt->include_everything ?
1608 }
1609 
1610 /*
1611  * selectDumpableProcLang: policy-setting subroutine
1612  * Mark a procedural language as to be dumped or not
1613  *
1614  * Procedural languages do not belong to any particular namespace. To
1615  * identify built-in languages, we must resort to checking whether the
1616  * language's OID is in the range reserved for initdb.
1617  */
1618 static void
1620 {
1621  if (checkExtensionMembership(&plang->dobj, fout))
1622  return; /* extension membership overrides all else */
1623 
1624  /*
1625  * Only include procedural languages when we are dumping everything.
1626  *
1627  * For from-initdb procedural languages, only include ACLs, as we do for
1628  * the pg_catalog namespace. We need this because procedural languages do
1629  * not live in any namespace.
1630  */
1631  if (!fout->dopt->include_everything)
1632  plang->dobj.dump = DUMP_COMPONENT_NONE;
1633  else
1634  {
1635  if (plang->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1636  plang->dobj.dump = fout->remoteVersion < 90600 ?
1638  else
1639  plang->dobj.dump = DUMP_COMPONENT_ALL;
1640  }
1641 }
1642 
1643 /*
1644  * selectDumpableAccessMethod: policy-setting subroutine
1645  * Mark an access method as to be dumped or not
1646  *
1647  * Access methods do not belong to any particular namespace. To identify
1648  * built-in access methods, we must resort to checking whether the
1649  * method's OID is in the range reserved for initdb.
1650  */
1651 static void
1653 {
1654  if (checkExtensionMembership(&method->dobj, fout))
1655  return; /* extension membership overrides all else */
1656 
1657  /*
1658  * This would be DUMP_COMPONENT_ACL for from-initdb access methods, but
1659  * they do not support ACLs currently.
1660  */
1661  if (method->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1662  method->dobj.dump = DUMP_COMPONENT_NONE;
1663  else
1664  method->dobj.dump = fout->dopt->include_everything ?
1666 }
1667 
1668 /*
1669  * selectDumpableExtension: policy-setting subroutine
1670  * Mark an extension as to be dumped or not
1671  *
1672  * Built-in extensions should be skipped except for checking ACLs, since we
1673  * assume those will already be installed in the target database. We identify
1674  * such extensions by their having OIDs in the range reserved for initdb.
1675  * We dump all user-added extensions by default, or none of them if
1676  * include_everything is false (i.e., a --schema or --table switch was given).
1677  */
1678 static void
1680 {
1681  /*
1682  * Use DUMP_COMPONENT_ACL for built-in extensions, to allow users to
1683  * change permissions on their member objects, if they wish to, and have
1684  * those changes preserved.
1685  */
1686  if (extinfo->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1687  extinfo->dobj.dump = extinfo->dobj.dump_contains = DUMP_COMPONENT_ACL;
1688  else
1689  extinfo->dobj.dump = extinfo->dobj.dump_contains =
1692 }
1693 
1694 /*
1695  * selectDumpablePublicationTable: policy-setting subroutine
1696  * Mark a publication table as to be dumped or not
1697  *
1698  * Publication tables have schemas, but those are ignored in decision making,
1699  * because publications are only dumped when we are dumping everything.
1700  */
1701 static void
1703 {
1704  if (checkExtensionMembership(dobj, fout))
1705  return; /* extension membership overrides all else */
1706 
1707  dobj->dump = fout->dopt->include_everything ?
1709 }
1710 
1711 /*
1712  * selectDumpableObject: policy-setting subroutine
1713  * Mark a generic dumpable object as to be dumped or not
1714  *
1715  * Use this only for object types without a special-case routine above.
1716  */
1717 static void
1719 {
1720  if (checkExtensionMembership(dobj, fout))
1721  return; /* extension membership overrides all else */
1722 
1723  /*
1724  * Default policy is to dump if parent namespace is dumpable, or for
1725  * non-namespace-associated items, dump if we're dumping "everything".
1726  */
1727  if (dobj->namespace)
1728  dobj->dump = dobj->namespace->dobj.dump_contains;
1729  else
1730  dobj->dump = fout->dopt->include_everything ?
1732 }
1733 
1734 /*
1735  * Dump a table's contents for loading using the COPY command
1736  * - this routine is called by the Archiver when it wants the table
1737  * to be dumped.
1738  */
1739 
1740 static int
1741 dumpTableData_copy(Archive *fout, void *dcontext)
1742 {
1743  TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
1744  TableInfo *tbinfo = tdinfo->tdtable;
1745  const char *classname = tbinfo->dobj.name;
1746  const bool hasoids = tbinfo->hasoids;
1747  const bool oids = tdinfo->oids;
1749 
1750  /*
1751  * Note: can't use getThreadLocalPQExpBuffer() here, we're calling fmtId
1752  * which uses it already.
1753  */
1754  PQExpBuffer clistBuf = createPQExpBuffer();
1755  PGconn *conn = GetConnection(fout);
1756  PGresult *res;
1757  int ret;
1758  char *copybuf;
1759  const char *column_list;
1760 
1761  if (g_verbose)
1762  write_msg(NULL, "dumping contents of table \"%s.%s\"\n",
1763  tbinfo->dobj.namespace->dobj.name, classname);
1764 
1765  /*
1766  * Specify the column list explicitly so that we have no possibility of
1767  * retrieving data in the wrong column order. (The default column
1768  * ordering of COPY will not be what we want in certain corner cases
1769  * involving ADD COLUMN and inheritance.)
1770  */
1771  column_list = fmtCopyColumnList(tbinfo, clistBuf);
1772 
1773  if (oids && hasoids)
1774  {
1775  appendPQExpBuffer(q, "COPY %s %s WITH OIDS TO stdout;",
1776  fmtQualifiedDumpable(tbinfo),
1777  column_list);
1778  }
1779  else if (tdinfo->filtercond)
1780  {
1781  /* Note: this syntax is only supported in 8.2 and up */
1782  appendPQExpBufferStr(q, "COPY (SELECT ");
1783  /* klugery to get rid of parens in column list */
1784  if (strlen(column_list) > 2)
1785  {
1786  appendPQExpBufferStr(q, column_list + 1);
1787  q->data[q->len - 1] = ' ';
1788  }
1789  else
1790  appendPQExpBufferStr(q, "* ");
1791  appendPQExpBuffer(q, "FROM %s %s) TO stdout;",
1792  fmtQualifiedDumpable(tbinfo),
1793  tdinfo->filtercond);
1794  }
1795  else
1796  {
1797  appendPQExpBuffer(q, "COPY %s %s TO stdout;",
1798  fmtQualifiedDumpable(tbinfo),
1799  column_list);
1800  }
1801  res = ExecuteSqlQuery(fout, q->data, PGRES_COPY_OUT);
1802  PQclear(res);
1803  destroyPQExpBuffer(clistBuf);
1804 
1805  for (;;)
1806  {
1807  ret = PQgetCopyData(conn, &copybuf, 0);
1808 
1809  if (ret < 0)
1810  break; /* done or error */
1811 
1812  if (copybuf)
1813  {
1814  WriteData(fout, copybuf, ret);
1815  PQfreemem(copybuf);
1816  }
1817 
1818  /* ----------
1819  * THROTTLE:
1820  *
1821  * There was considerable discussion in late July, 2000 regarding
1822  * slowing down pg_dump when backing up large tables. Users with both
1823  * slow & fast (multi-processor) machines experienced performance
1824  * degradation when doing a backup.
1825  *
1826  * Initial attempts based on sleeping for a number of ms for each ms
1827  * of work were deemed too complex, then a simple 'sleep in each loop'
1828  * implementation was suggested. The latter failed because the loop
1829  * was too tight. Finally, the following was implemented:
1830  *
1831  * If throttle is non-zero, then
1832  * See how long since the last sleep.
1833  * Work out how long to sleep (based on ratio).
1834  * If sleep is more than 100ms, then
1835  * sleep
1836  * reset timer
1837  * EndIf
1838  * EndIf
1839  *
1840  * where the throttle value was the number of ms to sleep per ms of
1841  * work. The calculation was done in each loop.
1842  *
1843  * Most of the hard work is done in the backend, and this solution
1844  * still did not work particularly well: on slow machines, the ratio
1845  * was 50:1, and on medium paced machines, 1:1, and on fast
1846  * multi-processor machines, it had little or no effect, for reasons
1847  * that were unclear.
1848  *
1849  * Further discussion ensued, and the proposal was dropped.
1850  *
1851  * For those people who want this feature, it can be implemented using
1852  * gettimeofday in each loop, calculating the time since last sleep,
1853  * multiplying that by the sleep ratio, then if the result is more
1854  * than a preset 'minimum sleep time' (say 100ms), call the 'select'
1855  * function to sleep for a subsecond period ie.
1856  *
1857  * select(0, NULL, NULL, NULL, &tvi);
1858  *
1859  * This will return after the interval specified in the structure tvi.
1860  * Finally, call gettimeofday again to save the 'last sleep time'.
1861  * ----------
1862  */
1863  }
1864  archprintf(fout, "\\.\n\n\n");
1865 
1866  if (ret == -2)
1867  {
1868  /* copy data transfer failed */
1869  write_msg(NULL, "Dumping the contents of table \"%s\" failed: PQgetCopyData() failed.\n", classname);
1870  write_msg(NULL, "Error message from server: %s", PQerrorMessage(conn));
1871  write_msg(NULL, "The command was: %s\n", q->data);
1872  exit_nicely(1);
1873  }
1874 
1875  /* Check command status and return to normal libpq state */
1876  res = PQgetResult(conn);
1877  if (PQresultStatus(res) != PGRES_COMMAND_OK)
1878  {
1879  write_msg(NULL, "Dumping the contents of table \"%s\" failed: PQgetResult() failed.\n", classname);
1880  write_msg(NULL, "Error message from server: %s", PQerrorMessage(conn));
1881  write_msg(NULL, "The command was: %s\n", q->data);
1882  exit_nicely(1);
1883  }
1884  PQclear(res);
1885 
1886  /* Do this to ensure we've pumped libpq back to idle state */
1887  if (PQgetResult(conn) != NULL)
1888  write_msg(NULL, "WARNING: unexpected extra results during COPY of table \"%s\"\n",
1889  classname);
1890 
1891  destroyPQExpBuffer(q);
1892  return 1;
1893 }
1894 
1895 /*
1896  * Dump table data using INSERT commands.
1897  *
1898  * Caution: when we restore from an archive file direct to database, the
1899  * INSERT commands emitted by this function have to be parsed by
1900  * pg_backup_db.c's ExecuteSimpleCommands(), which will not handle comments,
1901  * E'' strings, or dollar-quoted strings. So don't emit anything like that.
1902  */
1903 static int
1904 dumpTableData_insert(Archive *fout, void *dcontext)
1905 {
1906  TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
1907  TableInfo *tbinfo = tdinfo->tdtable;
1908  DumpOptions *dopt = fout->dopt;
1910  PQExpBuffer insertStmt = NULL;
1911  PGresult *res;
1912  int tuple;
1913  int nfields;
1914  int field;
1915 
1916  appendPQExpBuffer(q, "DECLARE _pg_dump_cursor CURSOR FOR "
1917  "SELECT * FROM ONLY %s",
1918  fmtQualifiedDumpable(tbinfo));
1919  if (tdinfo->filtercond)
1920  appendPQExpBuffer(q, " %s", tdinfo->filtercond);
1921 
1922  ExecuteSqlStatement(fout, q->data);
1923 
1924  while (1)
1925  {
1926  res = ExecuteSqlQuery(fout, "FETCH 100 FROM _pg_dump_cursor",
1927  PGRES_TUPLES_OK);
1928  nfields = PQnfields(res);
1929  for (tuple = 0; tuple < PQntuples(res); tuple++)
1930  {
1931  /*
1932  * First time through, we build as much of the INSERT statement as
1933  * possible in "insertStmt", which we can then just print for each
1934  * line. If the table happens to have zero columns then this will
1935  * be a complete statement, otherwise it will end in "VALUES(" and
1936  * be ready to have the row's column values appended.
1937  */
1938  if (insertStmt == NULL)
1939  {
1940  TableInfo *targettab;
1941 
1942  insertStmt = createPQExpBuffer();
1943 
1944  /*
1945  * When load-via-partition-root is set, get the root table
1946  * name for the partition table, so that we can reload data
1947  * through the root table.
1948  */
1949  if (dopt->load_via_partition_root && tbinfo->ispartition)
1950  targettab = getRootTableInfo(tbinfo);
1951  else
1952  targettab = tbinfo;
1953 
1954  appendPQExpBuffer(insertStmt, "INSERT INTO %s ",
1955  fmtQualifiedDumpable(targettab));
1956 
1957  /* corner case for zero-column table */
1958  if (nfields == 0)
1959  {
1960  appendPQExpBufferStr(insertStmt, "DEFAULT VALUES;\n");
1961  }
1962  else
1963  {
1964  /* append the list of column names if required */
1965  if (dopt->column_inserts)
1966  {
1967  appendPQExpBufferChar(insertStmt, '(');
1968  for (field = 0; field < nfields; field++)
1969  {
1970  if (field > 0)
1971  appendPQExpBufferStr(insertStmt, ", ");
1972  appendPQExpBufferStr(insertStmt,
1973  fmtId(PQfname(res, field)));
1974  }
1975  appendPQExpBufferStr(insertStmt, ") ");
1976  }
1977 
1978  if (tbinfo->needs_override)
1979  appendPQExpBufferStr(insertStmt, "OVERRIDING SYSTEM VALUE ");
1980 
1981  appendPQExpBufferStr(insertStmt, "VALUES (");
1982  }
1983  }
1984 
1985  archputs(insertStmt->data, fout);
1986 
1987  /* if it is zero-column table then we're done */
1988  if (nfields == 0)
1989  continue;
1990 
1991  for (field = 0; field < nfields; field++)
1992  {
1993  if (field > 0)
1994  archputs(", ", fout);
1995  if (PQgetisnull(res, tuple, field))
1996  {
1997  archputs("NULL", fout);
1998  continue;
1999  }
2000 
2001  /* XXX This code is partially duplicated in ruleutils.c */
2002  switch (PQftype(res, field))
2003  {
2004  case INT2OID:
2005  case INT4OID:
2006  case INT8OID:
2007  case OIDOID:
2008  case FLOAT4OID:
2009  case FLOAT8OID:
2010  case NUMERICOID:
2011  {
2012  /*
2013  * These types are printed without quotes unless
2014  * they contain values that aren't accepted by the
2015  * scanner unquoted (e.g., 'NaN'). Note that
2016  * strtod() and friends might accept NaN, so we
2017  * can't use that to test.
2018  *
2019  * In reality we only need to defend against
2020  * infinity and NaN, so we need not get too crazy
2021  * about pattern matching here.
2022  */
2023  const char *s = PQgetvalue(res, tuple, field);
2024 
2025  if (strspn(s, "0123456789 +-eE.") == strlen(s))
2026  archputs(s, fout);
2027  else
2028  archprintf(fout, "'%s'", s);
2029  }
2030  break;
2031 
2032  case BITOID:
2033  case VARBITOID:
2034  archprintf(fout, "B'%s'",
2035  PQgetvalue(res, tuple, field));
2036  break;
2037 
2038  case BOOLOID:
2039  if (strcmp(PQgetvalue(res, tuple, field), "t") == 0)
2040  archputs("true", fout);
2041  else
2042  archputs("false", fout);
2043  break;
2044 
2045  default:
2046  /* All other types are printed as string literals. */
2047  resetPQExpBuffer(q);
2049  PQgetvalue(res, tuple, field),
2050  fout);
2051  archputs(q->data, fout);
2052  break;
2053  }
2054  }
2055 
2056  if (!dopt->do_nothing)
2057  archputs(");\n", fout);
2058  else
2059  archputs(") ON CONFLICT DO NOTHING;\n", fout);
2060  }
2061 
2062  if (PQntuples(res) <= 0)
2063  {
2064  PQclear(res);
2065  break;
2066  }
2067  PQclear(res);
2068  }
2069 
2070  archputs("\n\n", fout);
2071 
2072  ExecuteSqlStatement(fout, "CLOSE _pg_dump_cursor");
2073 
2074  destroyPQExpBuffer(q);
2075  if (insertStmt != NULL)
2076  destroyPQExpBuffer(insertStmt);
2077 
2078  return 1;
2079 }
2080 
2081 /*
2082  * getRootTableInfo:
2083  * get the root TableInfo for the given partition table.
2084  */
2085 static TableInfo *
2087 {
2088  TableInfo *parentTbinfo;
2089 
2090  Assert(tbinfo->ispartition);
2091  Assert(tbinfo->numParents == 1);
2092 
2093  parentTbinfo = tbinfo->parents[0];
2094  while (parentTbinfo->ispartition)
2095  {
2096  Assert(parentTbinfo->numParents == 1);
2097  parentTbinfo = parentTbinfo->parents[0];
2098  }
2099 
2100  return parentTbinfo;
2101 }
2102 
2103 /*
2104  * dumpTableData -
2105  * dump the contents of a single table
2106  *
2107  * Actually, this just makes an ArchiveEntry for the table contents.
2108  */
2109 static void
2111 {
2112  DumpOptions *dopt = fout->dopt;
2113  TableInfo *tbinfo = tdinfo->tdtable;
2114  PQExpBuffer copyBuf = createPQExpBuffer();
2115  PQExpBuffer clistBuf = createPQExpBuffer();
2116  DataDumperPtr dumpFn;
2117  char *copyStmt;
2118  const char *copyFrom;
2119 
2120  if (!dopt->dump_inserts)
2121  {
2122  /* Dump/restore using COPY */
2123  dumpFn = dumpTableData_copy;
2124 
2125  /*
2126  * When load-via-partition-root is set, get the root table name for
2127  * the partition table, so that we can reload data through the root
2128  * table.
2129  */
2130  if (dopt->load_via_partition_root && tbinfo->ispartition)
2131  {
2132  TableInfo *parentTbinfo;
2133 
2134  parentTbinfo = getRootTableInfo(tbinfo);
2135  copyFrom = fmtQualifiedDumpable(parentTbinfo);
2136  }
2137  else
2138  copyFrom = fmtQualifiedDumpable(tbinfo);
2139 
2140  /* must use 2 steps here 'cause fmtId is nonreentrant */
2141  appendPQExpBuffer(copyBuf, "COPY %s ",
2142  copyFrom);
2143  appendPQExpBuffer(copyBuf, "%s %sFROM stdin;\n",
2144  fmtCopyColumnList(tbinfo, clistBuf),
2145  (tdinfo->oids && tbinfo->hasoids) ? "WITH OIDS " : "");
2146  copyStmt = copyBuf->data;
2147  }
2148  else
2149  {
2150  /* Restore using INSERT */
2151  dumpFn = dumpTableData_insert;
2152  copyStmt = NULL;
2153  }
2154 
2155  /*
2156  * Note: although the TableDataInfo is a full DumpableObject, we treat its
2157  * dependency on its table as "special" and pass it to ArchiveEntry now.
2158  * See comments for BuildArchiveDependencies.
2159  */
2160  if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2161  ArchiveEntry(fout, tdinfo->dobj.catId, tdinfo->dobj.dumpId,
2162  tbinfo->dobj.name, tbinfo->dobj.namespace->dobj.name,
2163  NULL, tbinfo->rolname,
2164  false, "TABLE DATA", SECTION_DATA,
2165  "", "", copyStmt,
2166  &(tbinfo->dobj.dumpId), 1,
2167  dumpFn, tdinfo);
2168 
2169  destroyPQExpBuffer(copyBuf);
2170  destroyPQExpBuffer(clistBuf);
2171 }
2172 
2173 /*
2174  * refreshMatViewData -
2175  * load or refresh the contents of a single materialized view
2176  *
2177  * Actually, this just makes an ArchiveEntry for the REFRESH MATERIALIZED VIEW
2178  * statement.
2179  */
2180 static void
2182 {
2183  TableInfo *tbinfo = tdinfo->tdtable;
2184  PQExpBuffer q;
2185 
2186  /* If the materialized view is not flagged as populated, skip this. */
2187  if (!tbinfo->relispopulated)
2188  return;
2189 
2190  q = createPQExpBuffer();
2191 
2192  appendPQExpBuffer(q, "REFRESH MATERIALIZED VIEW %s;\n",
2193  fmtQualifiedDumpable(tbinfo));
2194 
2195  if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2196  ArchiveEntry(fout,
2197  tdinfo->dobj.catId, /* catalog ID */
2198  tdinfo->dobj.dumpId, /* dump ID */
2199  tbinfo->dobj.name, /* Name */
2200  tbinfo->dobj.namespace->dobj.name, /* Namespace */
2201  NULL, /* Tablespace */
2202  tbinfo->rolname, /* Owner */
2203  false, /* with oids */
2204  "MATERIALIZED VIEW DATA", /* Desc */
2205  SECTION_POST_DATA, /* Section */
2206  q->data, /* Create */
2207  "", /* Del */
2208  NULL, /* Copy */
2209  tdinfo->dobj.dependencies, /* Deps */
2210  tdinfo->dobj.nDeps, /* # Deps */
2211  NULL, /* Dumper */
2212  NULL); /* Dumper Arg */
2213 
2214  destroyPQExpBuffer(q);
2215 }
2216 
2217 /*
2218  * getTableData -
2219  * set up dumpable objects representing the contents of tables
2220  */
2221 static void
2222 getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, bool oids, char relkind)
2223 {
2224  int i;
2225 
2226  for (i = 0; i < numTables; i++)
2227  {
2228  if (tblinfo[i].dobj.dump & DUMP_COMPONENT_DATA &&
2229  (!relkind || tblinfo[i].relkind == relkind))
2230  makeTableDataInfo(dopt, &(tblinfo[i]), oids);
2231  }
2232 }
2233 
2234 /*
2235  * Make a dumpable object for the data of this specific table
2236  *
2237  * Note: we make a TableDataInfo if and only if we are going to dump the
2238  * table data; the "dump" flag in such objects isn't used.
2239  */
2240 static void
2241 makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo, bool oids)
2242 {
2243  TableDataInfo *tdinfo;
2244 
2245  /*
2246  * Nothing to do if we already decided to dump the table. This will
2247  * happen for "config" tables.
2248  */
2249  if (tbinfo->dataObj != NULL)
2250  return;
2251 
2252  /* Skip VIEWs (no data to dump) */
2253  if (tbinfo->relkind == RELKIND_VIEW)
2254  return;
2255  /* Skip FOREIGN TABLEs (no data to dump) */
2256  if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2257  return;
2258  /* Skip partitioned tables (data in partitions) */
2259  if (tbinfo->relkind == RELKIND_PARTITIONED_TABLE)
2260  return;
2261 
2262  /* Don't dump data in unlogged tables, if so requested */
2263  if (tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED &&
2264  dopt->no_unlogged_table_data)
2265  return;
2266 
2267  /* Check that the data is not explicitly excluded */
2268  if (simple_oid_list_member(&tabledata_exclude_oids,
2269  tbinfo->dobj.catId.oid))
2270  return;
2271 
2272  /* OK, let's dump it */
2273  tdinfo = (TableDataInfo *) pg_malloc(sizeof(TableDataInfo));
2274 
2275  if (tbinfo->relkind == RELKIND_MATVIEW)
2276  tdinfo->dobj.objType = DO_REFRESH_MATVIEW;
2277  else if (tbinfo->relkind == RELKIND_SEQUENCE)
2278  tdinfo->dobj.objType = DO_SEQUENCE_SET;
2279  else
2280  tdinfo->dobj.objType = DO_TABLE_DATA;
2281 
2282  /*
2283  * Note: use tableoid 0 so that this object won't be mistaken for
2284  * something that pg_depend entries apply to.
2285  */
2286  tdinfo->dobj.catId.tableoid = 0;
2287  tdinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
2288  AssignDumpId(&tdinfo->dobj);
2289  tdinfo->dobj.name = tbinfo->dobj.name;
2290  tdinfo->dobj.namespace = tbinfo->dobj.namespace;
2291  tdinfo->tdtable = tbinfo;
2292  tdinfo->oids = oids;
2293  tdinfo->filtercond = NULL; /* might get set later */
2294  addObjectDependency(&tdinfo->dobj, tbinfo->dobj.dumpId);
2295 
2296  tbinfo->dataObj = tdinfo;
2297 }
2298 
2299 /*
2300  * The refresh for a materialized view must be dependent on the refresh for
2301  * any materialized view that this one is dependent on.
2302  *
2303  * This must be called after all the objects are created, but before they are
2304  * sorted.
2305  */
2306 static void
2308 {
2309  PQExpBuffer query;
2310  PGresult *res;
2311  int ntups,
2312  i;
2313  int i_classid,
2314  i_objid,
2315  i_refobjid;
2316 
2317  /* No Mat Views before 9.3. */
2318  if (fout->remoteVersion < 90300)
2319  return;
2320 
2321  query = createPQExpBuffer();
2322 
2323  appendPQExpBufferStr(query, "WITH RECURSIVE w AS "
2324  "( "
2325  "SELECT d1.objid, d2.refobjid, c2.relkind AS refrelkind "
2326  "FROM pg_depend d1 "
2327  "JOIN pg_class c1 ON c1.oid = d1.objid "
2328  "AND c1.relkind = " CppAsString2(RELKIND_MATVIEW)
2329  " JOIN pg_rewrite r1 ON r1.ev_class = d1.objid "
2330  "JOIN pg_depend d2 ON d2.classid = 'pg_rewrite'::regclass "
2331  "AND d2.objid = r1.oid "
2332  "AND d2.refobjid <> d1.objid "
2333  "JOIN pg_class c2 ON c2.oid = d2.refobjid "
2334  "AND c2.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
2335  CppAsString2(RELKIND_VIEW) ") "
2336  "WHERE d1.classid = 'pg_class'::regclass "
2337  "UNION "
2338  "SELECT w.objid, d3.refobjid, c3.relkind "
2339  "FROM w "
2340  "JOIN pg_rewrite r3 ON r3.ev_class = w.refobjid "
2341  "JOIN pg_depend d3 ON d3.classid = 'pg_rewrite'::regclass "
2342  "AND d3.objid = r3.oid "
2343  "AND d3.refobjid <> w.refobjid "
2344  "JOIN pg_class c3 ON c3.oid = d3.refobjid "
2345  "AND c3.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
2346  CppAsString2(RELKIND_VIEW) ") "
2347  ") "
2348  "SELECT 'pg_class'::regclass::oid AS classid, objid, refobjid "
2349  "FROM w "
2350  "WHERE refrelkind = " CppAsString2(RELKIND_MATVIEW));
2351 
2352  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
2353 
2354  ntups = PQntuples(res);
2355 
2356  i_classid = PQfnumber(res, "classid");
2357  i_objid = PQfnumber(res, "objid");
2358  i_refobjid = PQfnumber(res, "refobjid");
2359 
2360  for (i = 0; i < ntups; i++)
2361  {
2362  CatalogId objId;
2363  CatalogId refobjId;
2364  DumpableObject *dobj;
2365  DumpableObject *refdobj;
2366  TableInfo *tbinfo;
2367  TableInfo *reftbinfo;
2368 
2369  objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
2370  objId.oid = atooid(PQgetvalue(res, i, i_objid));
2371  refobjId.tableoid = objId.tableoid;
2372  refobjId.oid = atooid(PQgetvalue(res, i, i_refobjid));
2373 
2374  dobj = findObjectByCatalogId(objId);
2375  if (dobj == NULL)
2376  continue;
2377 
2378  Assert(dobj->objType == DO_TABLE);
2379  tbinfo = (TableInfo *) dobj;
2380  Assert(tbinfo->relkind == RELKIND_MATVIEW);
2381  dobj = (DumpableObject *) tbinfo->dataObj;
2382  if (dobj == NULL)
2383  continue;
2384  Assert(dobj->objType == DO_REFRESH_MATVIEW);
2385 
2386  refdobj = findObjectByCatalogId(refobjId);
2387  if (refdobj == NULL)
2388  continue;
2389 
2390  Assert(refdobj->objType == DO_TABLE);
2391  reftbinfo = (TableInfo *) refdobj;
2392  Assert(reftbinfo->relkind == RELKIND_MATVIEW);
2393  refdobj = (DumpableObject *) reftbinfo->dataObj;
2394  if (refdobj == NULL)
2395  continue;
2396  Assert(refdobj->objType == DO_REFRESH_MATVIEW);
2397 
2398  addObjectDependency(dobj, refdobj->dumpId);
2399 
2400  if (!reftbinfo->relispopulated)
2401  tbinfo->relispopulated = false;
2402  }
2403 
2404  PQclear(res);
2405 
2406  destroyPQExpBuffer(query);
2407 }
2408 
2409 /*
2410  * getTableDataFKConstraints -
2411  * add dump-order dependencies reflecting foreign key constraints
2412  *
2413  * This code is executed only in a data-only dump --- in schema+data dumps
2414  * we handle foreign key issues by not creating the FK constraints until
2415  * after the data is loaded. In a data-only dump, however, we want to
2416  * order the table data objects in such a way that a table's referenced
2417  * tables are restored first. (In the presence of circular references or
2418  * self-references this may be impossible; we'll detect and complain about
2419  * that during the dependency sorting step.)
2420  */
2421 static void
2423 {
2424  DumpableObject **dobjs;
2425  int numObjs;
2426  int i;
2427 
2428  /* Search through all the dumpable objects for FK constraints */
2429  getDumpableObjects(&dobjs, &numObjs);
2430  for (i = 0; i < numObjs; i++)
2431  {
2432  if (dobjs[i]->objType == DO_FK_CONSTRAINT)
2433  {
2434  ConstraintInfo *cinfo = (ConstraintInfo *) dobjs[i];
2435  TableInfo *ftable;
2436 
2437  /* Not interesting unless both tables are to be dumped */
2438  if (cinfo->contable == NULL ||
2439  cinfo->contable->dataObj == NULL)
2440  continue;
2441  ftable = findTableByOid(cinfo->confrelid);
2442  if (ftable == NULL ||
2443  ftable->dataObj == NULL)
2444  continue;
2445 
2446  /*
2447  * Okay, make referencing table's TABLE_DATA object depend on the
2448  * referenced table's TABLE_DATA object.
2449  */
2451  ftable->dataObj->dobj.dumpId);
2452  }
2453  }
2454  free(dobjs);
2455 }
2456 
2457 
2458 /*
2459  * guessConstraintInheritance:
2460  * In pre-8.4 databases, we can't tell for certain which constraints
2461  * are inherited. We assume a CHECK constraint is inherited if its name
2462  * matches the name of any constraint in the parent. Originally this code
2463  * tried to compare the expression texts, but that can fail for various
2464  * reasons --- for example, if the parent and child tables are in different
2465  * schemas, reverse-listing of function calls may produce different text
2466  * (schema-qualified or not) depending on search path.
2467  *
2468  * In 8.4 and up we can rely on the conislocal field to decide which
2469  * constraints must be dumped; much safer.
2470  *
2471  * This function assumes all conislocal flags were initialized to true.
2472  * It clears the flag on anything that seems to be inherited.
2473  */
2474 static void
2476 {
2477  int i,
2478  j,
2479  k;
2480 
2481  for (i = 0; i < numTables; i++)
2482  {
2483  TableInfo *tbinfo = &(tblinfo[i]);
2484  int numParents;
2485  TableInfo **parents;
2486  TableInfo *parent;
2487 
2488  /* Sequences and views never have parents */
2489  if (tbinfo->relkind == RELKIND_SEQUENCE ||
2490  tbinfo->relkind == RELKIND_VIEW)
2491  continue;
2492 
2493  /* Don't bother computing anything for non-target tables, either */
2494  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
2495  continue;
2496 
2497  numParents = tbinfo->numParents;
2498  parents = tbinfo->parents;
2499 
2500  if (numParents == 0)
2501  continue; /* nothing to see here, move along */
2502 
2503  /* scan for inherited CHECK constraints */
2504  for (j = 0; j < tbinfo->ncheck; j++)
2505  {
2506  ConstraintInfo *constr;
2507 
2508  constr = &(tbinfo->checkexprs[j]);
2509 
2510  for (k = 0; k < numParents; k++)
2511  {
2512  int l;
2513 
2514  parent = parents[k];
2515  for (l = 0; l < parent->ncheck; l++)
2516  {
2517  ConstraintInfo *pconstr = &(parent->checkexprs[l]);
2518 
2519  if (strcmp(pconstr->dobj.name, constr->dobj.name) == 0)
2520  {
2521  constr->conislocal = false;
2522  break;
2523  }
2524  }
2525  if (!constr->conislocal)
2526  break;
2527  }
2528  }
2529  }
2530 }
2531 
2532 
2533 /*
2534  * dumpDatabase:
2535  * dump the database definition
2536  */
2537 static void
2539 {
2540  DumpOptions *dopt = fout->dopt;
2541  PQExpBuffer dbQry = createPQExpBuffer();
2542  PQExpBuffer delQry = createPQExpBuffer();
2543  PQExpBuffer creaQry = createPQExpBuffer();
2544  PQExpBuffer labelq = createPQExpBuffer();
2545  PGconn *conn = GetConnection(fout);
2546  PGresult *res;
2547  int i_tableoid,
2548  i_oid,
2549  i_datname,
2550  i_dba,
2551  i_encoding,
2552  i_collate,
2553  i_ctype,
2554  i_frozenxid,
2555  i_minmxid,
2556  i_datacl,
2557  i_rdatacl,
2558  i_datistemplate,
2559  i_datconnlimit,
2560  i_tablespace;
2561  CatalogId dbCatId;
2562  DumpId dbDumpId;
2563  const char *datname,
2564  *dba,
2565  *encoding,
2566  *collate,
2567  *ctype,
2568  *datacl,
2569  *rdatacl,
2570  *datistemplate,
2571  *datconnlimit,
2572  *tablespace;
2573  uint32 frozenxid,
2574  minmxid;
2575  char *qdatname;
2576 
2577  if (g_verbose)
2578  write_msg(NULL, "saving database definition\n");
2579 
2580  /* Fetch the database-level properties for this database */
2581  if (fout->remoteVersion >= 90600)
2582  {
2583  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2584  "(%s datdba) AS dba, "
2585  "pg_encoding_to_char(encoding) AS encoding, "
2586  "datcollate, datctype, datfrozenxid, datminmxid, "
2587  "(SELECT array_agg(acl ORDER BY acl::text COLLATE \"C\") FROM ( "
2588  " SELECT unnest(coalesce(datacl,acldefault('d',datdba))) AS acl "
2589  " EXCEPT SELECT unnest(acldefault('d',datdba))) as datacls)"
2590  " AS datacl, "
2591  "(SELECT array_agg(acl ORDER BY acl::text COLLATE \"C\") FROM ( "
2592  " SELECT unnest(acldefault('d',datdba)) AS acl "
2593  " EXCEPT SELECT unnest(coalesce(datacl,acldefault('d',datdba)))) as rdatacls)"
2594  " AS rdatacl, "
2595  "datistemplate, datconnlimit, "
2596  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2597  "shobj_description(oid, 'pg_database') AS description "
2598 
2599  "FROM pg_database "
2600  "WHERE datname = current_database()",
2602  }
2603  else if (fout->remoteVersion >= 90300)
2604  {
2605  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2606  "(%s datdba) AS dba, "
2607  "pg_encoding_to_char(encoding) AS encoding, "
2608  "datcollate, datctype, datfrozenxid, datminmxid, "
2609  "datacl, '' as rdatacl, datistemplate, datconnlimit, "
2610  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2611  "shobj_description(oid, 'pg_database') AS description "
2612 
2613  "FROM pg_database "
2614  "WHERE datname = current_database()",
2616  }
2617  else if (fout->remoteVersion >= 80400)
2618  {
2619  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2620  "(%s datdba) AS dba, "
2621  "pg_encoding_to_char(encoding) AS encoding, "
2622  "datcollate, datctype, datfrozenxid, 0 AS datminmxid, "
2623  "datacl, '' as rdatacl, datistemplate, datconnlimit, "
2624  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2625  "shobj_description(oid, 'pg_database') AS description "
2626 
2627  "FROM pg_database "
2628  "WHERE datname = current_database()",
2630  }
2631  else if (fout->remoteVersion >= 80200)
2632  {
2633  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2634  "(%s datdba) AS dba, "
2635  "pg_encoding_to_char(encoding) AS encoding, "
2636  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2637  "datacl, '' as rdatacl, datistemplate, datconnlimit, "
2638  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2639  "shobj_description(oid, 'pg_database') AS description "
2640 
2641  "FROM pg_database "
2642  "WHERE datname = current_database()",
2644  }
2645  else
2646  {
2647  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2648  "(%s datdba) AS dba, "
2649  "pg_encoding_to_char(encoding) AS encoding, "
2650  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2651  "datacl, '' as rdatacl, datistemplate, "
2652  "-1 as datconnlimit, "
2653  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace "
2654  "FROM pg_database "
2655  "WHERE datname = current_database()",
2657  }
2658 
2659  res = ExecuteSqlQueryForSingleRow(fout, dbQry->data);
2660 
2661  i_tableoid = PQfnumber(res, "tableoid");
2662  i_oid = PQfnumber(res, "oid");
2663  i_datname = PQfnumber(res, "datname");
2664  i_dba = PQfnumber(res, "dba");
2665  i_encoding = PQfnumber(res, "encoding");
2666  i_collate = PQfnumber(res, "datcollate");
2667  i_ctype = PQfnumber(res, "datctype");
2668  i_frozenxid = PQfnumber(res, "datfrozenxid");
2669  i_minmxid = PQfnumber(res, "datminmxid");
2670  i_datacl = PQfnumber(res, "datacl");
2671  i_rdatacl = PQfnumber(res, "rdatacl");
2672  i_datistemplate = PQfnumber(res, "datistemplate");
2673  i_datconnlimit = PQfnumber(res, "datconnlimit");
2674  i_tablespace = PQfnumber(res, "tablespace");
2675 
2676  dbCatId.tableoid = atooid(PQgetvalue(res, 0, i_tableoid));
2677  dbCatId.oid = atooid(PQgetvalue(res, 0, i_oid));
2678  datname = PQgetvalue(res, 0, i_datname);
2679  dba = PQgetvalue(res, 0, i_dba);
2680  encoding = PQgetvalue(res, 0, i_encoding);
2681  collate = PQgetvalue(res, 0, i_collate);
2682  ctype = PQgetvalue(res, 0, i_ctype);
2683  frozenxid = atooid(PQgetvalue(res, 0, i_frozenxid));
2684  minmxid = atooid(PQgetvalue(res, 0, i_minmxid));
2685  datacl = PQgetvalue(res, 0, i_datacl);
2686  rdatacl = PQgetvalue(res, 0, i_rdatacl);
2687  datistemplate = PQgetvalue(res, 0, i_datistemplate);
2688  datconnlimit = PQgetvalue(res, 0, i_datconnlimit);
2689  tablespace = PQgetvalue(res, 0, i_tablespace);
2690 
2691  qdatname = pg_strdup(fmtId(datname));
2692 
2693  /*
2694  * Prepare the CREATE DATABASE command. We must specify encoding, locale,
2695  * and tablespace since those can't be altered later. Other DB properties
2696  * are left to the DATABASE PROPERTIES entry, so that they can be applied
2697  * after reconnecting to the target DB.
2698  */
2699  appendPQExpBuffer(creaQry, "CREATE DATABASE %s WITH TEMPLATE = template0",
2700  qdatname);
2701  if (strlen(encoding) > 0)
2702  {
2703  appendPQExpBufferStr(creaQry, " ENCODING = ");
2704  appendStringLiteralAH(creaQry, encoding, fout);
2705  }
2706  if (strlen(collate) > 0)
2707  {
2708  appendPQExpBufferStr(creaQry, " LC_COLLATE = ");
2709  appendStringLiteralAH(creaQry, collate, fout);
2710  }
2711  if (strlen(ctype) > 0)
2712  {
2713  appendPQExpBufferStr(creaQry, " LC_CTYPE = ");
2714  appendStringLiteralAH(creaQry, ctype, fout);
2715  }
2716 
2717  /*
2718  * Note: looking at dopt->outputNoTablespaces here is completely the wrong
2719  * thing; the decision whether to specify a tablespace should be left till
2720  * pg_restore, so that pg_restore --no-tablespaces applies. Ideally we'd
2721  * label the DATABASE entry with the tablespace and let the normal
2722  * tablespace selection logic work ... but CREATE DATABASE doesn't pay
2723  * attention to default_tablespace, so that won't work.
2724  */
2725  if (strlen(tablespace) > 0 && strcmp(tablespace, "pg_default") != 0 &&
2726  !dopt->outputNoTablespaces)
2727  appendPQExpBuffer(creaQry, " TABLESPACE = %s",
2728  fmtId(tablespace));
2729  appendPQExpBufferStr(creaQry, ";\n");
2730 
2731  appendPQExpBuffer(delQry, "DROP DATABASE %s;\n",
2732  qdatname);
2733 
2734  dbDumpId = createDumpId();
2735 
2736  ArchiveEntry(fout,
2737  dbCatId, /* catalog ID */
2738  dbDumpId, /* dump ID */
2739  datname, /* Name */
2740  NULL, /* Namespace */
2741  NULL, /* Tablespace */
2742  dba, /* Owner */
2743  false, /* with oids */
2744  "DATABASE", /* Desc */
2745  SECTION_PRE_DATA, /* Section */
2746  creaQry->data, /* Create */
2747  delQry->data, /* Del */
2748  NULL, /* Copy */
2749  NULL, /* Deps */
2750  0, /* # Deps */
2751  NULL, /* Dumper */
2752  NULL); /* Dumper Arg */
2753 
2754  /* Compute correct tag for archive entry */
2755  appendPQExpBuffer(labelq, "DATABASE %s", qdatname);
2756 
2757  /* Dump DB comment if any */
2758  if (fout->remoteVersion >= 80200)
2759  {
2760  /*
2761  * 8.2 and up keep comments on shared objects in a shared table, so we
2762  * cannot use the dumpComment() code used for other database objects.
2763  * Be careful that the ArchiveEntry parameters match that function.
2764  */
2765  char *comment = PQgetvalue(res, 0, PQfnumber(res, "description"));
2766 
2767  if (comment && *comment && !dopt->no_comments)
2768  {
2769  resetPQExpBuffer(dbQry);
2770 
2771  /*
2772  * Generates warning when loaded into a differently-named
2773  * database.
2774  */
2775  appendPQExpBuffer(dbQry, "COMMENT ON DATABASE %s IS ", qdatname);
2776  appendStringLiteralAH(dbQry, comment, fout);
2777  appendPQExpBufferStr(dbQry, ";\n");
2778 
2779  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2780  labelq->data, NULL, NULL, dba,
2781  false, "COMMENT", SECTION_NONE,
2782  dbQry->data, "", NULL,
2783  &(dbDumpId), 1,
2784  NULL, NULL);
2785  }
2786  }
2787  else
2788  {
2789  dumpComment(fout, "DATABASE", qdatname, NULL, dba,
2790  dbCatId, 0, dbDumpId);
2791  }
2792 
2793  /* Dump DB security label, if enabled */
2794  if (!dopt->no_security_labels && fout->remoteVersion >= 90200)
2795  {
2796  PGresult *shres;
2797  PQExpBuffer seclabelQry;
2798 
2799  seclabelQry = createPQExpBuffer();
2800 
2801  buildShSecLabelQuery(conn, "pg_database", dbCatId.oid, seclabelQry);
2802  shres = ExecuteSqlQuery(fout, seclabelQry->data, PGRES_TUPLES_OK);
2803  resetPQExpBuffer(seclabelQry);
2804  emitShSecLabels(conn, shres, seclabelQry, "DATABASE", datname);
2805  if (seclabelQry->len > 0)
2806  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2807  labelq->data, NULL, NULL, dba,
2808  false, "SECURITY LABEL", SECTION_NONE,
2809  seclabelQry->data, "", NULL,
2810  &(dbDumpId), 1,
2811  NULL, NULL);
2812  destroyPQExpBuffer(seclabelQry);
2813  PQclear(shres);
2814  }
2815 
2816  /*
2817  * Dump ACL if any. Note that we do not support initial privileges
2818  * (pg_init_privs) on databases.
2819  */
2820  dumpACL(fout, dbCatId, dbDumpId, "DATABASE",
2821  qdatname, NULL, NULL,
2822  dba, datacl, rdatacl, "", "");
2823 
2824  /*
2825  * Now construct a DATABASE PROPERTIES archive entry to restore any
2826  * non-default database-level properties. (The reason this must be
2827  * separate is that we cannot put any additional commands into the TOC
2828  * entry that has CREATE DATABASE. pg_restore would execute such a group
2829  * in an implicit transaction block, and the backend won't allow CREATE
2830  * DATABASE in that context.)
2831  */
2832  resetPQExpBuffer(creaQry);
2833  resetPQExpBuffer(delQry);
2834 
2835  if (strlen(datconnlimit) > 0 && strcmp(datconnlimit, "-1") != 0)
2836  appendPQExpBuffer(creaQry, "ALTER DATABASE %s CONNECTION LIMIT = %s;\n",
2837  qdatname, datconnlimit);
2838 
2839  if (strcmp(datistemplate, "t") == 0)
2840  {
2841  appendPQExpBuffer(creaQry, "ALTER DATABASE %s IS_TEMPLATE = true;\n",
2842  qdatname);
2843 
2844  /*
2845  * The backend won't accept DROP DATABASE on a template database. We
2846  * can deal with that by removing the template marking before the DROP
2847  * gets issued. We'd prefer to use ALTER DATABASE IF EXISTS here, but
2848  * since no such command is currently supported, fake it with a direct
2849  * UPDATE on pg_database.
2850  */
2851  appendPQExpBufferStr(delQry, "UPDATE pg_catalog.pg_database "
2852  "SET datistemplate = false WHERE datname = ");
2853  appendStringLiteralAH(delQry, datname, fout);
2854  appendPQExpBufferStr(delQry, ";\n");
2855  }
2856 
2857  /* Add database-specific SET options */
2858  dumpDatabaseConfig(fout, creaQry, datname, dbCatId.oid);
2859 
2860  /*
2861  * We stick this binary-upgrade query into the DATABASE PROPERTIES archive
2862  * entry, too, for lack of a better place.
2863  */
2864  if (dopt->binary_upgrade)
2865  {
2866  appendPQExpBufferStr(creaQry, "\n-- For binary upgrade, set datfrozenxid and datminmxid.\n");
2867  appendPQExpBuffer(creaQry, "UPDATE pg_catalog.pg_database\n"
2868  "SET datfrozenxid = '%u', datminmxid = '%u'\n"
2869  "WHERE datname = ",
2870  frozenxid, minmxid);
2871  appendStringLiteralAH(creaQry, datname, fout);
2872  appendPQExpBufferStr(creaQry, ";\n");
2873  }
2874 
2875  if (creaQry->len > 0)
2876  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2877  datname, NULL, NULL, dba,
2878  false, "DATABASE PROPERTIES", SECTION_PRE_DATA,
2879  creaQry->data, delQry->data, NULL,
2880  &(dbDumpId), 1,
2881  NULL, NULL);
2882 
2883  /*
2884  * pg_largeobject and pg_largeobject_metadata come from the old system
2885  * intact, so set their relfrozenxids and relminmxids.
2886  */
2887  if (dopt->binary_upgrade)
2888  {
2889  PGresult *lo_res;
2890  PQExpBuffer loFrozenQry = createPQExpBuffer();
2891  PQExpBuffer loOutQry = createPQExpBuffer();
2892  int i_relfrozenxid,
2893  i_relminmxid;
2894 
2895  /*
2896  * pg_largeobject
2897  */
2898  if (fout->remoteVersion >= 90300)
2899  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid\n"
2900  "FROM pg_catalog.pg_class\n"
2901  "WHERE oid = %u;\n",
2902  LargeObjectRelationId);
2903  else
2904  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid\n"
2905  "FROM pg_catalog.pg_class\n"
2906  "WHERE oid = %u;\n",
2907  LargeObjectRelationId);
2908 
2909  lo_res = ExecuteSqlQueryForSingleRow(fout, loFrozenQry->data);
2910 
2911  i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
2912  i_relminmxid = PQfnumber(lo_res, "relminmxid");
2913 
2914  appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, set pg_largeobject relfrozenxid and relminmxid\n");
2915  appendPQExpBuffer(loOutQry, "UPDATE pg_catalog.pg_class\n"
2916  "SET relfrozenxid = '%u', relminmxid = '%u'\n"
2917  "WHERE oid = %u;\n",
2918  atooid(PQgetvalue(lo_res, 0, i_relfrozenxid)),
2919  atooid(PQgetvalue(lo_res, 0, i_relminmxid)),
2920  LargeObjectRelationId);
2921  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2922  "pg_largeobject", NULL, NULL, "",
2923  false, "pg_largeobject", SECTION_PRE_DATA,
2924  loOutQry->data, "", NULL,
2925  NULL, 0,
2926  NULL, NULL);
2927 
2928  PQclear(lo_res);
2929 
2930  /*
2931  * pg_largeobject_metadata
2932  */
2933  if (fout->remoteVersion >= 90000)
2934  {
2935  resetPQExpBuffer(loFrozenQry);
2936  resetPQExpBuffer(loOutQry);
2937 
2938  if (fout->remoteVersion >= 90300)
2939  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid\n"
2940  "FROM pg_catalog.pg_class\n"
2941  "WHERE oid = %u;\n",
2942  LargeObjectMetadataRelationId);
2943  else
2944  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid\n"
2945  "FROM pg_catalog.pg_class\n"
2946  "WHERE oid = %u;\n",
2947  LargeObjectMetadataRelationId);
2948 
2949  lo_res = ExecuteSqlQueryForSingleRow(fout, loFrozenQry->data);
2950 
2951  i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
2952  i_relminmxid = PQfnumber(lo_res, "relminmxid");
2953 
2954  appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, set pg_largeobject_metadata relfrozenxid and relminmxid\n");
2955  appendPQExpBuffer(loOutQry, "UPDATE pg_catalog.pg_class\n"
2956  "SET relfrozenxid = '%u', relminmxid = '%u'\n"
2957  "WHERE oid = %u;\n",
2958  atooid(PQgetvalue(lo_res, 0, i_relfrozenxid)),
2959  atooid(PQgetvalue(lo_res, 0, i_relminmxid)),
2960  LargeObjectMetadataRelationId);
2961  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2962  "pg_largeobject_metadata", NULL, NULL, "",
2963  false, "pg_largeobject_metadata", SECTION_PRE_DATA,
2964  loOutQry->data, "", NULL,
2965  NULL, 0,
2966  NULL, NULL);
2967 
2968  PQclear(lo_res);
2969  }
2970 
2971  destroyPQExpBuffer(loFrozenQry);
2972  destroyPQExpBuffer(loOutQry);
2973  }
2974 
2975  PQclear(res);
2976 
2977  free(qdatname);
2978  destroyPQExpBuffer(dbQry);
2979  destroyPQExpBuffer(delQry);
2980  destroyPQExpBuffer(creaQry);
2981  destroyPQExpBuffer(labelq);
2982 }
2983 
2984 /*
2985  * Collect any database-specific or role-and-database-specific SET options
2986  * for this database, and append them to outbuf.
2987  */
2988 static void
2990  const char *dbname, Oid dboid)
2991 {
2992  PGconn *conn = GetConnection(AH);
2994  PGresult *res;
2995  int count = 1;
2996 
2997  /*
2998  * First collect database-specific options. Pre-8.4 server versions lack
2999  * unnest(), so we do this the hard way by querying once per subscript.
3000  */
3001  for (;;)
3002  {
3003  if (AH->remoteVersion >= 90000)
3004  printfPQExpBuffer(buf, "SELECT setconfig[%d] FROM pg_db_role_setting "
3005  "WHERE setrole = 0 AND setdatabase = '%u'::oid",
3006  count, dboid);
3007  else
3008  printfPQExpBuffer(buf, "SELECT datconfig[%d] FROM pg_database WHERE oid = '%u'::oid", count, dboid);
3009 
3010  res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);
3011 
3012  if (PQntuples(res) == 1 &&
3013  !PQgetisnull(res, 0, 0))
3014  {
3015  makeAlterConfigCommand(conn, PQgetvalue(res, 0, 0),
3016  "DATABASE", dbname, NULL, NULL,
3017  outbuf);
3018  PQclear(res);
3019  count++;
3020  }
3021  else
3022  {
3023  PQclear(res);
3024  break;
3025  }
3026  }
3027 
3028  /* Now look for role-and-database-specific options */
3029  if (AH->remoteVersion >= 90000)
3030  {
3031  /* Here we can assume we have unnest() */
3032  printfPQExpBuffer(buf, "SELECT rolname, unnest(setconfig) "
3033  "FROM pg_db_role_setting s, pg_roles r "
3034  "WHERE setrole = r.oid AND setdatabase = '%u'::oid",
3035  dboid);
3036 
3037  res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);
3038 
3039  if (PQntuples(res) > 0)
3040  {
3041  int i;
3042 
3043  for (i = 0; i < PQntuples(res); i++)
3044  makeAlterConfigCommand(conn, PQgetvalue(res, i, 1),
3045  "ROLE", PQgetvalue(res, i, 0),
3046  "DATABASE", dbname,
3047  outbuf);
3048  }
3049 
3050  PQclear(res);
3051  }
3052 
3053  destroyPQExpBuffer(buf);
3054 }
3055 
3056 /*
3057  * dumpEncoding: put the correct encoding into the archive
3058  */
3059 static void
3061 {
3062  const char *encname = pg_encoding_to_char(AH->encoding);
3064 
3065  if (g_verbose)
3066  write_msg(NULL, "saving encoding = %s\n", encname);
3067 
3068  appendPQExpBufferStr(qry, "SET client_encoding = ");
3069  appendStringLiteralAH(qry, encname, AH);
3070  appendPQExpBufferStr(qry, ";\n");
3071 
3072  ArchiveEntry(AH, nilCatalogId, createDumpId(),
3073  "ENCODING", NULL, NULL, "",
3074  false, "ENCODING", SECTION_PRE_DATA,
3075  qry->data, "", NULL,
3076  NULL, 0,
3077  NULL, NULL);
3078 
3079  destroyPQExpBuffer(qry);
3080 }
3081 
3082 
3083 /*
3084  * dumpStdStrings: put the correct escape string behavior into the archive
3085  */
3086 static void
3088 {
3089  const char *stdstrings = AH->std_strings ? "on" : "off";
3091 
3092  if (g_verbose)
3093  write_msg(NULL, "saving standard_conforming_strings = %s\n",
3094  stdstrings);
3095 
3096  appendPQExpBuffer(qry, "SET standard_conforming_strings = '%s';\n",
3097  stdstrings);
3098 
3099  ArchiveEntry(AH, nilCatalogId, createDumpId(),
3100  "STDSTRINGS", NULL, NULL, "",
3101  false, "STDSTRINGS", SECTION_PRE_DATA,
3102  qry->data, "", NULL,
3103  NULL, 0,
3104  NULL, NULL);
3105 
3106  destroyPQExpBuffer(qry);
3107 }
3108 
3109 /*
3110  * dumpSearchPath: record the active search_path in the archive
3111  */
3112 static void
3114 {
3116  PQExpBuffer path = createPQExpBuffer();
3117  PGresult *res;
3118  char **schemanames = NULL;
3119  int nschemanames = 0;
3120  int i;
3121 
3122  /*
3123  * We use the result of current_schemas(), not the search_path GUC,
3124  * because that might contain wildcards such as "$user", which won't
3125  * necessarily have the same value during restore. Also, this way avoids
3126  * listing schemas that may appear in search_path but not actually exist,
3127  * which seems like a prudent exclusion.
3128  */
3129  res = ExecuteSqlQueryForSingleRow(AH,
3130  "SELECT pg_catalog.current_schemas(false)");
3131 
3132  if (!parsePGArray(PQgetvalue(res, 0, 0), &schemanames, &nschemanames))
3133  exit_horribly(NULL, "could not parse result of current_schemas()\n");
3134 
3135  /*
3136  * We use set_config(), not a simple "SET search_path" command, because
3137  * the latter has less-clean behavior if the search path is empty. While
3138  * that's likely to get fixed at some point, it seems like a good idea to
3139  * be as backwards-compatible as possible in what we put into archives.
3140  */
3141  for (i = 0; i < nschemanames; i++)
3142  {
3143  if (i > 0)
3144  appendPQExpBufferStr(path, ", ");
3145  appendPQExpBufferStr(path, fmtId(schemanames[i]));
3146  }
3147 
3148  appendPQExpBufferStr(qry, "SELECT pg_catalog.set_config('search_path', ");
3149  appendStringLiteralAH(qry, path->data, AH);
3150  appendPQExpBufferStr(qry, ", false);\n");
3151 
3152  if (g_verbose)
3153  write_msg(NULL, "saving search_path = %s\n", path->data);
3154 
3155  ArchiveEntry(AH, nilCatalogId, createDumpId(),
3156  "SEARCHPATH", NULL, NULL, "",
3157  false, "SEARCHPATH", SECTION_PRE_DATA,
3158  qry->data, "", NULL,
3159  NULL, 0,
3160  NULL, NULL);
3161 
3162  /* Also save it in AH->searchpath, in case we're doing plain text dump */
3163  AH->searchpath = pg_strdup(qry->data);
3164 
3165  if (schemanames)
3166  free(schemanames);
3167  PQclear(res);
3168  destroyPQExpBuffer(qry);
3169  destroyPQExpBuffer(path);
3170 }
3171 
3172 
3173 /*
3174  * getBlobs:
3175  * Collect schema-level data about large objects
3176  */
3177 static void
3179 {
3180  DumpOptions *dopt = fout->dopt;
3181  PQExpBuffer blobQry = createPQExpBuffer();
3182  BlobInfo *binfo;
3183  DumpableObject *bdata;
3184  PGresult *res;
3185  int ntups;
3186  int i;
3187  int i_oid;
3188  int i_lomowner;
3189  int i_lomacl;
3190  int i_rlomacl;
3191  int i_initlomacl;
3192  int i_initrlomacl;
3193 
3194  /* Verbose message */
3195  if (g_verbose)
3196  write_msg(NULL, "reading large objects\n");
3197 
3198  /* Fetch BLOB OIDs, and owner/ACL data if >= 9.0 */
3199  if (fout->remoteVersion >= 90600)
3200  {
3201  PQExpBuffer acl_subquery = createPQExpBuffer();
3202  PQExpBuffer racl_subquery = createPQExpBuffer();
3203  PQExpBuffer init_acl_subquery = createPQExpBuffer();
3204  PQExpBuffer init_racl_subquery = createPQExpBuffer();
3205 
3206  buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
3207  init_racl_subquery, "l.lomacl", "l.lomowner", "'L'",
3208  dopt->binary_upgrade);
3209 
3210  appendPQExpBuffer(blobQry,
3211  "SELECT l.oid, (%s l.lomowner) AS rolname, "
3212  "%s AS lomacl, "
3213  "%s AS rlomacl, "
3214  "%s AS initlomacl, "
3215  "%s AS initrlomacl "
3216  "FROM pg_largeobject_metadata l "
3217  "LEFT JOIN pg_init_privs pip ON "
3218  "(l.oid = pip.objoid "
3219  "AND pip.classoid = 'pg_largeobject'::regclass "
3220  "AND pip.objsubid = 0) ",
3222  acl_subquery->data,
3223  racl_subquery->data,
3224  init_acl_subquery->data,
3225  init_racl_subquery->data);
3226 
3227  destroyPQExpBuffer(acl_subquery);
3228  destroyPQExpBuffer(racl_subquery);
3229  destroyPQExpBuffer(init_acl_subquery);
3230  destroyPQExpBuffer(init_racl_subquery);
3231  }
3232  else if (fout->remoteVersion >= 90000)
3233  appendPQExpBuffer(blobQry,
3234  "SELECT oid, (%s lomowner) AS rolname, lomacl, "
3235  "NULL AS rlomacl, NULL AS initlomacl, "
3236  "NULL AS initrlomacl "
3237  " FROM pg_largeobject_metadata",
3239  else
3240  appendPQExpBufferStr(blobQry,
3241  "SELECT DISTINCT loid AS oid, "
3242  "NULL::name AS rolname, NULL::oid AS lomacl, "
3243  "NULL::oid AS rlomacl, NULL::oid AS initlomacl, "
3244  "NULL::oid AS initrlomacl "
3245  " FROM pg_largeobject");
3246 
3247  res = ExecuteSqlQuery(fout, blobQry->data, PGRES_TUPLES_OK);
3248 
3249  i_oid = PQfnumber(res, "oid");
3250  i_lomowner = PQfnumber(res, "rolname");
3251  i_lomacl = PQfnumber(res, "lomacl");
3252  i_rlomacl = PQfnumber(res, "rlomacl");
3253  i_initlomacl = PQfnumber(res, "initlomacl");
3254  i_initrlomacl = PQfnumber(res, "initrlomacl");
3255 
3256  ntups = PQntuples(res);
3257 
3258  /*
3259  * Each large object has its own BLOB archive entry.
3260  */
3261  binfo = (BlobInfo *) pg_malloc(ntups * sizeof(BlobInfo));
3262 
3263  for (i = 0; i < ntups; i++)
3264  {
3265  binfo[i].dobj.objType = DO_BLOB;
3266  binfo[i].dobj.catId.tableoid = LargeObjectRelationId;
3267  binfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
3268  AssignDumpId(&binfo[i].dobj);
3269 
3270  binfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oid));
3271  binfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_lomowner));
3272  binfo[i].blobacl = pg_strdup(PQgetvalue(res, i, i_lomacl));
3273  binfo[i].rblobacl = pg_strdup(PQgetvalue(res, i, i_rlomacl));
3274  binfo[i].initblobacl = pg_strdup(PQgetvalue(res, i, i_initlomacl));
3275  binfo[i].initrblobacl = pg_strdup(PQgetvalue(res, i, i_initrlomacl));
3276 
3277  if (PQgetisnull(res, i, i_lomacl) &&
3278  PQgetisnull(res, i, i_rlomacl) &&
3279  PQgetisnull(res, i, i_initlomacl) &&
3280  PQgetisnull(res, i, i_initrlomacl))
3281  binfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
3282 
3283  /*
3284  * In binary-upgrade mode for blobs, we do *not* dump out the data or
3285  * the ACLs, should any exist. The data and ACL (if any) will be
3286  * copied by pg_upgrade, which simply copies the pg_largeobject and
3287  * pg_largeobject_metadata tables.
3288  *
3289  * We *do* dump out the definition of the blob because we need that to
3290  * make the restoration of the comments, and anything else, work since
3291  * pg_upgrade copies the files behind pg_largeobject and
3292  * pg_largeobject_metadata after the dump is restored.
3293  */
3294  if (dopt->binary_upgrade)
3296  }
3297 
3298  /*
3299  * If we have any large objects, a "BLOBS" archive entry is needed. This
3300  * is just a placeholder for sorting; it carries no data now.
3301  */
3302  if (ntups > 0)
3303  {
3304  bdata = (DumpableObject *) pg_malloc(sizeof(DumpableObject));
3305  bdata->objType = DO_BLOB_DATA;
3306  bdata->catId = nilCatalogId;
3307  AssignDumpId(bdata);
3308  bdata->name = pg_strdup("BLOBS");
3309  }
3310 
3311  PQclear(res);
3312  destroyPQExpBuffer(blobQry);
3313 }
3314 
3315 /*
3316  * dumpBlob
3317  *
3318  * dump the definition (metadata) of the given large object
3319  */
3320 static void
3321 dumpBlob(Archive *fout, BlobInfo *binfo)
3322 {
3323  PQExpBuffer cquery = createPQExpBuffer();
3324  PQExpBuffer dquery = createPQExpBuffer();
3325 
3326  appendPQExpBuffer(cquery,
3327  "SELECT pg_catalog.lo_create('%s');\n",
3328  binfo->dobj.name);
3329 
3330  appendPQExpBuffer(dquery,
3331  "SELECT pg_catalog.lo_unlink('%s');\n",
3332  binfo->dobj.name);
3333 
3334  if (binfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
3335  ArchiveEntry(fout, binfo->dobj.catId, binfo->dobj.dumpId,
3336  binfo->dobj.name,
3337  NULL, NULL,
3338  binfo->rolname, false,
3339  "BLOB", SECTION_PRE_DATA,
3340  cquery->data, dquery->data, NULL,
3341  NULL, 0,
3342  NULL, NULL);
3343 
3344  /* Dump comment if any */
3345  if (binfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3346  dumpComment(fout, "LARGE OBJECT", binfo->dobj.name,
3347  NULL, binfo->rolname,
3348  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3349 
3350  /* Dump security label if any */
3351  if (binfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3352  dumpSecLabel(fout, "LARGE OBJECT", binfo->dobj.name,
3353  NULL, binfo->rolname,
3354  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3355 
3356  /* Dump ACL if any */
3357  if (binfo->blobacl && (binfo->dobj.dump & DUMP_COMPONENT_ACL))
3358  dumpACL(fout, binfo->dobj.catId, binfo->dobj.dumpId, "LARGE OBJECT",
3359  binfo->dobj.name, NULL,
3360  NULL, binfo->rolname, binfo->blobacl, binfo->rblobacl,
3361  binfo->initblobacl, binfo->initrblobacl);
3362 
3363  destroyPQExpBuffer(cquery);
3364  destroyPQExpBuffer(dquery);
3365 }
3366 
3367 /*
3368  * dumpBlobs:
3369  * dump the data contents of all large objects
3370  */
3371 static int
3372 dumpBlobs(Archive *fout, void *arg)
3373 {
3374  const char *blobQry;
3375  const char *blobFetchQry;
3376  PGconn *conn = GetConnection(fout);
3377  PGresult *res;
3378  char buf[LOBBUFSIZE];
3379  int ntups;
3380  int i;
3381  int cnt;
3382 
3383  if (g_verbose)
3384  write_msg(NULL, "saving large objects\n");
3385 
3386  /*
3387  * Currently, we re-fetch all BLOB OIDs using a cursor. Consider scanning
3388  * the already-in-memory dumpable objects instead...
3389  */
3390  if (fout->remoteVersion >= 90000)
3391  blobQry = "DECLARE bloboid CURSOR FOR SELECT oid FROM pg_largeobject_metadata";
3392  else
3393  blobQry = "DECLARE bloboid CURSOR FOR SELECT DISTINCT loid FROM pg_largeobject";
3394 
3395  ExecuteSqlStatement(fout, blobQry);
3396 
3397  /* Command to fetch from cursor */
3398  blobFetchQry = "FETCH 1000 IN bloboid";
3399 
3400  do
3401  {
3402  /* Do a fetch */
3403  res = ExecuteSqlQuery(fout, blobFetchQry, PGRES_TUPLES_OK);
3404 
3405  /* Process the tuples, if any */
3406  ntups = PQntuples(res);
3407  for (i = 0; i < ntups; i++)
3408  {
3409  Oid blobOid;
3410  int loFd;
3411 
3412  blobOid = atooid(PQgetvalue(res, i, 0));
3413  /* Open the BLOB */
3414  loFd = lo_open(conn, blobOid, INV_READ);
3415  if (loFd == -1)
3416  exit_horribly(NULL, "could not open large object %u: %s",
3417  blobOid, PQerrorMessage(conn));
3418 
3419  StartBlob(fout, blobOid);
3420 
3421  /* Now read it in chunks, sending data to archive */
3422  do
3423  {
3424  cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
3425  if (cnt < 0)
3426  exit_horribly(NULL, "error reading large object %u: %s",
3427  blobOid, PQerrorMessage(conn));
3428 
3429  WriteData(fout, buf, cnt);
3430  } while (cnt > 0);
3431 
3432  lo_close(conn, loFd);
3433 
3434  EndBlob(fout, blobOid);
3435  }
3436 
3437  PQclear(res);
3438  } while (ntups > 0);
3439 
3440  return 1;
3441 }
3442 
/*
 * getPolicies
 *	  get information about policies on a dumpable table.
 *
 * For each table selected for policy dumping, this creates PolicyInfo
 * objects of two kinds: a marker object with polname == NULL recording
 * that ROW LEVEL SECURITY is enabled on the table, and one object per
 * explicit policy found in pg_policy.  Does nothing on pre-9.5 servers,
 * which lack row-level security.
 */
void
getPolicies(Archive *fout, TableInfo tblinfo[], int numTables)
{
	PQExpBuffer query;
	PGresult   *res;
	PolicyInfo *polinfo;
	/* result-column numbers of the pg_policy query */
	int			i_oid;
	int			i_tableoid;
	int			i_polname;
	int			i_polcmd;
	int			i_polpermissive;
	int			i_polroles;
	int			i_polqual;
	int			i_polwithcheck;
	int			i,
				j,
				ntups;

	/* Row-level security was introduced in PostgreSQL 9.5 */
	if (fout->remoteVersion < 90500)
		return;

	query = createPQExpBuffer();

	for (i = 0; i < numTables; i++)
	{
		TableInfo  *tbinfo = &tblinfo[i];

		/* Ignore row security on tables not to be dumped */
		if (!(tbinfo->dobj.dump & DUMP_COMPONENT_POLICY))
			continue;

		if (g_verbose)
			write_msg(NULL, "reading row security enabled for table \"%s.%s\"\n",
					  tbinfo->dobj.namespace->dobj.name,
					  tbinfo->dobj.name);

		/*
		 * Get row security enabled information for the table. We represent
		 * RLS enabled on a table by creating PolicyInfo object with an empty
		 * policy.
		 */
		if (tbinfo->rowsec)
		{
			/*
			 * Note: use tableoid 0 so that this object won't be mistaken for
			 * something that pg_depend entries apply to.
			 */
			polinfo = pg_malloc(sizeof(PolicyInfo));
			polinfo->dobj.objType = DO_POLICY;
			polinfo->dobj.catId.tableoid = 0;
			polinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
			AssignDumpId(&polinfo->dobj);
			polinfo->dobj.namespace = tbinfo->dobj.namespace;
			polinfo->dobj.name = pg_strdup(tbinfo->dobj.name);
			polinfo->poltable = tbinfo;
			/* polname == NULL marks this as the "RLS enabled" entry */
			polinfo->polname = NULL;
			polinfo->polcmd = '\0';
			polinfo->polpermissive = 0;
			polinfo->polroles = NULL;
			polinfo->polqual = NULL;
			polinfo->polwithcheck = NULL;
		}

		if (g_verbose)
			write_msg(NULL, "reading policies for table \"%s.%s\"\n",
					  tbinfo->dobj.namespace->dobj.name,
					  tbinfo->dobj.name);

		resetPQExpBuffer(query);

		/* Get the policies for the table. */
		/* polpermissive exists only in v10+; fake it as true before that */
		if (fout->remoteVersion >= 100000)
			appendPQExpBuffer(query,
							  "SELECT oid, tableoid, pol.polname, pol.polcmd, pol.polpermissive, "
							  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
							  " pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
							  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
							  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
							  "FROM pg_catalog.pg_policy pol "
							  "WHERE polrelid = '%u'",
							  tbinfo->dobj.catId.oid);
		else
			appendPQExpBuffer(query,
							  "SELECT oid, tableoid, pol.polname, pol.polcmd, 't' as polpermissive, "
							  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
							  " pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
							  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
							  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
							  "FROM pg_catalog.pg_policy pol "
							  "WHERE polrelid = '%u'",
							  tbinfo->dobj.catId.oid);
		res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);

		ntups = PQntuples(res);

		if (ntups == 0)
		{
			/*
			 * No explicit policies to handle (only the default-deny policy,
			 * which is handled as part of the table definition).  Clean up
			 * and return.
			 */
			PQclear(res);
			continue;
		}

		i_oid = PQfnumber(res, "oid");
		i_tableoid = PQfnumber(res, "tableoid");
		i_polname = PQfnumber(res, "polname");
		i_polcmd = PQfnumber(res, "polcmd");
		i_polpermissive = PQfnumber(res, "polpermissive");
		i_polroles = PQfnumber(res, "polroles");
		i_polqual = PQfnumber(res, "polqual");
		i_polwithcheck = PQfnumber(res, "polwithcheck");

		polinfo = pg_malloc(ntups * sizeof(PolicyInfo));

		for (j = 0; j < ntups; j++)
		{
			polinfo[j].dobj.objType = DO_POLICY;
			polinfo[j].dobj.catId.tableoid =
				atooid(PQgetvalue(res, j, i_tableoid));
			polinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
			AssignDumpId(&polinfo[j].dobj);
			polinfo[j].dobj.namespace = tbinfo->dobj.namespace;
			polinfo[j].poltable = tbinfo;
			polinfo[j].polname = pg_strdup(PQgetvalue(res, j, i_polname));
			polinfo[j].dobj.name = pg_strdup(polinfo[j].polname);

			/* polcmd is a single-letter command code ('*', 'r', 'a', ...) */
			polinfo[j].polcmd = *(PQgetvalue(res, j, i_polcmd));
			polinfo[j].polpermissive = *(PQgetvalue(res, j, i_polpermissive)) == 't';

			/* NULL polroles means the policy applies to PUBLIC */
			if (PQgetisnull(res, j, i_polroles))
				polinfo[j].polroles = NULL;
			else
				polinfo[j].polroles = pg_strdup(PQgetvalue(res, j, i_polroles));

			if (PQgetisnull(res, j, i_polqual))
				polinfo[j].polqual = NULL;
			else
				polinfo[j].polqual = pg_strdup(PQgetvalue(res, j, i_polqual));

			if (PQgetisnull(res, j, i_polwithcheck))
				polinfo[j].polwithcheck = NULL;
			else
				polinfo[j].polwithcheck
					= pg_strdup(PQgetvalue(res, j, i_polwithcheck));
		}
		PQclear(res);
	}
	destroyPQExpBuffer(query);
}
3599 
3600 /*
3601  * dumpPolicy
3602  * dump the definition of the given policy
3603  */
3604 static void
3606 {
3607  DumpOptions *dopt = fout->dopt;
3608  TableInfo *tbinfo = polinfo->poltable;
3609  PQExpBuffer query;
3610  PQExpBuffer delqry;
3611  const char *cmd;
3612  char *tag;
3613 
3614  if (dopt->dataOnly)
3615  return;
3616 
3617  /*
3618  * If polname is NULL, then this record is just indicating that ROW LEVEL
3619  * SECURITY is enabled for the table. Dump as ALTER TABLE <table> ENABLE
3620  * ROW LEVEL SECURITY.
3621  */
3622  if (polinfo->polname == NULL)
3623  {
3624  query = createPQExpBuffer();
3625 
3626  appendPQExpBuffer(query, "ALTER TABLE %s ENABLE ROW LEVEL SECURITY;",
3627  fmtQualifiedDumpable(polinfo));
3628 
3629  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3630  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3631  polinfo->dobj.name,
3632  polinfo->dobj.namespace->dobj.name,
3633  NULL,
3634  tbinfo->rolname, false,
3635  "ROW SECURITY", SECTION_POST_DATA,
3636  query->data, "", NULL,
3637  NULL, 0,
3638  NULL, NULL);
3639 
3640  destroyPQExpBuffer(query);
3641  return;
3642  }
3643 
3644  if (polinfo->polcmd == '*')
3645  cmd = "";
3646  else if (polinfo->polcmd == 'r')
3647  cmd = " FOR SELECT";
3648  else if (polinfo->polcmd == 'a')
3649  cmd = " FOR INSERT";
3650  else if (polinfo->polcmd == 'w')
3651  cmd = " FOR UPDATE";
3652  else if (polinfo->polcmd == 'd')
3653  cmd = " FOR DELETE";
3654  else
3655  {
3656  write_msg(NULL, "unexpected policy command type: %c\n",
3657  polinfo->polcmd);
3658  exit_nicely(1);
3659  }
3660 
3661  query = createPQExpBuffer();
3662  delqry = createPQExpBuffer();
3663 
3664  appendPQExpBuffer(query, "CREATE POLICY %s", fmtId(polinfo->polname));
3665 
3666  appendPQExpBuffer(query, " ON %s%s%s", fmtQualifiedDumpable(tbinfo),
3667  !polinfo->polpermissive ? " AS RESTRICTIVE" : "", cmd);
3668 
3669  if (polinfo->polroles != NULL)
3670  appendPQExpBuffer(query, " TO %s", polinfo->polroles);
3671 
3672  if (polinfo->polqual != NULL)
3673  appendPQExpBuffer(query, " USING (%s)", polinfo->polqual);
3674 
3675  if (polinfo->polwithcheck != NULL)
3676  appendPQExpBuffer(query, " WITH CHECK (%s)", polinfo->polwithcheck);
3677 
3678  appendPQExpBuffer(query, ";\n");
3679 
3680  appendPQExpBuffer(delqry, "DROP POLICY %s", fmtId(polinfo->polname));
3681  appendPQExpBuffer(delqry, " ON %s;\n", fmtQualifiedDumpable(tbinfo));
3682 
3683  tag = psprintf("%s %s", tbinfo->dobj.name, polinfo->dobj.name);
3684 
3685  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3686  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3687  tag,
3688  polinfo->dobj.namespace->dobj.name,
3689  NULL,
3690  tbinfo->rolname, false,
3691  "POLICY", SECTION_POST_DATA,
3692  query->data, delqry->data, NULL,
3693  NULL, 0,
3694  NULL, NULL);
3695 
3696  free(tag);
3697  destroyPQExpBuffer(query);
3698  destroyPQExpBuffer(delqry);
3699 }
3700 
3701 /*
3702  * getPublications
3703  * get information about publications
3704  */
3705 void
3707 {
3708  DumpOptions *dopt = fout->dopt;
3709  PQExpBuffer query;
3710  PGresult *res;
3711  PublicationInfo *pubinfo;
3712  int i_tableoid;
3713  int i_oid;
3714  int i_pubname;
3715  int i_rolname;
3716  int i_puballtables;
3717  int i_pubinsert;
3718  int i_pubupdate;
3719  int i_pubdelete;
3720  int i_pubtruncate;
3721  int i,
3722  ntups;
3723 
3724  if (dopt->no_publications || fout->remoteVersion < 100000)
3725  return;
3726 
3727  query = createPQExpBuffer();
3728 
3729  resetPQExpBuffer(query);
3730 
3731  /* Get the publications. */
3732  if (fout->remoteVersion >= 110000)
3733  appendPQExpBuffer(query,
3734  "SELECT p.tableoid, p.oid, p.pubname, "
3735  "(%s p.pubowner) AS rolname, "
3736  "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, p.pubtruncate "
3737  "FROM pg_publication p",
3739  else
3740  appendPQExpBuffer(query,
3741  "SELECT p.tableoid, p.oid, p.pubname, "
3742  "(%s p.pubowner) AS rolname, "
3743  "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, false AS pubtruncate "
3744  "FROM pg_publication p",
3746 
3747  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3748 
3749  ntups = PQntuples(res);
3750 
3751  i_tableoid = PQfnumber(res, "tableoid");
3752  i_oid = PQfnumber(res, "oid");
3753  i_pubname = PQfnumber(res, "pubname");
3754  i_rolname = PQfnumber(res, "rolname");
3755  i_puballtables = PQfnumber(res, "puballtables");
3756  i_pubinsert = PQfnumber(res, "pubinsert");
3757  i_pubupdate = PQfnumber(res, "pubupdate");
3758  i_pubdelete = PQfnumber(res, "pubdelete");
3759  i_pubtruncate = PQfnumber(res, "pubtruncate");
3760 
3761  pubinfo = pg_malloc(ntups * sizeof(PublicationInfo));
3762 
3763  for (i = 0; i < ntups; i++)
3764  {
3765  pubinfo[i].dobj.objType = DO_PUBLICATION;
3766  pubinfo[i].dobj.catId.tableoid =
3767  atooid(PQgetvalue(res, i, i_tableoid));
3768  pubinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
3769  AssignDumpId(&pubinfo[i].dobj);
3770  pubinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_pubname));
3771  pubinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
3772  pubinfo[i].puballtables =
3773  (strcmp(PQgetvalue(res, i, i_puballtables), "t") == 0);
3774  pubinfo[i].pubinsert =
3775  (strcmp(PQgetvalue(res, i, i_pubinsert), "t") == 0);
3776  pubinfo[i].pubupdate =
3777  (strcmp(PQgetvalue(res, i, i_pubupdate), "t") == 0);
3778  pubinfo[i].pubdelete =
3779  (strcmp(PQgetvalue(res, i, i_pubdelete), "t") == 0);
3780  pubinfo[i].pubtruncate =
3781  (strcmp(PQgetvalue(res, i, i_pubtruncate), "t") == 0);
3782 
3783  if (strlen(pubinfo[i].rolname) == 0)
3784  write_msg(NULL, "WARNING: owner of publication \"%s\" appears to be invalid\n",
3785  pubinfo[i].dobj.name);
3786 
3787  /* Decide whether we want to dump it */
3788  selectDumpableObject(&(pubinfo[i].dobj), fout);
3789  }
3790  PQclear(res);
3791 
3792  destroyPQExpBuffer(query);
3793 }
3794 
3795 /*
3796  * dumpPublication
3797  * dump the definition of the given publication
3798  */
3799 static void
3801 {
3802  PQExpBuffer delq;
3803  PQExpBuffer query;
3804  char *qpubname;
3805  bool first = true;
3806 
 /* Skip unless the publication's definition component is selected for dumping */
3807  if (!(pubinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3808  return;
3809 
3810  delq = createPQExpBuffer();
3811  query = createPQExpBuffer();
3812 
3813  qpubname = pg_strdup(fmtId(pubinfo->dobj.name));
3814 
3815  appendPQExpBuffer(delq, "DROP PUBLICATION %s;\n",
3816  qpubname);
3817 
3818  appendPQExpBuffer(query, "CREATE PUBLICATION %s",
3819  qpubname);
3820 
3821  if (pubinfo->puballtables)
3822  appendPQExpBufferStr(query, " FOR ALL TABLES");
3823 
 /*
  * Build the publish option as a comma-separated list of the enabled
  * operations; "first" tracks whether a separator is needed before the
  * next keyword.
  */
3824  appendPQExpBufferStr(query, " WITH (publish = '");
3825  if (pubinfo->pubinsert)
3826  {
3827  appendPQExpBufferStr(query, "insert");
3828  first = false;
3829  }
3830 
3831  if (pubinfo->pubupdate)
3832  {
3833  if (!first)
3834  appendPQExpBufferStr(query, ", ");
3835 
3836  appendPQExpBufferStr(query, "update");
3837  first = false;
3838  }
3839 
3840  if (pubinfo->pubdelete)
3841  {
3842  if (!first)
3843  appendPQExpBufferStr(query, ", ");
3844 
3845  appendPQExpBufferStr(query, "delete");
3846  first = false;
3847  }
3848 
3849  if (pubinfo->pubtruncate)
3850  {
3851  if (!first)
3852  appendPQExpBufferStr(query, ", ");
3853 
3854  appendPQExpBufferStr(query, "truncate");
3855  first = false;
3856  }
3857 
3858  appendPQExpBufferStr(query, "');\n");
3859 
 /* Register the CREATE/DROP commands; publications restore in post-data */
3860  ArchiveEntry(fout, pubinfo->dobj.catId, pubinfo->dobj.dumpId,
3861  pubinfo->dobj.name,
3862  NULL,
3863  NULL,
3864  pubinfo->rolname, false,
3865  "PUBLICATION", SECTION_POST_DATA,
3866  query->data, delq->data, NULL,
3867  NULL, 0,
3868  NULL, NULL);
3869 
 /* Emit comment and security label entries if those components are selected */
3870  if (pubinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3871  dumpComment(fout, "PUBLICATION", qpubname,
3872  NULL, pubinfo->rolname,
3873  pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
3874 
3875  if (pubinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3876  dumpSecLabel(fout, "PUBLICATION", qpubname,
3877  NULL, pubinfo->rolname,
3878  pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
3879 
3880  destroyPQExpBuffer(delq);
3881  destroyPQExpBuffer(query);
3882  free(qpubname);
3883 }
3884 
3885 /*
3886  * getPublicationTables
3887  * get information about publication membership for dumpable tables.
3888  */
3889 void
3891 {
3892  PQExpBuffer query;
3893  PGresult *res;
3894  PublicationRelInfo *pubrinfo;
3895  int i_tableoid;
3896  int i_oid;
3897  int i_pubname;
3898  int i,
3899  j,
3900  ntups;
3901 
 /* Publications (pg_publication_rel) only exist in v10 and later */
3902  if (fout->remoteVersion < 100000)
3903  return;
3904 
3905  query = createPQExpBuffer();
3906 
3907  for (i = 0; i < numTables; i++)
3908  {
3909  TableInfo *tbinfo = &tblinfo[i];
3910 
3911  /* Only plain tables can be added to publications. */
3912  if (tbinfo->relkind != RELKIND_RELATION)
3913  continue;
3914 
3915  /*
3916  * Ignore publication membership of tables whose definitions are not
3917  * to be dumped.
3918  */
3919  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3920  continue;
3921 
3922  if (g_verbose)
3923  write_msg(NULL, "reading publication membership for table \"%s.%s\"\n",
3924  tbinfo->dobj.namespace->dobj.name,
3925  tbinfo->dobj.name);
3926 
3927  resetPQExpBuffer(query);
3928 
3929  /* Get the publication membership for the table. */
3930  appendPQExpBuffer(query,
3931  "SELECT pr.tableoid, pr.oid, p.pubname "
3932  "FROM pg_publication_rel pr, pg_publication p "
3933  "WHERE pr.prrelid = '%u'"
3934  " AND p.oid = pr.prpubid",
3935  tbinfo->dobj.catId.oid);
3936  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3937 
3938  ntups = PQntuples(res);
3939 
3940  if (ntups == 0)
3941  {
3942  /*
3943  * Table is not member of any publications. Clean up and skip to
3944  * the next table.
3945  */
3946  PQclear(res);
3947  continue;
3948  }
3949 
3950  i_tableoid = PQfnumber(res, "tableoid");
3951  i_oid = PQfnumber(res, "oid");
3952  i_pubname = PQfnumber(res, "pubname");
3953 
3954  pubrinfo = pg_malloc(ntups * sizeof(PublicationRelInfo));
3955 
 /* One PublicationRelInfo per publication this table belongs to */
3956  for (j = 0; j < ntups; j++)
3957  {
3958  pubrinfo[j].dobj.objType = DO_PUBLICATION_REL;
3959  pubrinfo[j].dobj.catId.tableoid =
3960  atooid(PQgetvalue(res, j, i_tableoid));
3961  pubrinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
3962  AssignDumpId(&pubrinfo[j].dobj);
3963  pubrinfo[j].dobj.namespace = tbinfo->dobj.namespace;
3964  pubrinfo[j].dobj.name = tbinfo->dobj.name;
3965  pubrinfo[j].pubname = pg_strdup(PQgetvalue(res, j, i_pubname));
3966  pubrinfo[j].pubtable = tbinfo;
3967 
3968  /* Decide whether we want to dump it */
3969  selectDumpablePublicationTable(&(pubrinfo[j].dobj), fout);
3970  }
3971  PQclear(res);
3972  }
3973  destroyPQExpBuffer(query);
3974 }
3974 
3975 /*
3976  * dumpPublicationTable
3977  * dump the definition of the given publication table mapping
3978  */
3979 static void
3981 {
3982  TableInfo *tbinfo = pubrinfo->pubtable;
3983  PQExpBuffer query;
3984  char *tag;
3985 
3986  if (!(pubrinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3987  return;
3988 
 /* TOC tag combines publication name and table name */
3989  tag = psprintf("%s %s", pubrinfo->pubname, tbinfo->dobj.name);
3990 
3991  query = createPQExpBuffer();
3992 
3993  appendPQExpBuffer(query, "ALTER PUBLICATION %s ADD TABLE ONLY",
3994  fmtId(pubrinfo->pubname));
3995  appendPQExpBuffer(query, " %s;\n",
3996  fmtQualifiedDumpable(tbinfo));
3997 
3998  /*
3999  * There is no point in creating a separate drop query, as the drop is
4000  * done by table drop.
4001  */
4002  ArchiveEntry(fout, pubrinfo->dobj.catId, pubrinfo->dobj.dumpId,
4003  tag,
4004  tbinfo->dobj.namespace->dobj.name,
4005  NULL,
4006  "", false,
4007  "PUBLICATION TABLE", SECTION_POST_DATA,
4008  query->data, "", NULL,
4009  NULL, 0,
4010  NULL, NULL);
4011 
4012  free(tag);
4013  destroyPQExpBuffer(query);
4014 }
4015 
4016 /*
4017  * Is the currently connected user a superuser?
4018  */
4019 static bool
4021 {
4022  ArchiveHandle *AH = (ArchiveHandle *) fout;
4023  const char *val;
4024 
 /* libpq exposes the server's is_superuser setting as a parameter status */
4025  val = PQparameterStatus(AH->connection, "is_superuser");
4026 
 /* PQparameterStatus may return NULL if the parameter was never reported */
4027  if (val && strcmp(val, "on") == 0)
4028  return true;
4029 
4030  return false;
4031 }
4032 
4033 /*
4034  * getSubscriptions
4035  * get information about subscriptions
4036  */
4037 void
4039 {
4040  DumpOptions *dopt = fout->dopt;
4041  PQExpBuffer query;
4042  PGresult *res;
4043  SubscriptionInfo *subinfo;
4044  int i_tableoid;
4045  int i_oid;
4046  int i_subname;
4047  int i_rolname;
4048  int i_subconninfo;
4049  int i_subslotname;
4050  int i_subsynccommit;
4051  int i_subpublications;
4052  int i,
4053  ntups;
4054 
 /* Subscriptions only exist in v10 and later, and may be suppressed by option */
4055  if (dopt->no_subscriptions || fout->remoteVersion < 100000)
4056  return;
4057 
 /*
  * Non-superusers cannot dump subscriptions; count them so we can warn
  * (only when there actually are some) instead of failing.
  */
4058  if (!is_superuser(fout))
4059  {
4060  int n;
4061 
4062  res = ExecuteSqlQuery(fout,
4063  "SELECT count(*) FROM pg_subscription "
4064  "WHERE subdbid = (SELECT oid FROM pg_database"
4065  " WHERE datname = current_database())",
4066  PGRES_TUPLES_OK);
4067  n = atoi(PQgetvalue(res, 0, 0));
4068  if (n > 0)
4069  write_msg(NULL, "WARNING: subscriptions not dumped because current user is not a superuser\n");
4070  PQclear(res);
4071  return;
4072  }
4073 
4074  query = createPQExpBuffer();
4075 
4076  resetPQExpBuffer(query);
4077 
4078  /* Get the subscriptions in current database. */
4079  appendPQExpBuffer(query,
4080  "SELECT s.tableoid, s.oid, s.subname,"
4081  "(%s s.subowner) AS rolname, "
4082  " s.subconninfo, s.subslotname, s.subsynccommit, "
4083  " s.subpublications "
4084  "FROM pg_subscription s "
4085  "WHERE s.subdbid = (SELECT oid FROM pg_database"
4086  " WHERE datname = current_database())",
 /*
  * NOTE(review): the argument line supplying the "%s" above (the role-name
  * subquery) appears to have been lost in source extraction -- verify
  * against upstream pg_dump.c.
  */
4088  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4089 
4090  ntups = PQntuples(res);
4091 
4092  i_tableoid = PQfnumber(res, "tableoid");
4093  i_oid = PQfnumber(res, "oid");
4094  i_subname = PQfnumber(res, "subname");
4095  i_rolname = PQfnumber(res, "rolname");
4096  i_subconninfo = PQfnumber(res, "subconninfo");
4097  i_subslotname = PQfnumber(res, "subslotname");
4098  i_subsynccommit = PQfnumber(res, "subsynccommit");
4099  i_subpublications = PQfnumber(res, "subpublications");
4100 
4101  subinfo = pg_malloc(ntups * sizeof(SubscriptionInfo));
4102 
4103  for (i = 0; i < ntups; i++)
4104  {
4105  subinfo[i].dobj.objType = DO_SUBSCRIPTION;
4106  subinfo[i].dobj.catId.tableoid =
4107  atooid(PQgetvalue(res, i, i_tableoid));
4108  subinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4109  AssignDumpId(&subinfo[i].dobj);
4110  subinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_subname));
4111  subinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4112  subinfo[i].subconninfo = pg_strdup(PQgetvalue(res, i, i_subconninfo));
 /* subslotname is nullable; preserve NULL so dumpSubscription emits NONE */
4113  if (PQgetisnull(res, i, i_subslotname))
4114  subinfo[i].subslotname = NULL;
4115  else
4116  subinfo[i].subslotname = pg_strdup(PQgetvalue(res, i, i_subslotname));
4117  subinfo[i].subsynccommit =
4118  pg_strdup(PQgetvalue(res, i, i_subsynccommit));
4119  subinfo[i].subpublications =
4120  pg_strdup(PQgetvalue(res, i, i_subpublications));
4121 
4122  if (strlen(subinfo[i].rolname) == 0)
4123  write_msg(NULL, "WARNING: owner of subscription \"%s\" appears to be invalid\n",
4124  subinfo[i].dobj.name);
4125 
4126  /* Decide whether we want to dump it */
4127  selectDumpableObject(&(subinfo[i].dobj), fout);
4128  }
4129  PQclear(res);
4130 
4131  destroyPQExpBuffer(query);
4132 }
4133 
4134 /*
4135  * dumpSubscription
4136  * dump the definition of the given subscription
4137  */
4138 static void
4140 {
4141  PQExpBuffer delq;
4142  PQExpBuffer query;
4143  PQExpBuffer publications;
4144  char *qsubname;
4145  char **pubnames = NULL;
4146  int npubnames = 0;
4147  int i;
4148 
4149  if (!(subinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
4150  return;
4151 
4152  delq = createPQExpBuffer();
4153  query = createPQExpBuffer();
4154 
4155  qsubname = pg_strdup(fmtId(subinfo->dobj.name));
4156 
4157  appendPQExpBuffer(delq, "DROP SUBSCRIPTION %s;\n",
4158  qsubname);
4159 
4160  appendPQExpBuffer(query, "CREATE SUBSCRIPTION %s CONNECTION ",
4161  qsubname);
4162  appendStringLiteralAH(query, subinfo->subconninfo, fout);
4163 
4164  /* Build list of quoted publications and append them to query. */
4165  if (!parsePGArray(subinfo->subpublications, &pubnames, &npubnames))
4166  {
 /* On parse failure, warn and fall through with an empty publication list */
4167  write_msg(NULL,
4168  "WARNING: could not parse subpublications array\n");
4169  if (pubnames)
4170  free(pubnames);
4171  pubnames = NULL;
4172  npubnames = 0;
4173  }
4174 
4175  publications = createPQExpBuffer();
4176  for (i = 0; i < npubnames; i++)
4177  {
4178  if (i > 0)
4179  appendPQExpBufferStr(publications, ", ");
4180 
4181  appendPQExpBufferStr(publications, fmtId(pubnames[i]));
4182  }
4183 
 /* connect = false: the restored subscription is created without connecting */
4184  appendPQExpBuffer(query, " PUBLICATION %s WITH (connect = false, slot_name = ", publications->data);
4185  if (subinfo->subslotname)
4186  appendStringLiteralAH(query, subinfo->subslotname, fout);
4187  else
4188  appendPQExpBufferStr(query, "NONE");
4189 
 /* Only emit synchronous_commit when it differs from the default "off" */
4190  if (strcmp(subinfo->subsynccommit, "off") != 0)
4191  appendPQExpBuffer(query, ", synchronous_commit = %s", fmtId(subinfo->subsynccommit));
4192 
4193  appendPQExpBufferStr(query, ");\n");
4194 
4195  ArchiveEntry(fout, subinfo->dobj.catId, subinfo->dobj.dumpId,
4196  subinfo->dobj.name,
4197  NULL,
4198  NULL,
4199  subinfo->rolname, false,
4200  "SUBSCRIPTION", SECTION_POST_DATA,
4201  query->data, delq->data, NULL,
4202  NULL, 0,
4203  NULL, NULL);
4204 
4205  if (subinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
4206  dumpComment(fout, "SUBSCRIPTION", qsubname,
4207  NULL, subinfo->rolname,
4208  subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
4209 
4210  if (subinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
4211  dumpSecLabel(fout, "SUBSCRIPTION", qsubname,
4212  NULL, subinfo->rolname,
4213  subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
4214 
4215  destroyPQExpBuffer(publications);
4216  if (pubnames)
4217  free(pubnames);
4218 
4219  destroyPQExpBuffer(delq);
4220  destroyPQExpBuffer(query);
4221  free(qsubname);
4222 }
4223 
 /*
  * Append binary-upgrade commands to upgrade_buffer that pin the given
  * type's OID, and that of its array type if one exists (or, with
  * force_array_type, pick a currently-unused OID for it).
  */
4224 static void
4226  PQExpBuffer upgrade_buffer,
4227  Oid pg_type_oid,
4228  bool force_array_type)
4229 {
4230  PQExpBuffer upgrade_query = createPQExpBuffer();
4231  PGresult *res;
4232  Oid pg_type_array_oid;
4233 
4234  appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type oid\n");
4235  appendPQExpBuffer(upgrade_buffer,
4236  "SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4237  pg_type_oid);
4238 
4239  /* we only support old >= 8.3 for binary upgrades */
4240  appendPQExpBuffer(upgrade_query,
4241  "SELECT typarray "
4242  "FROM pg_catalog.pg_type "
4243  "WHERE oid = '%u'::pg_catalog.oid;",
4244  pg_type_oid);
4245 
4246  res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4247 
4248  pg_type_array_oid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typarray")));
4249 
4250  PQclear(res);
4251 
4252  if (!OidIsValid(pg_type_array_oid) && force_array_type)
4253  {
4254  /*
4255  * If the old version didn't assign an array type, but the new version
4256  * does, we must select an unused type OID to assign. This currently
4257  * only happens for domains, when upgrading pre-v11 to v11 and up.
4258  *
4259  * Note: local state here is kind of ugly, but we must have some,
4260  * since we mustn't choose the same unused OID more than once.
4261  */
4262  static Oid next_possible_free_oid = FirstNormalObjectId;
4263  bool is_dup;
4264 
 /* Probe candidate OIDs until one not present in pg_type is found */
4265  do
4266  {
4267  ++next_possible_free_oid;
4268  printfPQExpBuffer(upgrade_query,
4269  "SELECT EXISTS(SELECT 1 "
4270  "FROM pg_catalog.pg_type "
4271  "WHERE oid = '%u'::pg_catalog.oid);",
4272  next_possible_free_oid);
4273  res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4274  is_dup = (PQgetvalue(res, 0, 0)[0] == 't');
4275  PQclear(res);
4276  } while (is_dup);
4277 
4278  pg_type_array_oid = next_possible_free_oid;
4279  }
4280 
4281  if (OidIsValid(pg_type_array_oid))
4282  {
4283  appendPQExpBufferStr(upgrade_buffer,
4284  "\n-- For binary upgrade, must preserve pg_type array oid\n");
4285  appendPQExpBuffer(upgrade_buffer,
4286  "SELECT pg_catalog.binary_upgrade_set_next_array_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4287  pg_type_array_oid);
4288  }
4289 
4290  destroyPQExpBuffer(upgrade_query);
4291 }
4292 
 /*
  * Look up the row type of the given relation (and of its TOAST table, if
  * any) and append binary-upgrade commands preserving those pg_type OIDs.
  * Returns true if a TOAST type OID was emitted.
  */
4293 static bool
4295  PQExpBuffer upgrade_buffer,
4296  Oid pg_rel_oid)
4297 {
4298  PQExpBuffer upgrade_query = createPQExpBuffer();
4299  PGresult *upgrade_res;
4300  Oid pg_type_oid;
4301  bool toast_set = false;
4302 
4303  /* we only support old >= 8.3 for binary upgrades */
4304  appendPQExpBuffer(upgrade_query,
4305  "SELECT c.reltype AS crel, t.reltype AS trel "
4306  "FROM pg_catalog.pg_class c "
4307  "LEFT JOIN pg_catalog.pg_class t ON "
4308  " (c.reltoastrelid = t.oid) "
4309  "WHERE c.oid = '%u'::pg_catalog.oid;",
4310  pg_rel_oid);
4311 
4312  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4313 
4314  pg_type_oid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "crel")));
4315 
 /* Preserve the relation's own row type OID (no array type forced) */
4316  binary_upgrade_set_type_oids_by_type_oid(fout, upgrade_buffer,
4317  pg_type_oid, false);
4318 
 /* "trel" is NULL when the relation has no TOAST table */
4319  if (!PQgetisnull(upgrade_res, 0, PQfnumber(upgrade_res, "trel")))
4320  {
4321  /* Toast tables do not have pg_type array rows */
4322  Oid pg_type_toast_oid = atooid(PQgetvalue(upgrade_res, 0,
4323  PQfnumber(upgrade_res, "trel")));
4324 
4325  appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type toast oid\n");
4326  appendPQExpBuffer(upgrade_buffer,
4327  "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4328  pg_type_toast_oid);
4329 
4330  toast_set = true;
4331  }
4332 
4333  PQclear(upgrade_res);
4334  destroyPQExpBuffer(upgrade_query);
4335 
4336  return toast_set;
4337 }
4338 
 /*
  * Append binary-upgrade commands preserving the pg_class OID of the given
  * relation -- heap or index per is_index -- plus, for heaps, any TOAST
  * table and its index.
  */
4339 static void
4341  PQExpBuffer upgrade_buffer, Oid pg_class_oid,
4342  bool is_index)
4343 {
4344  PQExpBuffer upgrade_query = createPQExpBuffer();
4345  PGresult *upgrade_res;
4346  Oid pg_class_reltoastrelid;
4347  Oid pg_index_indexrelid;
4348 
 /* Fetch the TOAST relation and its (valid) index, if any */
4349  appendPQExpBuffer(upgrade_query,
4350  "SELECT c.reltoastrelid, i.indexrelid "
4351  "FROM pg_catalog.pg_class c LEFT JOIN "
4352  "pg_catalog.pg_index i ON (c.reltoastrelid = i.indrelid AND i.indisvalid) "
4353  "WHERE c.oid = '%u'::pg_catalog.oid;",
4354  pg_class_oid);
4355 
4356  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4357 
4358  pg_class_reltoastrelid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "reltoastrelid")));
4359  pg_index_indexrelid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "indexrelid")));
4360 
4361  appendPQExpBufferStr(upgrade_buffer,
4362  "\n-- For binary upgrade, must preserve pg_class oids\n");
4363 
4364  if (!is_index)
4365  {
4366  appendPQExpBuffer(upgrade_buffer,
4367  "SELECT pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('%u'::pg_catalog.oid);\n",
4368  pg_class_oid);
4369  /* only tables have toast tables, not indexes */
4370  if (OidIsValid(pg_class_reltoastrelid))
4371  {
4372  /*
4373  * One complexity is that the table definition might not require
4374  * the creation of a TOAST table, and the TOAST table might have
4375  * been created long after table creation, when the table was
4376  * loaded with wide data. By setting the TOAST oid we force
4377  * creation of the TOAST heap and TOAST index by the backend so we
4378  * can cleanly copy the files during binary upgrade.
4379  */
4380 
4381  appendPQExpBuffer(upgrade_buffer,
4382  "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('%u'::pg_catalog.oid);\n",
4383  pg_class_reltoastrelid);
4384 
4385  /* every toast table has an index */
4386  appendPQExpBuffer(upgrade_buffer,
4387  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
4388  pg_index_indexrelid);
4389  }
4390  }
4391  else
4392  appendPQExpBuffer(upgrade_buffer,
4393  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
4394  pg_class_oid);
4395 
4396  appendPQExpBufferChar(upgrade_buffer, '\n');
4397 
4398  PQclear(upgrade_res);
4399  destroyPQExpBuffer(upgrade_query);
4400 }
4401 
4402 /*
4403  * If the DumpableObject is a member of an extension, add a suitable
4404  * ALTER EXTENSION ADD command to the creation commands in upgrade_buffer.
4405  *
4406  * For somewhat historical reasons, objname should already be quoted,
4407  * but not objnamespace (if any).
4408  */
4409 static void
4411  DumpableObject *dobj,
4412  const char *objtype,
4413  const char *objname,
4414  const char *objnamespace)
4415 {
4416  DumpableObject *extobj = NULL;
4417  int i;
4418 
 /* Nothing to do unless the object belongs to an extension */
4419  if (!dobj->ext_member)
4420  return;
4421 
4422  /*
4423  * Find the parent extension. We could avoid this search if we wanted to
4424  * add a link field to DumpableObject, but the space costs of that would
4425  * be considerable. We assume that member objects could only have a
4426  * direct dependency on their own extension, not any others.
4427  */
4428  for (i = 0; i < dobj->nDeps; i++)
4429  {
4430  extobj = findObjectByDumpId(dobj->dependencies[i]);
4431  if (extobj && extobj->objType == DO_EXTENSION)
4432  break;
4433  extobj = NULL;
4434  }
4435  if (extobj == NULL)
4436  exit_horribly(NULL, "could not find parent extension for %s %s\n",
4437  objtype, objname);
4438 
4439  appendPQExpBufferStr(upgrade_buffer,
4440  "\n-- For binary upgrade, handle extension membership the hard way\n");
4441  appendPQExpBuffer(upgrade_buffer, "ALTER EXTENSION %s ADD %s ",
4442  fmtId(extobj->name),
4443  objtype);
 /* objname arrives pre-quoted; objnamespace does not (see header comment) */
4444  if (objnamespace && *objnamespace)
4445  appendPQExpBuffer(upgrade_buffer, "%s.", fmtId(objnamespace));
4446  appendPQExpBuffer(upgrade_buffer, "%s;\n", objname);
4447 }
4448 
4449 /*
4450  * getNamespaces:
4451  * read all namespaces in the system catalogs and return them in the
4452  * NamespaceInfo* structure
4453  *
4454  * numNamespaces is set to the number of namespaces read in
4455  */
4456 NamespaceInfo *
4458 {
4459  DumpOptions *dopt = fout->dopt;
4460  PGresult *res;
4461  int ntups;
4462  int i;
4463  PQExpBuffer query;
4464  NamespaceInfo *nsinfo;
4465  int i_tableoid;
4466  int i_oid;
4467  int i_nspname;
4468  int i_rolname;
4469  int i_nspacl;
4470  int i_rnspacl;
4471  int i_initnspacl;
4472  int i_initrnspacl;
4473 
4474  query = createPQExpBuffer();
4475 
4476  /*
4477  * we fetch all namespaces including system ones, so that every object we
4478  * read in can be linked to a containing namespace.
4479  */
4480  if (fout->remoteVersion >= 90600)
4481  {
 /* >= 9.6: also collect initial (pg_init_privs) and revoked ACL info */
4482  PQExpBuffer acl_subquery = createPQExpBuffer();
4483  PQExpBuffer racl_subquery = createPQExpBuffer();
4484  PQExpBuffer init_acl_subquery = createPQExpBuffer();
4485  PQExpBuffer init_racl_subquery = createPQExpBuffer();
4486 
4487  buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
4488  init_racl_subquery, "n.nspacl", "n.nspowner", "'n'",
4489  dopt->binary_upgrade);
4490 
4491  appendPQExpBuffer(query, "SELECT n.tableoid, n.oid, n.nspname, "
4492  "(%s nspowner) AS rolname, "
4493  "%s as nspacl, "
4494  "%s as rnspacl, "
4495  "%s as initnspacl, "
4496  "%s as initrnspacl "
4497  "FROM pg_namespace n "
4498  "LEFT JOIN pg_init_privs pip "
4499  "ON (n.oid = pip.objoid "
4500  "AND pip.classoid = 'pg_namespace'::regclass "
4501  "AND pip.objsubid = 0",
 /*
  * NOTE(review): the first argument for the "(%s nspowner)" placeholder
  * (role-name subquery) appears to have been lost in source extraction --
  * verify against upstream pg_dump.c.
  */
4503  acl_subquery->data,
4504  racl_subquery->data,
4505  init_acl_subquery->data,
4506  init_racl_subquery->data);
4507 
4508  appendPQExpBuffer(query, ") ");
4509 
4510  destroyPQExpBuffer(acl_subquery);
4511  destroyPQExpBuffer(racl_subquery);
4512  destroyPQExpBuffer(init_acl_subquery);
4513  destroyPQExpBuffer(init_racl_subquery);
4514  }
4515  else
 /* Pre-9.6: no pg_init_privs; emit NULLs for the extra ACL columns */
4516  appendPQExpBuffer(query, "SELECT tableoid, oid, nspname, "
4517  "(%s nspowner) AS rolname, "
4518  "nspacl, NULL as rnspacl, "
4519  "NULL AS initnspacl, NULL as initrnspacl "
4520  "FROM pg_namespace",
 /* NOTE(review): the "%s" argument line here also appears lost in extraction */
4522 
4523  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4524 
4525  ntups = PQntuples(res);
4526 
4527  nsinfo = (NamespaceInfo *) pg_malloc(ntups * sizeof(NamespaceInfo));
4528 
4529  i_tableoid = PQfnumber(res, "tableoid");
4530  i_oid = PQfnumber(res, "oid");
4531  i_nspname = PQfnumber(res, "nspname");
4532  i_rolname = PQfnumber(res, "rolname");
4533  i_nspacl = PQfnumber(res, "nspacl");
4534  i_rnspacl = PQfnumber(res, "rnspacl");
4535  i_initnspacl = PQfnumber(res, "initnspacl");
4536  i_initrnspacl = PQfnumber(res, "initrnspacl");
4537 
4538  for (i = 0; i < ntups; i++)
4539  {
4540  nsinfo[i].dobj.objType = DO_NAMESPACE;
4541  nsinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4542  nsinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4543  AssignDumpId(&nsinfo[i].dobj);
4544  nsinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_nspname));
4545  nsinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4546  nsinfo[i].nspacl = pg_strdup(PQgetvalue(res, i, i_nspacl));
4547  nsinfo[i].rnspacl = pg_strdup(PQgetvalue(res, i, i_rnspacl));
4548  nsinfo[i].initnspacl = pg_strdup(PQgetvalue(res, i, i_initnspacl));
4549  nsinfo[i].initrnspacl = pg_strdup(PQgetvalue(res, i, i_initrnspacl));
4550 
4551  /* Decide whether to dump this namespace */
4552  selectDumpableNamespace(&nsinfo[i], fout);
4553 
4554  /*
4555  * Do not try to dump ACL if the ACL is empty or the default.
4556  *
4557  * This is useful because, for some schemas/objects, the only
4558  * component we are going to try and dump is the ACL and if we can
4559  * remove that then 'dump' goes to zero/false and we don't consider
4560  * this object for dumping at all later on.
4561  */
4562  if (PQgetisnull(res, i, i_nspacl) && PQgetisnull(res, i, i_rnspacl) &&
4563  PQgetisnull(res, i, i_initnspacl) &&
4564  PQgetisnull(res, i, i_initrnspacl))
4565  nsinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4566 
4567  if (strlen(nsinfo[i].rolname) == 0)
4568  write_msg(NULL, "WARNING: owner of schema \"%s\" appears to be invalid\n",
4569  nsinfo[i].dobj.name);
4570  }
4571 
4572  PQclear(res);
4573  destroyPQExpBuffer(query);
4574 
4575  *numNamespaces = ntups;
4576 
4577  return nsinfo;
4578 }
4579 
4580 /*
4581  * findNamespace:
4582  * given a namespace OID, look up the info read by getNamespaces
4583  */
4584 static NamespaceInfo *
4586 {
4587  NamespaceInfo *nsinfo;
4588 
 /* A missing namespace is a fatal error, not a NULL return */
4589  nsinfo = findNamespaceByOid(nsoid);
4590  if (nsinfo == NULL)
4591  exit_horribly(NULL, "schema with OID %u does not exist\n", nsoid);
4592  return nsinfo;
4593 }
4594 
4595 /*
4596  * getExtensions:
4597  * read all extensions in the system catalogs and return them in the
4598  * ExtensionInfo* structure
4599  *
4600  * numExtensions is set to the number of extensions read in
4601  */
4602 ExtensionInfo *
4604 {
4605  DumpOptions *dopt = fout->dopt;
4606  PGresult *res;
4607  int ntups;
4608  int i;
4609  PQExpBuffer query;
4610  ExtensionInfo *extinfo;
4611  int i_tableoid;
4612  int i_oid;
4613  int i_extname;
4614  int i_nspname;
4615  int i_extrelocatable;
4616  int i_extversion;
4617  int i_extconfig;
4618  int i_extcondition;
4619 
4620  /*
4621  * Before 9.1, there are no extensions.
4622  */
4623  if (fout->remoteVersion < 90100)
4624  {
4625  *numExtensions = 0;
4626  return NULL;
4627  }
4628 
4629  query = createPQExpBuffer();
4630 
4631  appendPQExpBufferStr(query, "SELECT x.tableoid, x.oid, "
4632  "x.extname, n.nspname, x.extrelocatable, x.extversion, x.extconfig, x.extcondition "
4633  "FROM pg_extension x "
4634  "JOIN pg_namespace n ON n.oid = x.extnamespace");
4635 
4636  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4637 
4638  ntups = PQntuples(res);
4639 
4640  extinfo = (ExtensionInfo *) pg_malloc(ntups * sizeof(ExtensionInfo));
4641 
4642  i_tableoid = PQfnumber(res, "tableoid");
4643  i_oid = PQfnumber(res, "oid");
4644  i_extname = PQfnumber(res, "extname");
4645  i_nspname = PQfnumber(res, "nspname");
4646  i_extrelocatable = PQfnumber(res, "extrelocatable");
4647  i_extversion = PQfnumber(res, "extversion");
4648  i_extconfig = PQfnumber(res, "extconfig");
4649  i_extcondition = PQfnumber(res, "extcondition");
4650 
4651  for (i = 0; i < ntups; i++)
4652  {
4653  extinfo[i].dobj.objType = DO_EXTENSION;
4654  extinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4655  extinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4656  AssignDumpId(&extinfo[i].dobj);
4657  extinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_extname));
4658  extinfo[i].namespace = pg_strdup(PQgetvalue(res, i, i_nspname));
 /* boolean columns come back as the single character 't' or 'f' */
4659  extinfo[i].relocatable = *(PQgetvalue(res, i, i_extrelocatable)) == 't';
4660  extinfo[i].extversion = pg_strdup(PQgetvalue(res, i, i_extversion));
4661  extinfo[i].extconfig = pg_strdup(PQgetvalue(res, i, i_extconfig));
4662  extinfo[i].extcondition = pg_strdup(PQgetvalue(res, i, i_extcondition));
4663 
4664  /* Decide whether we want to dump it */
4665  selectDumpableExtension(&(extinfo[i]), dopt);
4666  }
4667 
4668  PQclear(res);
4669  destroyPQExpBuffer(query);
4670 
4671  *numExtensions = ntups;
4672 
4673  return extinfo;
4674 }
4675 
4676 /*
4677  * getTypes:
4678  * read all types in the system catalogs and return them in the
4679  * TypeInfo* structure
4680  *
4681  * numTypes is set to the number of types read in
4682  *
4683  * NB: this must run after getFuncs() because we assume we can do
4684  * findFuncByOid().
4685  */
4686 TypeInfo *
4688 {
4689  DumpOptions *dopt = fout->dopt;
4690  PGresult *res;
4691  int ntups;
4692  int i;
4693  PQExpBuffer query = createPQExpBuffer();
4694  TypeInfo *tyinfo;
4695  ShellTypeInfo *stinfo;
4696  int i_tableoid;
4697  int i_oid;
4698  int i_typname;
4699  int i_typnamespace;
4700  int i_typacl;
4701  int i_rtypacl;
4702  int i_inittypacl;
4703  int i_initrtypacl;
4704  int i_rolname;
4705  int i_typelem;
4706  int i_typrelid;
4707  int i_typrelkind;
4708  int i_typtype;
4709  int i_typisdefined;
4710  int i_isarray;
4711 
4712  /*
4713  * we include even the built-in types because those may be used as array
4714  * elements by user-defined types
4715  *
4716  * we filter out the built-in types when we dump out the types
4717  *
4718  * same approach for undefined (shell) types and array types
4719  *
4720  * Note: as of 8.3 we can reliably detect whether a type is an
4721  * auto-generated array type by checking the element type's typarray.
4722  * (Before that the test is capable of generating false positives.) We
4723  * still check for name beginning with '_', though, so as to avoid the
4724  * cost of the subselect probe for all standard types. This would have to
4725  * be revisited if the backend ever allows renaming of array types.
4726  */
4727 
4728  if (fout->remoteVersion >= 90600)
4729  {
4730  PQExpBuffer acl_subquery = createPQExpBuffer();
4731  PQExpBuffer racl_subquery = createPQExpBuffer();
4732  PQExpBuffer initacl_subquery = createPQExpBuffer();
4733  PQExpBuffer initracl_subquery = createPQExpBuffer();
4734 
4735  buildACLQueries(acl_subquery, racl_subquery, initacl_subquery,
4736  initracl_subquery, "t.typacl", "t.typowner", "'T'",
4737  dopt->binary_upgrade);
4738 
4739  appendPQExpBuffer(query, "SELECT t.tableoid, t.oid, t.typname, "
4740  "t.typnamespace, "
4741  "%s AS typacl, "
4742  "%s AS rtypacl, "
4743  "%s AS inittypacl, "
4744  "%s AS initrtypacl, "
4745  "(%s t.typowner) AS rolname, "
4746  "t.typelem, t.typrelid, "
4747  "CASE WHEN t.typrelid = 0 THEN ' '::\"char\" "
4748  "ELSE (SELECT relkind FROM pg_class WHERE oid = t.typrelid) END AS typrelkind, "
4749  "t.typtype, t.typisdefined, "
4750  "t.typname[0] = '_' AND t.typelem != 0 AND "
4751  "(SELECT typarray FROM pg_type te WHERE oid = t.typelem) = t.oid AS isarray "
4752  "FROM pg_type t "
4753  "LEFT JOIN pg_init_privs pip ON "
4754  "(t.oid = pip.objoid "
4755  "AND pip.classoid = 'pg_type'::regclass "
4756  "AND pip.objsubid = 0) ",
4757  acl_subquery->data,
4758  racl_subquery->data,
4759  initacl_subquery->data,
4760  initracl_subquery->data,
4762 
4763  destroyPQExpBuffer(acl_subquery);
4764  destroyPQExpBuffer(racl_subquery);
4765  destroyPQExpBuffer(initacl_subquery);
4766  destroyPQExpBuffer(initracl_subquery);
4767  }
4768  else if (fout->remoteVersion >= 90200)
4769  {
4770  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4771  "typnamespace, typacl, NULL as rtypacl, "
4772  "NULL AS inittypacl, NULL AS initrtypacl, "
4773  "(%s typowner) AS rolname, "
4774  "typelem, typrelid, "
4775  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4776  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4777  "typtype, typisdefined, "
4778  "typname[0] = '_' AND typelem != 0 AND "
4779  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
4780  "FROM pg_type",
4782  }
4783  else if (fout->remoteVersion >= 80300)
4784  {
4785  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4786  "typnamespace, NULL AS typacl, NULL as rtypacl, "
4787  "NULL AS inittypacl, NULL AS initrtypacl, "
4788  "(%s typowner) AS rolname, "
4789  "typelem, typrelid, "
4790  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4791  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4792  "typtype, typisdefined, "
4793  "typname[0] = '_' AND typelem != 0 AND "
4794  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
4795  "FROM pg_type",
4797  }
4798  else
4799  {
4800  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4801  "typnamespace, NULL AS typacl, NULL as rtypacl, "
4802  "NULL AS inittypacl, NULL AS initrtypacl, "
4803  "(%s typowner) AS rolname, "
4804  "typelem, typrelid, "
4805  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4806  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4807  "typtype, typisdefined, "
4808  "typname[0] = '_' AND typelem != 0 AS isarray "
4809  "FROM pg_type",
4811  }
4812 
4813  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4814 
4815  ntups = PQntuples(res);
4816 
4817  tyinfo = (TypeInfo *) pg_malloc(ntups * sizeof(TypeInfo));
4818 
4819  i_tableoid = PQfnumber(res, "tableoid");
4820  i_oid = PQfnumber(res, "oid");
4821  i_typname = PQfnumber(res, "typname");
4822  i_typnamespace = PQfnumber(res, "typnamespace");
4823  i_typacl = PQfnumber(res, "typacl");
4824  i_rtypacl = PQfnumber(res, "rtypacl");
4825  i_inittypacl = PQfnumber(res, "inittypacl");
4826  i_initrtypacl = PQfnumber(res, "initrtypacl");
4827  i_rolname = PQfnumber(res, "rolname");
4828  i_typelem = PQfnumber(res, "typelem");
4829  i_typrelid = PQfnumber(res, "typrelid");
4830  i_typrelkind = PQfnumber(res, "typrelkind");
4831  i_typtype = PQfnumber(res, "typtype");
4832  i_typisdefined = PQfnumber(res, "typisdefined");
4833  i_isarray = PQfnumber(res, "isarray");
4834 
4835  for (i = 0; i < ntups; i++)
4836  {
4837  tyinfo[i].dobj.objType = DO_TYPE;
4838  tyinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4839  tyinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4840  AssignDumpId(&tyinfo[i].dobj);
4841  tyinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_typname));
4842  tyinfo[i].dobj.namespace =
4843  findNamespace(fout,
4844  atooid(PQgetvalue(res, i, i_typnamespace)));
4845  tyinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4846  tyinfo[i].typacl = pg_strdup(PQgetvalue(res, i, i_typacl));
4847  tyinfo[i].rtypacl = pg_strdup(PQgetvalue(res, i, i_rtypacl));
4848  tyinfo[i].inittypacl = pg_strdup(PQgetvalue(res, i, i_inittypacl));
4849  tyinfo[i].initrtypacl = pg_strdup(PQgetvalue(res, i, i_initrtypacl));
4850  tyinfo[i].typelem = atooid(PQgetvalue(res, i, i_typelem));
4851  tyinfo[i].typrelid = atooid(PQgetvalue(res, i, i_typrelid));
4852  tyinfo[i].typrelkind = *PQgetvalue(res, i, i_typrelkind);
4853  tyinfo[i].typtype = *PQgetvalue(res, i, i_typtype);
4854  tyinfo[i].shellType = NULL;
4855 
4856  if (strcmp(PQgetvalue(res, i, i_typisdefined), "t") == 0)
4857  tyinfo[i].isDefined = true;
4858  else
4859  tyinfo[i].isDefined = false;
4860 
4861  if (strcmp(PQgetvalue(res, i, i_isarray), "t") == 0)
4862  tyinfo[i].isArray = true;
4863  else
4864  tyinfo[i].isArray = false;
4865 
4866  /* Decide whether we want to dump it */
4867  selectDumpableType(&tyinfo[i], fout);
4868 
4869  /* Do not try to dump ACL if no ACL exists. */
4870  if (PQgetisnull(res, i, i_typacl) && PQgetisnull(res, i, i_rtypacl) &&
4871  PQgetisnull(res, i, i_inittypacl) &&
4872  PQgetisnull(res, i, i_initrtypacl))
4873  tyinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4874 
4875  /*
4876  * If it's a domain, fetch info about its constraints, if any
4877  */
4878  tyinfo[i].nDomChecks = 0;
4879  tyinfo[i].domChecks = NULL;
4880  if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
4881  tyinfo[i].typtype == TYPTYPE_DOMAIN)
4882  getDomainConstraints(fout, &(tyinfo[i]));
4883 
4884  /*
4885  * If it's a base type, make a DumpableObject representing a shell
4886  * definition of the type. We will need to dump that ahead of the I/O
4887  * functions for the type. Similarly, range types need a shell
4888  * definition in case they have a canonicalize function.
4889  *
4890  * Note: the shell type doesn't have a catId. You might think it
4891  * should copy the base type's catId, but then it might capture the
4892  * pg_depend entries for the type, which we don't want.
4893  */
4894  if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
4895  (tyinfo[i].typtype == TYPTYPE_BASE ||
4896  tyinfo[i].typtype == TYPTYPE_RANGE))
4897  {
4898  stinfo = (ShellTypeInfo *) pg_malloc(sizeof(ShellTypeInfo));
4899  stinfo->dobj.objType = DO_SHELL_TYPE;
4900  stinfo->dobj.catId = nilCatalogId;
4901  AssignDumpId(&stinfo->dobj);
4902  stinfo->dobj.name = pg_strdup(tyinfo[i].dobj.name);
4903  stinfo->dobj.namespace = tyinfo[i].dobj.namespace;
4904  stinfo->baseType = &(tyinfo[i]);
4905  tyinfo[i].shellType = stinfo;
4906 
4907  /*
4908  * Initially mark the shell type as not to be dumped. We'll only
4909  * dump it if the I/O or canonicalize functions need to be dumped;
4910  * this is taken care of while sorting dependencies.
4911  */
4912  stinfo->dobj.dump = DUMP_COMPONENT_NONE;
4913  }
4914 
4915  if (strlen(tyinfo[i].rolname) == 0)
4916  write_msg(NULL, "WARNING: owner of data type \"%s\" appears to be invalid\n",
4917  tyinfo[i].dobj.name);
4918  }
4919 
4920  *numTypes = ntups;
4921 
4922  PQclear(res);
4923 
4924  destroyPQExpBuffer(query);
4925 
4926  return tyinfo;
4927 }
4928 
4929 /*
4930  * getOperators:
4931  * read all operators in the system catalogs and return them in the
4932  * OprInfo* structure
4933  *
4934  * numOprs is set to the number of operators read in
4935  */
4936 OprInfo *
4937 getOperators(Archive *fout, int *numOprs)
4938 {
4939  PGresult *res;
4940  int ntups;
4941  int i;
4942  PQExpBuffer query = createPQExpBuffer();
4943  OprInfo *oprinfo;
4944  int i_tableoid;
4945  int i_oid;
4946  int i_oprname;
4947  int i_oprnamespace;
4948  int i_rolname;
4949  int i_oprkind;
4950  int i_oprcode;
4951 
4952  /*
4953  * find all operators, including builtin operators; we filter out
4954  * system-defined operators at dump-out time.
4955  */
4956 
4957  appendPQExpBuffer(query, "SELECT tableoid, oid, oprname, "
4958  "oprnamespace, "
4959  "(%s oprowner) AS rolname, "
4960  "oprkind, "
4961  "oprcode::oid AS oprcode "
4962  "FROM pg_operator",
4964 
4965  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4966 
4967  ntups = PQntuples(res);
4968  *numOprs = ntups;
4969 
4970  oprinfo = (OprInfo *) pg_malloc(ntups * sizeof(OprInfo));
4971 
4972  i_tableoid = PQfnumber(res, "tableoid");
4973  i_oid = PQfnumber(res, "oid");
4974  i_oprname = PQfnumber(res, "oprname");
4975  i_oprnamespace = PQfnumber(res, "oprnamespace");
4976  i_rolname = PQfnumber(res, "rolname");
4977  i_oprkind = PQfnumber(res, "oprkind");
4978  i_oprcode = PQfnumber(res, "oprcode");
4979 
4980  for (i = 0; i < ntups; i++)
4981  {
4982  oprinfo[i].dobj.objType = DO_OPERATOR;
4983  oprinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4984  oprinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4985  AssignDumpId(&oprinfo[i].dobj);
4986  oprinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oprname));
4987  oprinfo[i].dobj.namespace =
4988  findNamespace(fout,
4989  atooid(PQgetvalue(res, i, i_oprnamespace)));
4990  oprinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4991  oprinfo[i].oprkind = (PQgetvalue(res, i, i_oprkind))[0];
4992  oprinfo[i].oprcode = atooid(PQgetvalue(res, i, i_oprcode));
4993 
4994  /* Decide whether we want to dump it */
4995  selectDumpableObject(&(oprinfo[i].dobj), fout);
4996 
4997  /* Operators do not currently have ACLs. */
4998  oprinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4999 
5000  if (strlen(oprinfo[i].rolname) == 0)
5001  write_msg(NULL, "WARNING: owner of operator \"%s\" appears to be invalid\n",
5002  oprinfo[i].dobj.name);
5003  }
5004 
5005  PQclear(res);
5006 
5007  destroyPQExpBuffer(query);
5008 
5009  return oprinfo;
5010 }
5011 
5012 /*
5013  * getCollations:
5014  * read all collations in the system catalogs and return them in the
5015  * CollInfo* structure
5016  *
5017  * numCollations is set to the number of collations read in
5018  */
5019 CollInfo *
5021 {
5022  PGresult *res;
5023  int ntups;
5024  int i;
5025  PQExpBuffer query;
5026  CollInfo *collinfo;
5027  int i_tableoid;
5028  int i_oid;
5029  int i_collname;
5030  int i_collnamespace;
5031  int i_rolname;
5032 
5033  /* Collations didn't exist pre-9.1 */
5034  if (fout->remoteVersion < 90100)
5035  {
5036  *numCollations = 0;
5037  return NULL;
5038  }
5039 
5040  query = createPQExpBuffer();
5041 
5042  /*
5043  * find all collations, including builtin collations; we filter out
5044  * system-defined collations at dump-out time.
5045  */
5046 
5047  appendPQExpBuffer(query, "SELECT tableoid, oid, collname, "
5048  "collnamespace, "
5049  "(%s collowner) AS rolname "
5050  "FROM pg_collation",
5052 
5053  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5054 
5055  ntups = PQntuples(res);
5056  *numCollations = ntups;
5057 
5058  collinfo = (CollInfo *) pg_malloc(ntups * sizeof(CollInfo));
5059 
5060  i_tableoid = PQfnumber(res, "tableoid");
5061  i_oid = PQfnumber(res, "oid");
5062  i_collname = PQfnumber(res, "collname");
5063  i_collnamespace = PQfnumber(res, "collnamespace");
5064  i_rolname = PQfnumber(res, "rolname");
5065 
5066  for (i = 0; i < ntups; i++)
5067  {
5068  collinfo[i].dobj.objType = DO_COLLATION;
5069  collinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5070  collinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5071  AssignDumpId(&collinfo[i].dobj);
5072  collinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_collname));
5073  collinfo[i].dobj.namespace =
5074  findNamespace(fout,
5075  atooid(PQgetvalue(res, i, i_collnamespace)));
5076  collinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5077 
5078  /* Decide whether we want to dump it */
5079  selectDumpableObject(&(collinfo[i].dobj), fout);
5080 
5081  /* Collations do not currently have ACLs. */
5082  collinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5083  }
5084 
5085  PQclear(res);
5086 
5087  destroyPQExpBuffer(query);
5088 
5089  return collinfo;
5090 }
5091 
5092 /*
5093  * getConversions:
5094  * read all conversions in the system catalogs and return them in the
5095  * ConvInfo* structure
5096  *
5097  * numConversions is set to the number of conversions read in
5098  */
5099 ConvInfo *
5100 getConversions(Archive *fout, int *numConversions)
5101 {
5102  PGresult *res;
5103  int ntups;
5104  int i;
5105  PQExpBuffer query;
5106  ConvInfo *convinfo;
5107  int i_tableoid;
5108  int i_oid;
5109  int i_conname;
5110  int i_connamespace;
5111  int i_rolname;
5112 
5113  query = createPQExpBuffer();
5114 
5115  /*
5116  * find all conversions, including builtin conversions; we filter out
5117  * system-defined conversions at dump-out time.
5118  */
5119 
5120  appendPQExpBuffer(query, "SELECT tableoid, oid, conname, "
5121  "connamespace, "
5122  "(%s conowner) AS rolname "
5123  "FROM pg_conversion",
5125 
5126  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5127 
5128  ntups = PQntuples(res);
5129  *numConversions = ntups;
5130 
5131  convinfo = (ConvInfo *) pg_malloc(ntups * sizeof(ConvInfo));
5132 
5133  i_tableoid = PQfnumber(res, "tableoid");
5134  i_oid = PQfnumber(res, "oid");
5135  i_conname = PQfnumber(res, "conname");
5136  i_connamespace = PQfnumber(res, "connamespace");
5137  i_rolname = PQfnumber(res, "rolname");
5138 
5139  for (i = 0; i < ntups; i++)
5140  {
5141  convinfo[i].dobj.objType = DO_CONVERSION;
5142  convinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5143  convinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5144  AssignDumpId(&convinfo[i].dobj);
5145  convinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_conname));
5146  convinfo[i].dobj.namespace =
5147  findNamespace(fout,
5148  atooid(PQgetvalue(res, i, i_connamespace)));
5149  convinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5150 
5151  /* Decide whether we want to dump it */
5152  selectDumpableObject(&(convinfo[i].dobj), fout);
5153 
5154  /* Conversions do not currently have ACLs. */
5155  convinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5156  }
5157 
5158  PQclear(res);
5159 
5160  destroyPQExpBuffer(query);
5161 
5162  return convinfo;
5163 }
5164 
5165 /*
5166  * getAccessMethods:
5167  * read all user-defined access methods in the system catalogs and return
5168  * them in the AccessMethodInfo* structure
5169  *
5170  * numAccessMethods is set to the number of access methods read in
5171  */
5173 getAccessMethods(Archive *fout, int *numAccessMethods)
5174 {
5175  PGresult *res;
5176  int ntups;
5177  int i;
5178  PQExpBuffer query;
5179  AccessMethodInfo *aminfo;
5180  int i_tableoid;
5181  int i_oid;
5182  int i_amname;
5183  int i_amhandler;
5184  int i_amtype;
5185 
5186  /* Before 9.6, there are no user-defined access methods */
5187  if (fout->remoteVersion < 90600)
5188  {
5189  *numAccessMethods = 0;
5190  return NULL;
5191  }
5192 
5193  query = createPQExpBuffer();
5194 
5195  /* Select all access methods from pg_am table */
5196  appendPQExpBuffer(query, "SELECT tableoid, oid, amname, amtype, "
5197  "amhandler::pg_catalog.regproc AS amhandler "
5198  "FROM pg_am");
5199 
5200  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5201 
5202  ntups = PQntuples(res);
5203  *numAccessMethods = ntups;
5204 
5205  aminfo = (AccessMethodInfo *) pg_malloc(ntups * sizeof(AccessMethodInfo));
5206 
5207  i_tableoid = PQfnumber(res, "tableoid");
5208  i_oid = PQfnumber(res, "oid");
5209  i_amname = PQfnumber(res, "amname");
5210  i_amhandler = PQfnumber(res, "amhandler");
5211  i_amtype = PQfnumber(res, "amtype");
5212 
5213  for (i = 0; i < ntups; i++)
5214  {
5215  aminfo[i].dobj.objType = DO_ACCESS_METHOD;
5216  aminfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5217  aminfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5218  AssignDumpId(&aminfo[i].dobj);
5219  aminfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_amname));
5220  aminfo[i].dobj.namespace = NULL;
5221  aminfo[i].amhandler = pg_strdup(PQgetvalue(res, i, i_amhandler));
5222  aminfo[i].amtype = *(PQgetvalue(res, i, i_amtype));
5223 
5224  /* Decide whether we want to dump it */
5225  selectDumpableAccessMethod(&(aminfo[i]), fout);
5226 
5227  /* Access methods do not currently have ACLs. */
5228  aminfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5229  }
5230 
5231  PQclear(res);
5232 
5233  destroyPQExpBuffer(query);
5234 
5235  return aminfo;
5236 }
5237 
5238 
5239 /*
5240  * getOpclasses:
5241  * read all opclasses in the system catalogs and return them in the
5242  * OpclassInfo* structure
5243  *
5244  * numOpclasses is set to the number of opclasses read in
5245  */
5246 OpclassInfo *
5247 getOpclasses(Archive *fout, int *numOpclasses)
5248 {
5249  PGresult *res;
5250  int ntups;
5251  int i;
5252  PQExpBuffer query = createPQExpBuffer();
5253  OpclassInfo *opcinfo;
5254  int i_tableoid;
5255  int i_oid;
5256  int i_opcname;
5257  int i_opcnamespace;
5258  int i_rolname;
5259 
5260  /*
5261  * find all opclasses, including builtin opclasses; we filter out
5262  * system-defined opclasses at dump-out time.
5263  */
5264 
5265  appendPQExpBuffer(query, "SELECT tableoid, oid, opcname, "
5266  "opcnamespace, "
5267  "(%s opcowner) AS rolname "
5268  "FROM pg_opclass",
5270 
5271  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5272 
5273  ntups = PQntuples(res);
5274  *numOpclasses = ntups;
5275 
5276  opcinfo = (OpclassInfo *) pg_malloc(ntups * sizeof(OpclassInfo));
5277 
5278  i_tableoid = PQfnumber(res, "tableoid");
5279  i_oid = PQfnumber(res, "oid");
5280  i_opcname = PQfnumber(res, "opcname");
5281  i_opcnamespace = PQfnumber(res, "opcnamespace");
5282  i_rolname = PQfnumber(res, "rolname");
5283 
5284  for (i = 0; i < ntups; i++)
5285  {
5286  opcinfo[i].dobj.objType = DO_OPCLASS;
5287  opcinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5288  opcinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5289  AssignDumpId(&opcinfo[i].dobj);
5290  opcinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opcname));
5291  opcinfo[i].dobj.namespace =
5292  findNamespace(fout,
5293  atooid(PQgetvalue(res, i, i_opcnamespace)));
5294  opcinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5295 
5296  /* Decide whether we want to dump it */
5297  selectDumpableObject(&(opcinfo[i].dobj), fout);
5298 
5299  /* Op Classes do not currently have ACLs. */
5300  opcinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5301 
5302  if (strlen(opcinfo[i].rolname) == 0)
5303  write_msg(NULL, "WARNING: owner of operator class \"%s\" appears to be invalid\n",
5304  opcinfo[i].dobj.name);
5305  }
5306 
5307  PQclear(res);
5308 
5309  destroyPQExpBuffer(query);
5310 
5311  return opcinfo;
5312 }
5313 
5314 /*
5315  * getOpfamilies:
5316  * read all opfamilies in the system catalogs and return them in the
5317  * OpfamilyInfo* structure
5318  *
5319  * numOpfamilies is set to the number of opfamilies read in
5320  */
5321 OpfamilyInfo *
5322 getOpfamilies(Archive *fout, int *numOpfamilies)
5323 {
5324  PGresult *res;
5325  int ntups;
5326  int i;
5327  PQExpBuffer query;
5328