PostgreSQL Source Code  git master
pg_dump.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * pg_dump.c
4  * pg_dump is a utility for dumping out a postgres database
5  * into a script file.
6  *
7  * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994, Regents of the University of California
9  *
10  * pg_dump will read the system catalogs in a database and dump out a
11  * script that reproduces the schema in terms of SQL that is understood
12  * by PostgreSQL
13  *
14  * Note that pg_dump runs in a transaction-snapshot mode transaction,
15  * so it sees a consistent snapshot of the database including system
16  * catalogs. However, it relies in part on various specialized backend
17  * functions like pg_get_indexdef(), and those things tend to look at
18  * the currently committed state. So it is possible to get 'cache
19  * lookup failed' error if someone performs DDL changes while a dump is
20  * happening. The window for this sort of thing is from the acquisition
21  * of the transaction snapshot to getSchemaData() (when pg_dump acquires
22  * AccessShareLock on every table it intends to dump). It isn't very large,
23  * but it can happen.
24  *
25  * http://archives.postgresql.org/pgsql-bugs/2010-02/msg00187.php
26  *
27  * IDENTIFICATION
28  * src/bin/pg_dump/pg_dump.c
29  *
30  *-------------------------------------------------------------------------
31  */
32 #include "postgres_fe.h"
33 
34 #include <unistd.h>
35 #include <ctype.h>
36 #include <limits.h>
37 #ifdef HAVE_TERMIOS_H
38 #include <termios.h>
39 #endif
40 
41 #include "getopt_long.h"
42 
43 #include "access/attnum.h"
44 #include "access/sysattr.h"
45 #include "access/transam.h"
46 #include "catalog/pg_aggregate_d.h"
47 #include "catalog/pg_am_d.h"
48 #include "catalog/pg_attribute_d.h"
49 #include "catalog/pg_cast_d.h"
50 #include "catalog/pg_class_d.h"
51 #include "catalog/pg_default_acl_d.h"
52 #include "catalog/pg_largeobject_d.h"
53 #include "catalog/pg_largeobject_metadata_d.h"
54 #include "catalog/pg_proc_d.h"
55 #include "catalog/pg_trigger_d.h"
56 #include "catalog/pg_type_d.h"
57 #include "libpq/libpq-fs.h"
58 #include "storage/block.h"
59 
60 #include "dumputils.h"
61 #include "parallel.h"
62 #include "pg_backup_db.h"
63 #include "pg_backup_utils.h"
64 #include "pg_dump.h"
65 #include "fe_utils/connect.h"
66 #include "fe_utils/string_utils.h"
67 
68 
/*
 * CommentItem: one object comment collected from pg_description, used when
 * emitting COMMENT ON statements for dumped objects.
 */
typedef struct
{
	const char *descr;			/* comment for an object */
	Oid			classoid;		/* object class (catalog OID) */
	Oid			objoid;			/* object OID */
	int			objsubid;		/* subobject (table column #) */
} CommentItem;
76 
/*
 * SecLabelItem: one security label collected from pg_seclabel, used when
 * emitting SECURITY LABEL statements for dumped objects.
 */
typedef struct
{
	const char *provider;		/* label provider of this security label */
	const char *label;			/* security label for an object */
	Oid			classoid;		/* object class (catalog OID) */
	Oid			objoid;			/* object OID */
	int			objsubid;		/* subobject (table column #) */
} SecLabelItem;
85 
/*
 * OidOptions: bit flags telling getFormattedTypeName() how to render a
 * zero (invalid) type OID.
 *
 * NOTE(review): the extraction dropped three of the four enumerators (the
 * internal numbering jumps from 87 to 89 to 92); they are restored here
 * from the upstream PostgreSQL sources.  Without them, references such as
 * zeroAsOpaque elsewhere in this file would not compile.
 */
typedef enum OidOptions
{
	zeroAsOpaque = 1,			/* render zero as the "opaque" type name */
	zeroAsAny = 2,				/* render zero as 'any' */
	zeroAsStar = 4,				/* render zero as * */
	zeroAsNone = 8				/* render zero as NONE */
} OidOptions;
93 
94 /* global decls */
95 static bool dosync = true; /* Issue fsync() to make dump durable on disk. */
96 
97 /* subquery used to convert user ID (eg, datdba) to user name */
98 static const char *username_subquery;
99 
100 /*
101  * For 8.0 and earlier servers, pulled from pg_database, for 8.1+ we use
102  * FirstNormalObjectId - 1.
103  */
104 static Oid g_last_builtin_oid; /* value of the last builtin oid */
105 
106 /* The specified names/patterns should to match at least one entity */
107 static int strict_names = 0;
108 
109 /*
110  * Object inclusion/exclusion lists
111  *
112  * The string lists record the patterns given by command-line switches,
113  * which we then convert to lists of OIDs of matching objects.
114  */
116 static SimpleOidList schema_include_oids = {NULL, NULL};
118 static SimpleOidList schema_exclude_oids = {NULL, NULL};
119 
121 static SimpleOidList table_include_oids = {NULL, NULL};
123 static SimpleOidList table_exclude_oids = {NULL, NULL};
125 static SimpleOidList tabledata_exclude_oids = {NULL, NULL};
126 
127 
128 char g_opaque_type[10]; /* name for the opaque type */
129 
130 /* placeholders for the delimiters for comments */
132 char g_comment_end[10];
133 
134 static const CatalogId nilCatalogId = {0, 0};
135 
136 /* override for standard extra_float_digits setting */
137 static bool have_extra_float_digits = false;
139 
140 /*
141  * The default number of rows per INSERT when
142  * --inserts is specified without --rows-per-insert
143  */
144 #define DUMP_DEFAULT_ROWS_PER_INSERT 1
145 
146 /*
147  * Macro for producing quoted, schema-qualified name of a dumpable object.
148  */
149 #define fmtQualifiedDumpable(obj) \
150  fmtQualifiedId((obj)->dobj.namespace->dobj.name, \
151  (obj)->dobj.name)
152 
153 static void help(const char *progname);
154 static void setup_connection(Archive *AH,
155  const char *dumpencoding, const char *dumpsnapshot,
156  char *use_role);
158 static void expand_schema_name_patterns(Archive *fout,
159  SimpleStringList *patterns,
160  SimpleOidList *oids,
161  bool strict_names);
162 static void expand_table_name_patterns(Archive *fout,
163  SimpleStringList *patterns,
164  SimpleOidList *oids,
165  bool strict_names);
166 static NamespaceInfo *findNamespace(Archive *fout, Oid nsoid);
167 static void dumpTableData(Archive *fout, TableDataInfo *tdinfo);
168 static void refreshMatViewData(Archive *fout, TableDataInfo *tdinfo);
169 static void guessConstraintInheritance(TableInfo *tblinfo, int numTables);
170 static void dumpComment(Archive *fout, const char *type, const char *name,
171  const char *namespace, const char *owner,
172  CatalogId catalogId, int subid, DumpId dumpId);
173 static int findComments(Archive *fout, Oid classoid, Oid objoid,
174  CommentItem **items);
175 static int collectComments(Archive *fout, CommentItem **items);
176 static void dumpSecLabel(Archive *fout, const char *type, const char *name,
177  const char *namespace, const char *owner,
178  CatalogId catalogId, int subid, DumpId dumpId);
179 static int findSecLabels(Archive *fout, Oid classoid, Oid objoid,
180  SecLabelItem **items);
181 static int collectSecLabels(Archive *fout, SecLabelItem **items);
182 static void dumpDumpableObject(Archive *fout, DumpableObject *dobj);
183 static void dumpNamespace(Archive *fout, NamespaceInfo *nspinfo);
184 static void dumpExtension(Archive *fout, ExtensionInfo *extinfo);
185 static void dumpType(Archive *fout, TypeInfo *tyinfo);
186 static void dumpBaseType(Archive *fout, TypeInfo *tyinfo);
187 static void dumpEnumType(Archive *fout, TypeInfo *tyinfo);
188 static void dumpRangeType(Archive *fout, TypeInfo *tyinfo);
189 static void dumpUndefinedType(Archive *fout, TypeInfo *tyinfo);
190 static void dumpDomain(Archive *fout, TypeInfo *tyinfo);
191 static void dumpCompositeType(Archive *fout, TypeInfo *tyinfo);
192 static void dumpCompositeTypeColComments(Archive *fout, TypeInfo *tyinfo);
193 static void dumpShellType(Archive *fout, ShellTypeInfo *stinfo);
194 static void dumpProcLang(Archive *fout, ProcLangInfo *plang);
195 static void dumpFunc(Archive *fout, FuncInfo *finfo);
196 static void dumpCast(Archive *fout, CastInfo *cast);
197 static void dumpTransform(Archive *fout, TransformInfo *transform);
198 static void dumpOpr(Archive *fout, OprInfo *oprinfo);
199 static void dumpAccessMethod(Archive *fout, AccessMethodInfo *oprinfo);
200 static void dumpOpclass(Archive *fout, OpclassInfo *opcinfo);
201 static void dumpOpfamily(Archive *fout, OpfamilyInfo *opfinfo);
202 static void dumpCollation(Archive *fout, CollInfo *collinfo);
203 static void dumpConversion(Archive *fout, ConvInfo *convinfo);
204 static void dumpRule(Archive *fout, RuleInfo *rinfo);
205 static void dumpAgg(Archive *fout, AggInfo *agginfo);
206 static void dumpTrigger(Archive *fout, TriggerInfo *tginfo);
207 static void dumpEventTrigger(Archive *fout, EventTriggerInfo *evtinfo);
208 static void dumpTable(Archive *fout, TableInfo *tbinfo);
209 static void dumpTableSchema(Archive *fout, TableInfo *tbinfo);
210 static void dumpAttrDef(Archive *fout, AttrDefInfo *adinfo);
211 static void dumpSequence(Archive *fout, TableInfo *tbinfo);
212 static void dumpSequenceData(Archive *fout, TableDataInfo *tdinfo);
213 static void dumpIndex(Archive *fout, IndxInfo *indxinfo);
214 static void dumpIndexAttach(Archive *fout, IndexAttachInfo *attachinfo);
215 static void dumpStatisticsExt(Archive *fout, StatsExtInfo *statsextinfo);
216 static void dumpConstraint(Archive *fout, ConstraintInfo *coninfo);
217 static void dumpTableConstraintComment(Archive *fout, ConstraintInfo *coninfo);
218 static void dumpTSParser(Archive *fout, TSParserInfo *prsinfo);
219 static void dumpTSDictionary(Archive *fout, TSDictInfo *dictinfo);
220 static void dumpTSTemplate(Archive *fout, TSTemplateInfo *tmplinfo);
221 static void dumpTSConfig(Archive *fout, TSConfigInfo *cfginfo);
222 static void dumpForeignDataWrapper(Archive *fout, FdwInfo *fdwinfo);
223 static void dumpForeignServer(Archive *fout, ForeignServerInfo *srvinfo);
224 static void dumpUserMappings(Archive *fout,
225  const char *servername, const char *namespace,
226  const char *owner, CatalogId catalogId, DumpId dumpId);
227 static void dumpDefaultACL(Archive *fout, DefaultACLInfo *daclinfo);
228 
229 static void dumpACL(Archive *fout, CatalogId objCatId, DumpId objDumpId,
230  const char *type, const char *name, const char *subname,
231  const char *nspname, const char *owner,
232  const char *acls, const char *racls,
233  const char *initacls, const char *initracls);
234 
235 static void getDependencies(Archive *fout);
236 static void BuildArchiveDependencies(Archive *fout);
238  DumpId **dependencies, int *nDeps, int *allocDeps);
239 
241 static void addBoundaryDependencies(DumpableObject **dobjs, int numObjs,
242  DumpableObject *boundaryObjs);
243 
244 static void getDomainConstraints(Archive *fout, TypeInfo *tyinfo);
245 static void getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, char relkind);
246 static void makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo);
247 static void buildMatViewRefreshDependencies(Archive *fout);
248 static void getTableDataFKConstraints(void);
249 static char *format_function_arguments(FuncInfo *finfo, char *funcargs,
250  bool is_agg);
251 static char *format_function_arguments_old(Archive *fout,
252  FuncInfo *finfo, int nallargs,
253  char **allargtypes,
254  char **argmodes,
255  char **argnames);
256 static char *format_function_signature(Archive *fout,
257  FuncInfo *finfo, bool honor_quotes);
258 static char *convertRegProcReference(Archive *fout,
259  const char *proc);
260 static char *getFormattedOperatorName(Archive *fout, const char *oproid);
261 static char *convertTSFunction(Archive *fout, Oid funcOid);
262 static Oid findLastBuiltinOid_V71(Archive *fout);
263 static char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts);
264 static void getBlobs(Archive *fout);
265 static void dumpBlob(Archive *fout, BlobInfo *binfo);
266 static int dumpBlobs(Archive *fout, void *arg);
267 static void dumpPolicy(Archive *fout, PolicyInfo *polinfo);
268 static void dumpPublication(Archive *fout, PublicationInfo *pubinfo);
269 static void dumpPublicationTable(Archive *fout, PublicationRelInfo *pubrinfo);
270 static void dumpSubscription(Archive *fout, SubscriptionInfo *subinfo);
271 static void dumpDatabase(Archive *AH);
272 static void dumpDatabaseConfig(Archive *AH, PQExpBuffer outbuf,
273  const char *dbname, Oid dboid);
274 static void dumpEncoding(Archive *AH);
275 static void dumpStdStrings(Archive *AH);
276 static void dumpSearchPath(Archive *AH);
278  PQExpBuffer upgrade_buffer,
279  Oid pg_type_oid,
280  bool force_array_type);
282  PQExpBuffer upgrade_buffer, Oid pg_rel_oid);
283 static void binary_upgrade_set_pg_class_oids(Archive *fout,
284  PQExpBuffer upgrade_buffer,
285  Oid pg_class_oid, bool is_index);
286 static void binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
287  DumpableObject *dobj,
288  const char *objtype,
289  const char *objname,
290  const char *objnamespace);
291 static const char *getAttrName(int attrnum, TableInfo *tblInfo);
292 static const char *fmtCopyColumnList(const TableInfo *ti, PQExpBuffer buffer);
293 static bool nonemptyReloptions(const char *reloptions);
294 static void appendReloptionsArrayAH(PQExpBuffer buffer, const char *reloptions,
295  const char *prefix, Archive *fout);
296 static char *get_synchronized_snapshot(Archive *fout);
297 static void setupDumpWorker(Archive *AHX);
298 static TableInfo *getRootTableInfo(TableInfo *tbinfo);
299 
300 
301 int
302 main(int argc, char **argv)
303 {
304  int c;
305  const char *filename = NULL;
306  const char *format = "p";
307  TableInfo *tblinfo;
308  int numTables;
309  DumpableObject **dobjs;
310  int numObjs;
311  DumpableObject *boundaryObjs;
312  int i;
313  int optindex;
314  char *endptr;
315  RestoreOptions *ropt;
316  Archive *fout; /* the script file */
317  bool g_verbose = false;
318  const char *dumpencoding = NULL;
319  const char *dumpsnapshot = NULL;
320  char *use_role = NULL;
321  long rowsPerInsert;
322  int numWorkers = 1;
323  trivalue prompt_password = TRI_DEFAULT;
324  int compressLevel = -1;
325  int plainText = 0;
326  ArchiveFormat archiveFormat = archUnknown;
327  ArchiveMode archiveMode;
328 
329  static DumpOptions dopt;
330 
331  static struct option long_options[] = {
332  {"data-only", no_argument, NULL, 'a'},
333  {"blobs", no_argument, NULL, 'b'},
334  {"no-blobs", no_argument, NULL, 'B'},
335  {"clean", no_argument, NULL, 'c'},
336  {"create", no_argument, NULL, 'C'},
337  {"dbname", required_argument, NULL, 'd'},
338  {"file", required_argument, NULL, 'f'},
339  {"format", required_argument, NULL, 'F'},
340  {"host", required_argument, NULL, 'h'},
341  {"jobs", 1, NULL, 'j'},
342  {"no-reconnect", no_argument, NULL, 'R'},
343  {"no-owner", no_argument, NULL, 'O'},
344  {"port", required_argument, NULL, 'p'},
345  {"schema", required_argument, NULL, 'n'},
346  {"exclude-schema", required_argument, NULL, 'N'},
347  {"schema-only", no_argument, NULL, 's'},
348  {"superuser", required_argument, NULL, 'S'},
349  {"table", required_argument, NULL, 't'},
350  {"exclude-table", required_argument, NULL, 'T'},
351  {"no-password", no_argument, NULL, 'w'},
352  {"password", no_argument, NULL, 'W'},
353  {"username", required_argument, NULL, 'U'},
354  {"verbose", no_argument, NULL, 'v'},
355  {"no-privileges", no_argument, NULL, 'x'},
356  {"no-acl", no_argument, NULL, 'x'},
357  {"compress", required_argument, NULL, 'Z'},
358  {"encoding", required_argument, NULL, 'E'},
359  {"help", no_argument, NULL, '?'},
360  {"version", no_argument, NULL, 'V'},
361 
362  /*
363  * the following options don't have an equivalent short option letter
364  */
365  {"attribute-inserts", no_argument, &dopt.column_inserts, 1},
366  {"binary-upgrade", no_argument, &dopt.binary_upgrade, 1},
367  {"column-inserts", no_argument, &dopt.column_inserts, 1},
368  {"disable-dollar-quoting", no_argument, &dopt.disable_dollar_quoting, 1},
369  {"disable-triggers", no_argument, &dopt.disable_triggers, 1},
370  {"enable-row-security", no_argument, &dopt.enable_row_security, 1},
371  {"exclude-table-data", required_argument, NULL, 4},
372  {"extra-float-digits", required_argument, NULL, 8},
373  {"if-exists", no_argument, &dopt.if_exists, 1},
374  {"inserts", no_argument, NULL, 9},
375  {"lock-wait-timeout", required_argument, NULL, 2},
376  {"no-tablespaces", no_argument, &dopt.outputNoTablespaces, 1},
377  {"quote-all-identifiers", no_argument, &quote_all_identifiers, 1},
378  {"load-via-partition-root", no_argument, &dopt.load_via_partition_root, 1},
379  {"role", required_argument, NULL, 3},
380  {"section", required_argument, NULL, 5},
381  {"serializable-deferrable", no_argument, &dopt.serializable_deferrable, 1},
382  {"snapshot", required_argument, NULL, 6},
383  {"strict-names", no_argument, &strict_names, 1},
384  {"use-set-session-authorization", no_argument, &dopt.use_setsessauth, 1},
385  {"no-comments", no_argument, &dopt.no_comments, 1},
386  {"no-publications", no_argument, &dopt.no_publications, 1},
387  {"no-security-labels", no_argument, &dopt.no_security_labels, 1},
388  {"no-synchronized-snapshots", no_argument, &dopt.no_synchronized_snapshots, 1},
389  {"no-unlogged-table-data", no_argument, &dopt.no_unlogged_table_data, 1},
390  {"no-subscriptions", no_argument, &dopt.no_subscriptions, 1},
391  {"no-sync", no_argument, NULL, 7},
392  {"on-conflict-do-nothing", no_argument, &dopt.do_nothing, 1},
393  {"rows-per-insert", required_argument, NULL, 10},
394 
395  {NULL, 0, NULL, 0}
396  };
397 
398  pg_logging_init(argv[0]);
400  set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_dump"));
401 
402  /*
403  * Initialize what we need for parallel execution, especially for thread
404  * support on Windows.
405  */
407 
408  strcpy(g_comment_start, "-- ");
409  g_comment_end[0] = '\0';
410  strcpy(g_opaque_type, "opaque");
411 
412  progname = get_progname(argv[0]);
413 
414  if (argc > 1)
415  {
416  if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
417  {
418  help(progname);
419  exit_nicely(0);
420  }
421  if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
422  {
423  puts("pg_dump (PostgreSQL) " PG_VERSION);
424  exit_nicely(0);
425  }
426  }
427 
428  InitDumpOptions(&dopt);
429 
430  while ((c = getopt_long(argc, argv, "abBcCd:E:f:F:h:j:n:N:Op:RsS:t:T:U:vwWxZ:",
431  long_options, &optindex)) != -1)
432  {
433  switch (c)
434  {
435  case 'a': /* Dump data only */
436  dopt.dataOnly = true;
437  break;
438 
439  case 'b': /* Dump blobs */
440  dopt.outputBlobs = true;
441  break;
442 
443  case 'B': /* Don't dump blobs */
444  dopt.dontOutputBlobs = true;
445  break;
446 
447  case 'c': /* clean (i.e., drop) schema prior to create */
448  dopt.outputClean = 1;
449  break;
450 
451  case 'C': /* Create DB */
452  dopt.outputCreateDB = 1;
453  break;
454 
455  case 'd': /* database name */
456  dopt.dbname = pg_strdup(optarg);
457  break;
458 
459  case 'E': /* Dump encoding */
460  dumpencoding = pg_strdup(optarg);
461  break;
462 
463  case 'f':
464  filename = pg_strdup(optarg);
465  break;
466 
467  case 'F':
468  format = pg_strdup(optarg);
469  break;
470 
471  case 'h': /* server host */
472  dopt.pghost = pg_strdup(optarg);
473  break;
474 
475  case 'j': /* number of dump jobs */
476  numWorkers = atoi(optarg);
477  break;
478 
479  case 'n': /* include schema(s) */
480  simple_string_list_append(&schema_include_patterns, optarg);
481  dopt.include_everything = false;
482  break;
483 
484  case 'N': /* exclude schema(s) */
485  simple_string_list_append(&schema_exclude_patterns, optarg);
486  break;
487 
488  case 'O': /* Don't reconnect to match owner */
489  dopt.outputNoOwner = 1;
490  break;
491 
492  case 'p': /* server port */
493  dopt.pgport = pg_strdup(optarg);
494  break;
495 
496  case 'R':
497  /* no-op, still accepted for backwards compatibility */
498  break;
499 
500  case 's': /* dump schema only */
501  dopt.schemaOnly = true;
502  break;
503 
504  case 'S': /* Username for superuser in plain text output */
506  break;
507 
508  case 't': /* include table(s) */
509  simple_string_list_append(&table_include_patterns, optarg);
510  dopt.include_everything = false;
511  break;
512 
513  case 'T': /* exclude table(s) */
514  simple_string_list_append(&table_exclude_patterns, optarg);
515  break;
516 
517  case 'U':
518  dopt.username = pg_strdup(optarg);
519  break;
520 
521  case 'v': /* verbose */
522  g_verbose = true;
524  break;
525 
526  case 'w':
527  prompt_password = TRI_NO;
528  break;
529 
530  case 'W':
531  prompt_password = TRI_YES;
532  break;
533 
534  case 'x': /* skip ACL dump */
535  dopt.aclsSkip = true;
536  break;
537 
538  case 'Z': /* Compression Level */
539  compressLevel = atoi(optarg);
540  if (compressLevel < 0 || compressLevel > 9)
541  {
542  pg_log_error("compression level must be in range 0..9");
543  exit_nicely(1);
544  }
545  break;
546 
547  case 0:
548  /* This covers the long options. */
549  break;
550 
551  case 2: /* lock-wait-timeout */
553  break;
554 
555  case 3: /* SET ROLE */
556  use_role = pg_strdup(optarg);
557  break;
558 
559  case 4: /* exclude table(s) data */
560  simple_string_list_append(&tabledata_exclude_patterns, optarg);
561  break;
562 
563  case 5: /* section */
565  break;
566 
567  case 6: /* snapshot */
568  dumpsnapshot = pg_strdup(optarg);
569  break;
570 
571  case 7: /* no-sync */
572  dosync = false;
573  break;
574 
575  case 8:
577  extra_float_digits = atoi(optarg);
578  if (extra_float_digits < -15 || extra_float_digits > 3)
579  {
580  pg_log_error("extra_float_digits must be in range -15..3");
581  exit_nicely(1);
582  }
583  break;
584 
585  case 9: /* inserts */
586 
587  /*
588  * dump_inserts also stores --rows-per-insert, careful not to
589  * overwrite that.
590  */
591  if (dopt.dump_inserts == 0)
593  break;
594 
595  case 10: /* rows per insert */
596  errno = 0;
597  rowsPerInsert = strtol(optarg, &endptr, 10);
598 
599  if (endptr == optarg || *endptr != '\0' ||
600  rowsPerInsert <= 0 || rowsPerInsert > INT_MAX ||
601  errno == ERANGE)
602  {
603  pg_log_error("rows-per-insert must be in range %d..%d",
604  1, INT_MAX);
605  exit_nicely(1);
606  }
607  dopt.dump_inserts = (int) rowsPerInsert;
608  break;
609 
610  default:
611  fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
612  exit_nicely(1);
613  }
614  }
615 
616  /*
617  * Non-option argument specifies database name as long as it wasn't
618  * already specified with -d / --dbname
619  */
620  if (optind < argc && dopt.dbname == NULL)
621  dopt.dbname = argv[optind++];
622 
623  /* Complain if any arguments remain */
624  if (optind < argc)
625  {
626  pg_log_error("too many command-line arguments (first is \"%s\")",
627  argv[optind]);
628  fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
629  progname);
630  exit_nicely(1);
631  }
632 
633  /* --column-inserts implies --inserts */
634  if (dopt.column_inserts && dopt.dump_inserts == 0)
636 
637  /*
638  * Binary upgrade mode implies dumping sequence data even in schema-only
639  * mode. This is not exposed as a separate option, but kept separate
640  * internally for clarity.
641  */
642  if (dopt.binary_upgrade)
643  dopt.sequence_data = 1;
644 
645  if (dopt.dataOnly && dopt.schemaOnly)
646  {
647  pg_log_error("options -s/--schema-only and -a/--data-only cannot be used together");
648  exit_nicely(1);
649  }
650 
651  if (dopt.dataOnly && dopt.outputClean)
652  {
653  pg_log_error("options -c/--clean and -a/--data-only cannot be used together");
654  exit_nicely(1);
655  }
656 
657  if (dopt.if_exists && !dopt.outputClean)
658  fatal("option --if-exists requires option -c/--clean");
659 
660  /*
661  * --inserts are already implied above if --column-inserts or
662  * --rows-per-insert were specified.
663  */
664  if (dopt.do_nothing && dopt.dump_inserts == 0)
665  fatal("option --on-conflict-do-nothing requires option --inserts, --rows-per-insert, or --column-inserts");
666 
667  /* Identify archive format to emit */
668  archiveFormat = parseArchiveFormat(format, &archiveMode);
669 
670  /* archiveFormat specific setup */
671  if (archiveFormat == archNull)
672  plainText = 1;
673 
674  /* Custom and directory formats are compressed by default, others not */
675  if (compressLevel == -1)
676  {
677 #ifdef HAVE_LIBZ
678  if (archiveFormat == archCustom || archiveFormat == archDirectory)
679  compressLevel = Z_DEFAULT_COMPRESSION;
680  else
681 #endif
682  compressLevel = 0;
683  }
684 
685 #ifndef HAVE_LIBZ
686  if (compressLevel != 0)
687  pg_log_warning("requested compression not available in this installation -- archive will be uncompressed");
688  compressLevel = 0;
689 #endif
690 
691  /*
692  * If emitting an archive format, we always want to emit a DATABASE item,
693  * in case --create is specified at pg_restore time.
694  */
695  if (!plainText)
696  dopt.outputCreateDB = 1;
697 
698  /*
699  * On Windows we can only have at most MAXIMUM_WAIT_OBJECTS (= 64 usually)
700  * parallel jobs because that's the maximum limit for the
701  * WaitForMultipleObjects() call.
702  */
703  if (numWorkers <= 0
704 #ifdef WIN32
705  || numWorkers > MAXIMUM_WAIT_OBJECTS
706 #endif
707  )
708  fatal("invalid number of parallel jobs");
709 
710  /* Parallel backup only in the directory archive format so far */
711  if (archiveFormat != archDirectory && numWorkers > 1)
712  fatal("parallel backup only supported by the directory format");
713 
714  /* Open the output file */
715  fout = CreateArchive(filename, archiveFormat, compressLevel, dosync,
716  archiveMode, setupDumpWorker);
717 
718  /* Make dump options accessible right away */
719  SetArchiveOptions(fout, &dopt, NULL);
720 
721  /* Register the cleanup hook */
722  on_exit_close_archive(fout);
723 
724  /* Let the archiver know how noisy to be */
725  fout->verbose = g_verbose;
726 
727 
728  /*
729  * We allow the server to be back to 8.0, and up to any minor release of
730  * our own major version. (See also version check in pg_dumpall.c.)
731  */
732  fout->minRemoteVersion = 80000;
733  fout->maxRemoteVersion = (PG_VERSION_NUM / 100) * 100 + 99;
734 
735  fout->numWorkers = numWorkers;
736 
737  /*
738  * Open the database using the Archiver, so it knows about it. Errors mean
739  * death.
740  */
741  ConnectDatabase(fout, dopt.dbname, dopt.pghost, dopt.pgport, dopt.username, prompt_password);
742  setup_connection(fout, dumpencoding, dumpsnapshot, use_role);
743 
744  /*
745  * Disable security label support if server version < v9.1.x (prevents
746  * access to nonexistent pg_seclabel catalog)
747  */
748  if (fout->remoteVersion < 90100)
749  dopt.no_security_labels = 1;
750 
751  /*
752  * On hot standbys, never try to dump unlogged table data, since it will
753  * just throw an error.
754  */
755  if (fout->isStandby)
756  dopt.no_unlogged_table_data = true;
757 
758  /* Select the appropriate subquery to convert user IDs to names */
759  if (fout->remoteVersion >= 80100)
760  username_subquery = "SELECT rolname FROM pg_catalog.pg_roles WHERE oid =";
761  else
762  username_subquery = "SELECT usename FROM pg_catalog.pg_user WHERE usesysid =";
763 
764  /* check the version for the synchronized snapshots feature */
765  if (numWorkers > 1 && fout->remoteVersion < 90200
766  && !dopt.no_synchronized_snapshots)
767  fatal("Synchronized snapshots are not supported by this server version.\n"
768  "Run with --no-synchronized-snapshots instead if you do not need\n"
769  "synchronized snapshots.");
770 
771  /* check the version when a snapshot is explicitly specified by user */
772  if (dumpsnapshot && fout->remoteVersion < 90200)
773  fatal("Exported snapshots are not supported by this server version.");
774 
775  /*
776  * Find the last built-in OID, if needed (prior to 8.1)
777  *
778  * With 8.1 and above, we can just use FirstNormalObjectId - 1.
779  */
780  if (fout->remoteVersion < 80100)
782  else
784 
785  pg_log_info("last built-in OID is %u", g_last_builtin_oid);
786 
787  /* Expand schema selection patterns into OID lists */
788  if (schema_include_patterns.head != NULL)
789  {
790  expand_schema_name_patterns(fout, &schema_include_patterns,
791  &schema_include_oids,
792  strict_names);
793  if (schema_include_oids.head == NULL)
794  fatal("no matching schemas were found");
795  }
796  expand_schema_name_patterns(fout, &schema_exclude_patterns,
797  &schema_exclude_oids,
798  false);
799  /* non-matching exclusion patterns aren't an error */
800 
801  /* Expand table selection patterns into OID lists */
802  if (table_include_patterns.head != NULL)
803  {
804  expand_table_name_patterns(fout, &table_include_patterns,
805  &table_include_oids,
806  strict_names);
807  if (table_include_oids.head == NULL)
808  fatal("no matching tables were found");
809  }
810  expand_table_name_patterns(fout, &table_exclude_patterns,
811  &table_exclude_oids,
812  false);
813 
814  expand_table_name_patterns(fout, &tabledata_exclude_patterns,
815  &tabledata_exclude_oids,
816  false);
817 
818  /* non-matching exclusion patterns aren't an error */
819 
820  /*
821  * Dumping blobs is the default for dumps where an inclusion switch is not
822  * used (an "include everything" dump). -B can be used to exclude blobs
823  * from those dumps. -b can be used to include blobs even when an
824  * inclusion switch is used.
825  *
826  * -s means "schema only" and blobs are data, not schema, so we never
827  * include blobs when -s is used.
828  */
829  if (dopt.include_everything && !dopt.schemaOnly && !dopt.dontOutputBlobs)
830  dopt.outputBlobs = true;
831 
832  /*
833  * Now scan the database and create DumpableObject structs for all the
834  * objects we intend to dump.
835  */
836  tblinfo = getSchemaData(fout, &numTables);
837 
838  if (fout->remoteVersion < 80400)
839  guessConstraintInheritance(tblinfo, numTables);
840 
841  if (!dopt.schemaOnly)
842  {
843  getTableData(&dopt, tblinfo, numTables, 0);
845  if (dopt.dataOnly)
847  }
848 
849  if (dopt.schemaOnly && dopt.sequence_data)
850  getTableData(&dopt, tblinfo, numTables, RELKIND_SEQUENCE);
851 
852  /*
853  * In binary-upgrade mode, we do not have to worry about the actual blob
854  * data or the associated metadata that resides in the pg_largeobject and
855  * pg_largeobject_metadata tables, respectively.
856  *
857  * However, we do need to collect blob information as there may be
858  * comments or other information on blobs that we do need to dump out.
859  */
860  if (dopt.outputBlobs || dopt.binary_upgrade)
861  getBlobs(fout);
862 
863  /*
864  * Collect dependency data to assist in ordering the objects.
865  */
866  getDependencies(fout);
867 
868  /* Lastly, create dummy objects to represent the section boundaries */
869  boundaryObjs = createBoundaryObjects();
870 
871  /* Get pointers to all the known DumpableObjects */
872  getDumpableObjects(&dobjs, &numObjs);
873 
874  /*
875  * Add dummy dependencies to enforce the dump section ordering.
876  */
877  addBoundaryDependencies(dobjs, numObjs, boundaryObjs);
878 
879  /*
880  * Sort the objects into a safe dump order (no forward references).
881  *
882  * We rely on dependency information to help us determine a safe order, so
883  * the initial sort is mostly for cosmetic purposes: we sort by name to
884  * ensure that logically identical schemas will dump identically.
885  */
886  sortDumpableObjectsByTypeName(dobjs, numObjs);
887 
888  sortDumpableObjects(dobjs, numObjs,
889  boundaryObjs[0].dumpId, boundaryObjs[1].dumpId);
890 
891  /*
892  * Create archive TOC entries for all the objects to be dumped, in a safe
893  * order.
894  */
895 
896  /* First the special ENCODING, STDSTRINGS, and SEARCHPATH entries. */
897  dumpEncoding(fout);
898  dumpStdStrings(fout);
899  dumpSearchPath(fout);
900 
901  /* The database items are always next, unless we don't want them at all */
902  if (dopt.outputCreateDB)
903  dumpDatabase(fout);
904 
905  /* Now the rearrangeable objects. */
906  for (i = 0; i < numObjs; i++)
907  dumpDumpableObject(fout, dobjs[i]);
908 
909  /*
910  * Set up options info to ensure we dump what we want.
911  */
912  ropt = NewRestoreOptions();
913  ropt->filename = filename;
914 
915  /* if you change this list, see dumpOptionsFromRestoreOptions */
916  ropt->dropSchema = dopt.outputClean;
917  ropt->dataOnly = dopt.dataOnly;
918  ropt->schemaOnly = dopt.schemaOnly;
919  ropt->if_exists = dopt.if_exists;
920  ropt->column_inserts = dopt.column_inserts;
921  ropt->dumpSections = dopt.dumpSections;
922  ropt->aclsSkip = dopt.aclsSkip;
923  ropt->superuser = dopt.outputSuperuser;
924  ropt->createDB = dopt.outputCreateDB;
925  ropt->noOwner = dopt.outputNoOwner;
926  ropt->noTablespace = dopt.outputNoTablespaces;
927  ropt->disable_triggers = dopt.disable_triggers;
928  ropt->use_setsessauth = dopt.use_setsessauth;
930  ropt->dump_inserts = dopt.dump_inserts;
931  ropt->no_comments = dopt.no_comments;
932  ropt->no_publications = dopt.no_publications;
934  ropt->no_subscriptions = dopt.no_subscriptions;
935  ropt->lockWaitTimeout = dopt.lockWaitTimeout;
938  ropt->sequence_data = dopt.sequence_data;
939  ropt->binary_upgrade = dopt.binary_upgrade;
940 
941  if (compressLevel == -1)
942  ropt->compression = 0;
943  else
944  ropt->compression = compressLevel;
945 
946  ropt->suppressDumpWarnings = true; /* We've already shown them */
947 
948  SetArchiveOptions(fout, &dopt, ropt);
949 
950  /* Mark which entries should be output */
952 
953  /*
954  * The archive's TOC entries are now marked as to which ones will actually
955  * be output, so we can set up their dependency lists properly. This isn't
956  * necessary for plain-text output, though.
957  */
958  if (!plainText)
960 
961  /*
962  * And finally we can do the actual output.
963  *
964  * Note: for non-plain-text output formats, the output file is written
965  * inside CloseArchive(). This is, um, bizarre; but not worth changing
966  * right now.
967  */
968  if (plainText)
969  RestoreArchive(fout);
970 
971  CloseArchive(fout);
972 
973  exit_nicely(0);
974 }
975 
976 
/*
 * help
 *		Print a command-line usage/options summary for pg_dump on stdout.
 *
 * All user-facing strings are wrapped in _() so they can be translated
 * via gettext.  Option lines are grouped into the same sections as the
 * SGML documentation: general, output-content, and connection options.
 */
static void
help(const char *progname)
{
	printf(_("%s dumps a database as a text file or to other formats.\n\n"), progname);
	printf(_("Usage:\n"));
	printf(_(" %s [OPTION]... [DBNAME]\n"), progname);

	/* General options: file handling, format, verbosity, version */
	printf(_("\nGeneral options:\n"));
	printf(_(" -f, --file=FILENAME output file or directory name\n"));
	printf(_(" -F, --format=c|d|t|p output file format (custom, directory, tar,\n"
			 " plain text (default))\n"));
	printf(_(" -j, --jobs=NUM use this many parallel jobs to dump\n"));
	printf(_(" -v, --verbose verbose mode\n"));
	printf(_(" -V, --version output version information, then exit\n"));
	printf(_(" -Z, --compress=0-9 compression level for compressed formats\n"));
	printf(_(" --lock-wait-timeout=TIMEOUT fail after waiting TIMEOUT for a table lock\n"));
	printf(_(" --no-sync do not wait for changes to be written safely to disk\n"));
	printf(_(" -?, --help show this help, then exit\n"));

	/* Options selecting what database content gets dumped, and how */
	printf(_("\nOptions controlling the output content:\n"));
	printf(_(" -a, --data-only dump only the data, not the schema\n"));
	printf(_(" -b, --blobs include large objects in dump\n"));
	printf(_(" -B, --no-blobs exclude large objects in dump\n"));
	printf(_(" -c, --clean clean (drop) database objects before recreating\n"));
	printf(_(" -C, --create include commands to create database in dump\n"));
	printf(_(" -E, --encoding=ENCODING dump the data in encoding ENCODING\n"));
	printf(_(" -n, --schema=PATTERN dump the specified schema(s) only\n"));
	printf(_(" -N, --exclude-schema=PATTERN do NOT dump the specified schema(s)\n"));
	printf(_(" -O, --no-owner skip restoration of object ownership in\n"
			 " plain-text format\n"));
	printf(_(" -s, --schema-only dump only the schema, no data\n"));
	printf(_(" -S, --superuser=NAME superuser user name to use in plain-text format\n"));
	printf(_(" -t, --table=PATTERN dump the specified table(s) only\n"));
	printf(_(" -T, --exclude-table=PATTERN do NOT dump the specified table(s)\n"));
	printf(_(" -x, --no-privileges do not dump privileges (grant/revoke)\n"));
	printf(_(" --binary-upgrade for use by upgrade utilities only\n"));
	printf(_(" --column-inserts dump data as INSERT commands with column names\n"));
	printf(_(" --disable-dollar-quoting disable dollar quoting, use SQL standard quoting\n"));
	printf(_(" --disable-triggers disable triggers during data-only restore\n"));
	printf(_(" --enable-row-security enable row security (dump only content user has\n"
			 " access to)\n"));
	printf(_(" --exclude-table-data=PATTERN do NOT dump data for the specified table(s)\n"));
	printf(_(" --extra-float-digits=NUM override default setting for extra_float_digits\n"));
	printf(_(" --if-exists use IF EXISTS when dropping objects\n"));
	printf(_(" --inserts dump data as INSERT commands, rather than COPY\n"));
	printf(_(" --load-via-partition-root load partitions via the root table\n"));
	printf(_(" --no-comments do not dump comments\n"));
	printf(_(" --no-publications do not dump publications\n"));
	printf(_(" --no-security-labels do not dump security label assignments\n"));
	printf(_(" --no-subscriptions do not dump subscriptions\n"));
	printf(_(" --no-synchronized-snapshots do not use synchronized snapshots in parallel jobs\n"));
	printf(_(" --no-tablespaces do not dump tablespace assignments\n"));
	printf(_(" --no-unlogged-table-data do not dump unlogged table data\n"));
	printf(_(" --on-conflict-do-nothing add ON CONFLICT DO NOTHING to INSERT commands\n"));
	printf(_(" --quote-all-identifiers quote all identifiers, even if not key words\n"));
	printf(_(" --rows-per-insert=NROWS number of rows per INSERT; implies --inserts\n"));
	printf(_(" --section=SECTION dump named section (pre-data, data, or post-data)\n"));
	printf(_(" --serializable-deferrable wait until the dump can run without anomalies\n"));
	printf(_(" --snapshot=SNAPSHOT use given snapshot for the dump\n"));
	printf(_(" --strict-names require table and/or schema include patterns to\n"
			 " match at least one entity each\n"));
	printf(_(" --use-set-session-authorization\n"
			 " use SET SESSION AUTHORIZATION commands instead of\n"
			 " ALTER OWNER commands to set ownership\n"));

	/* Options controlling how we connect to the source server */
	printf(_("\nConnection options:\n"));
	printf(_(" -d, --dbname=DBNAME database to dump\n"));
	printf(_(" -h, --host=HOSTNAME database server host or socket directory\n"));
	printf(_(" -p, --port=PORT database server port number\n"));
	printf(_(" -U, --username=NAME connect as specified database user\n"));
	printf(_(" -w, --no-password never prompt for password\n"));
	printf(_(" -W, --password force password prompt (should happen automatically)\n"));
	printf(_(" --role=ROLENAME do SET ROLE before dump\n"));

	printf(_("\nIf no database name is supplied, then the PGDATABASE environment\n"
			 "variable value is used.\n\n"));
	printf(_("Report bugs to <pgsql-bugs@lists.postgresql.org>.\n"));
}
1055 
1056 static void
1057 setup_connection(Archive *AH, const char *dumpencoding,
1058  const char *dumpsnapshot, char *use_role)
1059 {
1060  DumpOptions *dopt = AH->dopt;
1061  PGconn *conn = GetConnection(AH);
1062  const char *std_strings;
1063 
1065 
1066  /*
1067  * Set the client encoding if requested.
1068  */
1069  if (dumpencoding)
1070  {
1071  if (PQsetClientEncoding(conn, dumpencoding) < 0)
1072  fatal("invalid client encoding \"%s\" specified",
1073  dumpencoding);
1074  }
1075 
1076  /*
1077  * Get the active encoding and the standard_conforming_strings setting, so
1078  * we know how to escape strings.
1079  */
1080  AH->encoding = PQclientEncoding(conn);
1081 
1082  std_strings = PQparameterStatus(conn, "standard_conforming_strings");
1083  AH->std_strings = (std_strings && strcmp(std_strings, "on") == 0);
1084 
1085  /*
1086  * Set the role if requested. In a parallel dump worker, we'll be passed
1087  * use_role == NULL, but AH->use_role is already set (if user specified it
1088  * originally) and we should use that.
1089  */
1090  if (!use_role && AH->use_role)
1091  use_role = AH->use_role;
1092 
1093  /* Set the role if requested */
1094  if (use_role && AH->remoteVersion >= 80100)
1095  {
1096  PQExpBuffer query = createPQExpBuffer();
1097 
1098  appendPQExpBuffer(query, "SET ROLE %s", fmtId(use_role));
1099  ExecuteSqlStatement(AH, query->data);
1100  destroyPQExpBuffer(query);
1101 
1102  /* save it for possible later use by parallel workers */
1103  if (!AH->use_role)
1104  AH->use_role = pg_strdup(use_role);
1105  }
1106 
1107  /* Set the datestyle to ISO to ensure the dump's portability */
1108  ExecuteSqlStatement(AH, "SET DATESTYLE = ISO");
1109 
1110  /* Likewise, avoid using sql_standard intervalstyle */
1111  if (AH->remoteVersion >= 80400)
1112  ExecuteSqlStatement(AH, "SET INTERVALSTYLE = POSTGRES");
1113 
1114  /*
1115  * Use an explicitly specified extra_float_digits if it has been provided.
1116  * Otherwise, set extra_float_digits so that we can dump float data
1117  * exactly (given correctly implemented float I/O code, anyway).
1118  */
1120  {
1122 
1123  appendPQExpBuffer(q, "SET extra_float_digits TO %d",
1125  ExecuteSqlStatement(AH, q->data);
1126  destroyPQExpBuffer(q);
1127  }
1128  else if (AH->remoteVersion >= 90000)
1129  ExecuteSqlStatement(AH, "SET extra_float_digits TO 3");
1130  else
1131  ExecuteSqlStatement(AH, "SET extra_float_digits TO 2");
1132 
1133  /*
1134  * If synchronized scanning is supported, disable it, to prevent
1135  * unpredictable changes in row ordering across a dump and reload.
1136  */
1137  if (AH->remoteVersion >= 80300)
1138  ExecuteSqlStatement(AH, "SET synchronize_seqscans TO off");
1139 
1140  /*
1141  * Disable timeouts if supported.
1142  */
1143  ExecuteSqlStatement(AH, "SET statement_timeout = 0");
1144  if (AH->remoteVersion >= 90300)
1145  ExecuteSqlStatement(AH, "SET lock_timeout = 0");
1146  if (AH->remoteVersion >= 90600)
1147  ExecuteSqlStatement(AH, "SET idle_in_transaction_session_timeout = 0");
1148 
1149  /*
1150  * Quote all identifiers, if requested.
1151  */
1152  if (quote_all_identifiers && AH->remoteVersion >= 90100)
1153  ExecuteSqlStatement(AH, "SET quote_all_identifiers = true");
1154 
1155  /*
1156  * Adjust row-security mode, if supported.
1157  */
1158  if (AH->remoteVersion >= 90500)
1159  {
1160  if (dopt->enable_row_security)
1161  ExecuteSqlStatement(AH, "SET row_security = on");
1162  else
1163  ExecuteSqlStatement(AH, "SET row_security = off");
1164  }
1165 
1166  /*
1167  * Start transaction-snapshot mode transaction to dump consistent data.
1168  */
1169  ExecuteSqlStatement(AH, "BEGIN");
1170  if (AH->remoteVersion >= 90100)
1171  {
1172  /*
1173  * To support the combination of serializable_deferrable with the jobs
1174  * option we use REPEATABLE READ for the worker connections that are
1175  * passed a snapshot. As long as the snapshot is acquired in a
1176  * SERIALIZABLE, READ ONLY, DEFERRABLE transaction, its use within a
1177  * REPEATABLE READ transaction provides the appropriate integrity
1178  * guarantees. This is a kluge, but safe for back-patching.
1179  */
1180  if (dopt->serializable_deferrable && AH->sync_snapshot_id == NULL)
1182  "SET TRANSACTION ISOLATION LEVEL "
1183  "SERIALIZABLE, READ ONLY, DEFERRABLE");
1184  else
1186  "SET TRANSACTION ISOLATION LEVEL "
1187  "REPEATABLE READ, READ ONLY");
1188  }
1189  else
1190  {
1192  "SET TRANSACTION ISOLATION LEVEL "
1193  "SERIALIZABLE, READ ONLY");
1194  }
1195 
1196  /*
1197  * If user specified a snapshot to use, select that. In a parallel dump
1198  * worker, we'll be passed dumpsnapshot == NULL, but AH->sync_snapshot_id
1199  * is already set (if the server can handle it) and we should use that.
1200  */
1201  if (dumpsnapshot)
1202  AH->sync_snapshot_id = pg_strdup(dumpsnapshot);
1203 
1204  if (AH->sync_snapshot_id)
1205  {
1206  PQExpBuffer query = createPQExpBuffer();
1207 
1208  appendPQExpBufferStr(query, "SET TRANSACTION SNAPSHOT ");
1209  appendStringLiteralConn(query, AH->sync_snapshot_id, conn);
1210  ExecuteSqlStatement(AH, query->data);
1211  destroyPQExpBuffer(query);
1212  }
1213  else if (AH->numWorkers > 1 &&
1214  AH->remoteVersion >= 90200 &&
1216  {
1217  if (AH->isStandby && AH->remoteVersion < 100000)
1218  fatal("Synchronized snapshots on standby servers are not supported by this server version.\n"
1219  "Run with --no-synchronized-snapshots instead if you do not need\n"
1220  "synchronized snapshots.");
1221 
1222 
1224  }
1225 }
1226 
1227 /* Set up connection for a parallel worker process */
1228 static void
1230 {
1231  /*
1232  * We want to re-select all the same values the master connection is
1233  * using. We'll have inherited directly-usable values in
1234  * AH->sync_snapshot_id and AH->use_role, but we need to translate the
1235  * inherited encoding value back to a string to pass to setup_connection.
1236  */
1237  setup_connection(AH,
1239  NULL,
1240  NULL);
1241 }
1242 
1243 static char *
1245 {
1246  char *query = "SELECT pg_catalog.pg_export_snapshot()";
1247  char *result;
1248  PGresult *res;
1249 
1250  res = ExecuteSqlQueryForSingleRow(fout, query);
1251  result = pg_strdup(PQgetvalue(res, 0, 0));
1252  PQclear(res);
1253 
1254  return result;
1255 }
1256 
1257 static ArchiveFormat
1259 {
1260  ArchiveFormat archiveFormat;
1261 
1262  *mode = archModeWrite;
1263 
1264  if (pg_strcasecmp(format, "a") == 0 || pg_strcasecmp(format, "append") == 0)
1265  {
1266  /* This is used by pg_dumpall, and is not documented */
1267  archiveFormat = archNull;
1268  *mode = archModeAppend;
1269  }
1270  else if (pg_strcasecmp(format, "c") == 0)
1271  archiveFormat = archCustom;
1272  else if (pg_strcasecmp(format, "custom") == 0)
1273  archiveFormat = archCustom;
1274  else if (pg_strcasecmp(format, "d") == 0)
1275  archiveFormat = archDirectory;
1276  else if (pg_strcasecmp(format, "directory") == 0)
1277  archiveFormat = archDirectory;
1278  else if (pg_strcasecmp(format, "p") == 0)
1279  archiveFormat = archNull;
1280  else if (pg_strcasecmp(format, "plain") == 0)
1281  archiveFormat = archNull;
1282  else if (pg_strcasecmp(format, "t") == 0)
1283  archiveFormat = archTar;
1284  else if (pg_strcasecmp(format, "tar") == 0)
1285  archiveFormat = archTar;
1286  else
1287  fatal("invalid output format \"%s\" specified", format);
1288  return archiveFormat;
1289 }
1290 
1291 /*
1292  * Find the OIDs of all schemas matching the given list of patterns,
1293  * and append them to the given OID list.
1294  */
1295 static void
1297  SimpleStringList *patterns,
1298  SimpleOidList *oids,
1299  bool strict_names)
1300 {
1301  PQExpBuffer query;
1302  PGresult *res;
1303  SimpleStringListCell *cell;
1304  int i;
1305 
1306  if (patterns->head == NULL)
1307  return; /* nothing to do */
1308 
1309  query = createPQExpBuffer();
1310 
1311  /*
1312  * The loop below runs multiple SELECTs might sometimes result in
1313  * duplicate entries in the OID list, but we don't care.
1314  */
1315 
1316  for (cell = patterns->head; cell; cell = cell->next)
1317  {
1318  appendPQExpBufferStr(query,
1319  "SELECT oid FROM pg_catalog.pg_namespace n\n");
1320  processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1321  false, NULL, "n.nspname", NULL, NULL);
1322 
1323  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1324  if (strict_names && PQntuples(res) == 0)
1325  fatal("no matching schemas were found for pattern \"%s\"", cell->val);
1326 
1327  for (i = 0; i < PQntuples(res); i++)
1328  {
1329  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1330  }
1331 
1332  PQclear(res);
1333  resetPQExpBuffer(query);
1334  }
1335 
1336  destroyPQExpBuffer(query);
1337 }
1338 
1339 /*
1340  * Find the OIDs of all tables matching the given list of patterns,
1341  * and append them to the given OID list. See also expand_dbname_patterns()
1342  * in pg_dumpall.c
1343  */
1344 static void
1346  SimpleStringList *patterns, SimpleOidList *oids,
1347  bool strict_names)
1348 {
1349  PQExpBuffer query;
1350  PGresult *res;
1351  SimpleStringListCell *cell;
1352  int i;
1353 
1354  if (patterns->head == NULL)
1355  return; /* nothing to do */
1356 
1357  query = createPQExpBuffer();
1358 
1359  /*
1360  * this might sometimes result in duplicate entries in the OID list, but
1361  * we don't care.
1362  */
1363 
1364  for (cell = patterns->head; cell; cell = cell->next)
1365  {
1366  /*
1367  * Query must remain ABSOLUTELY devoid of unqualified names. This
1368  * would be unnecessary given a pg_table_is_visible() variant taking a
1369  * search_path argument.
1370  */
1371  appendPQExpBuffer(query,
1372  "SELECT c.oid"
1373  "\nFROM pg_catalog.pg_class c"
1374  "\n LEFT JOIN pg_catalog.pg_namespace n"
1375  "\n ON n.oid OPERATOR(pg_catalog.=) c.relnamespace"
1376  "\nWHERE c.relkind OPERATOR(pg_catalog.=) ANY"
1377  "\n (array['%c', '%c', '%c', '%c', '%c', '%c'])\n",
1378  RELKIND_RELATION, RELKIND_SEQUENCE, RELKIND_VIEW,
1379  RELKIND_MATVIEW, RELKIND_FOREIGN_TABLE,
1380  RELKIND_PARTITIONED_TABLE);
1381  processSQLNamePattern(GetConnection(fout), query, cell->val, true,
1382  false, "n.nspname", "c.relname", NULL,
1383  "pg_catalog.pg_table_is_visible(c.oid)");
1384 
1385  ExecuteSqlStatement(fout, "RESET search_path");
1386  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1389  if (strict_names && PQntuples(res) == 0)
1390  fatal("no matching tables were found for pattern \"%s\"", cell->val);
1391 
1392  for (i = 0; i < PQntuples(res); i++)
1393  {
1394  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1395  }
1396 
1397  PQclear(res);
1398  resetPQExpBuffer(query);
1399  }
1400 
1401  destroyPQExpBuffer(query);
1402 }
1403 
1404 /*
1405  * checkExtensionMembership
1406  * Determine whether object is an extension member, and if so,
1407  * record an appropriate dependency and set the object's dump flag.
1408  *
1409  * It's important to call this for each object that could be an extension
1410  * member. Generally, we integrate this with determining the object's
1411  * to-be-dumped-ness, since extension membership overrides other rules for that.
1412  *
1413  * Returns true if object is an extension member, else false.
1414  */
1415 static bool
1417 {
1418  ExtensionInfo *ext = findOwningExtension(dobj->catId);
1419 
1420  if (ext == NULL)
1421  return false;
1422 
1423  dobj->ext_member = true;
1424 
1425  /* Record dependency so that getDependencies needn't deal with that */
1426  addObjectDependency(dobj, ext->dobj.dumpId);
1427 
1428  /*
1429  * In 9.6 and above, mark the member object to have any non-initial ACL,
1430  * policies, and security labels dumped.
1431  *
1432  * Note that any initial ACLs (see pg_init_privs) will be removed when we
1433  * extract the information about the object. We don't provide support for
1434  * initial policies and security labels and it seems unlikely for those to
1435  * ever exist, but we may have to revisit this later.
1436  *
1437  * Prior to 9.6, we do not include any extension member components.
1438  *
1439  * In binary upgrades, we still dump all components of the members
1440  * individually, since the idea is to exactly reproduce the database
1441  * contents rather than replace the extension contents with something
1442  * different.
1443  */
1444  if (fout->dopt->binary_upgrade)
1445  dobj->dump = ext->dobj.dump;
1446  else
1447  {
1448  if (fout->remoteVersion < 90600)
1449  dobj->dump = DUMP_COMPONENT_NONE;
1450  else
1451  dobj->dump = ext->dobj.dump_contains & (DUMP_COMPONENT_ACL |
1454  }
1455 
1456  return true;
1457 }
1458 
1459 /*
1460  * selectDumpableNamespace: policy-setting subroutine
1461  * Mark a namespace as to be dumped or not
1462  */
1463 static void
1465 {
1466  /*
1467  * If specific tables are being dumped, do not dump any complete
1468  * namespaces. If specific namespaces are being dumped, dump just those
1469  * namespaces. Otherwise, dump all non-system namespaces.
1470  */
1471  if (table_include_oids.head != NULL)
1472  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1473  else if (schema_include_oids.head != NULL)
1474  nsinfo->dobj.dump_contains = nsinfo->dobj.dump =
1475  simple_oid_list_member(&schema_include_oids,
1476  nsinfo->dobj.catId.oid) ?
1478  else if (fout->remoteVersion >= 90600 &&
1479  strcmp(nsinfo->dobj.name, "pg_catalog") == 0)
1480  {
1481  /*
1482  * In 9.6 and above, we dump out any ACLs defined in pg_catalog, if
1483  * they are interesting (and not the original ACLs which were set at
1484  * initdb time, see pg_init_privs).
1485  */
1486  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
1487  }
1488  else if (strncmp(nsinfo->dobj.name, "pg_", 3) == 0 ||
1489  strcmp(nsinfo->dobj.name, "information_schema") == 0)
1490  {
1491  /* Other system schemas don't get dumped */
1492  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1493  }
1494  else if (strcmp(nsinfo->dobj.name, "public") == 0)
1495  {
1496  /*
1497  * The public schema is a strange beast that sits in a sort of
1498  * no-mans-land between being a system object and a user object. We
1499  * don't want to dump creation or comment commands for it, because
1500  * that complicates matters for non-superuser use of pg_dump. But we
1501  * should dump any ACL changes that have occurred for it, and of
1502  * course we should dump contained objects.
1503  */
1504  nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
1506  }
1507  else
1508  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ALL;
1509 
1510  /*
1511  * In any case, a namespace can be excluded by an exclusion switch
1512  */
1513  if (nsinfo->dobj.dump_contains &&
1514  simple_oid_list_member(&schema_exclude_oids,
1515  nsinfo->dobj.catId.oid))
1516  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1517 
1518  /*
1519  * If the schema belongs to an extension, allow extension membership to
1520  * override the dump decision for the schema itself. However, this does
1521  * not change dump_contains, so this won't change what we do with objects
1522  * within the schema. (If they belong to the extension, they'll get
1523  * suppressed by it, otherwise not.)
1524  */
1525  (void) checkExtensionMembership(&nsinfo->dobj, fout);
1526 }
1527 
1528 /*
1529  * selectDumpableTable: policy-setting subroutine
1530  * Mark a table as to be dumped or not
1531  */
1532 static void
1534 {
1535  if (checkExtensionMembership(&tbinfo->dobj, fout))
1536  return; /* extension membership overrides all else */
1537 
1538  /*
1539  * If specific tables are being dumped, dump just those tables; else, dump
1540  * according to the parent namespace's dump flag.
1541  */
1542  if (table_include_oids.head != NULL)
1543  tbinfo->dobj.dump = simple_oid_list_member(&table_include_oids,
1544  tbinfo->dobj.catId.oid) ?
1546  else
1547  tbinfo->dobj.dump = tbinfo->dobj.namespace->dobj.dump_contains;
1548 
1549  /*
1550  * In any case, a table can be excluded by an exclusion switch
1551  */
1552  if (tbinfo->dobj.dump &&
1553  simple_oid_list_member(&table_exclude_oids,
1554  tbinfo->dobj.catId.oid))
1555  tbinfo->dobj.dump = DUMP_COMPONENT_NONE;
1556 }
1557 
1558 /*
1559  * selectDumpableType: policy-setting subroutine
1560  * Mark a type as to be dumped or not
1561  *
1562  * If it's a table's rowtype or an autogenerated array type, we also apply a
1563  * special type code to facilitate sorting into the desired order. (We don't
1564  * want to consider those to be ordinary types because that would bring tables
1565  * up into the datatype part of the dump order.) We still set the object's
1566  * dump flag; that's not going to cause the dummy type to be dumped, but we
1567  * need it so that casts involving such types will be dumped correctly -- see
1568  * dumpCast. This means the flag should be set the same as for the underlying
1569  * object (the table or base type).
1570  */
1571 static void
1573 {
1574  /* skip complex types, except for standalone composite types */
1575  if (OidIsValid(tyinfo->typrelid) &&
1576  tyinfo->typrelkind != RELKIND_COMPOSITE_TYPE)
1577  {
1578  TableInfo *tytable = findTableByOid(tyinfo->typrelid);
1579 
1580  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1581  if (tytable != NULL)
1582  tyinfo->dobj.dump = tytable->dobj.dump;
1583  else
1584  tyinfo->dobj.dump = DUMP_COMPONENT_NONE;
1585  return;
1586  }
1587 
1588  /* skip auto-generated array types */
1589  if (tyinfo->isArray)
1590  {
1591  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1592 
1593  /*
1594  * Fall through to set the dump flag; we assume that the subsequent
1595  * rules will do the same thing as they would for the array's base
1596  * type. (We cannot reliably look up the base type here, since
1597  * getTypes may not have processed it yet.)
1598  */
1599  }
1600 
1601  if (checkExtensionMembership(&tyinfo->dobj, fout))
1602  return; /* extension membership overrides all else */
1603 
1604  /* Dump based on if the contents of the namespace are being dumped */
1605  tyinfo->dobj.dump = tyinfo->dobj.namespace->dobj.dump_contains;
1606 }
1607 
1608 /*
1609  * selectDumpableDefaultACL: policy-setting subroutine
1610  * Mark a default ACL as to be dumped or not
1611  *
1612  * For per-schema default ACLs, dump if the schema is to be dumped.
1613  * Otherwise dump if we are dumping "everything". Note that dataOnly
1614  * and aclsSkip are checked separately.
1615  */
1616 static void
1618 {
1619  /* Default ACLs can't be extension members */
1620 
1621  if (dinfo->dobj.namespace)
1622  /* default ACLs are considered part of the namespace */
1623  dinfo->dobj.dump = dinfo->dobj.namespace->dobj.dump_contains;
1624  else
1625  dinfo->dobj.dump = dopt->include_everything ?
1627 }
1628 
1629 /*
1630  * selectDumpableCast: policy-setting subroutine
1631  * Mark a cast as to be dumped or not
1632  *
1633  * Casts do not belong to any particular namespace (since they haven't got
1634  * names), nor do they have identifiable owners. To distinguish user-defined
1635  * casts from built-in ones, we must resort to checking whether the cast's
1636  * OID is in the range reserved for initdb.
1637  */
1638 static void
1640 {
1641  if (checkExtensionMembership(&cast->dobj, fout))
1642  return; /* extension membership overrides all else */
1643 
1644  /*
1645  * This would be DUMP_COMPONENT_ACL for from-initdb casts, but they do not
1646  * support ACLs currently.
1647  */
1648  if (cast->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1649  cast->dobj.dump = DUMP_COMPONENT_NONE;
1650  else
1651  cast->dobj.dump = fout->dopt->include_everything ?
1653 }
1654 
1655 /*
1656  * selectDumpableProcLang: policy-setting subroutine
1657  * Mark a procedural language as to be dumped or not
1658  *
1659  * Procedural languages do not belong to any particular namespace. To
1660  * identify built-in languages, we must resort to checking whether the
1661  * language's OID is in the range reserved for initdb.
1662  */
1663 static void
1665 {
1666  if (checkExtensionMembership(&plang->dobj, fout))
1667  return; /* extension membership overrides all else */
1668 
1669  /*
1670  * Only include procedural languages when we are dumping everything.
1671  *
1672  * For from-initdb procedural languages, only include ACLs, as we do for
1673  * the pg_catalog namespace. We need this because procedural languages do
1674  * not live in any namespace.
1675  */
1676  if (!fout->dopt->include_everything)
1677  plang->dobj.dump = DUMP_COMPONENT_NONE;
1678  else
1679  {
1680  if (plang->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1681  plang->dobj.dump = fout->remoteVersion < 90600 ?
1683  else
1684  plang->dobj.dump = DUMP_COMPONENT_ALL;
1685  }
1686 }
1687 
1688 /*
1689  * selectDumpableAccessMethod: policy-setting subroutine
1690  * Mark an access method as to be dumped or not
1691  *
1692  * Access methods do not belong to any particular namespace. To identify
1693  * built-in access methods, we must resort to checking whether the
1694  * method's OID is in the range reserved for initdb.
1695  */
1696 static void
1698 {
1699  if (checkExtensionMembership(&method->dobj, fout))
1700  return; /* extension membership overrides all else */
1701 
1702  /*
1703  * This would be DUMP_COMPONENT_ACL for from-initdb access methods, but
1704  * they do not support ACLs currently.
1705  */
1706  if (method->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1707  method->dobj.dump = DUMP_COMPONENT_NONE;
1708  else
1709  method->dobj.dump = fout->dopt->include_everything ?
1711 }
1712 
1713 /*
1714  * selectDumpableExtension: policy-setting subroutine
1715  * Mark an extension as to be dumped or not
1716  *
1717  * Built-in extensions should be skipped except for checking ACLs, since we
1718  * assume those will already be installed in the target database. We identify
1719  * such extensions by their having OIDs in the range reserved for initdb.
1720  * We dump all user-added extensions by default, or none of them if
1721  * include_everything is false (i.e., a --schema or --table switch was given).
1722  */
1723 static void
1725 {
1726  /*
1727  * Use DUMP_COMPONENT_ACL for built-in extensions, to allow users to
1728  * change permissions on their member objects, if they wish to, and have
1729  * those changes preserved.
1730  */
1731  if (extinfo->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1732  extinfo->dobj.dump = extinfo->dobj.dump_contains = DUMP_COMPONENT_ACL;
1733  else
1734  extinfo->dobj.dump = extinfo->dobj.dump_contains =
1737 }
1738 
1739 /*
1740  * selectDumpablePublicationTable: policy-setting subroutine
1741  * Mark a publication table as to be dumped or not
1742  *
1743  * Publication tables have schemas, but those are ignored in decision making,
1744  * because publications are only dumped when we are dumping everything.
1745  */
1746 static void
1748 {
1749  if (checkExtensionMembership(dobj, fout))
1750  return; /* extension membership overrides all else */
1751 
1752  dobj->dump = fout->dopt->include_everything ?
1754 }
1755 
1756 /*
1757  * selectDumpableObject: policy-setting subroutine
1758  * Mark a generic dumpable object as to be dumped or not
1759  *
1760  * Use this only for object types without a special-case routine above.
1761  */
1762 static void
1764 {
1765  if (checkExtensionMembership(dobj, fout))
1766  return; /* extension membership overrides all else */
1767 
1768  /*
1769  * Default policy is to dump if parent namespace is dumpable, or for
1770  * non-namespace-associated items, dump if we're dumping "everything".
1771  */
1772  if (dobj->namespace)
1773  dobj->dump = dobj->namespace->dobj.dump_contains;
1774  else
1775  dobj->dump = fout->dopt->include_everything ?
1777 }
1778 
1779 /*
1780  * Dump a table's contents for loading using the COPY command
1781  * - this routine is called by the Archiver when it wants the table
1782  * to be dumped.
1783  */
1784 
1785 static int
1786 dumpTableData_copy(Archive *fout, void *dcontext)
1787 {
1788  TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
1789  TableInfo *tbinfo = tdinfo->tdtable;
1790  const char *classname = tbinfo->dobj.name;
1792 
1793  /*
1794  * Note: can't use getThreadLocalPQExpBuffer() here, we're calling fmtId
1795  * which uses it already.
1796  */
1797  PQExpBuffer clistBuf = createPQExpBuffer();
1798  PGconn *conn = GetConnection(fout);
1799  PGresult *res;
1800  int ret;
1801  char *copybuf;
1802  const char *column_list;
1803 
1804  pg_log_info("dumping contents of table \"%s.%s\"",
1805  tbinfo->dobj.namespace->dobj.name, classname);
1806 
1807  /*
1808  * Specify the column list explicitly so that we have no possibility of
1809  * retrieving data in the wrong column order. (The default column
1810  * ordering of COPY will not be what we want in certain corner cases
1811  * involving ADD COLUMN and inheritance.)
1812  */
1813  column_list = fmtCopyColumnList(tbinfo, clistBuf);
1814 
1815  if (tdinfo->filtercond)
1816  {
1817  /* Note: this syntax is only supported in 8.2 and up */
1818  appendPQExpBufferStr(q, "COPY (SELECT ");
1819  /* klugery to get rid of parens in column list */
1820  if (strlen(column_list) > 2)
1821  {
1822  appendPQExpBufferStr(q, column_list + 1);
1823  q->data[q->len - 1] = ' ';
1824  }
1825  else
1826  appendPQExpBufferStr(q, "* ");
1827  appendPQExpBuffer(q, "FROM %s %s) TO stdout;",
1828  fmtQualifiedDumpable(tbinfo),
1829  tdinfo->filtercond);
1830  }
1831  else
1832  {
1833  appendPQExpBuffer(q, "COPY %s %s TO stdout;",
1834  fmtQualifiedDumpable(tbinfo),
1835  column_list);
1836  }
1837  res = ExecuteSqlQuery(fout, q->data, PGRES_COPY_OUT);
1838  PQclear(res);
1839  destroyPQExpBuffer(clistBuf);
1840 
1841  for (;;)
1842  {
1843  ret = PQgetCopyData(conn, &copybuf, 0);
1844 
1845  if (ret < 0)
1846  break; /* done or error */
1847 
1848  if (copybuf)
1849  {
1850  WriteData(fout, copybuf, ret);
1851  PQfreemem(copybuf);
1852  }
1853 
1854  /* ----------
1855  * THROTTLE:
1856  *
1857  * There was considerable discussion in late July, 2000 regarding
1858  * slowing down pg_dump when backing up large tables. Users with both
1859  * slow & fast (multi-processor) machines experienced performance
1860  * degradation when doing a backup.
1861  *
1862  * Initial attempts based on sleeping for a number of ms for each ms
1863  * of work were deemed too complex, then a simple 'sleep in each loop'
1864  * implementation was suggested. The latter failed because the loop
1865  * was too tight. Finally, the following was implemented:
1866  *
1867  * If throttle is non-zero, then
1868  * See how long since the last sleep.
1869  * Work out how long to sleep (based on ratio).
1870  * If sleep is more than 100ms, then
1871  * sleep
1872  * reset timer
1873  * EndIf
1874  * EndIf
1875  *
1876  * where the throttle value was the number of ms to sleep per ms of
1877  * work. The calculation was done in each loop.
1878  *
1879  * Most of the hard work is done in the backend, and this solution
1880  * still did not work particularly well: on slow machines, the ratio
1881  * was 50:1, and on medium paced machines, 1:1, and on fast
1882  * multi-processor machines, it had little or no effect, for reasons
1883  * that were unclear.
1884  *
1885  * Further discussion ensued, and the proposal was dropped.
1886  *
1887  * For those people who want this feature, it can be implemented using
1888  * gettimeofday in each loop, calculating the time since last sleep,
1889  * multiplying that by the sleep ratio, then if the result is more
1890  * than a preset 'minimum sleep time' (say 100ms), call the 'select'
1891  * function to sleep for a subsecond period ie.
1892  *
1893  * select(0, NULL, NULL, NULL, &tvi);
1894  *
1895  * This will return after the interval specified in the structure tvi.
1896  * Finally, call gettimeofday again to save the 'last sleep time'.
1897  * ----------
1898  */
1899  }
1900  archprintf(fout, "\\.\n\n\n");
1901 
1902  if (ret == -2)
1903  {
1904  /* copy data transfer failed */
1905  pg_log_error("Dumping the contents of table \"%s\" failed: PQgetCopyData() failed.", classname);
1906  pg_log_error("Error message from server: %s", PQerrorMessage(conn));
1907  pg_log_error("The command was: %s", q->data);
1908  exit_nicely(1);
1909  }
1910 
1911  /* Check command status and return to normal libpq state */
1912  res = PQgetResult(conn);
1913  if (PQresultStatus(res) != PGRES_COMMAND_OK)
1914  {
1915  pg_log_error("Dumping the contents of table \"%s\" failed: PQgetResult() failed.", classname);
1916  pg_log_error("Error message from server: %s", PQerrorMessage(conn));
1917  pg_log_error("The command was: %s", q->data);
1918  exit_nicely(1);
1919  }
1920  PQclear(res);
1921 
1922  /* Do this to ensure we've pumped libpq back to idle state */
1923  if (PQgetResult(conn) != NULL)
1924  pg_log_warning("unexpected extra results during COPY of table \"%s\"",
1925  classname);
1926 
1927  destroyPQExpBuffer(q);
1928  return 1;
1929 }
1930 
1931 /*
1932  * Dump table data using INSERT commands.
1933  *
1934  * Caution: when we restore from an archive file direct to database, the
1935  * INSERT commands emitted by this function have to be parsed by
1936  * pg_backup_db.c's ExecuteSimpleCommands(), which will not handle comments,
1937  * E'' strings, or dollar-quoted strings. So don't emit anything like that.
1938  */
1939 static int
1940 dumpTableData_insert(Archive *fout, void *dcontext)
1941 {
1942  TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
1943  TableInfo *tbinfo = tdinfo->tdtable;
1944  DumpOptions *dopt = fout->dopt;
1946  PQExpBuffer insertStmt = NULL;
1947  PGresult *res;
1948  int nfields;
1949  int rows_per_statement = dopt->dump_inserts;
1950  int rows_this_statement = 0;
1951 
1952  appendPQExpBuffer(q, "DECLARE _pg_dump_cursor CURSOR FOR "
1953  "SELECT * FROM ONLY %s",
1954  fmtQualifiedDumpable(tbinfo));
1955  if (tdinfo->filtercond)
1956  appendPQExpBuffer(q, " %s", tdinfo->filtercond);
1957 
1958  ExecuteSqlStatement(fout, q->data);
1959 
1960  while (1)
1961  {
1962  res = ExecuteSqlQuery(fout, "FETCH 100 FROM _pg_dump_cursor",
1963  PGRES_TUPLES_OK);
1964  nfields = PQnfields(res);
1965 
1966  /*
1967  * First time through, we build as much of the INSERT statement as
1968  * possible in "insertStmt", which we can then just print for each
1969  * statement. If the table happens to have zero columns then this will
1970  * be a complete statement, otherwise it will end in "VALUES" and be
1971  * ready to have the row's column values printed.
1972  */
1973  if (insertStmt == NULL)
1974  {
1975  TableInfo *targettab;
1976 
1977  insertStmt = createPQExpBuffer();
1978 
1979  /*
1980  * When load-via-partition-root is set, get the root table name
1981  * for the partition table, so that we can reload data through the
1982  * root table.
1983  */
1984  if (dopt->load_via_partition_root && tbinfo->ispartition)
1985  targettab = getRootTableInfo(tbinfo);
1986  else
1987  targettab = tbinfo;
1988 
1989  appendPQExpBuffer(insertStmt, "INSERT INTO %s ",
1990  fmtQualifiedDumpable(targettab));
1991 
1992  /* corner case for zero-column table */
1993  if (nfields == 0)
1994  {
1995  appendPQExpBufferStr(insertStmt, "DEFAULT VALUES;\n");
1996  }
1997  else
1998  {
1999  /* append the list of column names if required */
2000  if (dopt->column_inserts)
2001  {
2002  appendPQExpBufferChar(insertStmt, '(');
2003  for (int field = 0; field < nfields; field++)
2004  {
2005  if (field > 0)
2006  appendPQExpBufferStr(insertStmt, ", ");
2007  appendPQExpBufferStr(insertStmt,
2008  fmtId(PQfname(res, field)));
2009  }
2010  appendPQExpBufferStr(insertStmt, ") ");
2011  }
2012 
2013  if (tbinfo->needs_override)
2014  appendPQExpBufferStr(insertStmt, "OVERRIDING SYSTEM VALUE ");
2015 
2016  appendPQExpBufferStr(insertStmt, "VALUES");
2017  }
2018  }
2019 
2020  for (int tuple = 0; tuple < PQntuples(res); tuple++)
2021  {
2022  /* Write the INSERT if not in the middle of a multi-row INSERT. */
2023  if (rows_this_statement == 0)
2024  archputs(insertStmt->data, fout);
2025 
2026  /*
2027  * If it is zero-column table then we've already written the
2028  * complete statement, which will mean we've disobeyed
2029  * --rows-per-insert when it's set greater than 1. We do support
2030  * a way to make this multi-row with: SELECT UNION ALL SELECT
2031  * UNION ALL ... but that's non-standard so we should avoid it
2032  * given that using INSERTs is mostly only ever needed for
2033  * cross-database exports.
2034  */
2035  if (nfields == 0)
2036  continue;
2037 
2038  /* Emit a row heading */
2039  if (rows_per_statement == 1)
2040  archputs(" (", fout);
2041  else if (rows_this_statement > 0)
2042  archputs(",\n\t(", fout);
2043  else
2044  archputs("\n\t(", fout);
2045 
2046  for (int field = 0; field < nfields; field++)
2047  {
2048  if (field > 0)
2049  archputs(", ", fout);
2050  if (tbinfo->attgenerated[field])
2051  {
2052  archputs("DEFAULT", fout);
2053  continue;
2054  }
2055  if (PQgetisnull(res, tuple, field))
2056  {
2057  archputs("NULL", fout);
2058  continue;
2059  }
2060 
2061  /* XXX This code is partially duplicated in ruleutils.c */
2062  switch (PQftype(res, field))
2063  {
2064  case INT2OID:
2065  case INT4OID:
2066  case INT8OID:
2067  case OIDOID:
2068  case FLOAT4OID:
2069  case FLOAT8OID:
2070  case NUMERICOID:
2071  {
2072  /*
2073  * These types are printed without quotes unless
2074  * they contain values that aren't accepted by the
2075  * scanner unquoted (e.g., 'NaN'). Note that
2076  * strtod() and friends might accept NaN, so we
2077  * can't use that to test.
2078  *
2079  * In reality we only need to defend against
2080  * infinity and NaN, so we need not get too crazy
2081  * about pattern matching here.
2082  */
2083  const char *s = PQgetvalue(res, tuple, field);
2084 
2085  if (strspn(s, "0123456789 +-eE.") == strlen(s))
2086  archputs(s, fout);
2087  else
2088  archprintf(fout, "'%s'", s);
2089  }
2090  break;
2091 
2092  case BITOID:
2093  case VARBITOID:
2094  archprintf(fout, "B'%s'",
2095  PQgetvalue(res, tuple, field));
2096  break;
2097 
2098  case BOOLOID:
2099  if (strcmp(PQgetvalue(res, tuple, field), "t") == 0)
2100  archputs("true", fout);
2101  else
2102  archputs("false", fout);
2103  break;
2104 
2105  default:
2106  /* All other types are printed as string literals. */
2107  resetPQExpBuffer(q);
2109  PQgetvalue(res, tuple, field),
2110  fout);
2111  archputs(q->data, fout);
2112  break;
2113  }
2114  }
2115 
2116  /* Terminate the row ... */
2117  archputs(")", fout);
2118 
2119  /* ... and the statement, if the target no. of rows is reached */
2120  if (++rows_this_statement >= rows_per_statement)
2121  {
2122  if (dopt->do_nothing)
2123  archputs(" ON CONFLICT DO NOTHING;\n", fout);
2124  else
2125  archputs(";\n", fout);
2126  /* Reset the row counter */
2127  rows_this_statement = 0;
2128  }
2129  }
2130 
2131  if (PQntuples(res) <= 0)
2132  {
2133  PQclear(res);
2134  break;
2135  }
2136  PQclear(res);
2137  }
2138 
2139  /* Terminate any statements that didn't make the row count. */
2140  if (rows_this_statement > 0)
2141  {
2142  if (dopt->do_nothing)
2143  archputs(" ON CONFLICT DO NOTHING;\n", fout);
2144  else
2145  archputs(";\n", fout);
2146  }
2147 
2148  archputs("\n\n", fout);
2149 
2150  ExecuteSqlStatement(fout, "CLOSE _pg_dump_cursor");
2151 
2152  destroyPQExpBuffer(q);
2153  if (insertStmt != NULL)
2154  destroyPQExpBuffer(insertStmt);
2155 
2156  return 1;
2157 }
2158 
2159 /*
2160  * getRootTableInfo:
2161  * get the root TableInfo for the given partition table.
2162  */
2163 static TableInfo *
2165 {
2166  TableInfo *parentTbinfo;
2167 
2168  Assert(tbinfo->ispartition);
2169  Assert(tbinfo->numParents == 1);
2170 
2171  parentTbinfo = tbinfo->parents[0];
2172  while (parentTbinfo->ispartition)
2173  {
2174  Assert(parentTbinfo->numParents == 1);
2175  parentTbinfo = parentTbinfo->parents[0];
2176  }
2177 
2178  return parentTbinfo;
2179 }
2180 
2181 /*
2182  * dumpTableData -
2183  * dump the contents of a single table
2184  *
2185  * Actually, this just makes an ArchiveEntry for the table contents.
2186  */
2187 static void
2189 {
2190  DumpOptions *dopt = fout->dopt;
2191  TableInfo *tbinfo = tdinfo->tdtable;
2192  PQExpBuffer copyBuf = createPQExpBuffer();
2193  PQExpBuffer clistBuf = createPQExpBuffer();
2194  DataDumperPtr dumpFn;
2195  char *copyStmt;
2196  const char *copyFrom;
2197 
2198  if (!dopt->dump_inserts)
2199  {
2200  /* Dump/restore using COPY */
2201  dumpFn = dumpTableData_copy;
2202 
2203  /*
2204  * When load-via-partition-root is set, get the root table name for
2205  * the partition table, so that we can reload data through the root
2206  * table.
2207  */
2208  if (dopt->load_via_partition_root && tbinfo->ispartition)
2209  {
2210  TableInfo *parentTbinfo;
2211 
2212  parentTbinfo = getRootTableInfo(tbinfo);
2213  copyFrom = fmtQualifiedDumpable(parentTbinfo);
2214  }
2215  else
2216  copyFrom = fmtQualifiedDumpable(tbinfo);
2217 
2218  /* must use 2 steps here 'cause fmtId is nonreentrant */
2219  appendPQExpBuffer(copyBuf, "COPY %s ",
2220  copyFrom);
2221  appendPQExpBuffer(copyBuf, "%s FROM stdin;\n",
2222  fmtCopyColumnList(tbinfo, clistBuf));
2223  copyStmt = copyBuf->data;
2224  }
2225  else
2226  {
2227  /* Restore using INSERT */
2228  dumpFn = dumpTableData_insert;
2229  copyStmt = NULL;
2230  }
2231 
2232  /*
2233  * Note: although the TableDataInfo is a full DumpableObject, we treat its
2234  * dependency on its table as "special" and pass it to ArchiveEntry now.
2235  * See comments for BuildArchiveDependencies.
2236  */
2237  if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2238  {
2239  TocEntry *te;
2240 
2241  te = ArchiveEntry(fout, tdinfo->dobj.catId, tdinfo->dobj.dumpId,
2242  ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
2243  .namespace = tbinfo->dobj.namespace->dobj.name,
2244  .owner = tbinfo->rolname,
2245  .description = "TABLE DATA",
2246  .section = SECTION_DATA,
2247  .copyStmt = copyStmt,
2248  .deps = &(tbinfo->dobj.dumpId),
2249  .nDeps = 1,
2250  .dumpFn = dumpFn,
2251  .dumpArg = tdinfo));
2252 
2253  /*
2254  * Set the TocEntry's dataLength in case we are doing a parallel dump
2255  * and want to order dump jobs by table size. We choose to measure
2256  * dataLength in table pages during dump, so no scaling is needed.
2257  * However, relpages is declared as "integer" in pg_class, and hence
2258  * also in TableInfo, but it's really BlockNumber a/k/a unsigned int.
2259  * Cast so that we get the right interpretation of table sizes
2260  * exceeding INT_MAX pages.
2261  */
2262  te->dataLength = (BlockNumber) tbinfo->relpages;
2263  }
2264 
2265  destroyPQExpBuffer(copyBuf);
2266  destroyPQExpBuffer(clistBuf);
2267 }
2268 
2269 /*
2270  * refreshMatViewData -
2271  * load or refresh the contents of a single materialized view
2272  *
2273  * Actually, this just makes an ArchiveEntry for the REFRESH MATERIALIZED VIEW
2274  * statement.
2275  */
2276 static void
2278 {
2279  TableInfo *tbinfo = tdinfo->tdtable;
2280  PQExpBuffer q;
2281 
2282  /* If the materialized view is not flagged as populated, skip this. */
2283  if (!tbinfo->relispopulated)
2284  return;
2285 
2286  q = createPQExpBuffer();
2287 
2288  appendPQExpBuffer(q, "REFRESH MATERIALIZED VIEW %s;\n",
2289  fmtQualifiedDumpable(tbinfo));
2290 
2291  if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2292  ArchiveEntry(fout,
2293  tdinfo->dobj.catId, /* catalog ID */
2294  tdinfo->dobj.dumpId, /* dump ID */
2295  ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
2296  .namespace = tbinfo->dobj.namespace->dobj.name,
2297  .owner = tbinfo->rolname,
2298  .description = "MATERIALIZED VIEW DATA",
2299  .section = SECTION_POST_DATA,
2300  .createStmt = q->data,
2301  .deps = tdinfo->dobj.dependencies,
2302  .nDeps = tdinfo->dobj.nDeps));
2303 
2304  destroyPQExpBuffer(q);
2305 }
2306 
2307 /*
2308  * getTableData -
2309  * set up dumpable objects representing the contents of tables
2310  */
2311 static void
2313 {
2314  int i;
2315 
2316  for (i = 0; i < numTables; i++)
2317  {
2318  if (tblinfo[i].dobj.dump & DUMP_COMPONENT_DATA &&
2319  (!relkind || tblinfo[i].relkind == relkind))
2320  makeTableDataInfo(dopt, &(tblinfo[i]));
2321  }
2322 }
2323 
2324 /*
2325  * Make a dumpable object for the data of this specific table
2326  *
2327  * Note: we make a TableDataInfo if and only if we are going to dump the
2328  * table data; the "dump" flag in such objects isn't used.
2329  */
2330 static void
2332 {
2333  TableDataInfo *tdinfo;
2334 
2335  /*
2336  * Nothing to do if we already decided to dump the table. This will
2337  * happen for "config" tables.
2338  */
2339  if (tbinfo->dataObj != NULL)
2340  return;
2341 
2342  /* Skip VIEWs (no data to dump) */
2343  if (tbinfo->relkind == RELKIND_VIEW)
2344  return;
2345  /* Skip FOREIGN TABLEs (no data to dump) */
2346  if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2347  return;
2348  /* Skip partitioned tables (data in partitions) */
2349  if (tbinfo->relkind == RELKIND_PARTITIONED_TABLE)
2350  return;
2351 
2352  /* Don't dump data in unlogged tables, if so requested */
2353  if (tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED &&
2354  dopt->no_unlogged_table_data)
2355  return;
2356 
2357  /* Check that the data is not explicitly excluded */
2358  if (simple_oid_list_member(&tabledata_exclude_oids,
2359  tbinfo->dobj.catId.oid))
2360  return;
2361 
2362  /* OK, let's dump it */
2363  tdinfo = (TableDataInfo *) pg_malloc(sizeof(TableDataInfo));
2364 
2365  if (tbinfo->relkind == RELKIND_MATVIEW)
2366  tdinfo->dobj.objType = DO_REFRESH_MATVIEW;
2367  else if (tbinfo->relkind == RELKIND_SEQUENCE)
2368  tdinfo->dobj.objType = DO_SEQUENCE_SET;
2369  else
2370  tdinfo->dobj.objType = DO_TABLE_DATA;
2371 
2372  /*
2373  * Note: use tableoid 0 so that this object won't be mistaken for
2374  * something that pg_depend entries apply to.
2375  */
2376  tdinfo->dobj.catId.tableoid = 0;
2377  tdinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
2378  AssignDumpId(&tdinfo->dobj);
2379  tdinfo->dobj.name = tbinfo->dobj.name;
2380  tdinfo->dobj.namespace = tbinfo->dobj.namespace;
2381  tdinfo->tdtable = tbinfo;
2382  tdinfo->filtercond = NULL; /* might get set later */
2383  addObjectDependency(&tdinfo->dobj, tbinfo->dobj.dumpId);
2384 
2385  tbinfo->dataObj = tdinfo;
2386 }
2387 
2388 /*
2389  * The refresh for a materialized view must be dependent on the refresh for
2390  * any materialized view that this one is dependent on.
2391  *
2392  * This must be called after all the objects are created, but before they are
2393  * sorted.
2394  */
2395 static void
2397 {
2398  PQExpBuffer query;
2399  PGresult *res;
2400  int ntups,
2401  i;
2402  int i_classid,
2403  i_objid,
2404  i_refobjid;
2405 
2406  /* No Mat Views before 9.3. */
2407  if (fout->remoteVersion < 90300)
2408  return;
2409 
2410  query = createPQExpBuffer();
2411 
2412  appendPQExpBufferStr(query, "WITH RECURSIVE w AS "
2413  "( "
2414  "SELECT d1.objid, d2.refobjid, c2.relkind AS refrelkind "
2415  "FROM pg_depend d1 "
2416  "JOIN pg_class c1 ON c1.oid = d1.objid "
2417  "AND c1.relkind = " CppAsString2(RELKIND_MATVIEW)
2418  " JOIN pg_rewrite r1 ON r1.ev_class = d1.objid "
2419  "JOIN pg_depend d2 ON d2.classid = 'pg_rewrite'::regclass "
2420  "AND d2.objid = r1.oid "
2421  "AND d2.refobjid <> d1.objid "
2422  "JOIN pg_class c2 ON c2.oid = d2.refobjid "
2423  "AND c2.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
2424  CppAsString2(RELKIND_VIEW) ") "
2425  "WHERE d1.classid = 'pg_class'::regclass "
2426  "UNION "
2427  "SELECT w.objid, d3.refobjid, c3.relkind "
2428  "FROM w "
2429  "JOIN pg_rewrite r3 ON r3.ev_class = w.refobjid "
2430  "JOIN pg_depend d3 ON d3.classid = 'pg_rewrite'::regclass "
2431  "AND d3.objid = r3.oid "
2432  "AND d3.refobjid <> w.refobjid "
2433  "JOIN pg_class c3 ON c3.oid = d3.refobjid "
2434  "AND c3.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
2435  CppAsString2(RELKIND_VIEW) ") "
2436  ") "
2437  "SELECT 'pg_class'::regclass::oid AS classid, objid, refobjid "
2438  "FROM w "
2439  "WHERE refrelkind = " CppAsString2(RELKIND_MATVIEW));
2440 
2441  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
2442 
2443  ntups = PQntuples(res);
2444 
2445  i_classid = PQfnumber(res, "classid");
2446  i_objid = PQfnumber(res, "objid");
2447  i_refobjid = PQfnumber(res, "refobjid");
2448 
2449  for (i = 0; i < ntups; i++)
2450  {
2451  CatalogId objId;
2452  CatalogId refobjId;
2453  DumpableObject *dobj;
2454  DumpableObject *refdobj;
2455  TableInfo *tbinfo;
2456  TableInfo *reftbinfo;
2457 
2458  objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
2459  objId.oid = atooid(PQgetvalue(res, i, i_objid));
2460  refobjId.tableoid = objId.tableoid;
2461  refobjId.oid = atooid(PQgetvalue(res, i, i_refobjid));
2462 
2463  dobj = findObjectByCatalogId(objId);
2464  if (dobj == NULL)
2465  continue;
2466 
2467  Assert(dobj->objType == DO_TABLE);
2468  tbinfo = (TableInfo *) dobj;
2469  Assert(tbinfo->relkind == RELKIND_MATVIEW);
2470  dobj = (DumpableObject *) tbinfo->dataObj;
2471  if (dobj == NULL)
2472  continue;
2473  Assert(dobj->objType == DO_REFRESH_MATVIEW);
2474 
2475  refdobj = findObjectByCatalogId(refobjId);
2476  if (refdobj == NULL)
2477  continue;
2478 
2479  Assert(refdobj->objType == DO_TABLE);
2480  reftbinfo = (TableInfo *) refdobj;
2481  Assert(reftbinfo->relkind == RELKIND_MATVIEW);
2482  refdobj = (DumpableObject *) reftbinfo->dataObj;
2483  if (refdobj == NULL)
2484  continue;
2485  Assert(refdobj->objType == DO_REFRESH_MATVIEW);
2486 
2487  addObjectDependency(dobj, refdobj->dumpId);
2488 
2489  if (!reftbinfo->relispopulated)
2490  tbinfo->relispopulated = false;
2491  }
2492 
2493  PQclear(res);
2494 
2495  destroyPQExpBuffer(query);
2496 }
2497 
2498 /*
2499  * getTableDataFKConstraints -
2500  * add dump-order dependencies reflecting foreign key constraints
2501  *
2502  * This code is executed only in a data-only dump --- in schema+data dumps
2503  * we handle foreign key issues by not creating the FK constraints until
2504  * after the data is loaded. In a data-only dump, however, we want to
2505  * order the table data objects in such a way that a table's referenced
2506  * tables are restored first. (In the presence of circular references or
2507  * self-references this may be impossible; we'll detect and complain about
2508  * that during the dependency sorting step.)
2509  */
2510 static void
2512 {
2513  DumpableObject **dobjs;
2514  int numObjs;
2515  int i;
2516 
2517  /* Search through all the dumpable objects for FK constraints */
2518  getDumpableObjects(&dobjs, &numObjs);
2519  for (i = 0; i < numObjs; i++)
2520  {
2521  if (dobjs[i]->objType == DO_FK_CONSTRAINT)
2522  {
2523  ConstraintInfo *cinfo = (ConstraintInfo *) dobjs[i];
2524  TableInfo *ftable;
2525 
2526  /* Not interesting unless both tables are to be dumped */
2527  if (cinfo->contable == NULL ||
2528  cinfo->contable->dataObj == NULL)
2529  continue;
2530  ftable = findTableByOid(cinfo->confrelid);
2531  if (ftable == NULL ||
2532  ftable->dataObj == NULL)
2533  continue;
2534 
2535  /*
2536  * Okay, make referencing table's TABLE_DATA object depend on the
2537  * referenced table's TABLE_DATA object.
2538  */
2540  ftable->dataObj->dobj.dumpId);
2541  }
2542  }
2543  free(dobjs);
2544 }
2545 
2546 
2547 /*
2548  * guessConstraintInheritance:
2549  * In pre-8.4 databases, we can't tell for certain which constraints
2550  * are inherited. We assume a CHECK constraint is inherited if its name
2551  * matches the name of any constraint in the parent. Originally this code
2552  * tried to compare the expression texts, but that can fail for various
2553  * reasons --- for example, if the parent and child tables are in different
2554  * schemas, reverse-listing of function calls may produce different text
2555  * (schema-qualified or not) depending on search path.
2556  *
2557  * In 8.4 and up we can rely on the conislocal field to decide which
2558  * constraints must be dumped; much safer.
2559  *
2560  * This function assumes all conislocal flags were initialized to true.
2561  * It clears the flag on anything that seems to be inherited.
2562  */
2563 static void
2565 {
2566  int i,
2567  j,
2568  k;
2569 
2570  for (i = 0; i < numTables; i++)
2571  {
2572  TableInfo *tbinfo = &(tblinfo[i]);
2573  int numParents;
2574  TableInfo **parents;
2575  TableInfo *parent;
2576 
2577  /* Sequences and views never have parents */
2578  if (tbinfo->relkind == RELKIND_SEQUENCE ||
2579  tbinfo->relkind == RELKIND_VIEW)
2580  continue;
2581 
2582  /* Don't bother computing anything for non-target tables, either */
2583  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
2584  continue;
2585 
2586  numParents = tbinfo->numParents;
2587  parents = tbinfo->parents;
2588 
2589  if (numParents == 0)
2590  continue; /* nothing to see here, move along */
2591 
2592  /* scan for inherited CHECK constraints */
2593  for (j = 0; j < tbinfo->ncheck; j++)
2594  {
2595  ConstraintInfo *constr;
2596 
2597  constr = &(tbinfo->checkexprs[j]);
2598 
2599  for (k = 0; k < numParents; k++)
2600  {
2601  int l;
2602 
2603  parent = parents[k];
2604  for (l = 0; l < parent->ncheck; l++)
2605  {
2606  ConstraintInfo *pconstr = &(parent->checkexprs[l]);
2607 
2608  if (strcmp(pconstr->dobj.name, constr->dobj.name) == 0)
2609  {
2610  constr->conislocal = false;
2611  break;
2612  }
2613  }
2614  if (!constr->conislocal)
2615  break;
2616  }
2617  }
2618  }
2619 }
2620 
2621 
2622 /*
2623  * dumpDatabase:
2624  * dump the database definition
2625  */
2626 static void
2628 {
2629  DumpOptions *dopt = fout->dopt;
2630  PQExpBuffer dbQry = createPQExpBuffer();
2631  PQExpBuffer delQry = createPQExpBuffer();
2632  PQExpBuffer creaQry = createPQExpBuffer();
2633  PQExpBuffer labelq = createPQExpBuffer();
2634  PGconn *conn = GetConnection(fout);
2635  PGresult *res;
2636  int i_tableoid,
2637  i_oid,
2638  i_datname,
2639  i_dba,
2640  i_encoding,
2641  i_collate,
2642  i_ctype,
2643  i_frozenxid,
2644  i_minmxid,
2645  i_datacl,
2646  i_rdatacl,
2647  i_datistemplate,
2648  i_datconnlimit,
2649  i_tablespace;
2650  CatalogId dbCatId;
2651  DumpId dbDumpId;
2652  const char *datname,
2653  *dba,
2654  *encoding,
2655  *collate,
2656  *ctype,
2657  *datacl,
2658  *rdatacl,
2659  *datistemplate,
2660  *datconnlimit,
2661  *tablespace;
2662  uint32 frozenxid,
2663  minmxid;
2664  char *qdatname;
2665 
2666  pg_log_info("saving database definition");
2667 
2668  /*
2669  * Fetch the database-level properties for this database.
2670  *
2671  * The order in which privileges are in the ACL string (the order they
2672  * have been GRANT'd in, which the backend maintains) must be preserved to
2673  * ensure that GRANTs WITH GRANT OPTION and subsequent GRANTs based on
2674  * those are dumped in the correct order. Note that initial privileges
2675  * (pg_init_privs) are not supported on databases, so this logic cannot
2676  * make use of buildACLQueries().
2677  */
2678  if (fout->remoteVersion >= 90600)
2679  {
2680  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2681  "(%s datdba) AS dba, "
2682  "pg_encoding_to_char(encoding) AS encoding, "
2683  "datcollate, datctype, datfrozenxid, datminmxid, "
2684  "(SELECT array_agg(acl ORDER BY row_n) FROM "
2685  " (SELECT acl, row_n FROM "
2686  " unnest(coalesce(datacl,acldefault('d',datdba))) "
2687  " WITH ORDINALITY AS perm(acl,row_n) "
2688  " WHERE NOT EXISTS ( "
2689  " SELECT 1 "
2690  " FROM unnest(acldefault('d',datdba)) "
2691  " AS init(init_acl) "
2692  " WHERE acl = init_acl)) AS datacls) "
2693  " AS datacl, "
2694  "(SELECT array_agg(acl ORDER BY row_n) FROM "
2695  " (SELECT acl, row_n FROM "
2696  " unnest(acldefault('d',datdba)) "
2697  " WITH ORDINALITY AS initp(acl,row_n) "
2698  " WHERE NOT EXISTS ( "
2699  " SELECT 1 "
2700  " FROM unnest(coalesce(datacl,acldefault('d',datdba))) "
2701  " AS permp(orig_acl) "
2702  " WHERE acl = orig_acl)) AS rdatacls) "
2703  " AS rdatacl, "
2704  "datistemplate, datconnlimit, "
2705  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2706  "shobj_description(oid, 'pg_database') AS description "
2707 
2708  "FROM pg_database "
2709  "WHERE datname = current_database()",
2711  }
2712  else if (fout->remoteVersion >= 90300)
2713  {
2714  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2715  "(%s datdba) AS dba, "
2716  "pg_encoding_to_char(encoding) AS encoding, "
2717  "datcollate, datctype, datfrozenxid, datminmxid, "
2718  "datacl, '' as rdatacl, datistemplate, datconnlimit, "
2719  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2720  "shobj_description(oid, 'pg_database') AS description "
2721 
2722  "FROM pg_database "
2723  "WHERE datname = current_database()",
2725  }
2726  else if (fout->remoteVersion >= 80400)
2727  {
2728  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2729  "(%s datdba) AS dba, "
2730  "pg_encoding_to_char(encoding) AS encoding, "
2731  "datcollate, datctype, datfrozenxid, 0 AS datminmxid, "
2732  "datacl, '' as rdatacl, datistemplate, datconnlimit, "
2733  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2734  "shobj_description(oid, 'pg_database') AS description "
2735 
2736  "FROM pg_database "
2737  "WHERE datname = current_database()",
2739  }
2740  else if (fout->remoteVersion >= 80200)
2741  {
2742  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2743  "(%s datdba) AS dba, "
2744  "pg_encoding_to_char(encoding) AS encoding, "
2745  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2746  "datacl, '' as rdatacl, datistemplate, datconnlimit, "
2747  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2748  "shobj_description(oid, 'pg_database') AS description "
2749 
2750  "FROM pg_database "
2751  "WHERE datname = current_database()",
2753  }
2754  else
2755  {
2756  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2757  "(%s datdba) AS dba, "
2758  "pg_encoding_to_char(encoding) AS encoding, "
2759  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2760  "datacl, '' as rdatacl, datistemplate, "
2761  "-1 as datconnlimit, "
2762  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace "
2763  "FROM pg_database "
2764  "WHERE datname = current_database()",
2766  }
2767 
2768  res = ExecuteSqlQueryForSingleRow(fout, dbQry->data);
2769 
2770  i_tableoid = PQfnumber(res, "tableoid");
2771  i_oid = PQfnumber(res, "oid");
2772  i_datname = PQfnumber(res, "datname");
2773  i_dba = PQfnumber(res, "dba");
2774  i_encoding = PQfnumber(res, "encoding");
2775  i_collate = PQfnumber(res, "datcollate");
2776  i_ctype = PQfnumber(res, "datctype");
2777  i_frozenxid = PQfnumber(res, "datfrozenxid");
2778  i_minmxid = PQfnumber(res, "datminmxid");
2779  i_datacl = PQfnumber(res, "datacl");
2780  i_rdatacl = PQfnumber(res, "rdatacl");
2781  i_datistemplate = PQfnumber(res, "datistemplate");
2782  i_datconnlimit = PQfnumber(res, "datconnlimit");
2783  i_tablespace = PQfnumber(res, "tablespace");
2784 
2785  dbCatId.tableoid = atooid(PQgetvalue(res, 0, i_tableoid));
2786  dbCatId.oid = atooid(PQgetvalue(res, 0, i_oid));
2787  datname = PQgetvalue(res, 0, i_datname);
2788  dba = PQgetvalue(res, 0, i_dba);
2789  encoding = PQgetvalue(res, 0, i_encoding);
2790  collate = PQgetvalue(res, 0, i_collate);
2791  ctype = PQgetvalue(res, 0, i_ctype);
2792  frozenxid = atooid(PQgetvalue(res, 0, i_frozenxid));
2793  minmxid = atooid(PQgetvalue(res, 0, i_minmxid));
2794  datacl = PQgetvalue(res, 0, i_datacl);
2795  rdatacl = PQgetvalue(res, 0, i_rdatacl);
2796  datistemplate = PQgetvalue(res, 0, i_datistemplate);
2797  datconnlimit = PQgetvalue(res, 0, i_datconnlimit);
2798  tablespace = PQgetvalue(res, 0, i_tablespace);
2799 
2800  qdatname = pg_strdup(fmtId(datname));
2801 
2802  /*
2803  * Prepare the CREATE DATABASE command. We must specify encoding, locale,
2804  * and tablespace since those can't be altered later. Other DB properties
2805  * are left to the DATABASE PROPERTIES entry, so that they can be applied
2806  * after reconnecting to the target DB.
2807  */
2808  appendPQExpBuffer(creaQry, "CREATE DATABASE %s WITH TEMPLATE = template0",
2809  qdatname);
2810  if (strlen(encoding) > 0)
2811  {
2812  appendPQExpBufferStr(creaQry, " ENCODING = ");
2813  appendStringLiteralAH(creaQry, encoding, fout);
2814  }
2815  if (strlen(collate) > 0 && strcmp(collate, ctype) == 0)
2816  {
2817  appendPQExpBufferStr(creaQry, " LOCALE = ");
2818  appendStringLiteralAH(creaQry, collate, fout);
2819  }
2820  else
2821  {
2822  if (strlen(collate) > 0)
2823  {
2824  appendPQExpBufferStr(creaQry, " LC_COLLATE = ");
2825  appendStringLiteralAH(creaQry, collate, fout);
2826  }
2827  if (strlen(ctype) > 0)
2828  {
2829  appendPQExpBufferStr(creaQry, " LC_CTYPE = ");
2830  appendStringLiteralAH(creaQry, ctype, fout);
2831  }
2832  }
2833 
2834  /*
2835  * Note: looking at dopt->outputNoTablespaces here is completely the wrong
2836  * thing; the decision whether to specify a tablespace should be left till
2837  * pg_restore, so that pg_restore --no-tablespaces applies. Ideally we'd
2838  * label the DATABASE entry with the tablespace and let the normal
2839  * tablespace selection logic work ... but CREATE DATABASE doesn't pay
2840  * attention to default_tablespace, so that won't work.
2841  */
2842  if (strlen(tablespace) > 0 && strcmp(tablespace, "pg_default") != 0 &&
2843  !dopt->outputNoTablespaces)
2844  appendPQExpBuffer(creaQry, " TABLESPACE = %s",
2845  fmtId(tablespace));
2846  appendPQExpBufferStr(creaQry, ";\n");
2847 
2848  appendPQExpBuffer(delQry, "DROP DATABASE %s;\n",
2849  qdatname);
2850 
2851  dbDumpId = createDumpId();
2852 
2853  ArchiveEntry(fout,
2854  dbCatId, /* catalog ID */
2855  dbDumpId, /* dump ID */
2856  ARCHIVE_OPTS(.tag = datname,
2857  .owner = dba,
2858  .description = "DATABASE",
2859  .section = SECTION_PRE_DATA,
2860  .createStmt = creaQry->data,
2861  .dropStmt = delQry->data));
2862 
2863  /* Compute correct tag for archive entry */
2864  appendPQExpBuffer(labelq, "DATABASE %s", qdatname);
2865 
2866  /* Dump DB comment if any */
2867  if (fout->remoteVersion >= 80200)
2868  {
2869  /*
2870  * 8.2 and up keep comments on shared objects in a shared table, so we
2871  * cannot use the dumpComment() code used for other database objects.
2872  * Be careful that the ArchiveEntry parameters match that function.
2873  */
2874  char *comment = PQgetvalue(res, 0, PQfnumber(res, "description"));
2875 
2876  if (comment && *comment && !dopt->no_comments)
2877  {
2878  resetPQExpBuffer(dbQry);
2879 
2880  /*
2881  * Generates warning when loaded into a differently-named
2882  * database.
2883  */
2884  appendPQExpBuffer(dbQry, "COMMENT ON DATABASE %s IS ", qdatname);
2885  appendStringLiteralAH(dbQry, comment, fout);
2886  appendPQExpBufferStr(dbQry, ";\n");
2887 
2888  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2889  ARCHIVE_OPTS(.tag = labelq->data,
2890  .owner = dba,
2891  .description = "COMMENT",
2892  .section = SECTION_NONE,
2893  .createStmt = dbQry->data,
2894  .deps = &dbDumpId,
2895  .nDeps = 1));
2896  }
2897  }
2898  else
2899  {
2900  dumpComment(fout, "DATABASE", qdatname, NULL, dba,
2901  dbCatId, 0, dbDumpId);
2902  }
2903 
2904  /* Dump DB security label, if enabled */
2905  if (!dopt->no_security_labels && fout->remoteVersion >= 90200)
2906  {
2907  PGresult *shres;
2908  PQExpBuffer seclabelQry;
2909 
2910  seclabelQry = createPQExpBuffer();
2911 
2912  buildShSecLabelQuery(conn, "pg_database", dbCatId.oid, seclabelQry);
2913  shres = ExecuteSqlQuery(fout, seclabelQry->data, PGRES_TUPLES_OK);
2914  resetPQExpBuffer(seclabelQry);
2915  emitShSecLabels(conn, shres, seclabelQry, "DATABASE", datname);
2916  if (seclabelQry->len > 0)
2917  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2918  ARCHIVE_OPTS(.tag = labelq->data,
2919  .owner = dba,
2920  .description = "SECURITY LABEL",
2921  .section = SECTION_NONE,
2922  .createStmt = seclabelQry->data,
2923  .deps = &dbDumpId,
2924  .nDeps = 1));
2925  destroyPQExpBuffer(seclabelQry);
2926  PQclear(shres);
2927  }
2928 
2929  /*
2930  * Dump ACL if any. Note that we do not support initial privileges
2931  * (pg_init_privs) on databases.
2932  */
2933  dumpACL(fout, dbCatId, dbDumpId, "DATABASE",
2934  qdatname, NULL, NULL,
2935  dba, datacl, rdatacl, "", "");
2936 
2937  /*
2938  * Now construct a DATABASE PROPERTIES archive entry to restore any
2939  * non-default database-level properties. (The reason this must be
2940  * separate is that we cannot put any additional commands into the TOC
2941  * entry that has CREATE DATABASE. pg_restore would execute such a group
2942  * in an implicit transaction block, and the backend won't allow CREATE
2943  * DATABASE in that context.)
2944  */
2945  resetPQExpBuffer(creaQry);
2946  resetPQExpBuffer(delQry);
2947 
2948  if (strlen(datconnlimit) > 0 && strcmp(datconnlimit, "-1") != 0)
2949  appendPQExpBuffer(creaQry, "ALTER DATABASE %s CONNECTION LIMIT = %s;\n",
2950  qdatname, datconnlimit);
2951 
2952  if (strcmp(datistemplate, "t") == 0)
2953  {
2954  appendPQExpBuffer(creaQry, "ALTER DATABASE %s IS_TEMPLATE = true;\n",
2955  qdatname);
2956 
2957  /*
2958  * The backend won't accept DROP DATABASE on a template database. We
2959  * can deal with that by removing the template marking before the DROP
2960  * gets issued. We'd prefer to use ALTER DATABASE IF EXISTS here, but
2961  * since no such command is currently supported, fake it with a direct
2962  * UPDATE on pg_database.
2963  */
2964  appendPQExpBufferStr(delQry, "UPDATE pg_catalog.pg_database "
2965  "SET datistemplate = false WHERE datname = ");
2966  appendStringLiteralAH(delQry, datname, fout);
2967  appendPQExpBufferStr(delQry, ";\n");
2968  }
2969 
2970  /* Add database-specific SET options */
2971  dumpDatabaseConfig(fout, creaQry, datname, dbCatId.oid);
2972 
2973  /*
2974  * We stick this binary-upgrade query into the DATABASE PROPERTIES archive
2975  * entry, too, for lack of a better place.
2976  */
2977  if (dopt->binary_upgrade)
2978  {
2979  appendPQExpBufferStr(creaQry, "\n-- For binary upgrade, set datfrozenxid and datminmxid.\n");
2980  appendPQExpBuffer(creaQry, "UPDATE pg_catalog.pg_database\n"
2981  "SET datfrozenxid = '%u', datminmxid = '%u'\n"
2982  "WHERE datname = ",
2983  frozenxid, minmxid);
2984  appendStringLiteralAH(creaQry, datname, fout);
2985  appendPQExpBufferStr(creaQry, ";\n");
2986  }
2987 
2988  if (creaQry->len > 0)
2989  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2990  ARCHIVE_OPTS(.tag = datname,
2991  .owner = dba,
2992  .description = "DATABASE PROPERTIES",
2993  .section = SECTION_PRE_DATA,
2994  .createStmt = creaQry->data,
2995  .dropStmt = delQry->data,
2996  .deps = &dbDumpId));
2997 
2998  /*
2999  * pg_largeobject comes from the old system intact, so set its
3000  * relfrozenxids and relminmxids.
3001  */
3002  if (dopt->binary_upgrade)
3003  {
3004  PGresult *lo_res;
3005  PQExpBuffer loFrozenQry = createPQExpBuffer();
3006  PQExpBuffer loOutQry = createPQExpBuffer();
3007  int i_relfrozenxid,
3008  i_relminmxid;
3009 
3010  /*
3011  * pg_largeobject
3012  */
3013  if (fout->remoteVersion >= 90300)
3014  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid\n"
3015  "FROM pg_catalog.pg_class\n"
3016  "WHERE oid = %u;\n",
3017  LargeObjectRelationId);
3018  else
3019  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid\n"
3020  "FROM pg_catalog.pg_class\n"
3021  "WHERE oid = %u;\n",
3022  LargeObjectRelationId);
3023 
3024  lo_res = ExecuteSqlQueryForSingleRow(fout, loFrozenQry->data);
3025 
3026  i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
3027  i_relminmxid = PQfnumber(lo_res, "relminmxid");
3028 
3029  appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, set pg_largeobject relfrozenxid and relminmxid\n");
3030  appendPQExpBuffer(loOutQry, "UPDATE pg_catalog.pg_class\n"
3031  "SET relfrozenxid = '%u', relminmxid = '%u'\n"
3032  "WHERE oid = %u;\n",
3033  atooid(PQgetvalue(lo_res, 0, i_relfrozenxid)),
3034  atooid(PQgetvalue(lo_res, 0, i_relminmxid)),
3035  LargeObjectRelationId);
3036  ArchiveEntry(fout, nilCatalogId, createDumpId(),
3037  ARCHIVE_OPTS(.tag = "pg_largeobject",
3038  .description = "pg_largeobject",
3039  .section = SECTION_PRE_DATA,
3040  .createStmt = loOutQry->data));
3041 
3042  PQclear(lo_res);
3043 
3044  destroyPQExpBuffer(loFrozenQry);
3045  destroyPQExpBuffer(loOutQry);
3046  }
3047 
3048  PQclear(res);
3049 
3050  free(qdatname);
3051  destroyPQExpBuffer(dbQry);
3052  destroyPQExpBuffer(delQry);
3053  destroyPQExpBuffer(creaQry);
3054  destroyPQExpBuffer(labelq);
3055 }
3056 
3057 /*
3058  * Collect any database-specific or role-and-database-specific SET options
3059  * for this database, and append them to outbuf.
3060  */
3061 static void
3063  const char *dbname, Oid dboid)
3064 {
3065  PGconn *conn = GetConnection(AH);
3067  PGresult *res;
3068  int count = 1;
3069 
3070  /*
3071  * First collect database-specific options. Pre-8.4 server versions lack
3072  * unnest(), so we do this the hard way by querying once per subscript.
3073  */
3074  for (;;)
3075  {
3076  if (AH->remoteVersion >= 90000)
3077  printfPQExpBuffer(buf, "SELECT setconfig[%d] FROM pg_db_role_setting "
3078  "WHERE setrole = 0 AND setdatabase = '%u'::oid",
3079  count, dboid);
3080  else
3081  printfPQExpBuffer(buf, "SELECT datconfig[%d] FROM pg_database WHERE oid = '%u'::oid", count, dboid);
3082 
3083  res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);
3084 
3085  if (PQntuples(res) == 1 &&
3086  !PQgetisnull(res, 0, 0))
3087  {
3088  makeAlterConfigCommand(conn, PQgetvalue(res, 0, 0),
3089  "DATABASE", dbname, NULL, NULL,
3090  outbuf);
3091  PQclear(res);
3092  count++;
3093  }
3094  else
3095  {
3096  PQclear(res);
3097  break;
3098  }
3099  }
3100 
3101  /* Now look for role-and-database-specific options */
3102  if (AH->remoteVersion >= 90000)
3103  {
3104  /* Here we can assume we have unnest() */
3105  printfPQExpBuffer(buf, "SELECT rolname, unnest(setconfig) "
3106  "FROM pg_db_role_setting s, pg_roles r "
3107  "WHERE setrole = r.oid AND setdatabase = '%u'::oid",
3108  dboid);
3109 
3110  res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);
3111 
3112  if (PQntuples(res) > 0)
3113  {
3114  int i;
3115 
3116  for (i = 0; i < PQntuples(res); i++)
3117  makeAlterConfigCommand(conn, PQgetvalue(res, i, 1),
3118  "ROLE", PQgetvalue(res, i, 0),
3119  "DATABASE", dbname,
3120  outbuf);
3121  }
3122 
3123  PQclear(res);
3124  }
3125 
3126  destroyPQExpBuffer(buf);
3127 }
3128 
3129 /*
3130  * dumpEncoding: put the correct encoding into the archive
3131  */
3132 static void
3134 {
3135  const char *encname = pg_encoding_to_char(AH->encoding);
3137 
3138  pg_log_info("saving encoding = %s", encname);
3139 
3140  appendPQExpBufferStr(qry, "SET client_encoding = ");
3141  appendStringLiteralAH(qry, encname, AH);
3142  appendPQExpBufferStr(qry, ";\n");
3143 
3144  ArchiveEntry(AH, nilCatalogId, createDumpId(),
3145  ARCHIVE_OPTS(.tag = "ENCODING",
3146  .description = "ENCODING",
3147  .section = SECTION_PRE_DATA,
3148  .createStmt = qry->data));
3149 
3150  destroyPQExpBuffer(qry);
3151 }
3152 
3153 
3154 /*
3155  * dumpStdStrings: put the correct escape string behavior into the archive
3156  */
3157 static void
3159 {
3160  const char *stdstrings = AH->std_strings ? "on" : "off";
3162 
3163  pg_log_info("saving standard_conforming_strings = %s",
3164  stdstrings);
3165 
3166  appendPQExpBuffer(qry, "SET standard_conforming_strings = '%s';\n",
3167  stdstrings);
3168 
3169  ArchiveEntry(AH, nilCatalogId, createDumpId(),
3170  ARCHIVE_OPTS(.tag = "STDSTRINGS",
3171  .description = "STDSTRINGS",
3172  .section = SECTION_PRE_DATA,
3173  .createStmt = qry->data));
3174 
3175  destroyPQExpBuffer(qry);
3176 }
3177 
3178 /*
3179  * dumpSearchPath: record the active search_path in the archive
3180  */
3181 static void
3183 {
3185  PQExpBuffer path = createPQExpBuffer();
3186  PGresult *res;
3187  char **schemanames = NULL;
3188  int nschemanames = 0;
3189  int i;
3190 
3191  /*
3192  * We use the result of current_schemas(), not the search_path GUC,
3193  * because that might contain wildcards such as "$user", which won't
3194  * necessarily have the same value during restore. Also, this way avoids
3195  * listing schemas that may appear in search_path but not actually exist,
3196  * which seems like a prudent exclusion.
3197  */
3198  res = ExecuteSqlQueryForSingleRow(AH,
3199  "SELECT pg_catalog.current_schemas(false)");
3200 
3201  if (!parsePGArray(PQgetvalue(res, 0, 0), &schemanames, &nschemanames))
3202  fatal("could not parse result of current_schemas()");
3203 
3204  /*
3205  * We use set_config(), not a simple "SET search_path" command, because
3206  * the latter has less-clean behavior if the search path is empty. While
3207  * that's likely to get fixed at some point, it seems like a good idea to
3208  * be as backwards-compatible as possible in what we put into archives.
3209  */
3210  for (i = 0; i < nschemanames; i++)
3211  {
3212  if (i > 0)
3213  appendPQExpBufferStr(path, ", ");
3214  appendPQExpBufferStr(path, fmtId(schemanames[i]));
3215  }
3216 
3217  appendPQExpBufferStr(qry, "SELECT pg_catalog.set_config('search_path', ");
3218  appendStringLiteralAH(qry, path->data, AH);
3219  appendPQExpBufferStr(qry, ", false);\n");
3220 
3221  pg_log_info("saving search_path = %s", path->data);
3222 
3223  ArchiveEntry(AH, nilCatalogId, createDumpId(),
3224  ARCHIVE_OPTS(.tag = "SEARCHPATH",
3225  .description = "SEARCHPATH",
3226  .section = SECTION_PRE_DATA,
3227  .createStmt = qry->data));
3228 
3229  /* Also save it in AH->searchpath, in case we're doing plain text dump */
3230  AH->searchpath = pg_strdup(qry->data);
3231 
3232  if (schemanames)
3233  free(schemanames);
3234  PQclear(res);
3235  destroyPQExpBuffer(qry);
3236  destroyPQExpBuffer(path);
3237 }
3238 
3239 
3240 /*
3241  * getBlobs:
3242  * Collect schema-level data about large objects
3243  */
3244 static void
3246 {
3247  DumpOptions *dopt = fout->dopt;
3248  PQExpBuffer blobQry = createPQExpBuffer();
3249  BlobInfo *binfo;
3250  DumpableObject *bdata;
3251  PGresult *res;
3252  int ntups;
3253  int i;
3254  int i_oid;
3255  int i_lomowner;
3256  int i_lomacl;
3257  int i_rlomacl;
3258  int i_initlomacl;
3259  int i_initrlomacl;
3260 
3261  pg_log_info("reading large objects");
3262 
3263  /* Fetch BLOB OIDs, and owner/ACL data if >= 9.0 */
3264  if (fout->remoteVersion >= 90600)
3265  {
3266  PQExpBuffer acl_subquery = createPQExpBuffer();
3267  PQExpBuffer racl_subquery = createPQExpBuffer();
3268  PQExpBuffer init_acl_subquery = createPQExpBuffer();
3269  PQExpBuffer init_racl_subquery = createPQExpBuffer();
3270 
3271  buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
3272  init_racl_subquery, "l.lomacl", "l.lomowner", "'L'",
3273  dopt->binary_upgrade);
3274 
3275  appendPQExpBuffer(blobQry,
3276  "SELECT l.oid, (%s l.lomowner) AS rolname, "
3277  "%s AS lomacl, "
3278  "%s AS rlomacl, "
3279  "%s AS initlomacl, "
3280  "%s AS initrlomacl "
3281  "FROM pg_largeobject_metadata l "
3282  "LEFT JOIN pg_init_privs pip ON "
3283  "(l.oid = pip.objoid "
3284  "AND pip.classoid = 'pg_largeobject'::regclass "
3285  "AND pip.objsubid = 0) ",
3287  acl_subquery->data,
3288  racl_subquery->data,
3289  init_acl_subquery->data,
3290  init_racl_subquery->data);
3291 
3292  destroyPQExpBuffer(acl_subquery);
3293  destroyPQExpBuffer(racl_subquery);
3294  destroyPQExpBuffer(init_acl_subquery);
3295  destroyPQExpBuffer(init_racl_subquery);
3296  }
3297  else if (fout->remoteVersion >= 90000)
3298  appendPQExpBuffer(blobQry,
3299  "SELECT oid, (%s lomowner) AS rolname, lomacl, "
3300  "NULL AS rlomacl, NULL AS initlomacl, "
3301  "NULL AS initrlomacl "
3302  " FROM pg_largeobject_metadata",
3304  else
3305  appendPQExpBufferStr(blobQry,
3306  "SELECT DISTINCT loid AS oid, "
3307  "NULL::name AS rolname, NULL::oid AS lomacl, "
3308  "NULL::oid AS rlomacl, NULL::oid AS initlomacl, "
3309  "NULL::oid AS initrlomacl "
3310  " FROM pg_largeobject");
3311 
3312  res = ExecuteSqlQuery(fout, blobQry->data, PGRES_TUPLES_OK);
3313 
3314  i_oid = PQfnumber(res, "oid");
3315  i_lomowner = PQfnumber(res, "rolname");
3316  i_lomacl = PQfnumber(res, "lomacl");
3317  i_rlomacl = PQfnumber(res, "rlomacl");
3318  i_initlomacl = PQfnumber(res, "initlomacl");
3319  i_initrlomacl = PQfnumber(res, "initrlomacl");
3320 
3321  ntups = PQntuples(res);
3322 
3323  /*
3324  * Each large object has its own BLOB archive entry.
3325  */
3326  binfo = (BlobInfo *) pg_malloc(ntups * sizeof(BlobInfo));
3327 
3328  for (i = 0; i < ntups; i++)
3329  {
3330  binfo[i].dobj.objType = DO_BLOB;
3331  binfo[i].dobj.catId.tableoid = LargeObjectRelationId;
3332  binfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
3333  AssignDumpId(&binfo[i].dobj);
3334 
3335  binfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oid));
3336  binfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_lomowner));
3337  binfo[i].blobacl = pg_strdup(PQgetvalue(res, i, i_lomacl));
3338  binfo[i].rblobacl = pg_strdup(PQgetvalue(res, i, i_rlomacl));
3339  binfo[i].initblobacl = pg_strdup(PQgetvalue(res, i, i_initlomacl));
3340  binfo[i].initrblobacl = pg_strdup(PQgetvalue(res, i, i_initrlomacl));
3341 
3342  if (PQgetisnull(res, i, i_lomacl) &&
3343  PQgetisnull(res, i, i_rlomacl) &&
3344  PQgetisnull(res, i, i_initlomacl) &&
3345  PQgetisnull(res, i, i_initrlomacl))
3346  binfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
3347 
3348  /*
3349  * In binary-upgrade mode for blobs, we do *not* dump out the blob
3350  * data, as it will be copied by pg_upgrade, which simply copies the
3351  * pg_largeobject table. We *do* however dump out anything but the
3352  * data, as pg_upgrade copies just pg_largeobject, but not
3353  * pg_largeobject_metadata, after the dump is restored.
3354  */
3355  if (dopt->binary_upgrade)
3356  binfo[i].dobj.dump &= ~DUMP_COMPONENT_DATA;
3357  }
3358 
3359  /*
3360  * If we have any large objects, a "BLOBS" archive entry is needed. This
3361  * is just a placeholder for sorting; it carries no data now.
3362  */
3363  if (ntups > 0)
3364  {
3365  bdata = (DumpableObject *) pg_malloc(sizeof(DumpableObject));
3366  bdata->objType = DO_BLOB_DATA;
3367  bdata->catId = nilCatalogId;
3368  AssignDumpId(bdata);
3369  bdata->name = pg_strdup("BLOBS");
3370  }
3371 
3372  PQclear(res);
3373  destroyPQExpBuffer(blobQry);
3374 }
3375 
3376 /*
3377  * dumpBlob
3378  *
3379  * dump the definition (metadata) of the given large object
3380  */
3381 static void
3382 dumpBlob(Archive *fout, BlobInfo *binfo)
3383 {
3384  PQExpBuffer cquery = createPQExpBuffer();
3385  PQExpBuffer dquery = createPQExpBuffer();
3386 
3387  appendPQExpBuffer(cquery,
3388  "SELECT pg_catalog.lo_create('%s');\n",
3389  binfo->dobj.name);
3390 
3391  appendPQExpBuffer(dquery,
3392  "SELECT pg_catalog.lo_unlink('%s');\n",
3393  binfo->dobj.name);
3394 
3395  if (binfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
3396  ArchiveEntry(fout, binfo->dobj.catId, binfo->dobj.dumpId,
3397  ARCHIVE_OPTS(.tag = binfo->dobj.name,
3398  .owner = binfo->rolname,
3399  .description = "BLOB",
3400  .section = SECTION_PRE_DATA,
3401  .createStmt = cquery->data,
3402  .dropStmt = dquery->data));
3403 
3404  /* Dump comment if any */
3405  if (binfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3406  dumpComment(fout, "LARGE OBJECT", binfo->dobj.name,
3407  NULL, binfo->rolname,
3408  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3409 
3410  /* Dump security label if any */
3411  if (binfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3412  dumpSecLabel(fout, "LARGE OBJECT", binfo->dobj.name,
3413  NULL, binfo->rolname,
3414  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3415 
3416  /* Dump ACL if any */
3417  if (binfo->blobacl && (binfo->dobj.dump & DUMP_COMPONENT_ACL))
3418  dumpACL(fout, binfo->dobj.catId, binfo->dobj.dumpId, "LARGE OBJECT",
3419  binfo->dobj.name, NULL,
3420  NULL, binfo->rolname, binfo->blobacl, binfo->rblobacl,
3421  binfo->initblobacl, binfo->initrblobacl);
3422 
3423  destroyPQExpBuffer(cquery);
3424  destroyPQExpBuffer(dquery);
3425 }
3426 
3427 /*
3428  * dumpBlobs:
3429  * dump the data contents of all large objects
3430  */
3431 static int
3432 dumpBlobs(Archive *fout, void *arg)
3433 {
3434  const char *blobQry;
3435  const char *blobFetchQry;
3436  PGconn *conn = GetConnection(fout);
3437  PGresult *res;
3438  char buf[LOBBUFSIZE];
3439  int ntups;
3440  int i;
3441  int cnt;
3442 
3443  pg_log_info("saving large objects");
3444 
3445  /*
3446  * Currently, we re-fetch all BLOB OIDs using a cursor. Consider scanning
3447  * the already-in-memory dumpable objects instead...
3448  */
3449  if (fout->remoteVersion >= 90000)
3450  blobQry =
3451  "DECLARE bloboid CURSOR FOR "
3452  "SELECT oid FROM pg_largeobject_metadata ORDER BY 1";
3453  else
3454  blobQry =
3455  "DECLARE bloboid CURSOR FOR "
3456  "SELECT DISTINCT loid FROM pg_largeobject ORDER BY 1";
3457 
3458  ExecuteSqlStatement(fout, blobQry);
3459 
3460  /* Command to fetch from cursor */
3461  blobFetchQry = "FETCH 1000 IN bloboid";
3462 
3463  do
3464  {
3465  /* Do a fetch */
3466  res = ExecuteSqlQuery(fout, blobFetchQry, PGRES_TUPLES_OK);
3467 
3468  /* Process the tuples, if any */
3469  ntups = PQntuples(res);
3470  for (i = 0; i < ntups; i++)
3471  {
3472  Oid blobOid;
3473  int loFd;
3474 
3475  blobOid = atooid(PQgetvalue(res, i, 0));
3476  /* Open the BLOB */
3477  loFd = lo_open(conn, blobOid, INV_READ);
3478  if (loFd == -1)
3479  fatal("could not open large object %u: %s",
3480  blobOid, PQerrorMessage(conn));
3481 
3482  StartBlob(fout, blobOid);
3483 
3484  /* Now read it in chunks, sending data to archive */
3485  do
3486  {
3487  cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
3488  if (cnt < 0)
3489  fatal("error reading large object %u: %s",
3490  blobOid, PQerrorMessage(conn));
3491 
3492  WriteData(fout, buf, cnt);
3493  } while (cnt > 0);
3494 
3495  lo_close(conn, loFd);
3496 
3497  EndBlob(fout, blobOid);
3498  }
3499 
3500  PQclear(res);
3501  } while (ntups > 0);
3502 
3503  return 1;
3504 }
3505 
3506 /*
3507  * getPolicies
3508  * get information about policies on a dumpable table.
3509  */
3510 void
3511 getPolicies(Archive *fout, TableInfo tblinfo[], int numTables)
3512 {
3513  PQExpBuffer query;
3514  PGresult *res;
3515  PolicyInfo *polinfo;
3516  int i_oid;
3517  int i_tableoid;
3518  int i_polname;
3519  int i_polcmd;
3520  int i_polpermissive;
3521  int i_polroles;
3522  int i_polqual;
3523  int i_polwithcheck;
3524  int i,
3525  j,
3526  ntups;
3527 
3528  if (fout->remoteVersion < 90500)
3529  return;
3530 
3531  query = createPQExpBuffer();
3532 
3533  for (i = 0; i < numTables; i++)
3534  {
3535  TableInfo *tbinfo = &tblinfo[i];
3536 
3537  /* Ignore row security on tables not to be dumped */
3538  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_POLICY))
3539  continue;
3540 
3541  pg_log_info("reading row security enabled for table \"%s.%s\"",
3542  tbinfo->dobj.namespace->dobj.name,
3543  tbinfo->dobj.name);
3544 
3545  /*
3546  * Get row security enabled information for the table. We represent
3547  * RLS being enabled on a table by creating a PolicyInfo object with
3548  * null polname.
3549  */
3550  if (tbinfo->rowsec)
3551  {
3552  /*
3553  * Note: use tableoid 0 so that this object won't be mistaken for
3554  * something that pg_depend entries apply to.
3555  */
3556  polinfo = pg_malloc(sizeof(PolicyInfo));
3557  polinfo->dobj.objType = DO_POLICY;
3558  polinfo->dobj.catId.tableoid = 0;
3559  polinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
3560  AssignDumpId(&polinfo->dobj);
3561  polinfo->dobj.namespace = tbinfo->dobj.namespace;
3562  polinfo->dobj.name = pg_strdup(tbinfo->dobj.name);
3563  polinfo->poltable = tbinfo;
3564  polinfo->polname = NULL;
3565  polinfo->polcmd = '\0';
3566  polinfo->polpermissive = 0;
3567  polinfo->polroles = NULL;
3568  polinfo->polqual = NULL;
3569  polinfo->polwithcheck = NULL;
3570  }
3571 
3572  pg_log_info("reading policies for table \"%s.%s\"",
3573  tbinfo->dobj.namespace->dobj.name,
3574  tbinfo->dobj.name);
3575 
3576  resetPQExpBuffer(query);
3577 
3578  /* Get the policies for the table. */
3579  if (fout->remoteVersion >= 100000)
3580  appendPQExpBuffer(query,
3581  "SELECT oid, tableoid, pol.polname, pol.polcmd, pol.polpermissive, "
3582  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
3583  " pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
3584  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
3585  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
3586  "FROM pg_catalog.pg_policy pol "
3587  "WHERE polrelid = '%u'",
3588  tbinfo->dobj.catId.oid);
3589  else
3590  appendPQExpBuffer(query,
3591  "SELECT oid, tableoid, pol.polname, pol.polcmd, 't' as polpermissive, "
3592  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
3593  " pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
3594  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
3595  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
3596  "FROM pg_catalog.pg_policy pol "
3597  "WHERE polrelid = '%u'",
3598  tbinfo->dobj.catId.oid);
3599  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3600 
3601  ntups = PQntuples(res);
3602 
3603  if (ntups == 0)
3604  {
3605  /*
3606  * No explicit policies to handle (only the default-deny policy,
3607  * which is handled as part of the table definition). Clean up
3608  * and return.
3609  */
3610  PQclear(res);
3611  continue;
3612  }
3613 
3614  i_oid = PQfnumber(res, "oid");
3615  i_tableoid = PQfnumber(res, "tableoid");
3616  i_polname = PQfnumber(res, "polname");
3617  i_polcmd = PQfnumber(res, "polcmd");
3618  i_polpermissive = PQfnumber(res, "polpermissive");
3619  i_polroles = PQfnumber(res, "polroles");
3620  i_polqual = PQfnumber(res, "polqual");
3621  i_polwithcheck = PQfnumber(res, "polwithcheck");
3622 
3623  polinfo = pg_malloc(ntups * sizeof(PolicyInfo));
3624 
3625  for (j = 0; j < ntups; j++)
3626  {
3627  polinfo[j].dobj.objType = DO_POLICY;
3628  polinfo[j].dobj.catId.tableoid =
3629  atooid(PQgetvalue(res, j, i_tableoid));
3630  polinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
3631  AssignDumpId(&polinfo[j].dobj);
3632  polinfo[j].dobj.namespace = tbinfo->dobj.namespace;
3633  polinfo[j].poltable = tbinfo;
3634  polinfo[j].polname = pg_strdup(PQgetvalue(res, j, i_polname));
3635  polinfo[j].dobj.name = pg_strdup(polinfo[j].polname);
3636 
3637  polinfo[j].polcmd = *(PQgetvalue(res, j, i_polcmd));
3638  polinfo[j].polpermissive = *(PQgetvalue(res, j, i_polpermissive)) == 't';
3639 
3640  if (PQgetisnull(res, j, i_polroles))
3641  polinfo[j].polroles = NULL;
3642  else
3643  polinfo[j].polroles = pg_strdup(PQgetvalue(res, j, i_polroles));
3644 
3645  if (PQgetisnull(res, j, i_polqual))
3646  polinfo[j].polqual = NULL;
3647  else
3648  polinfo[j].polqual = pg_strdup(PQgetvalue(res, j, i_polqual));
3649 
3650  if (PQgetisnull(res, j, i_polwithcheck))
3651  polinfo[j].polwithcheck = NULL;
3652  else
3653  polinfo[j].polwithcheck
3654  = pg_strdup(PQgetvalue(res, j, i_polwithcheck));
3655  }
3656  PQclear(res);
3657  }
3658  destroyPQExpBuffer(query);
3659 }
3660 
3661 /*
3662  * dumpPolicy
3663  * dump the definition of the given policy
3664  */
3665 static void
3667 {
3668  DumpOptions *dopt = fout->dopt;
3669  TableInfo *tbinfo = polinfo->poltable;
3670  PQExpBuffer query;
3671  PQExpBuffer delqry;
3672  const char *cmd;
3673  char *tag;
3674 
3675  if (dopt->dataOnly)
3676  return;
3677 
3678  /*
3679  * If polname is NULL, then this record is just indicating that ROW LEVEL
3680  * SECURITY is enabled for the table. Dump as ALTER TABLE <table> ENABLE
3681  * ROW LEVEL SECURITY.
3682  */
3683  if (polinfo->polname == NULL)
3684  {
3685  query = createPQExpBuffer();
3686 
3687  appendPQExpBuffer(query, "ALTER TABLE %s ENABLE ROW LEVEL SECURITY;",
3688  fmtQualifiedDumpable(tbinfo));
3689 
3690  /*
3691  * We must emit the ROW SECURITY object's dependency on its table
3692  * explicitly, because it will not match anything in pg_depend (unlike
3693  * the case for other PolicyInfo objects).
3694  */
3695  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3696  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3697  ARCHIVE_OPTS(.tag = polinfo->dobj.name,
3698  .namespace = polinfo->dobj.namespace->dobj.name,
3699  .owner = tbinfo->rolname,
3700  .description = "ROW SECURITY",
3701  .section = SECTION_POST_DATA,
3702  .createStmt = query->data,
3703  .deps = &(tbinfo->dobj.dumpId),
3704  .nDeps = 1));
3705 
3706  destroyPQExpBuffer(query);
3707  return;
3708  }
3709 
3710  if (polinfo->polcmd == '*')
3711  cmd = "";
3712  else if (polinfo->polcmd == 'r')
3713  cmd = " FOR SELECT";
3714  else if (polinfo->polcmd == 'a')
3715  cmd = " FOR INSERT";
3716  else if (polinfo->polcmd == 'w')
3717  cmd = " FOR UPDATE";
3718  else if (polinfo->polcmd == 'd')
3719  cmd = " FOR DELETE";
3720  else
3721  {
3722  pg_log_error("unexpected policy command type: %c",
3723  polinfo->polcmd);
3724  exit_nicely(1);
3725  }
3726 
3727  query = createPQExpBuffer();
3728  delqry = createPQExpBuffer();
3729 
3730  appendPQExpBuffer(query, "CREATE POLICY %s", fmtId(polinfo->polname));
3731 
3732  appendPQExpBuffer(query, " ON %s%s%s", fmtQualifiedDumpable(tbinfo),
3733  !polinfo->polpermissive ? " AS RESTRICTIVE" : "", cmd);
3734 
3735  if (polinfo->polroles != NULL)
3736  appendPQExpBuffer(query, " TO %s", polinfo->polroles);
3737 
3738  if (polinfo->polqual != NULL)
3739  appendPQExpBuffer(query, " USING (%s)", polinfo->polqual);
3740 
3741  if (polinfo->polwithcheck != NULL)
3742  appendPQExpBuffer(query, " WITH CHECK (%s)", polinfo->polwithcheck);
3743 
3744  appendPQExpBufferStr(query, ";\n");
3745 
3746  appendPQExpBuffer(delqry, "DROP POLICY %s", fmtId(polinfo->polname));
3747  appendPQExpBuffer(delqry, " ON %s;\n", fmtQualifiedDumpable(tbinfo));
3748 
3749  tag = psprintf("%s %s", tbinfo->dobj.name, polinfo->dobj.name);
3750 
3751  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3752  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3753  ARCHIVE_OPTS(.tag = tag,
3754  .namespace = polinfo->dobj.namespace->dobj.name,
3755  .owner = tbinfo->rolname,
3756  .description = "POLICY",
3757  .section = SECTION_POST_DATA,
3758  .createStmt = query->data,
3759  .dropStmt = delqry->data));
3760 
3761  free(tag);
3762  destroyPQExpBuffer(query);
3763  destroyPQExpBuffer(delqry);
3764 }
3765 
3766 /*
3767  * getPublications
3768  * get information about publications
3769  */
3770 void
3772 {
3773  DumpOptions *dopt = fout->dopt;
3774  PQExpBuffer query;
3775  PGresult *res;
3776  PublicationInfo *pubinfo;
3777  int i_tableoid;
3778  int i_oid;
3779  int i_pubname;
3780  int i_rolname;
3781  int i_puballtables;
3782  int i_pubinsert;
3783  int i_pubupdate;
3784  int i_pubdelete;
3785  int i_pubtruncate;
3786  int i,
3787  ntups;
3788 
3789  if (dopt->no_publications || fout->remoteVersion < 100000)
3790  return;
3791 
3792  query = createPQExpBuffer();
3793 
3794  resetPQExpBuffer(query);
3795 
3796  /* Get the publications. */
3797  if (fout->remoteVersion >= 110000)
3798  appendPQExpBuffer(query,
3799  "SELECT p.tableoid, p.oid, p.pubname, "
3800  "(%s p.pubowner) AS rolname, "
3801  "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, p.pubtruncate "
3802  "FROM pg_publication p",
3804  else
3805  appendPQExpBuffer(query,
3806  "SELECT p.tableoid, p.oid, p.pubname, "
3807  "(%s p.pubowner) AS rolname, "
3808  "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, false AS pubtruncate "
3809  "FROM pg_publication p",
3811 
3812  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3813 
3814  ntups = PQntuples(res);
3815 
3816  i_tableoid = PQfnumber(res, "tableoid");
3817  i_oid = PQfnumber(res, "oid");
3818  i_pubname = PQfnumber(res, "pubname");
3819  i_rolname = PQfnumber(res, "rolname");
3820  i_puballtables = PQfnumber(res, "puballtables");
3821  i_pubinsert = PQfnumber(res, "pubinsert");
3822  i_pubupdate = PQfnumber(res, "pubupdate");
3823  i_pubdelete = PQfnumber(res, "pubdelete");
3824  i_pubtruncate = PQfnumber(res, "pubtruncate");
3825 
3826  pubinfo = pg_malloc(ntups * sizeof(PublicationInfo));
3827 
3828  for (i = 0; i < ntups; i++)
3829  {
3830  pubinfo[i].dobj.objType = DO_PUBLICATION;
3831  pubinfo[i].dobj.catId.tableoid =
3832  atooid(PQgetvalue(res, i, i_tableoid));
3833  pubinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
3834  AssignDumpId(&pubinfo[i].dobj);
3835  pubinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_pubname));
3836  pubinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
3837  pubinfo[i].puballtables =
3838  (strcmp(PQgetvalue(res, i, i_puballtables), "t") == 0);
3839  pubinfo[i].pubinsert =
3840  (strcmp(PQgetvalue(res, i, i_pubinsert), "t") == 0);
3841  pubinfo[i].pubupdate =
3842  (strcmp(PQgetvalue(res, i, i_pubupdate), "t") == 0);
3843  pubinfo[i].pubdelete =
3844  (strcmp(PQgetvalue(res, i, i_pubdelete), "t") == 0);
3845  pubinfo[i].pubtruncate =
3846  (strcmp(PQgetvalue(res, i, i_pubtruncate), "t") == 0);
3847 
3848  if (strlen(pubinfo[i].rolname) == 0)
3849  pg_log_warning("owner of publication \"%s\" appears to be invalid",
3850  pubinfo[i].dobj.name);
3851 
3852  /* Decide whether we want to dump it */
3853  selectDumpableObject(&(pubinfo[i].dobj), fout);
3854  }
3855  PQclear(res);
3856 
3857  destroyPQExpBuffer(query);
3858 }
3859 
3860 /*
3861  * dumpPublication
3862  * dump the definition of the given publication
3863  */
3864 static void
3866 {
3867  PQExpBuffer delq;
3868  PQExpBuffer query;
3869  char *qpubname;
3870  bool first = true;
3871 
3872  if (!(pubinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3873  return;
3874 
3875  delq = createPQExpBuffer();
3876  query = createPQExpBuffer();
3877 
3878  qpubname = pg_strdup(fmtId(pubinfo->dobj.name));
3879 
3880  appendPQExpBuffer(delq, "DROP PUBLICATION %s;\n",
3881  qpubname);
3882 
3883  appendPQExpBuffer(query, "CREATE PUBLICATION %s",
3884  qpubname);
3885 
3886  if (pubinfo->puballtables)
3887  appendPQExpBufferStr(query, " FOR ALL TABLES");
3888 
3889  appendPQExpBufferStr(query, " WITH (publish = '");
3890  if (pubinfo->pubinsert)
3891  {
3892  appendPQExpBufferStr(query, "insert");
3893  first = false;
3894  }
3895 
3896  if (pubinfo->pubupdate)
3897  {
3898  if (!first)
3899  appendPQExpBufferStr(query, ", ");
3900 
3901  appendPQExpBufferStr(query, "update");
3902  first = false;
3903  }
3904 
3905  if (pubinfo->pubdelete)
3906  {
3907  if (!first)
3908  appendPQExpBufferStr(query, ", ");
3909 
3910  appendPQExpBufferStr(query, "delete");
3911  first = false;
3912  }
3913 
3914  if (pubinfo->pubtruncate)
3915  {
3916  if (!first)
3917  appendPQExpBufferStr(query, ", ");
3918 
3919  appendPQExpBufferStr(query, "truncate");
3920  first = false;
3921  }
3922 
3923  appendPQExpBufferStr(query, "');\n");
3924 
3925  ArchiveEntry(fout, pubinfo->dobj.catId, pubinfo->dobj.dumpId,
3926  ARCHIVE_OPTS(.tag = pubinfo->dobj.name,
3927  .owner = pubinfo->rolname,
3928  .description = "PUBLICATION",
3929  .section = SECTION_POST_DATA,
3930  .createStmt = query->data,
3931  .dropStmt = delq->data));
3932 
3933  if (pubinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3934  dumpComment(fout, "PUBLICATION", qpubname,
3935  NULL, pubinfo->rolname,
3936  pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
3937 
3938  if (pubinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3939  dumpSecLabel(fout, "PUBLICATION", qpubname,
3940  NULL, pubinfo->rolname,
3941  pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
3942 
3943  destroyPQExpBuffer(delq);
3944  destroyPQExpBuffer(query);
3945  free(qpubname);
3946 }
3947 
3948 /*
3949  * getPublicationTables
3950  * get information about publication membership for dumpable tables.
3951  */
3952 void
3954 {
3955  PQExpBuffer query;
3956  PGresult *res;
3957  PublicationRelInfo *pubrinfo;
3958  DumpOptions *dopt = fout->dopt;
3959  int i_tableoid;
3960  int i_oid;
3961  int i_pubname;
3962  int i,
3963  j,
3964  ntups;
3965 
3966  if (dopt->no_publications || fout->remoteVersion < 100000)
3967  return;
3968 
3969  query = createPQExpBuffer();
3970 
3971  for (i = 0; i < numTables; i++)
3972  {
3973  TableInfo *tbinfo = &tblinfo[i];
3974 
3975  /* Only plain tables can be aded to publications. */
3976  if (tbinfo->relkind != RELKIND_RELATION)
3977  continue;
3978 
3979  /*
3980  * Ignore publication membership of tables whose definitions are not
3981  * to be dumped.
3982  */
3983  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3984  continue;
3985 
3986  pg_log_info("reading publication membership for table \"%s.%s\"",
3987  tbinfo->dobj.namespace->dobj.name,
3988  tbinfo->dobj.name);
3989 
3990  resetPQExpBuffer(query);
3991 
3992  /* Get the publication membership for the table. */
3993  appendPQExpBuffer(query,
3994  "SELECT pr.tableoid, pr.oid, p.pubname "
3995  "FROM pg_publication_rel pr, pg_publication p "
3996  "WHERE pr.prrelid = '%u'"
3997  " AND p.oid = pr.prpubid",
3998  tbinfo->dobj.catId.oid);
3999  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4000 
4001  ntups = PQntuples(res);
4002 
4003  if (ntups == 0)
4004  {
4005  /*
4006  * Table is not member of any publications. Clean up and return.
4007  */
4008  PQclear(res);
4009  continue;
4010  }
4011 
4012  i_tableoid = PQfnumber(res, "tableoid");
4013  i_oid = PQfnumber(res, "oid");
4014  i_pubname = PQfnumber(res, "pubname");
4015 
4016  pubrinfo = pg_malloc(ntups * sizeof(PublicationRelInfo));
4017 
4018  for (j = 0; j < ntups; j++)
4019  {
4020  pubrinfo[j].dobj.objType = DO_PUBLICATION_REL;
4021  pubrinfo[j].dobj.catId.tableoid =
4022  atooid(PQgetvalue(res, j, i_tableoid));
4023  pubrinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
4024  AssignDumpId(&pubrinfo[j].dobj);
4025  pubrinfo[j].dobj.namespace = tbinfo->dobj.namespace;
4026  pubrinfo[j].dobj.name = tbinfo->dobj.name;
4027  pubrinfo[j].pubname = pg_strdup(PQgetvalue(res, j, i_pubname));
4028  pubrinfo[j].pubtable = tbinfo;
4029 
4030  /* Decide whether we want to dump it */
4031  selectDumpablePublicationTable(&(pubrinfo[j].dobj), fout);
4032  }
4033  PQclear(res);
4034  }
4035  destroyPQExpBuffer(query);
4036 }
4037 
4038 /*
4039  * dumpPublicationTable
4040  * dump the definition of the given publication table mapping
4041  */
4042 static void
4044 {
4045  TableInfo *tbinfo = pubrinfo->pubtable;
4046  PQExpBuffer query;
4047  char *tag;
4048 
4049  if (!(pubrinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
4050  return;
4051 
4052  tag = psprintf("%s %s", pubrinfo->pubname, tbinfo->dobj.name);
4053 
4054  query = createPQExpBuffer();
4055 
4056  appendPQExpBuffer(query, "ALTER PUBLICATION %s ADD TABLE ONLY",
4057  fmtId(pubrinfo->pubname));
4058  appendPQExpBuffer(query, " %s;\n",
4059  fmtQualifiedDumpable(tbinfo));
4060 
4061  /*
4062  * There is no point in creating drop query as the drop is done by table
4063  * drop.
4064  */
4065  ArchiveEntry(fout, pubrinfo->dobj.catId, pubrinfo->dobj.dumpId,
4066  ARCHIVE_OPTS(.tag = tag,
4067  .namespace = tbinfo->dobj.namespace->dobj.name,
4068  .description = "PUBLICATION TABLE",
4069  .section = SECTION_POST_DATA,
4070  .createStmt = query->data));
4071 
4072  free(tag);
4073  destroyPQExpBuffer(query);
4074 }
4075 
4076 /*
4077  * Is the currently connected user a superuser?
4078  */
4079 static bool
4081 {
4082  ArchiveHandle *AH = (ArchiveHandle *) fout;
4083  const char *val;
4084 
4085  val = PQparameterStatus(AH->connection, "is_superuser");
4086 
4087  if (val && strcmp(val, "on") == 0)
4088  return true;
4089 
4090  return false;
4091 }
4092 
4093 /*
4094  * getSubscriptions
4095  * get information about subscriptions
4096  */
4097 void
4099 {
4100  DumpOptions *dopt = fout->dopt;
4101  PQExpBuffer query;
4102  PGresult *res;
4103  SubscriptionInfo *subinfo;
4104  int i_tableoid;
4105  int i_oid;
4106  int i_subname;
4107  int i_rolname;
4108  int i_subconninfo;
4109  int i_subslotname;
4110  int i_subsynccommit;
4111  int i_subpublications;
4112  int i,
4113  ntups;
4114 
4115  if (dopt->no_subscriptions || fout->remoteVersion < 100000)
4116  return;
4117 
4118  if (!is_superuser(fout))
4119  {
4120  int n;
4121 
4122  res = ExecuteSqlQuery(fout,
4123  "SELECT count(*) FROM pg_subscription "
4124  "WHERE subdbid = (SELECT oid FROM pg_database"
4125  " WHERE datname = current_database())",
4126  PGRES_TUPLES_OK);
4127  n = atoi(PQgetvalue(res, 0, 0));
4128  if (n > 0)
4129  pg_log_warning("subscriptions not dumped because current user is not a superuser");
4130  PQclear(res);
4131  return;
4132  }
4133 
4134  query = createPQExpBuffer();
4135 
4136  resetPQExpBuffer(query);
4137 
4138  /* Get the subscriptions in current database. */
4139  appendPQExpBuffer(query,
4140  "SELECT s.tableoid, s.oid, s.subname,"
4141  "(%s s.subowner) AS rolname, "
4142  " s.subconninfo, s.subslotname, s.subsynccommit, "
4143  " s.subpublications "
4144  "FROM pg_subscription s "
4145  "WHERE s.subdbid = (SELECT oid FROM pg_database"
4146  " WHERE datname = current_database())",
4148  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4149 
4150  ntups = PQntuples(res);
4151 
4152  i_tableoid = PQfnumber(res, "tableoid");
4153  i_oid = PQfnumber(res, "oid");
4154  i_subname = PQfnumber(res, "subname");
4155  i_rolname = PQfnumber(res, "rolname");
4156  i_subconninfo = PQfnumber(res, "subconninfo");
4157  i_subslotname = PQfnumber(res, "subslotname");
4158  i_subsynccommit = PQfnumber(res, "subsynccommit");
4159  i_subpublications = PQfnumber(res, "subpublications");
4160 
4161  subinfo = pg_malloc(ntups * sizeof(SubscriptionInfo));
4162 
4163  for (i = 0; i < ntups; i++)
4164  {
4165  subinfo[i].dobj.objType = DO_SUBSCRIPTION;
4166  subinfo[i].dobj.catId.tableoid =
4167  atooid(PQgetvalue(res, i, i_tableoid));
4168  subinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4169  AssignDumpId(&subinfo[i].dobj);
4170  subinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_subname));
4171  subinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4172  subinfo[i].subconninfo = pg_strdup(PQgetvalue(res, i, i_subconninfo));
4173  if (PQgetisnull(res, i, i_subslotname))
4174  subinfo[i].subslotname = NULL;
4175  else
4176  subinfo[i].subslotname = pg_strdup(PQgetvalue(res, i, i_subslotname));
4177  subinfo[i].subsynccommit =
4178  pg_strdup(PQgetvalue(res, i, i_subsynccommit));
4179  subinfo[i].subpublications =
4180  pg_strdup(PQgetvalue(res, i, i_subpublications));
4181 
4182  if (strlen(subinfo[i].rolname) == 0)
4183  pg_log_warning("owner of subscription \"%s\" appears to be invalid",
4184  subinfo[i].dobj.name);
4185 
4186  /* Decide whether we want to dump it */
4187  selectDumpableObject(&(subinfo[i].dobj), fout);
4188  }
4189  PQclear(res);
4190 
4191  destroyPQExpBuffer(query);
4192 }
4193 
4194 /*
4195  * dumpSubscription
4196  * dump the definition of the given subscription
4197  */
4198 static void
4200 {
4201  PQExpBuffer delq;
4202  PQExpBuffer query;
4203  PQExpBuffer publications;
4204  char *qsubname;
4205  char **pubnames = NULL;
4206  int npubnames = 0;
4207  int i;
4208 
4209  if (!(subinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
4210  return;
4211 
4212  delq = createPQExpBuffer();
4213  query = createPQExpBuffer();
4214 
4215  qsubname = pg_strdup(fmtId(subinfo->dobj.name));
4216 
4217  appendPQExpBuffer(delq, "DROP SUBSCRIPTION %s;\n",
4218  qsubname);
4219 
4220  appendPQExpBuffer(query, "CREATE SUBSCRIPTION %s CONNECTION ",
4221  qsubname);
4222  appendStringLiteralAH(query, subinfo->subconninfo, fout);
4223 
4224  /* Build list of quoted publications and append them to query. */
4225  if (!parsePGArray(subinfo->subpublications, &pubnames, &npubnames))
4226  {
4227  pg_log_warning("could not parse subpublications array");
4228  if (pubnames)
4229  free(pubnames);
4230  pubnames = NULL;
4231  npubnames = 0;
4232  }
4233 
4234  publications = createPQExpBuffer();
4235  for (i = 0; i < npubnames; i++)
4236  {
4237  if (i > 0)
4238  appendPQExpBufferStr(publications, ", ");
4239 
4240  appendPQExpBufferStr(publications, fmtId(pubnames[i]));
4241  }
4242 
4243  appendPQExpBuffer(query, " PUBLICATION %s WITH (connect = false, slot_name = ", publications->data);
4244  if (subinfo->subslotname)
4245  appendStringLiteralAH(query, subinfo->subslotname, fout);
4246  else
4247  appendPQExpBufferStr(query, "NONE");
4248 
4249  if (strcmp(subinfo->subsynccommit, "off") != 0)
4250  appendPQExpBuffer(query, ", synchronous_commit = %s", fmtId(subinfo->subsynccommit));
4251 
4252  appendPQExpBufferStr(query, ");\n");
4253 
4254  ArchiveEntry(fout, subinfo->dobj.catId, subinfo->dobj.dumpId,
4255  ARCHIVE_OPTS(.tag = subinfo->dobj.name,
4256  .owner = subinfo->rolname,
4257  .description = "SUBSCRIPTION",
4258  .section = SECTION_POST_DATA,
4259  .createStmt = query->data,
4260  .dropStmt = delq->data));
4261 
4262  if (subinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
4263  dumpComment(fout, "SUBSCRIPTION", qsubname,
4264  NULL, subinfo->rolname,
4265  subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
4266 
4267  if (subinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
4268  dumpSecLabel(fout, "SUBSCRIPTION", qsubname,
4269  NULL, subinfo->rolname,
4270  subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
4271 
4272  destroyPQExpBuffer(publications);
4273  if (pubnames)
4274  free(pubnames);
4275 
4276  destroyPQExpBuffer(delq);
4277  destroyPQExpBuffer(query);
4278  free(qsubname);
4279 }
4280 
4281 static void
4283  PQExpBuffer upgrade_buffer,
4284  Oid pg_type_oid,
4285  bool force_array_type)
4286 {
4287  PQExpBuffer upgrade_query = createPQExpBuffer();
4288  PGresult *res;
4289  Oid pg_type_array_oid;
4290 
4291  appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type oid\n");
4292  appendPQExpBuffer(upgrade_buffer,
4293  "SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4294  pg_type_oid);
4295 
4296  /* we only support old >= 8.3 for binary upgrades */
4297  appendPQExpBuffer(upgrade_query,
4298  "SELECT typarray "
4299  "FROM pg_catalog.pg_type "
4300  "WHERE oid = '%u'::pg_catalog.oid;",
4301  pg_type_oid);
4302 
4303  res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4304 
4305  pg_type_array_oid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typarray")));
4306 
4307  PQclear(res);
4308 
4309  if (!OidIsValid(pg_type_array_oid) && force_array_type)
4310  {
4311  /*
4312  * If the old version didn't assign an array type, but the new version
4313  * does, we must select an unused type OID to assign. This currently
4314  * only happens for domains, when upgrading pre-v11 to v11 and up.
4315  *
4316  * Note: local state here is kind of ugly, but we must have some,
4317  * since we mustn't choose the same unused OID more than once.
4318  */
4319  static Oid next_possible_free_oid = FirstNormalObjectId;
4320  bool is_dup;
4321 
4322  do
4323  {
4324  ++next_possible_free_oid;
4325  printfPQExpBuffer(upgrade_query,
4326  "SELECT EXISTS(SELECT 1 "
4327  "FROM pg_catalog.pg_type "
4328  "WHERE oid = '%u'::pg_catalog.oid);",
4329  next_possible_free_oid);
4330  res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4331  is_dup = (PQgetvalue(res, 0, 0)[0] == 't');
4332  PQclear(res);
4333  } while (is_dup);
4334 
4335  pg_type_array_oid = next_possible_free_oid;
4336  }
4337 
4338  if (OidIsValid(pg_type_array_oid))
4339  {
4340  appendPQExpBufferStr(upgrade_buffer,
4341  "\n-- For binary upgrade, must preserve pg_type array oid\n");
4342  appendPQExpBuffer(upgrade_buffer,
4343  "SELECT pg_catalog.binary_upgrade_set_next_array_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4344  pg_type_array_oid);
4345  }
4346 
4347  destroyPQExpBuffer(upgrade_query);
4348 }
4349 
4350 static bool
4352  PQExpBuffer upgrade_buffer,
4353  Oid pg_rel_oid)
4354 {
4355  PQExpBuffer upgrade_query = createPQExpBuffer();
4356  PGresult *upgrade_res;
4357  Oid pg_type_oid;
4358  bool toast_set = false;
4359 
4360  /*
4361  * We only support old >= 8.3 for binary upgrades.
4362  *
4363  * We purposefully ignore toast OIDs for partitioned tables; the reason is
4364  * that versions 10 and 11 have them, but 12 does not, so emitting them
4365  * causes the upgrade to fail.
4366  */
4367  appendPQExpBuffer(upgrade_query,
4368  "SELECT c.reltype AS crel, t.reltype AS trel "
4369  "FROM pg_catalog.pg_class c "
4370  "LEFT JOIN pg_catalog.pg_class t ON "
4371  " (c.reltoastrelid = t.oid AND c.relkind <> '%c') "
4372  "WHERE c.oid = '%u'::pg_catalog.oid;",
4373  RELKIND_PARTITIONED_TABLE, pg_rel_oid);
4374 
4375  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4376 
4377  pg_type_oid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "crel")));
4378 
4379  binary_upgrade_set_type_oids_by_type_oid(fout, upgrade_buffer,
4380  pg_type_oid, false);
4381 
4382  if (!PQgetisnull(upgrade_res, 0, PQfnumber(upgrade_res, "trel")))
4383  {
4384  /* Toast tables do not have pg_type array rows */
4385  Oid pg_type_toast_oid = atooid(PQgetvalue(upgrade_res, 0,
4386  PQfnumber(upgrade_res, "trel")));
4387 
4388  appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type toast oid\n");
4389  appendPQExpBuffer(upgrade_buffer,
4390  "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4391  pg_type_toast_oid);
4392 
4393  toast_set = true;
4394  }
4395 
4396  PQclear(upgrade_res);
4397  destroyPQExpBuffer(upgrade_query);
4398 
4399  return toast_set;
4400 }
4401 
4402 static void
4404  PQExpBuffer upgrade_buffer, Oid pg_class_oid,
4405  bool is_index)
4406 {
4407  PQExpBuffer upgrade_query = createPQExpBuffer();
4408  PGresult *upgrade_res;
4409  Oid pg_class_reltoastrelid;
4410  Oid pg_index_indexrelid;
4411 
4412  appendPQExpBuffer(upgrade_query,
4413  "SELECT c.reltoastrelid, i.indexrelid "
4414  "FROM pg_catalog.pg_class c LEFT JOIN "
4415  "pg_catalog.pg_index i ON (c.reltoastrelid = i.indrelid AND i.indisvalid) "
4416  "WHERE c.oid = '%u'::pg_catalog.oid;",
4417  pg_class_oid);
4418 
4419  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4420 
4421  pg_class_reltoastrelid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "reltoastrelid")));
4422  pg_index_indexrelid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "indexrelid")));
4423 
4424  appendPQExpBufferStr(upgrade_buffer,
4425  "\n-- For binary upgrade, must preserve pg_class oids\n");
4426 
4427  if (!is_index)
4428  {
4429  appendPQExpBuffer(upgrade_buffer,
4430  "SELECT pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('%u'::pg_catalog.oid);\n",
4431  pg_class_oid);
4432  /* only tables have toast tables, not indexes */
4433  if (OidIsValid(pg_class_reltoastrelid))
4434  {
4435  /*
4436  * One complexity is that the table definition might not require
4437  * the creation of a TOAST table, and the TOAST table might have
4438  * been created long after table creation, when the table was
4439  * loaded with wide data. By setting the TOAST oid we force
4440  * creation of the TOAST heap and TOAST index by the backend so we
4441  * can cleanly copy the files during binary upgrade.
4442  */
4443 
4444  appendPQExpBuffer(upgrade_buffer,
4445  "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('%u'::pg_catalog.oid);\n",
4446  pg_class_reltoastrelid);
4447 
4448  /* every toast table has an index */
4449  appendPQExpBuffer(upgrade_buffer,
4450  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
4451  pg_index_indexrelid);
4452  }
4453  }
4454  else
4455  appendPQExpBuffer(upgrade_buffer,
4456  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
4457  pg_class_oid);
4458 
4459  appendPQExpBufferChar(upgrade_buffer, '\n');
4460 
4461  PQclear(upgrade_res);
4462  destroyPQExpBuffer(upgrade_query);
4463 }
4464 
4465 /*
4466  * If the DumpableObject is a member of an extension, add a suitable
4467  * ALTER EXTENSION ADD command to the creation commands in upgrade_buffer.
4468  *
4469  * For somewhat historical reasons, objname should already be quoted,
4470  * but not objnamespace (if any).
4471  */
4472 static void
4474  DumpableObject *dobj,
4475  const char *objtype,
4476  const char *objname,
4477  const char *objnamespace)
4478 {
4479  DumpableObject *extobj = NULL;
4480  int i;
4481 
4482  if (!dobj->ext_member)
4483  return;
4484 
4485  /*
4486  * Find the parent extension. We could avoid this search if we wanted to
4487  * add a link field to DumpableObject, but the space costs of that would
4488  * be considerable. We assume that member objects could only have a
4489  * direct dependency on their own extension, not any others.
4490  */
4491  for (i = 0; i < dobj->nDeps; i++)
4492  {
4493  extobj = findObjectByDumpId(dobj->dependencies[i]);
4494  if (extobj && extobj->objType == DO_EXTENSION)
4495  break;
4496  extobj = NULL;
4497  }
4498  if (extobj == NULL)
4499  fatal("could not find parent extension for %s %s",
4500  objtype, objname);
4501 
4502  appendPQExpBufferStr(upgrade_buffer,
4503  "\n-- For binary upgrade, handle extension membership the hard way\n");
4504  appendPQExpBuffer(upgrade_buffer, "ALTER EXTENSION %s ADD %s ",
4505  fmtId(extobj->name),
4506  objtype);
4507  if (objnamespace && *objnamespace)
4508  appendPQExpBuffer(upgrade_buffer, "%s.", fmtId(objnamespace));
4509  appendPQExpBuffer(upgrade_buffer, "%s;\n", objname);
4510 }
4511 
4512 /*
4513  * getNamespaces:
4514  * read all namespaces in the system catalogs and return them in the
4515  * NamespaceInfo* structure
4516  *
4517  * numNamespaces is set to the number of namespaces read in
4518  */
4519 NamespaceInfo *
4521 {
4522  DumpOptions *dopt = fout->dopt;
4523  PGresult *res;
4524  int ntups;
4525  int i;
4526  PQExpBuffer query;
4527  NamespaceInfo *nsinfo;
4528  int i_tableoid;
4529  int i_oid;
4530  int i_nspname;
4531  int i_rolname;
4532  int i_nspacl;
4533  int i_rnspacl;
4534  int i_initnspacl;
4535  int i_initrnspacl;
4536 
4537  query = createPQExpBuffer();
4538 
4539  /*
4540  * we fetch all namespaces including system ones, so that every object we
4541  * read in can be linked to a containing namespace.
4542  */
4543  if (fout->remoteVersion >= 90600)
4544  {
4545  PQExpBuffer acl_subquery = createPQExpBuffer();
4546  PQExpBuffer racl_subquery = createPQExpBuffer();
4547  PQExpBuffer init_acl_subquery = createPQExpBuffer();
4548  PQExpBuffer init_racl_subquery = createPQExpBuffer();
4549 
4550  buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
4551  init_racl_subquery, "n.nspacl", "n.nspowner", "'n'",
4552  dopt->binary_upgrade);
4553 
4554  appendPQExpBuffer(query, "SELECT n.tableoid, n.oid, n.nspname, "
4555  "(%s nspowner) AS rolname, "
4556  "%s as nspacl, "
4557  "%s as rnspacl, "
4558  "%s as initnspacl, "
4559  "%s as initrnspacl "
4560  "FROM pg_namespace n "
4561  "LEFT JOIN pg_init_privs pip "
4562  "ON (n.oid = pip.objoid "
4563  "AND pip.classoid = 'pg_namespace'::regclass "
4564  "AND pip.objsubid = 0",
4566  acl_subquery->data,
4567  racl_subquery->data,
4568  init_acl_subquery->data,
4569  init_racl_subquery->data);
4570 
4571  appendPQExpBufferStr(query, ") ");
4572 
4573  destroyPQExpBuffer(acl_subquery);
4574  destroyPQExpBuffer(racl_subquery);
4575  destroyPQExpBuffer(init_acl_subquery);
4576  destroyPQExpBuffer(init_racl_subquery);
4577  }
4578  else
4579  appendPQExpBuffer(query, "SELECT tableoid, oid, nspname, "
4580  "(%s nspowner) AS rolname, "
4581  "nspacl, NULL as rnspacl, "
4582  "NULL AS initnspacl, NULL as initrnspacl "
4583  "FROM pg_namespace",
4585 
4586  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4587 
4588  ntups = PQntuples(res);
4589 
4590  nsinfo = (NamespaceInfo *) pg_malloc(ntups * sizeof(NamespaceInfo));
4591 
4592  i_tableoid = PQfnumber(res, "tableoid");
4593  i_oid = PQfnumber(res, "oid");
4594  i_nspname = PQfnumber(res, "nspname");
4595  i_rolname = PQfnumber(res, "rolname");
4596  i_nspacl = PQfnumber(res, "nspacl");
4597  i_rnspacl = PQfnumber(res, "rnspacl");
4598  i_initnspacl = PQfnumber(res, "initnspacl");
4599  i_initrnspacl = PQfnumber(res, "initrnspacl");
4600 
4601  for (i = 0; i < ntups; i++)
4602  {
4603  nsinfo[i].dobj.objType = DO_NAMESPACE;
4604  nsinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4605  nsinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4606  AssignDumpId(&nsinfo[i].dobj);
4607  nsinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_nspname));
4608  nsinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4609  nsinfo[i].nspacl = pg_strdup(PQgetvalue(res, i, i_nspacl));
4610  nsinfo[i].rnspacl = pg_strdup(PQgetvalue(res, i, i_rnspacl));
4611  nsinfo[i].initnspacl = pg_strdup(PQgetvalue(res, i, i_initnspacl));
4612  nsinfo[i].initrnspacl = pg_strdup(PQgetvalue(res, i, i_initrnspacl));
4613 
4614  /* Decide whether to dump this namespace */
4615  selectDumpableNamespace(&nsinfo[i], fout);
4616 
4617  /*
4618  * Do not try to dump ACL if the ACL is empty or the default.
4619  *
4620  * This is useful because, for some schemas/objects, the only
4621  * component we are going to try and dump is the ACL and if we can
4622  * remove that then 'dump' goes to zero/false and we don't consider
4623  * this object for dumping at all later on.
4624  */
4625  if (PQgetisnull(res, i, i_nspacl) && PQgetisnull(res, i, i_rnspacl) &&
4626  PQgetisnull(res, i, i_initnspacl) &&
4627  PQgetisnull(res, i, i_initrnspacl))
4628  nsinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4629 
4630  if (strlen(nsinfo[i].rolname) == 0)
4631  pg_log_warning("owner of schema \"%s\" appears to be invalid",
4632  nsinfo[i].dobj.name);
4633  }
4634 
4635  PQclear(res);
4636  destroyPQExpBuffer(query);
4637 
4638  *numNamespaces = ntups;
4639 
4640  return nsinfo;
4641 }
4642 
4643 /*
4644  * findNamespace:
4645  * given a namespace OID, look up the info read by getNamespaces
4646  */
4647 static NamespaceInfo *
4649 {
4650  NamespaceInfo *nsinfo;
4651 
4652  nsinfo = findNamespaceByOid(nsoid);
4653  if (nsinfo == NULL)
4654  fatal("schema with OID %u does not exist", nsoid);
4655  return nsinfo;
4656 }
4657 
4658 /*
4659  * getExtensions:
4660  * read all extensions in the system catalogs and return them in the
4661  * ExtensionInfo* structure
4662  *
4663  * numExtensions is set to the number of extensions read in
4664  */
4665 ExtensionInfo *
4667 {
4668  DumpOptions *dopt = fout->dopt;
4669  PGresult *res;
4670  int ntups;
4671  int i;
4672  PQExpBuffer query;
4673  ExtensionInfo *extinfo;
4674  int i_tableoid;
4675  int i_oid;
4676  int i_extname;
4677  int i_nspname;
4678  int i_extrelocatable;
4679  int i_extversion;
4680  int i_extconfig;
4681  int i_extcondition;
4682 
4683  /*
4684  * Before 9.1, there are no extensions.
4685  */
4686  if (fout->remoteVersion < 90100)
4687  {
4688  *numExtensions = 0;
4689  return NULL;
4690  }
4691 
4692  query = createPQExpBuffer();
4693 
4694  appendPQExpBufferStr(query, "SELECT x.tableoid, x.oid, "
4695  "x.extname, n.nspname, x.extrelocatable, x.extversion, x.extconfig, x.extcondition "
4696  "FROM pg_extension x "
4697  "JOIN pg_namespace n ON n.oid = x.extnamespace");
4698 
4699  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4700 
4701  ntups = PQntuples(res);
4702 
4703  extinfo = (ExtensionInfo *) pg_malloc(ntups * sizeof(ExtensionInfo));
4704 
4705  i_tableoid = PQfnumber(res, "tableoid");
4706  i_oid = PQfnumber(res, "oid");
4707  i_extname = PQfnumber(res, "extname");
4708  i_nspname = PQfnumber(res, "nspname");
4709  i_extrelocatable = PQfnumber(res, "extrelocatable");
4710  i_extversion = PQfnumber(res, "extversion");
4711  i_extconfig = PQfnumber(res, "extconfig");
4712  i_extcondition = PQfnumber(res, "extcondition");
4713 
4714  for (i = 0; i < ntups; i++)
4715  {
4716  extinfo[i].dobj.objType = DO_EXTENSION;
4717  extinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4718  extinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4719  AssignDumpId(&extinfo[i].dobj);
4720  extinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_extname));
4721  extinfo[i].namespace = pg_strdup(PQgetvalue(res, i, i_nspname));
4722  extinfo[i].relocatable = *(PQgetvalue(res, i, i_extrelocatable)) == 't';
4723  extinfo[i].extversion = pg_strdup(PQgetvalue(res, i, i_extversion));
4724  extinfo[i].extconfig = pg_strdup(PQgetvalue(res, i, i_extconfig));
4725  extinfo[i].extcondition = pg_strdup(PQgetvalue(res, i, i_extcondition));
4726 
4727  /* Decide whether we want to dump it */
4728  selectDumpableExtension(&(extinfo[i]), dopt);
4729  }
4730 
4731  PQclear(res);
4732  destroyPQExpBuffer(query);
4733 
4734  *numExtensions = ntups;
4735 
4736  return extinfo;
4737 }
4738 
4739 /*
4740  * getTypes:
4741  * read all types in the system catalogs and return them in the
4742  * TypeInfo* structure
4743  *
4744  * numTypes is set to the number of types read in
4745  *
4746  * NB: this must run after getFuncs() because we assume we can do
4747  * findFuncByOid().
4748  */
4749 TypeInfo *
4751 {
4752  DumpOptions *dopt = fout->dopt;
4753  PGresult *res;
4754  int ntups;
4755  int i;
4756  PQExpBuffer query = createPQExpBuffer();
4757  TypeInfo *tyinfo;
4758  ShellTypeInfo *stinfo;
4759  int i_tableoid;
4760  int i_oid;
4761  int i_typname;
4762  int i_typnamespace;
4763  int i_typacl;
4764  int i_rtypacl;
4765  int i_inittypacl;
4766  int i_initrtypacl;
4767  int i_rolname;
4768  int i_typelem;
4769  int i_typrelid;
4770  int i_typrelkind;
4771  int i_typtype;
4772  int i_typisdefined;
4773  int i_isarray;
4774 
4775  /*
4776  * we include even the built-in types because those may be used as array
4777  * elements by user-defined types
4778  *
4779  * we filter out the built-in types when we dump out the types
4780  *
4781  * same approach for undefined (shell) types and array types
4782  *
4783  * Note: as of 8.3 we can reliably detect whether a type is an
4784  * auto-generated array type by checking the element type's typarray.
4785  * (Before that the test is capable of generating false positives.) We
4786  * still check for name beginning with '_', though, so as to avoid the
4787  * cost of the subselect probe for all standard types. This would have to
4788  * be revisited if the backend ever allows renaming of array types.
4789  */
4790 
4791  if (fout->remoteVersion >= 90600)
4792  {
4793  PQExpBuffer acl_subquery = createPQExpBuffer();
4794  PQExpBuffer racl_subquery = createPQExpBuffer();
4795  PQExpBuffer initacl_subquery = createPQExpBuffer();
4796  PQExpBuffer initracl_subquery = createPQExpBuffer();
4797 
4798  buildACLQueries(acl_subquery, racl_subquery, initacl_subquery,
4799  initracl_subquery, "t.typacl", "t.typowner", "'T'",
4800  dopt->binary_upgrade);
4801 
4802  appendPQExpBuffer(query, "SELECT t.tableoid, t.oid, t.typname, "
4803  "t.typnamespace, "
4804  "%s AS typacl, "
4805  "%s AS rtypacl, "
4806  "%s AS inittypacl, "
4807  "%s AS initrtypacl, "
4808  "(%s t.typowner) AS rolname, "
4809  "t.typelem, t.typrelid, "
4810  "CASE WHEN t.typrelid = 0 THEN ' '::\"char\" "
4811  "ELSE (SELECT relkind FROM pg_class WHERE oid = t.typrelid) END AS typrelkind, "
4812  "t.typtype, t.typisdefined, "
4813  "t.typname[0] = '_' AND t.typelem != 0 AND "
4814  "(SELECT typarray FROM pg_type te WHERE oid = t.typelem) = t.oid AS isarray "
4815  "FROM pg_type t "
4816  "LEFT JOIN pg_init_privs pip ON "
4817  "(t.oid = pip.objoid "
4818  "AND pip.classoid = 'pg_type'::regclass "
4819  "AND pip.objsubid = 0) ",
4820  acl_subquery->data,
4821  racl_subquery->data,
4822  initacl_subquery->data,
4823  initracl_subquery->data,
4825 
4826  destroyPQExpBuffer(acl_subquery);
4827  destroyPQExpBuffer(racl_subquery);
4828  destroyPQExpBuffer(initacl_subquery);
4829  destroyPQExpBuffer(initracl_subquery);
4830  }
4831  else if (fout->remoteVersion >= 90200)
4832  {
4833  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4834  "typnamespace, typacl, NULL as rtypacl, "
4835  "NULL AS inittypacl, NULL AS initrtypacl, "
4836  "(%s typowner) AS rolname, "
4837  "typelem, typrelid, "
4838  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4839  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4840  "typtype, typisdefined, "
4841  "typname[0] = '_' AND typelem != 0 AND "
4842  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
4843  "FROM pg_type",
4845  }
4846  else if (fout->remoteVersion >= 80300)
4847  {
4848  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4849  "typnamespace, NULL AS typacl, NULL as rtypacl, "
4850  "NULL AS inittypacl, NULL AS initrtypacl, "
4851  "(%s typowner) AS rolname, "
4852  "typelem, typrelid, "
4853  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4854  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4855  "typtype, typisdefined, "
4856  "typname[0] = '_' AND typelem != 0 AND "
4857  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
4858  "FROM pg_type",
4860  }
4861  else
4862  {
4863  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4864  "typnamespace, NULL AS typacl, NULL as rtypacl, "
4865  "NULL AS inittypacl, NULL AS initrtypacl, "
4866  "(%s typowner) AS rolname, "
4867  "typelem, typrelid, "
4868  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4869  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4870  "typtype, typisdefined, "
4871  "typname[0] = '_' AND typelem != 0 AS isarray "
4872  "FROM pg_type",
4874  }
4875 
4876  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4877 
4878  ntups = PQntuples(res);
4879 
4880  tyinfo = (TypeInfo *) pg_malloc(ntups * sizeof(TypeInfo));
4881 
4882  i_tableoid = PQfnumber(res, "tableoid");
4883  i_oid = PQfnumber(res, "oid");
4884  i_typname = PQfnumber(res, "typname");
4885  i_typnamespace = PQfnumber(res, "typnamespace");
4886  i_typacl = PQfnumber(res, "typacl");
4887  i_rtypacl = PQfnumber(res, "rtypacl");
4888  i_inittypacl = PQfnumber(res, "inittypacl");
4889  i_initrtypacl = PQfnumber(res, "initrtypacl");
4890  i_rolname = PQfnumber(res, "rolname");
4891  i_typelem = PQfnumber(res, "typelem");
4892  i_typrelid = PQfnumber(res, "typrelid");
4893  i_typrelkind = PQfnumber(res, "typrelkind");
4894  i_typtype = PQfnumber(res, "typtype");
4895  i_typisdefined = PQfnumber(res, "typisdefined");
4896  i_isarray = PQfnumber(res, "isarray");
4897 
4898  for (i = 0; i < ntups; i++)
4899  {
4900  tyinfo[i].dobj.objType = DO_TYPE;
4901  tyinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4902  tyinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4903  AssignDumpId(&tyinfo[i].dobj);
4904  tyinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_typname));
4905  tyinfo[i].dobj.namespace =
4906  findNamespace(fout,
4907  atooid(PQgetvalue(res, i, i_typnamespace)));
4908  tyinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4909  tyinfo[i].typacl = pg_strdup(PQgetvalue(res, i, i_typacl));
4910  tyinfo[i].rtypacl = pg_strdup(PQgetvalue(res, i, i_rtypacl));
4911  tyinfo[i].inittypacl = pg_strdup(PQgetvalue(res, i, i_inittypacl));
4912  tyinfo[i].initrtypacl = pg_strdup(PQgetvalue(res, i, i_initrtypacl));
4913  tyinfo[i].typelem = atooid(PQgetvalue(res, i, i_typelem));
4914  tyinfo[i].typrelid = atooid(PQgetvalue(res, i, i_typrelid));
4915  tyinfo[i].typrelkind = *PQgetvalue(res, i, i_typrelkind);
4916  tyinfo[i].typtype = *PQgetvalue(res, i, i_typtype);
4917  tyinfo[i].shellType = NULL;
4918 
4919  if (strcmp(PQgetvalue(res, i, i_typisdefined), "t") == 0)
4920  tyinfo[i].isDefined = true;
4921  else
4922  tyinfo[i].isDefined = false;
4923 
4924  if (strcmp(PQgetvalue(res, i, i_isarray), "t") == 0)
4925  tyinfo[i].isArray = true;
4926  else
4927  tyinfo[i].isArray = false;
4928 
4929  /* Decide whether we want to dump it */
4930  selectDumpableType(&tyinfo[i], fout);
4931 
4932  /* Do not try to dump ACL if no ACL exists. */
4933  if (PQgetisnull(res, i, i_typacl) && PQgetisnull(res, i, i_rtypacl) &&
4934  PQgetisnull(res, i, i_inittypacl) &&
4935  PQgetisnull(res, i, i_initrtypacl))
4936  tyinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4937 
4938  /*
4939  * If it's a domain, fetch info about its constraints, if any
4940  */
4941  tyinfo[i].nDomChecks = 0;
4942  tyinfo[i].domChecks = NULL;
4943  if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
4944  tyinfo[i].typtype == TYPTYPE_DOMAIN)
4945  getDomainConstraints(fout, &(tyinfo[i]));
4946 
4947  /*
4948  * If it's a base type, make a DumpableObject representing a shell
4949  * definition of the type. We will need to dump that ahead of the I/O
4950  * functions for the type. Similarly, range types need a shell
4951  * definition in case they have a canonicalize function.
4952  *
4953  * Note: the shell type doesn't have a catId. You might think it
4954  * should copy the base type's catId, but then it might capture the
4955  * pg_depend entries for the type, which we don't want.
4956  */
4957  if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
4958  (tyinfo[i].typtype == TYPTYPE_BASE ||
4959  tyinfo[i].typtype == TYPTYPE_RANGE))
4960  {
4961  stinfo = (ShellTypeInfo *) pg_malloc(sizeof(ShellTypeInfo));
4962  stinfo->dobj.objType = DO_SHELL_TYPE;
4963  stinfo->dobj.catId = nilCatalogId;
4964  AssignDumpId(&stinfo->dobj);
4965  stinfo->dobj.name = pg_strdup(tyinfo[i].dobj.name);
4966  stinfo->dobj.namespace = tyinfo[i].dobj.namespace;
4967  stinfo->baseType = &(tyinfo[i]);
4968  tyinfo[i].shellType = stinfo;
4969 
4970  /*
4971  * Initially mark the shell type as not to be dumped. We'll only
4972  * dump it if the I/O or canonicalize functions need to be dumped;
4973  * this is taken care of while sorting dependencies.
4974  */
4975  stinfo->dobj.dump = DUMP_COMPONENT_NONE;
4976  }
4977 
4978  if (strlen(tyinfo[i].rolname) == 0)
4979  pg_log_warning("owner of data type \"%s\" appears to be invalid",
4980  tyinfo[i].dobj.name);
4981  }
4982 
4983  *numTypes = ntups;
4984 
4985  PQclear(res);
4986 
4987  destroyPQExpBuffer(query);
4988 
4989  return tyinfo;
4990 }
4991 
4992 /*
4993  * getOperators:
4994  * read all operators in the system catalogs and return them in the
4995  * OprInfo* structure
4996  *
4997  * numOprs is set to the number of operators read in
4998  */
4999 OprInfo *
5000 getOperators(Archive *fout, int *numOprs)
5001 {
5002  PGresult *res;
5003  int ntups;
5004  int i;
5005  PQExpBuffer query = createPQExpBuffer();
5006  OprInfo *oprinfo;
5007  int i_tableoid;
5008  int i_oid;
5009  int i_oprname;
5010  int i_oprnamespace;
5011  int i_rolname;
5012  int i_oprkind;
5013  int i_oprcode;
5014 
5015  /*
5016  * find all operators, including builtin operators; we filter out
5017  * system-defined operators at dump-out time.
5018  */
5019 
5020  appendPQExpBuffer(query, "SELECT tableoid, oid, oprname, "
5021  "oprnamespace, "
5022  "(%s oprowner) AS rolname, "
5023  "oprkind, "
5024  "oprcode::oid AS oprcode "
5025  "FROM pg_operator",
5027 
5028  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5029 
5030  ntups = PQntuples(res);
5031  *numOprs = ntups;
5032 
5033  oprinfo = (OprInfo *) pg_malloc(ntups * sizeof(OprInfo));
5034 
5035  i_tableoid = PQfnumber(res, "tableoid");
5036  i_oid = PQfnumber(res, "oid");
5037  i_oprname = PQfnumber(res, "oprname");
5038  i_oprnamespace = PQfnumber(res, "oprnamespace");
5039  i_rolname = PQfnumber(res, "rolname");
5040  i_oprkind = PQfnumber(res, "oprkind");
5041  i_oprcode = PQfnumber(res, "oprcode");
5042 
5043  for (i = 0; i < ntups; i++)
5044  {
5045  oprinfo[i].dobj.objType = DO_OPERATOR;
5046  oprinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5047  oprinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5048  AssignDumpId(&oprinfo[i].dobj);
5049  oprinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oprname));
5050  oprinfo[i].dobj.namespace =
5051  findNamespace(fout,
5052  atooid(PQgetvalue(res, i, i_oprnamespace)));
5053  oprinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5054  oprinfo[i].oprkind = (PQgetvalue(res, i, i_oprkind))[0];
5055  oprinfo[i].oprcode = atooid(PQgetvalue(res, i, i_oprcode));
5056 
5057  /* Decide whether we want to dump it */
5058  selectDumpableObject(&(oprinfo[i].dobj), fout);
5059 
5060  /* Operators do not currently have ACLs. */
5061  oprinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5062 
5063  if (strlen(oprinfo[i].rolname) == 0)
5064  pg_log_warning("owner of operator \"%s\" appears to be invalid",
5065  oprinfo[i].dobj.name);
5066  }
5067 
5068  PQclear(res);
5069 
5070  destroyPQExpBuffer(query);
5071 
5072  return oprinfo;
5073 }
5074 
5075 /*
5076  * getCollations:
5077  * read all collations in the system catalogs and return them in the
5078  * CollInfo* structure
5079  *
5080  * numCollations is set to the number of collations read in
5081  */
5082 CollInfo *
5084 {
5085  PGresult *res;
5086  int ntups;
5087  int i;
5088  PQExpBuffer query;
5089  CollInfo *collinfo;
5090  int i_tableoid;
5091  int i_oid;
5092  int i_collname;
5093  int i_collnamespace;
5094  int i_rolname;
5095 
5096  /* Collations didn't exist pre-9.1 */
5097  if (fout->remoteVersion < 90100)
5098  {
5099  *numCollations = 0;
5100  return NULL;
5101  }
5102 
5103  query = createPQExpBuffer();
5104 
5105  /*
5106  * find all collations, including builtin collations; we filter out
5107  * system-defined collations at dump-out time.
5108  */
5109 
5110  appendPQExpBuffer(query, "SELECT tableoid, oid, collname, "
5111  "collnamespace, "
5112  "(%s collowner) AS rolname "
5113  "FROM pg_collation",
5115 
5116  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5117 
5118  ntups = PQntuples(res);
5119  *numCollations = ntups;
5120 
5121  collinfo = (CollInfo *) pg_malloc(ntups * sizeof(CollInfo));
5122 
5123  i_tableoid = PQfnumber(res, "tableoid");
5124  i_oid = PQfnumber(res, "oid");
5125  i_collname = PQfnumber(res, "collname");
5126  i_collnamespace = PQfnumber(res, "collnamespace");
5127  i_rolname = PQfnumber(res, "rolname");
5128 
5129  for (i = 0; i < ntups; i++)
5130  {
5131  collinfo[i].dobj.objType = DO_COLLATION;
5132  collinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5133  collinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5134  AssignDumpId(&collinfo[i].dobj);
5135  collinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_collname));
5136  collinfo[i].dobj.namespace =
5137  findNamespace(fout,
5138  atooid(PQgetvalue(res, i, i_collnamespace)));
5139  collinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5140 
5141  /* Decide whether we want to dump it */
5142  selectDumpableObject(&(collinfo[i].dobj), fout);
5143 
5144  /* Collations do not currently have ACLs. */
5145  collinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5146  }
5147 
5148  PQclear(res);
5149 
5150  destroyPQExpBuffer(query);
5151 
5152  return collinfo;
5153 }
5154 
5155 /*
5156  * getConversions:
5157  * read all conversions in the system catalogs and return them in the
5158  * ConvInfo* structure
5159  *
5160  * numConversions is set to the number of conversions read in
5161  */
5162 ConvInfo *
5163 getConversions(Archive *fout, int *numConversions)
5164 {
5165  PGresult *res;
5166  int ntups;
5167  int i;
5168  PQExpBuffer query;
5169  ConvInfo *convinfo;
5170  int i_tableoid;
5171  int i_oid;
5172  int i_conname;
5173  int i_connamespace;
5174  int i_rolname;
5175 
5176  query = createPQExpBuffer();
5177 
5178  /*
5179  * find all conversions, including builtin conversions; we filter out
5180  * system-defined conversions at dump-out time.
5181  */
5182 
5183  appendPQExpBuffer(query, "SELECT tableoid, oid, conname, "
5184  "connamespace, "
5185  "(%s conowner) AS rolname "
5186  "FROM pg_conversion",
5188 
5189  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5190 
5191  ntups = PQntuples(res);
5192  *numConversions = ntups;
5193 
5194  convinfo = (ConvInfo *) pg_malloc(ntups * sizeof(ConvInfo));
5195 
5196  i_tableoid = PQfnumber(res, "tableoid");
5197  i_oid = PQfnumber(res, "oid");
5198  i_conname = PQfnumber(res, "conname");
5199  i_connamespace = PQfnumber(res, "connamespace");
5200  i_rolname = PQfnumber(res, "rolname");
5201 
5202  for (i = 0; i < ntups; i++)
5203  {
5204  convinfo[i].dobj.objType = DO_CONVERSION;
5205  convinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5206  convinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5207  AssignDumpId(&convinfo[i].dobj);
5208  convinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_conname));
5209  convinfo[i].dobj.namespace =
5210  findNamespace(fout,
5211  atooid(PQgetvalue(res, i, i_connamespace)));
5212  convinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5213 
5214  /* Decide whether we want to dump it */
5215  selectDumpableObject(&(convinfo[i].dobj), fout);
5216 
5217  /* Conversions do not currently have ACLs. */
5218  convinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5219  }
5220 
5221  PQclear(res);
5222 
5223  destroyPQExpBuffer(query);
5224 
5225  return convinfo;
5226 }
5227 
5228 /*
5229  * getAccessMethods:
5230  * read all user-defined access methods in the system catalogs and return
5231  * them in the AccessMethodInfo* structure
5232  *
5233  * numAccessMethods is set to the number of access methods read in
5234  */
5236 getAccessMethods(Archive *fout, int *numAccessMethods)
5237 {
5238  PGresult *res;
5239  int ntups;
5240  int i;
5241  PQExpBuffer query;
5242  AccessMethodInfo *aminfo;
5243  int i_tableoid;
5244  int i_oid;
5245  int i_amname;
5246  int i_amhandler;
5247  int i_amtype;
5248 
5249  /* Before 9.6, there are no user-defined access methods */
5250  if (fout->remoteVersion < 90600)
5251  {
5252  *numAccessMethods = 0;
5253  return NULL;
5254  }
5255 
5256  query = createPQExpBuffer();
5257 
5258  /* Select all access methods from pg_am table */
5259  appendPQExpBufferStr(query, "SELECT tableoid, oid, amname, amtype, "
5260  "amhandler::pg_catalog.regproc AS amhandler "
5261  "FROM pg_am");
5262 
5263  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5264 
5265  ntups = PQntuples(res);
5266  *numAccessMethods = ntups;
5267 
5268  aminfo = (AccessMethodInfo *) pg_malloc(ntups * sizeof(AccessMethodInfo));
5269 
5270  i_tableoid = PQfnumber(res, "tableoid");
5271  i_oid = PQfnumber(res, "oid");
5272  i_amname = PQfnumber(res, "amname");
5273  i_amhandler = PQfnumber(res, "amhandler");
5274  i_amtype = PQfnumber(res, "amtype");
5275 
5276  for (i = 0; i < ntups; i++)
5277  {
5278  aminfo[i].dobj.objType = DO_ACCESS_METHOD;
5279  aminfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5280  aminfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5281  AssignDumpId(&aminfo[i].dobj);
5282  aminfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_amname));
5283  aminfo[i].dobj.namespace = NULL;
5284  aminfo[i].amhandler = pg_strdup(PQgetvalue(res, i, i_amhandler));
5285  aminfo[i].amtype = *(PQgetvalue(res, i, i_amtype));
5286 
5287  /* Decide whether we want to dump it */
5288  selectDumpableAccessMethod(&(aminfo[i]), fout);
5289 
5290  /* Access methods do not currently have ACLs. */
5291  aminfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5292  }
5293 
5294  PQclear(res);
5295 
5296  destroyPQExpBuffer(query);
5297 
5298  return aminfo;
5299 }
5300 
5301 
5302 /*
5303  * getOpclasses:
5304  * read all opclasses in the system catalogs and return them in the
5305  * OpclassInfo* structure
5306  *
5307  * numOpclasses is set to the number of opclasses read in
5308  */
5309 OpclassInfo *
5310 getOpclasses(Archive *fout, int *numOpclasses)
5311 {
5312  PGresult *res;
5313  int ntups;
5314