1 /*-------------------------------------------------------------------------
2  *
3  * pg_dump.c
4  * pg_dump is a utility for dumping out a postgres database
5  * into a script file.
6  *
7  * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994, Regents of the University of California
9  *
10  * pg_dump will read the system catalogs in a database and dump out a
11  * script that reproduces the schema in terms of SQL that is understood
12  * by PostgreSQL
13  *
14  * Note that pg_dump runs in a transaction-snapshot mode transaction,
15  * so it sees a consistent snapshot of the database including system
16  * catalogs. However, it relies in part on various specialized backend
17  * functions like pg_get_indexdef(), and those things tend to look at
18  * the currently committed state. So it is possible to get a 'cache
19  * lookup failed' error if someone performs DDL changes while a dump is
20  * happening. The window for this sort of thing is from the acquisition
21  * of the transaction snapshot to getSchemaData() (when pg_dump acquires
22  * AccessShareLock on every table it intends to dump). It isn't very large,
23  * but it can happen.
24  *
25  * http://archives.postgresql.org/pgsql-bugs/2010-02/msg00187.php
26  *
27  * IDENTIFICATION
28  * src/bin/pg_dump/pg_dump.c
29  *
30  *-------------------------------------------------------------------------
31  */
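/*
 * Illustrative sketch (editorial note, not part of the original file): the
 * consistent snapshot described above is established by setup_connection()
 * below with SQL roughly along these lines, after which getSchemaData()
 * takes AccessShareLock on each table to be dumped:
 *
 *     BEGIN;
 *     SET TRANSACTION ISOLATION LEVEL REPEATABLE READ, READ ONLY;
 *     LOCK TABLE some_schema.some_table IN ACCESS SHARE MODE;  -- per table
 *
 * "some_schema.some_table" is a hypothetical placeholder.
 */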
32 #include "postgres_fe.h"
33 
34 #include <unistd.h>
35 #include <ctype.h>
36 #include <limits.h>
37 #ifdef HAVE_TERMIOS_H
38 #include <termios.h>
39 #endif
40 
41 #include "getopt_long.h"
42 
43 #include "access/attnum.h"
44 #include "access/sysattr.h"
45 #include "access/transam.h"
46 #include "catalog/pg_aggregate_d.h"
47 #include "catalog/pg_am_d.h"
48 #include "catalog/pg_attribute_d.h"
49 #include "catalog/pg_cast_d.h"
50 #include "catalog/pg_class_d.h"
51 #include "catalog/pg_default_acl_d.h"
52 #include "catalog/pg_largeobject_d.h"
53 #include "catalog/pg_largeobject_metadata_d.h"
54 #include "catalog/pg_proc_d.h"
55 #include "catalog/pg_trigger_d.h"
56 #include "catalog/pg_type_d.h"
57 #include "libpq/libpq-fs.h"
58 #include "storage/block.h"
59 
60 #include "dumputils.h"
61 #include "parallel.h"
62 #include "pg_backup_db.h"
63 #include "pg_backup_utils.h"
64 #include "pg_dump.h"
65 #include "fe_utils/connect.h"
66 #include "fe_utils/string_utils.h"
67 
68 
69 typedef struct
70 {
71  const char *descr; /* comment for an object */
72  Oid classoid; /* object class (catalog OID) */
73  Oid objoid; /* object OID */
74  int objsubid; /* subobject (table column #) */
75 } CommentItem;
76 
77 typedef struct
78 {
79  const char *provider; /* label provider of this security label */
80  const char *label; /* security label for an object */
81  Oid classoid; /* object class (catalog OID) */
82  Oid objoid; /* object OID */
83  int objsubid; /* subobject (table column #) */
84 } SecLabelItem;
85 
86 typedef enum OidOptions
87 {
88  zeroAsOpaque = 1,
89  zeroAsAny = 2,
90  zeroAsStar = 4,
91  zeroAsNone = 8
92 } OidOptions;
93 
94 /* global decls */
95 bool g_verbose; /* User wants verbose narration of our
96  * activities. */
97 static bool dosync = true; /* Issue fsync() to make dump durable on disk. */
98 
99 /* subquery used to convert user ID (eg, datdba) to user name */
100 static const char *username_subquery;
101 
102 /*
103  * For 8.0 and earlier servers this is pulled from pg_database; for 8.1 and
104  * later we use FirstNormalObjectId - 1.
105  */
106 static Oid g_last_builtin_oid; /* value of the last builtin oid */
107 
108 /* The specified names/patterns should match at least one entity */
109 static int strict_names = 0;
110 
111 /*
112  * Object inclusion/exclusion lists
113  *
114  * The string lists record the patterns given by command-line switches,
115  * which we then convert to lists of OIDs of matching objects.
116  */
117 static SimpleStringList schema_include_patterns = {NULL, NULL};
118 static SimpleOidList schema_include_oids = {NULL, NULL};
119 static SimpleStringList schema_exclude_patterns = {NULL, NULL};
120 static SimpleOidList schema_exclude_oids = {NULL, NULL};
121 
122 static SimpleStringList table_include_patterns = {NULL, NULL};
123 static SimpleOidList table_include_oids = {NULL, NULL};
124 static SimpleStringList table_exclude_patterns = {NULL, NULL};
125 static SimpleOidList table_exclude_oids = {NULL, NULL};
126 static SimpleStringList tabledata_exclude_patterns = {NULL, NULL};
127 static SimpleOidList tabledata_exclude_oids = {NULL, NULL};
128 
129 
130 char g_opaque_type[10]; /* name for the opaque type */
131 
132 /* placeholders for the delimiters for comments */
133 char g_comment_start[10];
134 char g_comment_end[10];
135 
136 static const CatalogId nilCatalogId = {0, 0};
137 
138 /* override for standard extra_float_digits setting */
139 static bool have_extra_float_digits = false;
140 static int extra_float_digits;
141 
142 /*
143  * The default number of rows per INSERT when
144  * --inserts is specified without --rows-per-insert
145  */
146 #define DUMP_DEFAULT_ROWS_PER_INSERT 1
147 
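/*
 * Worked example (editorial note, not in the original source): with plain
 * --inserts, each row is emitted as its own statement, e.g.
 *     INSERT INTO public.t VALUES (1, 'one');
 * while --rows-per-insert=2 (which implies --inserts) batches rows, e.g.
 *     INSERT INTO public.t VALUES (1, 'one'), (2, 'two');
 * The table name "public.t" is hypothetical; dopt.dump_inserts stores the
 * per-statement row count, defaulting to this macro when only --inserts is
 * given.
 */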
148 /*
149  * Macro for producing quoted, schema-qualified name of a dumpable object.
150  */
151 #define fmtQualifiedDumpable(obj) \
152  fmtQualifiedId((obj)->dobj.namespace->dobj.name, \
153  (obj)->dobj.name)
154 
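/*
 * Usage sketch (editorial note): for a hypothetical table "Foo Bar" in
 * schema "my schema", fmtQualifiedDumpable(tbinfo) would produce
 *     "my schema"."Foo Bar"
 * since fmtQualifiedId() double-quotes each component whenever quoting is
 * required.
 */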
155 static void help(const char *progname);
156 static void setup_connection(Archive *AH,
157  const char *dumpencoding, const char *dumpsnapshot,
158  char *use_role);
159 static ArchiveFormat parseArchiveFormat(const char *format, ArchiveMode *mode);
160 static void expand_schema_name_patterns(Archive *fout,
161  SimpleStringList *patterns,
162  SimpleOidList *oids,
163  bool strict_names);
164 static void expand_table_name_patterns(Archive *fout,
165  SimpleStringList *patterns,
166  SimpleOidList *oids,
167  bool strict_names);
168 static NamespaceInfo *findNamespace(Archive *fout, Oid nsoid);
169 static void dumpTableData(Archive *fout, TableDataInfo *tdinfo);
170 static void refreshMatViewData(Archive *fout, TableDataInfo *tdinfo);
171 static void guessConstraintInheritance(TableInfo *tblinfo, int numTables);
172 static void dumpComment(Archive *fout, const char *type, const char *name,
173  const char *namespace, const char *owner,
174  CatalogId catalogId, int subid, DumpId dumpId);
175 static int findComments(Archive *fout, Oid classoid, Oid objoid,
176  CommentItem **items);
177 static int collectComments(Archive *fout, CommentItem **items);
178 static void dumpSecLabel(Archive *fout, const char *type, const char *name,
179  const char *namespace, const char *owner,
180  CatalogId catalogId, int subid, DumpId dumpId);
181 static int findSecLabels(Archive *fout, Oid classoid, Oid objoid,
182  SecLabelItem **items);
183 static int collectSecLabels(Archive *fout, SecLabelItem **items);
184 static void dumpDumpableObject(Archive *fout, DumpableObject *dobj);
185 static void dumpNamespace(Archive *fout, NamespaceInfo *nspinfo);
186 static void dumpExtension(Archive *fout, ExtensionInfo *extinfo);
187 static void dumpType(Archive *fout, TypeInfo *tyinfo);
188 static void dumpBaseType(Archive *fout, TypeInfo *tyinfo);
189 static void dumpEnumType(Archive *fout, TypeInfo *tyinfo);
190 static void dumpRangeType(Archive *fout, TypeInfo *tyinfo);
191 static void dumpUndefinedType(Archive *fout, TypeInfo *tyinfo);
192 static void dumpDomain(Archive *fout, TypeInfo *tyinfo);
193 static void dumpCompositeType(Archive *fout, TypeInfo *tyinfo);
194 static void dumpCompositeTypeColComments(Archive *fout, TypeInfo *tyinfo);
195 static void dumpShellType(Archive *fout, ShellTypeInfo *stinfo);
196 static void dumpProcLang(Archive *fout, ProcLangInfo *plang);
197 static void dumpFunc(Archive *fout, FuncInfo *finfo);
198 static void dumpCast(Archive *fout, CastInfo *cast);
199 static void dumpTransform(Archive *fout, TransformInfo *transform);
200 static void dumpOpr(Archive *fout, OprInfo *oprinfo);
201 static void dumpAccessMethod(Archive *fout, AccessMethodInfo *oprinfo);
202 static void dumpOpclass(Archive *fout, OpclassInfo *opcinfo);
203 static void dumpOpfamily(Archive *fout, OpfamilyInfo *opfinfo);
204 static void dumpCollation(Archive *fout, CollInfo *collinfo);
205 static void dumpConversion(Archive *fout, ConvInfo *convinfo);
206 static void dumpRule(Archive *fout, RuleInfo *rinfo);
207 static void dumpAgg(Archive *fout, AggInfo *agginfo);
208 static void dumpTrigger(Archive *fout, TriggerInfo *tginfo);
209 static void dumpEventTrigger(Archive *fout, EventTriggerInfo *evtinfo);
210 static void dumpTable(Archive *fout, TableInfo *tbinfo);
211 static void dumpTableSchema(Archive *fout, TableInfo *tbinfo);
212 static void dumpAttrDef(Archive *fout, AttrDefInfo *adinfo);
213 static void dumpSequence(Archive *fout, TableInfo *tbinfo);
214 static void dumpSequenceData(Archive *fout, TableDataInfo *tdinfo);
215 static void dumpIndex(Archive *fout, IndxInfo *indxinfo);
216 static void dumpIndexAttach(Archive *fout, IndexAttachInfo *attachinfo);
217 static void dumpStatisticsExt(Archive *fout, StatsExtInfo *statsextinfo);
218 static void dumpConstraint(Archive *fout, ConstraintInfo *coninfo);
219 static void dumpTableConstraintComment(Archive *fout, ConstraintInfo *coninfo);
220 static void dumpTSParser(Archive *fout, TSParserInfo *prsinfo);
221 static void dumpTSDictionary(Archive *fout, TSDictInfo *dictinfo);
222 static void dumpTSTemplate(Archive *fout, TSTemplateInfo *tmplinfo);
223 static void dumpTSConfig(Archive *fout, TSConfigInfo *cfginfo);
224 static void dumpForeignDataWrapper(Archive *fout, FdwInfo *fdwinfo);
225 static void dumpForeignServer(Archive *fout, ForeignServerInfo *srvinfo);
226 static void dumpUserMappings(Archive *fout,
227  const char *servername, const char *namespace,
228  const char *owner, CatalogId catalogId, DumpId dumpId);
229 static void dumpDefaultACL(Archive *fout, DefaultACLInfo *daclinfo);
230 
231 static void dumpACL(Archive *fout, CatalogId objCatId, DumpId objDumpId,
232  const char *type, const char *name, const char *subname,
233  const char *nspname, const char *owner,
234  const char *acls, const char *racls,
235  const char *initacls, const char *initracls);
236 
237 static void getDependencies(Archive *fout);
238 static void BuildArchiveDependencies(Archive *fout);
239 static void findDumpableDependencies(ArchiveHandle *AH, DumpableObject *dobj,
240  DumpId **dependencies, int *nDeps, int *allocDeps);
241 
242 static DumpableObject *createBoundaryObjects(void);
243 static void addBoundaryDependencies(DumpableObject **dobjs, int numObjs,
244  DumpableObject *boundaryObjs);
245 
246 static void getDomainConstraints(Archive *fout, TypeInfo *tyinfo);
247 static void getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, char relkind);
248 static void makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo);
249 static void buildMatViewRefreshDependencies(Archive *fout);
250 static void getTableDataFKConstraints(void);
251 static char *format_function_arguments(FuncInfo *finfo, char *funcargs,
252  bool is_agg);
253 static char *format_function_arguments_old(Archive *fout,
254  FuncInfo *finfo, int nallargs,
255  char **allargtypes,
256  char **argmodes,
257  char **argnames);
258 static char *format_function_signature(Archive *fout,
259  FuncInfo *finfo, bool honor_quotes);
260 static char *convertRegProcReference(Archive *fout,
261  const char *proc);
262 static char *getFormattedOperatorName(Archive *fout, const char *oproid);
263 static char *convertTSFunction(Archive *fout, Oid funcOid);
264 static Oid findLastBuiltinOid_V71(Archive *fout);
265 static char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts);
266 static void getBlobs(Archive *fout);
267 static void dumpBlob(Archive *fout, BlobInfo *binfo);
268 static int dumpBlobs(Archive *fout, void *arg);
269 static void dumpPolicy(Archive *fout, PolicyInfo *polinfo);
270 static void dumpPublication(Archive *fout, PublicationInfo *pubinfo);
271 static void dumpPublicationTable(Archive *fout, PublicationRelInfo *pubrinfo);
272 static void dumpSubscription(Archive *fout, SubscriptionInfo *subinfo);
273 static void dumpDatabase(Archive *AH);
274 static void dumpDatabaseConfig(Archive *AH, PQExpBuffer outbuf,
275  const char *dbname, Oid dboid);
276 static void dumpEncoding(Archive *AH);
277 static void dumpStdStrings(Archive *AH);
278 static void dumpSearchPath(Archive *AH);
279 static void binary_upgrade_set_type_oids_by_type_oid(Archive *fout,
280  PQExpBuffer upgrade_buffer,
281  Oid pg_type_oid,
282  bool force_array_type);
283 static void binary_upgrade_set_type_oids_by_rel_oid(Archive *fout,
284  PQExpBuffer upgrade_buffer, Oid pg_rel_oid);
285 static void binary_upgrade_set_pg_class_oids(Archive *fout,
286  PQExpBuffer upgrade_buffer,
287  Oid pg_class_oid, bool is_index);
288 static void binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
289  DumpableObject *dobj,
290  const char *objtype,
291  const char *objname,
292  const char *objnamespace);
293 static const char *getAttrName(int attrnum, TableInfo *tblInfo);
294 static const char *fmtCopyColumnList(const TableInfo *ti, PQExpBuffer buffer);
295 static bool nonemptyReloptions(const char *reloptions);
296 static void appendReloptionsArrayAH(PQExpBuffer buffer, const char *reloptions,
297  const char *prefix, Archive *fout);
298 static char *get_synchronized_snapshot(Archive *fout);
299 static void setupDumpWorker(Archive *AHX);
300 static TableInfo *getRootTableInfo(TableInfo *tbinfo);
301 
302 
303 int
304 main(int argc, char **argv)
305 {
306  int c;
307  const char *filename = NULL;
308  const char *format = "p";
309  TableInfo *tblinfo;
310  int numTables;
311  DumpableObject **dobjs;
312  int numObjs;
313  DumpableObject *boundaryObjs;
314  int i;
315  int optindex;
316  char *endptr;
317  RestoreOptions *ropt;
318  Archive *fout; /* the script file */
319  const char *dumpencoding = NULL;
320  const char *dumpsnapshot = NULL;
321  char *use_role = NULL;
322  long rowsPerInsert;
323  int numWorkers = 1;
324  trivalue prompt_password = TRI_DEFAULT;
325  int compressLevel = -1;
326  int plainText = 0;
327  ArchiveFormat archiveFormat = archUnknown;
328  ArchiveMode archiveMode;
329 
330  static DumpOptions dopt;
331 
332  static struct option long_options[] = {
333  {"data-only", no_argument, NULL, 'a'},
334  {"blobs", no_argument, NULL, 'b'},
335  {"no-blobs", no_argument, NULL, 'B'},
336  {"clean", no_argument, NULL, 'c'},
337  {"create", no_argument, NULL, 'C'},
338  {"dbname", required_argument, NULL, 'd'},
339  {"file", required_argument, NULL, 'f'},
340  {"format", required_argument, NULL, 'F'},
341  {"host", required_argument, NULL, 'h'},
342  {"jobs", 1, NULL, 'j'},
343  {"no-reconnect", no_argument, NULL, 'R'},
344  {"no-owner", no_argument, NULL, 'O'},
345  {"port", required_argument, NULL, 'p'},
346  {"schema", required_argument, NULL, 'n'},
347  {"exclude-schema", required_argument, NULL, 'N'},
348  {"schema-only", no_argument, NULL, 's'},
349  {"superuser", required_argument, NULL, 'S'},
350  {"table", required_argument, NULL, 't'},
351  {"exclude-table", required_argument, NULL, 'T'},
352  {"no-password", no_argument, NULL, 'w'},
353  {"password", no_argument, NULL, 'W'},
354  {"username", required_argument, NULL, 'U'},
355  {"verbose", no_argument, NULL, 'v'},
356  {"no-privileges", no_argument, NULL, 'x'},
357  {"no-acl", no_argument, NULL, 'x'},
358  {"compress", required_argument, NULL, 'Z'},
359  {"encoding", required_argument, NULL, 'E'},
360  {"help", no_argument, NULL, '?'},
361  {"version", no_argument, NULL, 'V'},
362 
363  /*
364  * the following options don't have an equivalent short option letter
365  */
366  {"attribute-inserts", no_argument, &dopt.column_inserts, 1},
367  {"binary-upgrade", no_argument, &dopt.binary_upgrade, 1},
368  {"column-inserts", no_argument, &dopt.column_inserts, 1},
369  {"disable-dollar-quoting", no_argument, &dopt.disable_dollar_quoting, 1},
370  {"disable-triggers", no_argument, &dopt.disable_triggers, 1},
371  {"enable-row-security", no_argument, &dopt.enable_row_security, 1},
372  {"exclude-table-data", required_argument, NULL, 4},
373  {"extra-float-digits", required_argument, NULL, 8},
374  {"if-exists", no_argument, &dopt.if_exists, 1},
375  {"inserts", no_argument, NULL, 9},
376  {"lock-wait-timeout", required_argument, NULL, 2},
377  {"no-tablespaces", no_argument, &dopt.outputNoTablespaces, 1},
378  {"quote-all-identifiers", no_argument, &quote_all_identifiers, 1},
379  {"load-via-partition-root", no_argument, &dopt.load_via_partition_root, 1},
380  {"role", required_argument, NULL, 3},
381  {"section", required_argument, NULL, 5},
382  {"serializable-deferrable", no_argument, &dopt.serializable_deferrable, 1},
383  {"snapshot", required_argument, NULL, 6},
384  {"strict-names", no_argument, &strict_names, 1},
385  {"use-set-session-authorization", no_argument, &dopt.use_setsessauth, 1},
386  {"no-comments", no_argument, &dopt.no_comments, 1},
387  {"no-publications", no_argument, &dopt.no_publications, 1},
388  {"no-security-labels", no_argument, &dopt.no_security_labels, 1},
389  {"no-synchronized-snapshots", no_argument, &dopt.no_synchronized_snapshots, 1},
390  {"no-unlogged-table-data", no_argument, &dopt.no_unlogged_table_data, 1},
391  {"no-subscriptions", no_argument, &dopt.no_subscriptions, 1},
392  {"no-sync", no_argument, NULL, 7},
393  {"on-conflict-do-nothing", no_argument, &dopt.do_nothing, 1},
394  {"rows-per-insert", required_argument, NULL, 10},
395 
396  {NULL, 0, NULL, 0}
397  };
398 
399  set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_dump"));
400 
401  /*
402  * Initialize what we need for parallel execution, especially for thread
403  * support on Windows.
404  */
405  init_parallel_dump_utils();
406 
407  g_verbose = false;
408 
409  strcpy(g_comment_start, "-- ");
410  g_comment_end[0] = '\0';
411  strcpy(g_opaque_type, "opaque");
412 
413  progname = get_progname(argv[0]);
414 
415  if (argc > 1)
416  {
417  if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
418  {
419  help(progname);
420  exit_nicely(0);
421  }
422  if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
423  {
424  puts("pg_dump (PostgreSQL) " PG_VERSION);
425  exit_nicely(0);
426  }
427  }
428 
429  InitDumpOptions(&dopt);
430 
431  while ((c = getopt_long(argc, argv, "abBcCd:E:f:F:h:j:n:N:oOp:RsS:t:T:U:vwWxZ:",
432  long_options, &optindex)) != -1)
433  {
434  switch (c)
435  {
436  case 'a': /* Dump data only */
437  dopt.dataOnly = true;
438  break;
439 
440  case 'b': /* Dump blobs */
441  dopt.outputBlobs = true;
442  break;
443 
444  case 'B': /* Don't dump blobs */
445  dopt.dontOutputBlobs = true;
446  break;
447 
448  case 'c': /* clean (i.e., drop) schema prior to create */
449  dopt.outputClean = 1;
450  break;
451 
452  case 'C': /* Create DB */
453  dopt.outputCreateDB = 1;
454  break;
455 
456  case 'd': /* database name */
457  dopt.dbname = pg_strdup(optarg);
458  break;
459 
460  case 'E': /* Dump encoding */
461  dumpencoding = pg_strdup(optarg);
462  break;
463 
464  case 'f':
465  filename = pg_strdup(optarg);
466  break;
467 
468  case 'F':
469  format = pg_strdup(optarg);
470  break;
471 
472  case 'h': /* server host */
473  dopt.pghost = pg_strdup(optarg);
474  break;
475 
476  case 'j': /* number of dump jobs */
477  numWorkers = atoi(optarg);
478  break;
479 
480  case 'n': /* include schema(s) */
481  simple_string_list_append(&schema_include_patterns, optarg);
482  dopt.include_everything = false;
483  break;
484 
485  case 'N': /* exclude schema(s) */
486  simple_string_list_append(&schema_exclude_patterns, optarg);
487  break;
488 
489  case 'O': /* Don't reconnect to match owner */
490  dopt.outputNoOwner = 1;
491  break;
492 
493  case 'p': /* server port */
494  dopt.pgport = pg_strdup(optarg);
495  break;
496 
497  case 'R':
498  /* no-op, still accepted for backwards compatibility */
499  break;
500 
501  case 's': /* dump schema only */
502  dopt.schemaOnly = true;
503  break;
504 
505  case 'S': /* Username for superuser in plain text output */
506  dopt.outputSuperuser = pg_strdup(optarg);
507  break;
508 
509  case 't': /* include table(s) */
510  simple_string_list_append(&table_include_patterns, optarg);
511  dopt.include_everything = false;
512  break;
513 
514  case 'T': /* exclude table(s) */
515  simple_string_list_append(&table_exclude_patterns, optarg);
516  break;
517 
518  case 'U':
519  dopt.username = pg_strdup(optarg);
520  break;
521 
522  case 'v': /* verbose */
523  g_verbose = true;
524  break;
525 
526  case 'w':
527  prompt_password = TRI_NO;
528  break;
529 
530  case 'W':
531  prompt_password = TRI_YES;
532  break;
533 
534  case 'x': /* skip ACL dump */
535  dopt.aclsSkip = true;
536  break;
537 
538  case 'Z': /* Compression Level */
539  compressLevel = atoi(optarg);
540  if (compressLevel < 0 || compressLevel > 9)
541  {
542  write_msg(NULL, "compression level must be in range 0..9\n");
543  exit_nicely(1);
544  }
545  break;
546 
547  case 0:
548  /* This covers the long options. */
549  break;
550 
551  case 2: /* lock-wait-timeout */
552  dopt.lockWaitTimeout = pg_strdup(optarg);
553  break;
554 
555  case 3: /* SET ROLE */
556  use_role = pg_strdup(optarg);
557  break;
558 
559  case 4: /* exclude table(s) data */
560  simple_string_list_append(&tabledata_exclude_patterns, optarg);
561  break;
562 
563  case 5: /* section */
564  set_dump_section(optarg, &dopt.dumpSections);
565  break;
566 
567  case 6: /* snapshot */
568  dumpsnapshot = pg_strdup(optarg);
569  break;
570 
571  case 7: /* no-sync */
572  dosync = false;
573  break;
574 
575  case 8:
576  have_extra_float_digits = true;
577  extra_float_digits = atoi(optarg);
578  if (extra_float_digits < -15 || extra_float_digits > 3)
579  {
580  write_msg(NULL, "extra_float_digits must be in range -15..3\n");
581  exit_nicely(1);
582  }
583  break;
584 
585  case 9: /* inserts */
586 
587  /*
588  * dump_inserts also stores --rows-per-insert, careful not to
589  * overwrite that.
590  */
591  if (dopt.dump_inserts == 0)
592  dopt.dump_inserts = DUMP_DEFAULT_ROWS_PER_INSERT;
593  break;
594 
595  case 10: /* rows per insert */
596  errno = 0;
597  rowsPerInsert = strtol(optarg, &endptr, 10);
598 
599  if (endptr == optarg || *endptr != '\0' ||
600  rowsPerInsert <= 0 || rowsPerInsert > INT_MAX ||
601  errno == ERANGE)
602  {
603  write_msg(NULL, "rows-per-insert must be in range %d..%d\n",
604  1, INT_MAX);
605  exit_nicely(1);
606  }
607  dopt.dump_inserts = (int) rowsPerInsert;
608  break;
609 
610  default:
611  fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
612  exit_nicely(1);
613  }
614  }
615 
616  /*
617  * Non-option argument specifies database name as long as it wasn't
618  * already specified with -d / --dbname
619  */
620  if (optind < argc && dopt.dbname == NULL)
621  dopt.dbname = argv[optind++];
622 
623  /* Complain if any arguments remain */
624  if (optind < argc)
625  {
626  fprintf(stderr, _("%s: too many command-line arguments (first is \"%s\")\n"),
627  progname, argv[optind]);
628  fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
629  progname);
630  exit_nicely(1);
631  }
632 
633  /* --column-inserts implies --inserts */
634  if (dopt.column_inserts && dopt.dump_inserts == 0)
635  dopt.dump_inserts = DUMP_DEFAULT_ROWS_PER_INSERT;
636 
637  /*
638  * Binary upgrade mode implies dumping sequence data even in schema-only
639  * mode. This is not exposed as a separate option, but kept separate
640  * internally for clarity.
641  */
642  if (dopt.binary_upgrade)
643  dopt.sequence_data = 1;
644 
645  if (dopt.dataOnly && dopt.schemaOnly)
646  {
647  write_msg(NULL, "options -s/--schema-only and -a/--data-only cannot be used together\n");
648  exit_nicely(1);
649  }
650 
651  if (dopt.dataOnly && dopt.outputClean)
652  {
653  write_msg(NULL, "options -c/--clean and -a/--data-only cannot be used together\n");
654  exit_nicely(1);
655  }
656 
657  if (dopt.if_exists && !dopt.outputClean)
658  exit_horribly(NULL, "option --if-exists requires option -c/--clean\n");
659 
660  /*
661  * --inserts are already implied above if --column-inserts or
662  * --rows-per-insert were specified.
663  */
664  if (dopt.do_nothing && dopt.dump_inserts == 0)
665  exit_horribly(NULL, "option --on-conflict-do-nothing requires option --inserts, --rows-per-insert or --column-inserts\n");
666 
667  /* Identify archive format to emit */
668  archiveFormat = parseArchiveFormat(format, &archiveMode);
669 
670  /* archiveFormat specific setup */
671  if (archiveFormat == archNull)
672  plainText = 1;
673 
674  /* Custom and directory formats are compressed by default, others not */
675  if (compressLevel == -1)
676  {
677 #ifdef HAVE_LIBZ
678  if (archiveFormat == archCustom || archiveFormat == archDirectory)
679  compressLevel = Z_DEFAULT_COMPRESSION;
680  else
681 #endif
682  compressLevel = 0;
683  }
684 
685 #ifndef HAVE_LIBZ
686  if (compressLevel != 0)
687  write_msg(NULL, "WARNING: requested compression not available in this "
688  "installation -- archive will be uncompressed\n");
689  compressLevel = 0;
690 #endif
691 
692  /*
693  * If emitting an archive format, we always want to emit a DATABASE item,
694  * in case --create is specified at pg_restore time.
695  */
696  if (!plainText)
697  dopt.outputCreateDB = 1;
698 
699  /*
700  * On Windows we can only have at most MAXIMUM_WAIT_OBJECTS (= 64 usually)
701  * parallel jobs because that's the maximum limit for the
702  * WaitForMultipleObjects() call.
703  */
704  if (numWorkers <= 0
705 #ifdef WIN32
706  || numWorkers > MAXIMUM_WAIT_OBJECTS
707 #endif
708  )
709  exit_horribly(NULL, "invalid number of parallel jobs\n");
710 
711  /* Parallel backup only in the directory archive format so far */
712  if (archiveFormat != archDirectory && numWorkers > 1)
713  exit_horribly(NULL, "parallel backup only supported by the directory format\n");
714 
715  /* Open the output file */
716  fout = CreateArchive(filename, archiveFormat, compressLevel, dosync,
717  archiveMode, setupDumpWorker);
718 
719  /* Make dump options accessible right away */
720  SetArchiveOptions(fout, &dopt, NULL);
721 
722  /* Register the cleanup hook */
723  on_exit_close_archive(fout);
724 
725  /* Let the archiver know how noisy to be */
726  fout->verbose = g_verbose;
727 
728  /*
729  * We allow the server to be back to 8.0, and up to any minor release of
730  * our own major version. (See also version check in pg_dumpall.c.)
731  */
732  fout->minRemoteVersion = 80000;
733  fout->maxRemoteVersion = (PG_VERSION_NUM / 100) * 100 + 99;
734 
735  fout->numWorkers = numWorkers;
736 
737  /*
738  * Open the database using the Archiver, so it knows about it. Errors mean
739  * death.
740  */
741  ConnectDatabase(fout, dopt.dbname, dopt.pghost, dopt.pgport, dopt.username, prompt_password);
742  setup_connection(fout, dumpencoding, dumpsnapshot, use_role);
743 
744  /*
745  * Disable security label support if server version < v9.1.x (prevents
746  * access to nonexistent pg_seclabel catalog)
747  */
748  if (fout->remoteVersion < 90100)
749  dopt.no_security_labels = 1;
750 
751  /*
752  * On hot standbys, never try to dump unlogged table data, since it will
753  * just throw an error.
754  */
755  if (fout->isStandby)
756  dopt.no_unlogged_table_data = true;
757 
758  /* Select the appropriate subquery to convert user IDs to names */
759  if (fout->remoteVersion >= 80100)
760  username_subquery = "SELECT rolname FROM pg_catalog.pg_roles WHERE oid =";
761  else
762  username_subquery = "SELECT usename FROM pg_catalog.pg_user WHERE usesysid =";
763 
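 /*
  * Illustrative example (editorial note): username_subquery deliberately ends
  * with "oid =" so that callers can append a column reference when embedding
  * it in a larger query; for instance "(%s c.relowner)" expands to
  *     (SELECT rolname FROM pg_catalog.pg_roles WHERE oid = c.relowner)
  * which maps the owner OID of a pg_class row to a role name.
  */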
764  /* check the version for the synchronized snapshots feature */
765  if (numWorkers > 1 && fout->remoteVersion < 90200
766  && !dopt.no_synchronized_snapshots)
767  exit_horribly(NULL,
768  "Synchronized snapshots are not supported by this server version.\n"
769  "Run with --no-synchronized-snapshots instead if you do not need\n"
770  "synchronized snapshots.\n");
771 
772  /* check the version when a snapshot is explicitly specified by user */
773  if (dumpsnapshot && fout->remoteVersion < 90200)
774  exit_horribly(NULL,
775  "Exported snapshots are not supported by this server version.\n");
776 
777  /*
778  * Find the last built-in OID, if needed (prior to 8.1)
779  *
780  * With 8.1 and above, we can just use FirstNormalObjectId - 1.
781  */
782  if (fout->remoteVersion < 80100)
783  g_last_builtin_oid = findLastBuiltinOid_V71(fout);
784  else
785  g_last_builtin_oid = FirstNormalObjectId - 1;
786 
787  if (g_verbose)
788  write_msg(NULL, "last built-in OID is %u\n", g_last_builtin_oid);
789 
790  /* Expand schema selection patterns into OID lists */
791  if (schema_include_patterns.head != NULL)
792  {
793  expand_schema_name_patterns(fout, &schema_include_patterns,
794  &schema_include_oids,
795  strict_names);
796  if (schema_include_oids.head == NULL)
797  exit_horribly(NULL, "no matching schemas were found\n");
798  }
799  expand_schema_name_patterns(fout, &schema_exclude_patterns,
800  &schema_exclude_oids,
801  false);
802  /* non-matching exclusion patterns aren't an error */
803 
804  /* Expand table selection patterns into OID lists */
805  if (table_include_patterns.head != NULL)
806  {
807  expand_table_name_patterns(fout, &table_include_patterns,
808  &table_include_oids,
809  strict_names);
810  if (table_include_oids.head == NULL)
811  exit_horribly(NULL, "no matching tables were found\n");
812  }
813  expand_table_name_patterns(fout, &table_exclude_patterns,
814  &table_exclude_oids,
815  false);
816 
817  expand_table_name_patterns(fout, &tabledata_exclude_patterns,
818  &tabledata_exclude_oids,
819  false);
820 
821  /* non-matching exclusion patterns aren't an error */
822 
823  /*
824  * Dumping blobs is the default for dumps where an inclusion switch is not
825  * used (an "include everything" dump). -B can be used to exclude blobs
826  * from those dumps. -b can be used to include blobs even when an
827  * inclusion switch is used.
828  *
829  * -s means "schema only" and blobs are data, not schema, so we never
830  * include blobs when -s is used.
831  */
832  if (dopt.include_everything && !dopt.schemaOnly && !dopt.dontOutputBlobs)
833  dopt.outputBlobs = true;
834 
835  /*
836  * Now scan the database and create DumpableObject structs for all the
837  * objects we intend to dump.
838  */
839  tblinfo = getSchemaData(fout, &numTables);
840 
841  if (fout->remoteVersion < 80400)
842  guessConstraintInheritance(tblinfo, numTables);
843 
844  if (!dopt.schemaOnly)
845  {
846  getTableData(&dopt, tblinfo, numTables, 0);
847  buildMatViewRefreshDependencies(fout);
848  if (dopt.dataOnly)
849  getTableDataFKConstraints();
850  }
851 
852  if (dopt.schemaOnly && dopt.sequence_data)
853  getTableData(&dopt, tblinfo, numTables, RELKIND_SEQUENCE);
854 
855  /*
856  * In binary-upgrade mode, we do not have to worry about the actual blob
857  * data or the associated metadata that resides in the pg_largeobject and
858  * pg_largeobject_metadata tables, respectively.
859  *
860  * However, we do need to collect blob information as there may be
861  * comments or other information on blobs that we do need to dump out.
862  */
863  if (dopt.outputBlobs || dopt.binary_upgrade)
864  getBlobs(fout);
865 
866  /*
867  * Collect dependency data to assist in ordering the objects.
868  */
869  getDependencies(fout);
870 
871  /* Lastly, create dummy objects to represent the section boundaries */
872  boundaryObjs = createBoundaryObjects();
873 
874  /* Get pointers to all the known DumpableObjects */
875  getDumpableObjects(&dobjs, &numObjs);
876 
877  /*
878  * Add dummy dependencies to enforce the dump section ordering.
879  */
880  addBoundaryDependencies(dobjs, numObjs, boundaryObjs);
881 
882  /*
883  * Sort the objects into a safe dump order (no forward references).
884  *
885  * We rely on dependency information to help us determine a safe order, so
886  * the initial sort is mostly for cosmetic purposes: we sort by name to
887  * ensure that logically identical schemas will dump identically.
888  */
889  sortDumpableObjectsByTypeName(dobjs, numObjs);
890 
891  sortDumpableObjects(dobjs, numObjs,
892  boundaryObjs[0].dumpId, boundaryObjs[1].dumpId);
893 
894  /*
895  * Create archive TOC entries for all the objects to be dumped, in a safe
896  * order.
897  */
898 
899  /* First the special ENCODING, STDSTRINGS, and SEARCHPATH entries. */
900  dumpEncoding(fout);
901  dumpStdStrings(fout);
902  dumpSearchPath(fout);
903 
904  /* The database items are always next, unless we don't want them at all */
905  if (dopt.outputCreateDB)
906  dumpDatabase(fout);
907 
908  /* Now the rearrangeable objects. */
909  for (i = 0; i < numObjs; i++)
910  dumpDumpableObject(fout, dobjs[i]);
911 
912  /*
913  * Set up options info to ensure we dump what we want.
914  */
915  ropt = NewRestoreOptions();
916  ropt->filename = filename;
917 
918  /* if you change this list, see dumpOptionsFromRestoreOptions */
919  ropt->dropSchema = dopt.outputClean;
920  ropt->dataOnly = dopt.dataOnly;
921  ropt->schemaOnly = dopt.schemaOnly;
922  ropt->if_exists = dopt.if_exists;
923  ropt->column_inserts = dopt.column_inserts;
924  ropt->dumpSections = dopt.dumpSections;
925  ropt->aclsSkip = dopt.aclsSkip;
926  ropt->superuser = dopt.outputSuperuser;
927  ropt->createDB = dopt.outputCreateDB;
928  ropt->noOwner = dopt.outputNoOwner;
929  ropt->noTablespace = dopt.outputNoTablespaces;
930  ropt->disable_triggers = dopt.disable_triggers;
931  ropt->use_setsessauth = dopt.use_setsessauth;
932  ropt->disable_dollar_quoting = dopt.disable_dollar_quoting;
933  ropt->dump_inserts = dopt.dump_inserts;
934  ropt->no_comments = dopt.no_comments;
935  ropt->no_publications = dopt.no_publications;
936  ropt->no_security_labels = dopt.no_security_labels;
937  ropt->no_subscriptions = dopt.no_subscriptions;
938  ropt->lockWaitTimeout = dopt.lockWaitTimeout;
939  ropt->include_everything = dopt.include_everything;
940  ropt->enable_row_security = dopt.enable_row_security;
941  ropt->sequence_data = dopt.sequence_data;
942  ropt->binary_upgrade = dopt.binary_upgrade;
943 
944  if (compressLevel == -1)
945  ropt->compression = 0;
946  else
947  ropt->compression = compressLevel;
948 
949  ropt->suppressDumpWarnings = true; /* We've already shown them */
950 
951  SetArchiveOptions(fout, &dopt, ropt);
952 
953  /* Mark which entries should be output */
954  ProcessArchiveRestoreOptions(fout);
955 
956  /*
957  * The archive's TOC entries are now marked as to which ones will actually
958  * be output, so we can set up their dependency lists properly. This isn't
959  * necessary for plain-text output, though.
960  */
961  if (!plainText)
962  BuildArchiveDependencies(fout);
963 
964  /*
965  * And finally we can do the actual output.
966  *
967  * Note: for non-plain-text output formats, the output file is written
968  * inside CloseArchive(). This is, um, bizarre; but not worth changing
969  * right now.
970  */
971  if (plainText)
972  RestoreArchive(fout);
973 
974  CloseArchive(fout);
975 
976  exit_nicely(0);
977 }
978 
979 
980 static void
981 help(const char *progname)
982 {
983  printf(_("%s dumps a database as a text file or to other formats.\n\n"), progname);
984  printf(_("Usage:\n"));
985  printf(_(" %s [OPTION]... [DBNAME]\n"), progname);
986 
987  printf(_("\nGeneral options:\n"));
988  printf(_(" -f, --file=FILENAME output file or directory name\n"));
989  printf(_(" -F, --format=c|d|t|p output file format (custom, directory, tar,\n"
990  " plain text (default))\n"));
991  printf(_(" -j, --jobs=NUM use this many parallel jobs to dump\n"));
992  printf(_(" -v, --verbose verbose mode\n"));
993  printf(_(" -V, --version output version information, then exit\n"));
994  printf(_(" -Z, --compress=0-9 compression level for compressed formats\n"));
995  printf(_(" --lock-wait-timeout=TIMEOUT fail after waiting TIMEOUT for a table lock\n"));
996  printf(_(" --no-sync do not wait for changes to be written safely to disk\n"));
997  printf(_(" -?, --help show this help, then exit\n"));
998 
999  printf(_("\nOptions controlling the output content:\n"));
1000  printf(_(" -a, --data-only dump only the data, not the schema\n"));
1001  printf(_(" -b, --blobs include large objects in dump\n"));
1002  printf(_(" -B, --no-blobs exclude large objects in dump\n"));
1003  printf(_(" -c, --clean clean (drop) database objects before recreating\n"));
1004  printf(_(" -C, --create include commands to create database in dump\n"));
1005  printf(_(" -E, --encoding=ENCODING dump the data in encoding ENCODING\n"));
1006  printf(_(" -n, --schema=SCHEMA dump the named schema(s) only\n"));
1007  printf(_(" -N, --exclude-schema=SCHEMA do NOT dump the named schema(s)\n"));
1008  printf(_(" -O, --no-owner skip restoration of object ownership in\n"
1009  " plain-text format\n"));
1010  printf(_(" -s, --schema-only dump only the schema, no data\n"));
1011  printf(_(" -S, --superuser=NAME superuser user name to use in plain-text format\n"));
1012  printf(_(" -t, --table=TABLE dump the named table(s) only\n"));
1013  printf(_(" -T, --exclude-table=TABLE do NOT dump the named table(s)\n"));
1014  printf(_(" -x, --no-privileges do not dump privileges (grant/revoke)\n"));
1015  printf(_(" --binary-upgrade for use by upgrade utilities only\n"));
1016  printf(_(" --column-inserts dump data as INSERT commands with column names\n"));
1017  printf(_(" --disable-dollar-quoting disable dollar quoting, use SQL standard quoting\n"));
1018  printf(_(" --disable-triggers disable triggers during data-only restore\n"));
1019  printf(_(" --enable-row-security enable row security (dump only content user has\n"
1020  " access to)\n"));
1021  printf(_(" --exclude-table-data=TABLE do NOT dump data for the named table(s)\n"));
1022  printf(_(" --extra-float-digits=NUM override default setting for extra_float_digits\n"));
1023  printf(_(" --if-exists use IF EXISTS when dropping objects\n"));
1024  printf(_(" --inserts dump data as INSERT commands, rather than COPY\n"));
1025  printf(_(" --load-via-partition-root load partitions via the root table\n"));
1026  printf(_(" --no-comments do not dump comments\n"));
1027  printf(_(" --no-publications do not dump publications\n"));
1028  printf(_(" --no-security-labels do not dump security label assignments\n"));
1029  printf(_(" --no-subscriptions do not dump subscriptions\n"));
1030  printf(_(" --no-synchronized-snapshots do not use synchronized snapshots in parallel jobs\n"));
1031  printf(_(" --no-tablespaces do not dump tablespace assignments\n"));
1032  printf(_(" --no-unlogged-table-data do not dump unlogged table data\n"));
1033  printf(_(" --on-conflict-do-nothing add ON CONFLICT DO NOTHING to INSERT commands\n"));
1034  printf(_(" --quote-all-identifiers quote all identifiers, even if not key words\n"));
1035  printf(_(" --rows-per-insert=NROWS number of rows per INSERT; implies --inserts\n"));
1036  printf(_(" --section=SECTION dump named section (pre-data, data, or post-data)\n"));
1037  printf(_(" --serializable-deferrable wait until the dump can run without anomalies\n"));
1038  printf(_(" --snapshot=SNAPSHOT use given snapshot for the dump\n"));
1039  printf(_(" --strict-names require table and/or schema include patterns to\n"
1040  " match at least one entity each\n"));
1041  printf(_(" --use-set-session-authorization\n"
1042  " use SET SESSION AUTHORIZATION commands instead of\n"
1043  " ALTER OWNER commands to set ownership\n"));
1044 
1045  printf(_("\nConnection options:\n"));
1046  printf(_(" -d, --dbname=DBNAME database to dump\n"));
1047  printf(_(" -h, --host=HOSTNAME database server host or socket directory\n"));
1048  printf(_(" -p, --port=PORT database server port number\n"));
1049  printf(_(" -U, --username=NAME connect as specified database user\n"));
1050  printf(_(" -w, --no-password never prompt for password\n"));
1051  printf(_(" -W, --password force password prompt (should happen automatically)\n"));
1052  printf(_(" --role=ROLENAME do SET ROLE before dump\n"));
1053 
1054  printf(_("\nIf no database name is supplied, then the PGDATABASE environment\n"
1055  "variable value is used.\n\n"));
1056  printf(_("Report bugs to <pgsql-bugs@lists.postgresql.org>.\n"));
1057 }
1058 
1059 static void
1060 setup_connection(Archive *AH, const char *dumpencoding,
1061  const char *dumpsnapshot, char *use_role)
1062 {
1063  DumpOptions *dopt = AH->dopt;
1064  PGconn *conn = GetConnection(AH);
1065  const char *std_strings;
1066 
1067  PQclear(ExecuteSqlQueryForSingleRow(AH, ALWAYS_SECURE_SEARCH_PATH_SQL));
1068 
1069  /*
1070  * Set the client encoding if requested.
1071  */
1072  if (dumpencoding)
1073  {
1074  if (PQsetClientEncoding(conn, dumpencoding) < 0)
1075  exit_horribly(NULL, "invalid client encoding \"%s\" specified\n",
1076  dumpencoding);
1077  }
1078 
1079  /*
1080  * Get the active encoding and the standard_conforming_strings setting, so
1081  * we know how to escape strings.
1082  */
1083  AH->encoding = PQclientEncoding(conn);
1084 
1085  std_strings = PQparameterStatus(conn, "standard_conforming_strings");
1086  AH->std_strings = (std_strings && strcmp(std_strings, "on") == 0);
1087 
1088  /*
1089  * Set the role if requested. In a parallel dump worker, we'll be passed
1090  * use_role == NULL, but AH->use_role is already set (if user specified it
1091  * originally) and we should use that.
1092  */
1093  if (!use_role && AH->use_role)
1094  use_role = AH->use_role;
1095 
1096  /* Set the role if requested */
1097  if (use_role && AH->remoteVersion >= 80100)
1098  {
1099  PQExpBuffer query = createPQExpBuffer();
1100 
1101  appendPQExpBuffer(query, "SET ROLE %s", fmtId(use_role));
1102  ExecuteSqlStatement(AH, query->data);
1103  destroyPQExpBuffer(query);
1104 
1105  /* save it for possible later use by parallel workers */
1106  if (!AH->use_role)
1107  AH->use_role = pg_strdup(use_role);
1108  }
1109 
1110  /* Set the datestyle to ISO to ensure the dump's portability */
1111  ExecuteSqlStatement(AH, "SET DATESTYLE = ISO");
1112 
1113  /* Likewise, avoid using sql_standard intervalstyle */
1114  if (AH->remoteVersion >= 80400)
1115  ExecuteSqlStatement(AH, "SET INTERVALSTYLE = POSTGRES");
1116 
1117  /*
1118  * Use an explicitly specified extra_float_digits if it has been
1119  * provided. Otherwise, set extra_float_digits so that we can dump float
1120  * data exactly (given correctly implemented float I/O code, anyway).
1121  */
1122  if (have_extra_float_digits)
1123  {
1124  PQExpBuffer q = createPQExpBuffer();
1125  appendPQExpBuffer(q, "SET extra_float_digits TO %d",
1126  extra_float_digits);
1127  ExecuteSqlStatement(AH, q->data);
1128  destroyPQExpBuffer(q);
1129  }
1130  else if (AH->remoteVersion >= 90000)
1131  ExecuteSqlStatement(AH, "SET extra_float_digits TO 3");
1132  else
1133  ExecuteSqlStatement(AH, "SET extra_float_digits TO 2");
1134 
1135  /*
1136  * If synchronized scanning is supported, disable it, to prevent
1137  * unpredictable changes in row ordering across a dump and reload.
1138  */
1139  if (AH->remoteVersion >= 80300)
1140  ExecuteSqlStatement(AH, "SET synchronize_seqscans TO off");
1141 
1142  /*
1143  * Disable timeouts if supported.
1144  */
1145  ExecuteSqlStatement(AH, "SET statement_timeout = 0");
1146  if (AH->remoteVersion >= 90300)
1147  ExecuteSqlStatement(AH, "SET lock_timeout = 0");
1148  if (AH->remoteVersion >= 90600)
1149  ExecuteSqlStatement(AH, "SET idle_in_transaction_session_timeout = 0");
1150 
1151  /*
1152  * Quote all identifiers, if requested.
1153  */
1154  if (quote_all_identifiers && AH->remoteVersion >= 90100)
1155  ExecuteSqlStatement(AH, "SET quote_all_identifiers = true");
1156 
1157  /*
1158  * Adjust row-security mode, if supported.
1159  */
1160  if (AH->remoteVersion >= 90500)
1161  {
1162  if (dopt->enable_row_security)
1163  ExecuteSqlStatement(AH, "SET row_security = on");
1164  else
1165  ExecuteSqlStatement(AH, "SET row_security = off");
1166  }
1167 
1168  /*
1169  * Start transaction-snapshot mode transaction to dump consistent data.
1170  */
1171  ExecuteSqlStatement(AH, "BEGIN");
1172  if (AH->remoteVersion >= 90100)
1173  {
1174  /*
1175  * To support the combination of serializable_deferrable with the jobs
1176  * option we use REPEATABLE READ for the worker connections that are
1177  * passed a snapshot. As long as the snapshot is acquired in a
1178  * SERIALIZABLE, READ ONLY, DEFERRABLE transaction, its use within a
1179  * REPEATABLE READ transaction provides the appropriate integrity
1180  * guarantees. This is a kluge, but safe for back-patching.
1181  */
1182  if (dopt->serializable_deferrable && AH->sync_snapshot_id == NULL)
1184  "SET TRANSACTION ISOLATION LEVEL "
1185  "SERIALIZABLE, READ ONLY, DEFERRABLE");
1186  else
1188  "SET TRANSACTION ISOLATION LEVEL "
1189  "REPEATABLE READ, READ ONLY");
1190  }
1191  else
1192  {
1194  "SET TRANSACTION ISOLATION LEVEL "
1195  "SERIALIZABLE, READ ONLY");
1196  }
1197 
1198  /*
1199  * If user specified a snapshot to use, select that. In a parallel dump
1200  * worker, we'll be passed dumpsnapshot == NULL, but AH->sync_snapshot_id
1201  * is already set (if the server can handle it) and we should use that.
1202  */
1203  if (dumpsnapshot)
1204  AH->sync_snapshot_id = pg_strdup(dumpsnapshot);
1205 
1206  if (AH->sync_snapshot_id)
1207  {
1208  PQExpBuffer query = createPQExpBuffer();
1209 
1210  appendPQExpBuffer(query, "SET TRANSACTION SNAPSHOT ");
1211  appendStringLiteralConn(query, AH->sync_snapshot_id, conn);
1212  ExecuteSqlStatement(AH, query->data);
1213  destroyPQExpBuffer(query);
1214  }
1215  else if (AH->numWorkers > 1 &&
1216  AH->remoteVersion >= 90200 &&
1217  !dopt->no_synchronized_snapshots)
1218  {
1219  if (AH->isStandby && AH->remoteVersion < 100000)
1220  exit_horribly(NULL,
1221  "Synchronized snapshots on standby servers are not supported by this server version.\n"
1222  "Run with --no-synchronized-snapshots instead if you do not need\n"
1223  "synchronized snapshots.\n");
1224 
1225 
1226  AH->sync_snapshot_id = get_synchronized_snapshot(AH);
1227  }
1228 }
1229 
1230 /* Set up connection for a parallel worker process */
1231 static void
1232 setupDumpWorker(Archive *AH)
1233 {
1234  /*
1235  * We want to re-select all the same values the master connection is
1236  * using. We'll have inherited directly-usable values in
1237  * AH->sync_snapshot_id and AH->use_role, but we need to translate the
1238  * inherited encoding value back to a string to pass to setup_connection.
1239  */
1240  setup_connection(AH,
1241  pg_encoding_to_char(AH->encoding),
1242  NULL,
1243  NULL);
1244 }
1245 
1246 static char *
1247 get_synchronized_snapshot(Archive *fout)
1248 {
1249  char *query = "SELECT pg_catalog.pg_export_snapshot()";
1250  char *result;
1251  PGresult *res;
1252 
1253  res = ExecuteSqlQueryForSingleRow(fout, query);
1254  result = pg_strdup(PQgetvalue(res, 0, 0));
1255  PQclear(res);
1256 
1257  return result;
1258 }
1259 
1260 static ArchiveFormat
1261 parseArchiveFormat(const char *format, ArchiveMode *mode)
1262 {
1263  ArchiveFormat archiveFormat;
1264 
1265  *mode = archModeWrite;
1266 
1267  if (pg_strcasecmp(format, "a") == 0 || pg_strcasecmp(format, "append") == 0)
1268  {
1269  /* This is used by pg_dumpall, and is not documented */
1270  archiveFormat = archNull;
1271  *mode = archModeAppend;
1272  }
1273  else if (pg_strcasecmp(format, "c") == 0)
1274  archiveFormat = archCustom;
1275  else if (pg_strcasecmp(format, "custom") == 0)
1276  archiveFormat = archCustom;
1277  else if (pg_strcasecmp(format, "d") == 0)
1278  archiveFormat = archDirectory;
1279  else if (pg_strcasecmp(format, "directory") == 0)
1280  archiveFormat = archDirectory;
1281  else if (pg_strcasecmp(format, "p") == 0)
1282  archiveFormat = archNull;
1283  else if (pg_strcasecmp(format, "plain") == 0)
1284  archiveFormat = archNull;
1285  else if (pg_strcasecmp(format, "t") == 0)
1286  archiveFormat = archTar;
1287  else if (pg_strcasecmp(format, "tar") == 0)
1288  archiveFormat = archTar;
1289  else
1290  exit_horribly(NULL, "invalid output format \"%s\" specified\n", format);
1291  return archiveFormat;
1292 }
1293 
1294 /*
1295  * Find the OIDs of all schemas matching the given list of patterns,
1296  * and append them to the given OID list.
1297  */
1298 static void
1299 expand_schema_name_patterns(Archive *fout,
1300  SimpleStringList *patterns,
1301  SimpleOidList *oids,
1302  bool strict_names)
1303 {
1304  PQExpBuffer query;
1305  PGresult *res;
1306  SimpleStringListCell *cell;
1307  int i;
1308 
1309  if (patterns->head == NULL)
1310  return; /* nothing to do */
1311 
1312  query = createPQExpBuffer();
1313 
1314  /*
1315  * The loop below runs multiple SELECTs, which might sometimes result in
1316  * duplicate entries in the OID list, but we don't care.
1317  */
1318 
1319  for (cell = patterns->head; cell; cell = cell->next)
1320  {
1321  appendPQExpBuffer(query,
1322  "SELECT oid FROM pg_catalog.pg_namespace n\n");
1323  processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1324  false, NULL, "n.nspname", NULL, NULL);
1325 
1326  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1327  if (strict_names && PQntuples(res) == 0)
1328  exit_horribly(NULL, "no matching schemas were found for pattern \"%s\"\n", cell->val);
1329 
1330  for (i = 0; i < PQntuples(res); i++)
1331  {
1332  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1333  }
1334 
1335  PQclear(res);
1336  resetPQExpBuffer(query);
1337  }
1338 
1339  destroyPQExpBuffer(query);
1340 }
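/*
 * Illustrative example (editorial note): for a switch such as -n 'foo*',
 * processSQLNamePattern() converts the shell-style pattern into an anchored
 * regular expression, so the query built above ends up looking roughly like
 *     SELECT oid FROM pg_catalog.pg_namespace n
 *     WHERE n.nspname ~ '^(foo.*)$'
 * and each matching namespace OID is appended to the caller's OID list.
 */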
1341 
1342 /*
1343  * Find the OIDs of all tables matching the given list of patterns,
1344  * and append them to the given OID list. See also expand_dbname_patterns()
1345  * in pg_dumpall.c
1346  */
1347 static void
1348 expand_table_name_patterns(Archive *fout,
1349  SimpleStringList *patterns, SimpleOidList *oids,
1350  bool strict_names)
1351 {
1352  PQExpBuffer query;
1353  PGresult *res;
1354  SimpleStringListCell *cell;
1355  int i;
1356 
1357  if (patterns->head == NULL)
1358  return; /* nothing to do */
1359 
1360  query = createPQExpBuffer();
1361 
1362  /*
1363  * this might sometimes result in duplicate entries in the OID list, but
1364  * we don't care.
1365  */
1366 
1367  for (cell = patterns->head; cell; cell = cell->next)
1368  {
1369  /*
1370  * Query must remain ABSOLUTELY devoid of unqualified names. This
1371  * would be unnecessary given a pg_table_is_visible() variant taking a
1372  * search_path argument.
1373  */
1374  appendPQExpBuffer(query,
1375  "SELECT c.oid"
1376  "\nFROM pg_catalog.pg_class c"
1377  "\n LEFT JOIN pg_catalog.pg_namespace n"
1378  "\n ON n.oid OPERATOR(pg_catalog.=) c.relnamespace"
1379  "\nWHERE c.relkind OPERATOR(pg_catalog.=) ANY"
1380  "\n (array['%c', '%c', '%c', '%c', '%c', '%c'])\n",
1381  RELKIND_RELATION, RELKIND_SEQUENCE, RELKIND_VIEW,
1382  RELKIND_MATVIEW, RELKIND_FOREIGN_TABLE,
1383  RELKIND_PARTITIONED_TABLE);
1384  processSQLNamePattern(GetConnection(fout), query, cell->val, true,
1385  false, "n.nspname", "c.relname", NULL,
1386  "pg_catalog.pg_table_is_visible(c.oid)");
1387 
1388  ExecuteSqlStatement(fout, "RESET search_path");
1389  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1390  PQclear(ExecuteSqlQueryForSingleRow(fout,
1391  ALWAYS_SECURE_SEARCH_PATH_SQL));
1392  if (strict_names && PQntuples(res) == 0)
1393  exit_horribly(NULL, "no matching tables were found for pattern \"%s\"\n", cell->val);
1394 
1395  for (i = 0; i < PQntuples(res); i++)
1396  {
1397  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1398  }
1399 
1400  PQclear(res);
1401  resetPQExpBuffer(query);
1402  }
1403 
1404  destroyPQExpBuffer(query);
1405 }
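/*
 * Editorial note: search_path is temporarily RESET above so that
 * pg_table_is_visible() reflects the user's normal search_path rather than
 * the restricted one pg_dump otherwise runs under; the secure search_path is
 * reinstated immediately after the pattern query completes.
 */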
1406 
1407 /*
1408  * checkExtensionMembership
1409  * Determine whether object is an extension member, and if so,
1410  * record an appropriate dependency and set the object's dump flag.
1411  *
1412  * It's important to call this for each object that could be an extension
1413  * member. Generally, we integrate this with determining the object's
1414  * to-be-dumped-ness, since extension membership overrides other rules for that.
1415  *
1416  * Returns true if object is an extension member, else false.
1417  */
1418 static bool
1419 checkExtensionMembership(DumpableObject *dobj, Archive *fout)
1420 {
1421  ExtensionInfo *ext = findOwningExtension(dobj->catId);
1422 
1423  if (ext == NULL)
1424  return false;
1425 
1426  dobj->ext_member = true;
1427 
1428  /* Record dependency so that getDependencies needn't deal with that */
1429  addObjectDependency(dobj, ext->dobj.dumpId);
1430 
1431  /*
1432  * In 9.6 and above, mark the member object to have any non-initial ACL,
1433  * policies, and security labels dumped.
1434  *
1435  * Note that any initial ACLs (see pg_init_privs) will be removed when we
1436  * extract the information about the object. We don't provide support for
1437  * initial policies and security labels and it seems unlikely for those to
1438  * ever exist, but we may have to revisit this later.
1439  *
1440  * Prior to 9.6, we do not include any extension member components.
1441  *
1442  * In binary upgrades, we still dump all components of the members
1443  * individually, since the idea is to exactly reproduce the database
1444  * contents rather than replace the extension contents with something
1445  * different.
1446  */
1447  if (fout->dopt->binary_upgrade)
1448  dobj->dump = ext->dobj.dump;
1449  else
1450  {
1451  if (fout->remoteVersion < 90600)
1452  dobj->dump = DUMP_COMPONENT_NONE;
1453  else
1454  dobj->dump = ext->dobj.dump_contains & (DUMP_COMPONENT_ACL |
1455  DUMP_COMPONENT_SECLABEL |
1456  DUMP_COMPONENT_POLICY);
1457  }
1458 
1459  return true;
1460 }
1461 
1462 /*
1463  * selectDumpableNamespace: policy-setting subroutine
1464  * Mark a namespace as to be dumped or not
1465  */
1466 static void
1467 selectDumpableNamespace(NamespaceInfo *nsinfo, Archive *fout)
1468 {
1469  /*
1470  * If specific tables are being dumped, do not dump any complete
1471  * namespaces. If specific namespaces are being dumped, dump just those
1472  * namespaces. Otherwise, dump all non-system namespaces.
1473  */
1474  if (table_include_oids.head != NULL)
1475  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1476  else if (schema_include_oids.head != NULL)
1477  nsinfo->dobj.dump_contains = nsinfo->dobj.dump =
1478  simple_oid_list_member(&schema_include_oids,
1479  nsinfo->dobj.catId.oid) ?
1480  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1481  else if (fout->remoteVersion >= 90600 &&
1482  strcmp(nsinfo->dobj.name, "pg_catalog") == 0)
1483  {
1484  /*
1485  * In 9.6 and above, we dump out any ACLs defined in pg_catalog, if
1486  * they are interesting (and not the original ACLs which were set at
1487  * initdb time, see pg_init_privs).
1488  */
1489  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
1490  }
1491  else if (strncmp(nsinfo->dobj.name, "pg_", 3) == 0 ||
1492  strcmp(nsinfo->dobj.name, "information_schema") == 0)
1493  {
1494  /* Other system schemas don't get dumped */
1495  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1496  }
1497  else if (strcmp(nsinfo->dobj.name, "public") == 0)
1498  {
1499  /*
1500  * The public schema is a strange beast that sits in a sort of
1501  * no-mans-land between being a system object and a user object. We
1502  * don't want to dump creation or comment commands for it, because
1503  * that complicates matters for non-superuser use of pg_dump. But we
1504  * should dump any ACL changes that have occurred for it, and of
1505  * course we should dump contained objects.
1506  */
1507  nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
1508  nsinfo->dobj.dump_contains = DUMP_COMPONENT_ALL;
1509  }
1510  else
1511  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ALL;
1512 
1513  /*
1514  * In any case, a namespace can be excluded by an exclusion switch
1515  */
1516  if (nsinfo->dobj.dump_contains &&
1517  simple_oid_list_member(&schema_exclude_oids,
1518  nsinfo->dobj.catId.oid))
1519  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1520 
1521  /*
1522  * If the schema belongs to an extension, allow extension membership to
1523  * override the dump decision for the schema itself. However, this does
1524  * not change dump_contains, so this won't change what we do with objects
1525  * within the schema. (If they belong to the extension, they'll get
1526  * suppressed by it, otherwise not.)
1527  */
1528  (void) checkExtensionMembership(&nsinfo->dobj, fout);
1529 }
1530 
1531 /*
1532  * selectDumpableTable: policy-setting subroutine
1533  * Mark a table as to be dumped or not
1534  */
1535 static void
1536 selectDumpableTable(TableInfo *tbinfo, Archive *fout)
1537 {
1538  if (checkExtensionMembership(&tbinfo->dobj, fout))
1539  return; /* extension membership overrides all else */
1540 
1541  /*
1542  * If specific tables are being dumped, dump just those tables; else, dump
1543  * according to the parent namespace's dump flag.
1544  */
1545  if (table_include_oids.head != NULL)
1546  tbinfo->dobj.dump = simple_oid_list_member(&table_include_oids,
1547  tbinfo->dobj.catId.oid) ?
1548  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1549  else
1550  tbinfo->dobj.dump = tbinfo->dobj.namespace->dobj.dump_contains;
1551 
1552  /*
1553  * In any case, a table can be excluded by an exclusion switch
1554  */
1555  if (tbinfo->dobj.dump &&
1556  simple_oid_list_member(&table_exclude_oids,
1557  tbinfo->dobj.catId.oid))
1558  tbinfo->dobj.dump = DUMP_COMPONENT_NONE;
1559 }
1560 
1561 /*
1562  * selectDumpableType: policy-setting subroutine
1563  * Mark a type as to be dumped or not
1564  *
1565  * If it's a table's rowtype or an autogenerated array type, we also apply a
1566  * special type code to facilitate sorting into the desired order. (We don't
1567  * want to consider those to be ordinary types because that would bring tables
1568  * up into the datatype part of the dump order.) We still set the object's
1569  * dump flag; that's not going to cause the dummy type to be dumped, but we
1570  * need it so that casts involving such types will be dumped correctly -- see
1571  * dumpCast. This means the flag should be set the same as for the underlying
1572  * object (the table or base type).
1573  */
1574 static void
1575 selectDumpableType(TypeInfo *tyinfo, Archive *fout)
1576 {
1577  /* skip complex types, except for standalone composite types */
1578  if (OidIsValid(tyinfo->typrelid) &&
1579  tyinfo->typrelkind != RELKIND_COMPOSITE_TYPE)
1580  {
1581  TableInfo *tytable = findTableByOid(tyinfo->typrelid);
1582 
1583  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1584  if (tytable != NULL)
1585  tyinfo->dobj.dump = tytable->dobj.dump;
1586  else
1587  tyinfo->dobj.dump = DUMP_COMPONENT_NONE;
1588  return;
1589  }
1590 
1591  /* skip auto-generated array types */
1592  if (tyinfo->isArray)
1593  {
1594  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1595 
1596  /*
1597  * Fall through to set the dump flag; we assume that the subsequent
1598  * rules will do the same thing as they would for the array's base
1599  * type. (We cannot reliably look up the base type here, since
1600  * getTypes may not have processed it yet.)
1601  */
1602  }
1603 
1604  if (checkExtensionMembership(&tyinfo->dobj, fout))
1605  return; /* extension membership overrides all else */
1606 
1607  /* Dump based on whether the contents of the namespace are being dumped */
1608  tyinfo->dobj.dump = tyinfo->dobj.namespace->dobj.dump_contains;
1609 }
1610 
1611 /*
1612  * selectDumpableDefaultACL: policy-setting subroutine
1613  * Mark a default ACL as to be dumped or not
1614  *
1615  * For per-schema default ACLs, dump if the schema is to be dumped.
1616  * Otherwise dump if we are dumping "everything". Note that dataOnly
1617  * and aclsSkip are checked separately.
1618  */
1619 static void
1620 selectDumpableDefaultACL(DefaultACLInfo *dinfo, DumpOptions *dopt)
1621 {
1622  /* Default ACLs can't be extension members */
1623 
1624  if (dinfo->dobj.namespace)
1625  /* default ACLs are considered part of the namespace */
1626  dinfo->dobj.dump = dinfo->dobj.namespace->dobj.dump_contains;
1627  else
1628  dinfo->dobj.dump = dopt->include_everything ?
1629  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1630 }
1631 
1632 /*
1633  * selectDumpableCast: policy-setting subroutine
1634  * Mark a cast as to be dumped or not
1635  *
1636  * Casts do not belong to any particular namespace (since they haven't got
1637  * names), nor do they have identifiable owners. To distinguish user-defined
1638  * casts from built-in ones, we must resort to checking whether the cast's
1639  * OID is in the range reserved for initdb.
1640  */
1641 static void
1642 selectDumpableCast(CastInfo *cast, Archive *fout)
1643 {
1644  if (checkExtensionMembership(&cast->dobj, fout))
1645  return; /* extension membership overrides all else */
1646 
1647  /*
1648  * This would be DUMP_COMPONENT_ACL for from-initdb casts, but they do not
1649  * support ACLs currently.
1650  */
1651  if (cast->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1652  cast->dobj.dump = DUMP_COMPONENT_NONE;
1653  else
1654  cast->dobj.dump = fout->dopt->include_everything ?
1655  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1656 }
1657 
1658 /*
1659  * selectDumpableProcLang: policy-setting subroutine
1660  * Mark a procedural language as to be dumped or not
1661  *
1662  * Procedural languages do not belong to any particular namespace. To
1663  * identify built-in languages, we must resort to checking whether the
1664  * language's OID is in the range reserved for initdb.
1665  */
1666 static void
1667 selectDumpableProcLang(ProcLangInfo *plang, Archive *fout)
1668 {
1669  if (checkExtensionMembership(&plang->dobj, fout))
1670  return; /* extension membership overrides all else */
1671 
1672  /*
1673  * Only include procedural languages when we are dumping everything.
1674  *
1675  * For from-initdb procedural languages, only include ACLs, as we do for
1676  * the pg_catalog namespace. We need this because procedural languages do
1677  * not live in any namespace.
1678  */
1679  if (!fout->dopt->include_everything)
1680  plang->dobj.dump = DUMP_COMPONENT_NONE;
1681  else
1682  {
1683  if (plang->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1684  plang->dobj.dump = fout->remoteVersion < 90600 ?
1685  DUMP_COMPONENT_NONE : DUMP_COMPONENT_ACL;
1686  else
1687  plang->dobj.dump = DUMP_COMPONENT_ALL;
1688  }
1689 }
1690 
1691 /*
1692  * selectDumpableAccessMethod: policy-setting subroutine
1693  * Mark an access method as to be dumped or not
1694  *
1695  * Access methods do not belong to any particular namespace. To identify
1696  * built-in access methods, we must resort to checking whether the
1697  * method's OID is in the range reserved for initdb.
1698  */
1699 static void
1700 selectDumpableAccessMethod(AccessMethodInfo *method, Archive *fout)
1701 {
1702  if (checkExtensionMembership(&method->dobj, fout))
1703  return; /* extension membership overrides all else */
1704 
1705  /*
1706  * This would be DUMP_COMPONENT_ACL for from-initdb access methods, but
1707  * they do not support ACLs currently.
1708  */
1709  if (method->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1710  method->dobj.dump = DUMP_COMPONENT_NONE;
1711  else
1712  method->dobj.dump = fout->dopt->include_everything ?
1713  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1714 }
1715 
1716 /*
1717  * selectDumpableExtension: policy-setting subroutine
1718  * Mark an extension as to be dumped or not
1719  *
1720  * Built-in extensions should be skipped except for checking ACLs, since we
1721  * assume those will already be installed in the target database. We identify
1722  * such extensions by their having OIDs in the range reserved for initdb.
1723  * We dump all user-added extensions by default, or none of them if
1724  * include_everything is false (i.e., a --schema or --table switch was given).
1725  */
1726 static void
1727 selectDumpableExtension(ExtensionInfo *extinfo, DumpOptions *dopt)
1728 {
1729  /*
1730  * Use DUMP_COMPONENT_ACL for built-in extensions, to allow users to
1731  * change permissions on their member objects, if they wish to, and have
1732  * those changes preserved.
1733  */
1734  if (extinfo->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1735  extinfo->dobj.dump = extinfo->dobj.dump_contains = DUMP_COMPONENT_ACL;
1736  else
1737  extinfo->dobj.dump = extinfo->dobj.dump_contains =
1738  dopt->include_everything ? DUMP_COMPONENT_ALL :
1739  DUMP_COMPONENT_NONE;
1740 }
1741 
1742 /*
1743  * selectDumpablePublicationTable: policy-setting subroutine
1744  * Mark a publication table as to be dumped or not
1745  *
1746  * Publication tables have schemas, but those are ignored in decision making,
1747  * because publications are only dumped when we are dumping everything.
1748  */
1749 static void
1750 selectDumpablePublicationTable(DumpableObject *dobj, Archive *fout)
1751 {
1752  if (checkExtensionMembership(dobj, fout))
1753  return; /* extension membership overrides all else */
1754 
1755  dobj->dump = fout->dopt->include_everything ?
1756  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1757 }
1758 
1759 /*
1760  * selectDumpableObject: policy-setting subroutine
1761  * Mark a generic dumpable object as to be dumped or not
1762  *
1763  * Use this only for object types without a special-case routine above.
1764  */
1765 static void
1766 selectDumpableObject(DumpableObject *dobj, Archive *fout)
1767 {
1768  if (checkExtensionMembership(dobj, fout))
1769  return; /* extension membership overrides all else */
1770 
1771  /*
1772  * Default policy is to dump if parent namespace is dumpable, or for
1773  * non-namespace-associated items, dump if we're dumping "everything".
1774  */
1775  if (dobj->namespace)
1776  dobj->dump = dobj->namespace->dobj.dump_contains;
1777  else
1778  dobj->dump = fout->dopt->include_everything ?
1779  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1780 }
1781 
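/*
 * Illustrative sketch, not part of pg_dump.c: the selectDumpable* routines
 * above only record a decision as a bitmask of DUMP_COMPONENT_* flags;
 * nothing is emitted here.  Later dump stages test the individual bits
 * before emitting anything, roughly as below (the flag names are the ones
 * used throughout this file; the emit steps are placeholders).
 */
static void
sketch_dumpNamespaceComponents(NamespaceInfo *nsinfo)
{
	if (nsinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
	{
		/* emit CREATE SCHEMA ... */
	}
	if (nsinfo->dobj.dump & DUMP_COMPONENT_ACL)
	{
		/* emit GRANT/REVOKE commands for the schema */
	}
	/* DUMP_COMPONENT_NONE has no bits set, so nothing would be emitted */
}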
1782 /*
1783  * Dump a table's contents for loading using the COPY command
1784  * - this routine is called by the Archiver when it wants the table
1785  * to be dumped.
1786  */
1787 
1788 static int
1789 dumpTableData_copy(Archive *fout, void *dcontext)
1790 {
1791  TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
1792  TableInfo *tbinfo = tdinfo->tdtable;
1793  const char *classname = tbinfo->dobj.name;
1794  PQExpBuffer q = createPQExpBuffer();
1795 
1796  /*
1797  * Note: can't use getThreadLocalPQExpBuffer() here, we're calling fmtId
1798  * which uses it already.
1799  */
1800  PQExpBuffer clistBuf = createPQExpBuffer();
1801  PGconn *conn = GetConnection(fout);
1802  PGresult *res;
1803  int ret;
1804  char *copybuf;
1805  const char *column_list;
1806 
1807  if (g_verbose)
1808  write_msg(NULL, "dumping contents of table \"%s.%s\"\n",
1809  tbinfo->dobj.namespace->dobj.name, classname);
1810 
1811  /*
1812  * Specify the column list explicitly so that we have no possibility of
1813  * retrieving data in the wrong column order. (The default column
1814  * ordering of COPY will not be what we want in certain corner cases
1815  * involving ADD COLUMN and inheritance.)
1816  */
1817  column_list = fmtCopyColumnList(tbinfo, clistBuf);
1818 
1819  if (tdinfo->filtercond)
1820  {
1821  /* Note: this syntax is only supported in 8.2 and up */
1822  appendPQExpBufferStr(q, "COPY (SELECT ");
1823  /* klugery to get rid of parens in column list */
1824  if (strlen(column_list) > 2)
1825  {
1826  appendPQExpBufferStr(q, column_list + 1);
1827  q->data[q->len - 1] = ' ';
1828  }
1829  else
1830  appendPQExpBufferStr(q, "* ");
1831  appendPQExpBuffer(q, "FROM %s %s) TO stdout;",
1832  fmtQualifiedDumpable(tbinfo),
1833  tdinfo->filtercond);
1834  }
1835  else
1836  {
1837  appendPQExpBuffer(q, "COPY %s %s TO stdout;",
1838  fmtQualifiedDumpable(tbinfo),
1839  column_list);
1840  }
1841  res = ExecuteSqlQuery(fout, q->data, PGRES_COPY_OUT);
1842  PQclear(res);
1843  destroyPQExpBuffer(clistBuf);
1844 
1845  for (;;)
1846  {
1847  ret = PQgetCopyData(conn, &copybuf, 0);
1848 
1849  if (ret < 0)
1850  break; /* done or error */
1851 
1852  if (copybuf)
1853  {
1854  WriteData(fout, copybuf, ret);
1855  PQfreemem(copybuf);
1856  }
1857 
1858  /* ----------
1859  * THROTTLE:
1860  *
1861  * There was considerable discussion in late July, 2000 regarding
1862  * slowing down pg_dump when backing up large tables. Users with both
1863  * slow & fast (multi-processor) machines experienced performance
1864  * degradation when doing a backup.
1865  *
1866  * Initial attempts based on sleeping for a number of ms for each ms
1867  * of work were deemed too complex, then a simple 'sleep in each loop'
1868  * implementation was suggested. The latter failed because the loop
1869  * was too tight. Finally, the following was implemented:
1870  *
1871  * If throttle is non-zero, then
1872  * See how long since the last sleep.
1873  * Work out how long to sleep (based on ratio).
1874  * If sleep is more than 100ms, then
1875  * sleep
1876  * reset timer
1877  * EndIf
1878  * EndIf
1879  *
1880  * where the throttle value was the number of ms to sleep per ms of
1881  * work. The calculation was done in each loop.
1882  *
1883  * Most of the hard work is done in the backend, and this solution
1884  * still did not work particularly well: on slow machines the ratio
1885  * was 50:1, on medium-paced machines it was 1:1, and on fast
1886  * multi-processor machines it had little or no effect, for reasons
1887  * that were unclear.
1888  *
1889  * Further discussion ensued, and the proposal was dropped.
1890  *
1891  * For those people who want this feature, it can be implemented using
1892  * gettimeofday in each loop, calculating the time since last sleep,
1893  * multiplying that by the sleep ratio, then if the result is more
1894  * than a preset 'minimum sleep time' (say 100ms), call the 'select'
1895  * function to sleep for a subsecond period, i.e.
1896  *
1897  * select(0, NULL, NULL, NULL, &tvi);
1898  *
1899  * This will return after the interval specified in the structure tvi.
1900  * Finally, call gettimeofday again to save the 'last sleep time'.
1901  * ----------
1902  */
1903  }
1904  archprintf(fout, "\\.\n\n\n");
1905 
1906  if (ret == -2)
1907  {
1908  /* copy data transfer failed */
1909  write_msg(NULL, "Dumping the contents of table \"%s\" failed: PQgetCopyData() failed.\n", classname);
1910  write_msg(NULL, "Error message from server: %s", PQerrorMessage(conn));
1911  write_msg(NULL, "The command was: %s\n", q->data);
1912  exit_nicely(1);
1913  }
1914 
1915  /* Check command status and return to normal libpq state */
1916  res = PQgetResult(conn);
1917  if (PQresultStatus(res) != PGRES_COMMAND_OK)
1918  {
1919  write_msg(NULL, "Dumping the contents of table \"%s\" failed: PQgetResult() failed.\n", classname);
1920  write_msg(NULL, "Error message from server: %s", PQerrorMessage(conn));
1921  write_msg(NULL, "The command was: %s\n", q->data);
1922  exit_nicely(1);
1923  }
1924  PQclear(res);
1925 
1926  /* Do this to ensure we've pumped libpq back to idle state */
1927  if (PQgetResult(conn) != NULL)
1928  write_msg(NULL, "WARNING: unexpected extra results during COPY of table \"%s\"\n",
1929  classname);
1930 
1931  destroyPQExpBuffer(q);
1932  return 1;
1933 }
1934 
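/*
 * Illustrative sketch, not part of pg_dump.c: the throttle scheme described
 * in the THROTTLE comment inside dumpTableData_copy() above.  It measures
 * the work time since the last sleep with gettimeofday(), scales it by the
 * sleep ratio, and if the result exceeds a preset minimum (100ms) sleeps
 * via select() and resets the timer.  The helper and variable names are
 * hypothetical; the headers would normally be included at the top of the
 * file.
 */
#include <sys/select.h>
#include <sys/time.h>

static struct timeval last_sleep;	/* time of the last sleep */

static void
sketch_throttle(double sleep_per_work_ms)	/* ms of sleep per ms of work */
{
	struct timeval now;
	long		work_ms;
	long		sleep_ms;

	gettimeofday(&now, NULL);
	work_ms = (now.tv_sec - last_sleep.tv_sec) * 1000L +
		(now.tv_usec - last_sleep.tv_usec) / 1000L;
	sleep_ms = (long) (work_ms * sleep_per_work_ms);

	if (sleep_ms >= 100)		/* preset 'minimum sleep time' */
	{
		struct timeval tvi;

		tvi.tv_sec = sleep_ms / 1000L;
		tvi.tv_usec = (sleep_ms % 1000L) * 1000L;
		select(0, NULL, NULL, NULL, &tvi);	/* returns after tvi elapses */
		gettimeofday(&last_sleep, NULL);	/* save the 'last sleep time' */
	}
}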
1935 /*
1936  * Dump table data using INSERT commands.
1937  *
1938  * Caution: when we restore from an archive file direct to database, the
1939  * INSERT commands emitted by this function have to be parsed by
1940  * pg_backup_db.c's ExecuteSimpleCommands(), which will not handle comments,
1941  * E'' strings, or dollar-quoted strings. So don't emit anything like that.
1942  */
1943 static int
1944 dumpTableData_insert(Archive *fout, void *dcontext)
1945 {
1946  TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
1947  TableInfo *tbinfo = tdinfo->tdtable;
1948  DumpOptions *dopt = fout->dopt;
1949  PQExpBuffer q = createPQExpBuffer();
1950  PQExpBuffer insertStmt = NULL;
1951  PGresult *res;
1952  int nfields;
1953  int rows_per_statement = dopt->dump_inserts;
1954  int rows_this_statement = 0;
1955 
1956  appendPQExpBuffer(q, "DECLARE _pg_dump_cursor CURSOR FOR "
1957  "SELECT * FROM ONLY %s",
1958  fmtQualifiedDumpable(tbinfo));
1959  if (tdinfo->filtercond)
1960  appendPQExpBuffer(q, " %s", tdinfo->filtercond);
1961 
1962  ExecuteSqlStatement(fout, q->data);
1963 
1964  while (1)
1965  {
1966  res = ExecuteSqlQuery(fout, "FETCH 100 FROM _pg_dump_cursor",
1967  PGRES_TUPLES_OK);
1968  nfields = PQnfields(res);
1969 
1970  /*
1971  * First time through, we build as much of the INSERT statement as
1972  * possible in "insertStmt", which we can then just print for each
1973  * statement. If the table happens to have zero columns then this will
1974  * be a complete statement, otherwise it will end in "VALUES" and be
1975  * ready to have the row's column values printed.
1976  */
1977  if (insertStmt == NULL)
1978  {
1979  TableInfo *targettab;
1980 
1981  insertStmt = createPQExpBuffer();
1982 
1983  /*
1984  * When load-via-partition-root is set, get the root table name
1985  * for the partition table, so that we can reload data through the
1986  * root table.
1987  */
1988  if (dopt->load_via_partition_root && tbinfo->ispartition)
1989  targettab = getRootTableInfo(tbinfo);
1990  else
1991  targettab = tbinfo;
1992 
1993  appendPQExpBuffer(insertStmt, "INSERT INTO %s ",
1994  fmtQualifiedDumpable(targettab));
1995 
1996  /* corner case for zero-column table */
1997  if (nfields == 0)
1998  {
1999  appendPQExpBufferStr(insertStmt, "DEFAULT VALUES;\n");
2000  }
2001  else
2002  {
2003  /* append the list of column names if required */
2004  if (dopt->column_inserts)
2005  {
2006  appendPQExpBufferChar(insertStmt, '(');
2007  for (int field = 0; field < nfields; field++)
2008  {
2009  if (field > 0)
2010  appendPQExpBufferStr(insertStmt, ", ");
2011  appendPQExpBufferStr(insertStmt,
2012  fmtId(PQfname(res, field)));
2013  }
2014  appendPQExpBufferStr(insertStmt, ") ");
2015  }
2016 
2017  if (tbinfo->needs_override)
2018  appendPQExpBufferStr(insertStmt, "OVERRIDING SYSTEM VALUE ");
2019 
2020  appendPQExpBufferStr(insertStmt, "VALUES");
2021  }
2022  }
2023 
2024  for (int tuple = 0; tuple < PQntuples(res); tuple++)
2025  {
2026  /* Write the INSERT if not in the middle of a multi-row INSERT. */
2027  if (rows_this_statement == 0)
2028  archputs(insertStmt->data, fout);
2029 
2030  /*
2031  * If it is a zero-column table then we've already written the
2032  * complete statement, which will mean we've disobeyed
2033  * --rows-per-insert when it's set greater than 1. We do support
2034  * a way to make this multi-row with: SELECT UNION ALL SELECT
2035  * UNION ALL ... but that's non-standard so we should avoid it
2036  * given that using INSERTs is mostly only ever needed for
2037  * cross-database exports.
2038  */
2039  if (nfields == 0)
2040  continue;
2041 
2042  /* Emit a row heading */
2043  if (rows_per_statement == 1)
2044  archputs(" (", fout);
2045  else if (rows_this_statement > 0)
2046  archputs(",\n\t(", fout);
2047  else
2048  archputs("\n\t(", fout);
2049 
2050  for (int field = 0; field < nfields; field++)
2051  {
2052  if (field > 0)
2053  archputs(", ", fout);
2054  if (PQgetisnull(res, tuple, field))
2055  {
2056  archputs("NULL", fout);
2057  continue;
2058  }
2059 
2060  /* XXX This code is partially duplicated in ruleutils.c */
2061  switch (PQftype(res, field))
2062  {
2063  case INT2OID:
2064  case INT4OID:
2065  case INT8OID:
2066  case OIDOID:
2067  case FLOAT4OID:
2068  case FLOAT8OID:
2069  case NUMERICOID:
2070  {
2071  /*
2072  * These types are printed without quotes unless
2073  * they contain values that aren't accepted by the
2074  * scanner unquoted (e.g., 'NaN'). Note that
2075  * strtod() and friends might accept NaN, so we
2076  * can't use that to test.
2077  *
2078  * In reality we only need to defend against
2079  * infinity and NaN, so we need not get too crazy
2080  * about pattern matching here.
2081  */
2082  const char *s = PQgetvalue(res, tuple, field);
2083 
2084  if (strspn(s, "0123456789 +-eE.") == strlen(s))
2085  archputs(s, fout);
2086  else
2087  archprintf(fout, "'%s'", s);
2088  }
2089  break;
2090 
2091  case BITOID:
2092  case VARBITOID:
2093  archprintf(fout, "B'%s'",
2094  PQgetvalue(res, tuple, field));
2095  break;
2096 
2097  case BOOLOID:
2098  if (strcmp(PQgetvalue(res, tuple, field), "t") == 0)
2099  archputs("true", fout);
2100  else
2101  archputs("false", fout);
2102  break;
2103 
2104  default:
2105  /* All other types are printed as string literals. */
2106  resetPQExpBuffer(q);
2107  appendStringLiteralAH(q,
2108  PQgetvalue(res, tuple, field),
2109  fout);
2110  archputs(q->data, fout);
2111  break;
2112  }
2113  }
2114 
2115  /* Terminate the row ... */
2116  archputs(")", fout);
2117 
2118  /* ... and the statement, if the target no. of rows is reached */
2119  if (++rows_this_statement >= rows_per_statement)
2120  {
2121  if (dopt->do_nothing)
2122  archputs(" ON CONFLICT DO NOTHING;\n", fout);
2123  else
2124  archputs(";\n", fout);
2125  /* Reset the row counter */
2126  rows_this_statement = 0;
2127  }
2128  }
2129 
2130  if (PQntuples(res) <= 0)
2131  {
2132  PQclear(res);
2133  break;
2134  }
2135  PQclear(res);
2136  }
2137 
2138  /* Terminate any statements that didn't make the row count. */
2139  if (rows_this_statement > 0)
2140  {
2141  if (dopt->do_nothing)
2142  archputs(" ON CONFLICT DO NOTHING;\n", fout);
2143  else
2144  archputs(";\n", fout);
2145  }
2146 
2147  archputs("\n\n", fout);
2148 
2149  ExecuteSqlStatement(fout, "CLOSE _pg_dump_cursor");
2150 
2151  destroyPQExpBuffer(q);
2152  if (insertStmt != NULL)
2153  destroyPQExpBuffer(insertStmt);
2154 
2155  return 1;
2156 }
2157 
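/*
 * Hypothetical illustration, not part of pg_dump.c: the output produced by
 * the loop above for an invented two-column table "public.t" when
 * --column-inserts is given and --rows-per-insert=2:
 *
 *	INSERT INTO public.t (id, name) VALUES
 *		(1, 'a'),
 *		(2, 'b');
 *	INSERT INTO public.t (id, name) VALUES
 *		(3, 'c');
 */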
2158 /*
2159  * getRootTableInfo:
2160  * get the root TableInfo for the given partition table.
2161  */
2162 static TableInfo *
2163 getRootTableInfo(TableInfo *tbinfo)
2164 {
2165  TableInfo *parentTbinfo;
2166 
2167  Assert(tbinfo->ispartition);
2168  Assert(tbinfo->numParents == 1);
2169 
2170  parentTbinfo = tbinfo->parents[0];
2171  while (parentTbinfo->ispartition)
2172  {
2173  Assert(parentTbinfo->numParents == 1);
2174  parentTbinfo = parentTbinfo->parents[0];
2175  }
2176 
2177  return parentTbinfo;
2178 }
2179 
2180 /*
2181  * dumpTableData -
2182  * dump the contents of a single table
2183  *
2184  * Actually, this just makes an ArchiveEntry for the table contents.
2185  */
2186 static void
2187 dumpTableData(Archive *fout, TableDataInfo *tdinfo)
2188 {
2189  DumpOptions *dopt = fout->dopt;
2190  TableInfo *tbinfo = tdinfo->tdtable;
2191  PQExpBuffer copyBuf = createPQExpBuffer();
2192  PQExpBuffer clistBuf = createPQExpBuffer();
2193  DataDumperPtr dumpFn;
2194  char *copyStmt;
2195  const char *copyFrom;
2196 
2197  if (!dopt->dump_inserts)
2198  {
2199  /* Dump/restore using COPY */
2200  dumpFn = dumpTableData_copy;
2201 
2202  /*
2203  * When load-via-partition-root is set, get the root table name for
2204  * the partition table, so that we can reload data through the root
2205  * table.
2206  */
2207  if (dopt->load_via_partition_root && tbinfo->ispartition)
2208  {
2209  TableInfo *parentTbinfo;
2210 
2211  parentTbinfo = getRootTableInfo(tbinfo);
2212  copyFrom = fmtQualifiedDumpable(parentTbinfo);
2213  }
2214  else
2215  copyFrom = fmtQualifiedDumpable(tbinfo);
2216 
2217  /* must use 2 steps here 'cause fmtId is nonreentrant */
2218  appendPQExpBuffer(copyBuf, "COPY %s ",
2219  copyFrom);
2220  appendPQExpBuffer(copyBuf, "%s FROM stdin;\n",
2221  fmtCopyColumnList(tbinfo, clistBuf));
2222  copyStmt = copyBuf->data;
2223  }
2224  else
2225  {
2226  /* Restore using INSERT */
2227  dumpFn = dumpTableData_insert;
2228  copyStmt = NULL;
2229  }
2230 
2231  /*
2232  * Note: although the TableDataInfo is a full DumpableObject, we treat its
2233  * dependency on its table as "special" and pass it to ArchiveEntry now.
2234  * See comments for BuildArchiveDependencies.
2235  */
2236  if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2237  {
2238  TocEntry *te;
2239 
2240  te = ArchiveEntry(fout, tdinfo->dobj.catId, tdinfo->dobj.dumpId,
2241  ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
2242  .namespace = tbinfo->dobj.namespace->dobj.name,
2243  .owner = tbinfo->rolname,
2244  .description = "TABLE DATA",
2245  .section = SECTION_DATA,
2246  .createStmt = "",
2247  .dropStmt = "",
2248  .copyStmt = copyStmt,
2249  .deps = &(tbinfo->dobj.dumpId),
2250  .nDeps = 1,
2251  .dumpFn = dumpFn,
2252  .dumpArg = tdinfo));
2253 
2254  /*
2255  * Set the TocEntry's dataLength in case we are doing a parallel dump
2256  * and want to order dump jobs by table size. We choose to measure
2257  * dataLength in table pages during dump, so no scaling is needed.
2258  * However, relpages is declared as "integer" in pg_class, and hence
2259  * also in TableInfo, but it's really BlockNumber a/k/a unsigned int.
2260  * Cast so that we get the right interpretation of table sizes
2261  * exceeding INT_MAX pages.
2262  */
2263  te->dataLength = (BlockNumber) tbinfo->relpages;
2264  }
2265 
2266  destroyPQExpBuffer(copyBuf);
2267  destroyPQExpBuffer(clistBuf);
2268 }
2269 
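/*
 * Hypothetical illustration, not part of pg_dump.c: the copyStmt assembled
 * above and stored in the TABLE DATA archive entry, for an invented
 * two-column table:
 *
 *	COPY public.t (id, name) FROM stdin;
 */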
2270 /*
2271  * refreshMatViewData -
2272  * load or refresh the contents of a single materialized view
2273  *
2274  * Actually, this just makes an ArchiveEntry for the REFRESH MATERIALIZED VIEW
2275  * statement.
2276  */
2277 static void
2278 refreshMatViewData(Archive *fout, TableDataInfo *tdinfo)
2279 {
2280  TableInfo *tbinfo = tdinfo->tdtable;
2281  PQExpBuffer q;
2282 
2283  /* If the materialized view is not flagged as populated, skip this. */
2284  if (!tbinfo->relispopulated)
2285  return;
2286 
2287  q = createPQExpBuffer();
2288 
2289  appendPQExpBuffer(q, "REFRESH MATERIALIZED VIEW %s;\n",
2290  fmtQualifiedDumpable(tbinfo));
2291 
2292  if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2293  ArchiveEntry(fout,
2294  tdinfo->dobj.catId, /* catalog ID */
2295  tdinfo->dobj.dumpId, /* dump ID */
2296  ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
2297  .namespace = tbinfo->dobj.namespace->dobj.name,
2298  .owner = tbinfo->rolname,
2299  .description = "MATERIALIZED VIEW DATA",
2300  .section = SECTION_POST_DATA,
2301  .createStmt = q->data,
2302  .dropStmt = "",
2303  .deps = tdinfo->dobj.dependencies,
2304  .nDeps = tdinfo->dobj.nDeps));
2305 
2306  destroyPQExpBuffer(q);
2307 }
2308 
2309 /*
2310  * getTableData -
2311  * set up dumpable objects representing the contents of tables
2312  */
2313 static void
2314 getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, char relkind)
2315 {
2316  int i;
2317 
2318  for (i = 0; i < numTables; i++)
2319  {
2320  if (tblinfo[i].dobj.dump & DUMP_COMPONENT_DATA &&
2321  (!relkind || tblinfo[i].relkind == relkind))
2322  makeTableDataInfo(dopt, &(tblinfo[i]));
2323  }
2324 }
2325 
2326 /*
2327  * Make a dumpable object for the data of this specific table
2328  *
2329  * Note: we make a TableDataInfo if and only if we are going to dump the
2330  * table data; the "dump" flag in such objects isn't used.
2331  */
2332 static void
2333 makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo)
2334 {
2335  TableDataInfo *tdinfo;
2336 
2337  /*
2338  * Nothing to do if we already decided to dump the table. This will
2339  * happen for "config" tables.
2340  */
2341  if (tbinfo->dataObj != NULL)
2342  return;
2343 
2344  /* Skip VIEWs (no data to dump) */
2345  if (tbinfo->relkind == RELKIND_VIEW)
2346  return;
2347  /* Skip FOREIGN TABLEs (no data to dump) */
2348  if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2349  return;
2350  /* Skip partitioned tables (data in partitions) */
2351  if (tbinfo->relkind == RELKIND_PARTITIONED_TABLE)
2352  return;
2353 
2354  /* Don't dump data in unlogged tables, if so requested */
2355  if (tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED &&
2356  dopt->no_unlogged_table_data)
2357  return;
2358 
2359  /* Check that the data is not explicitly excluded */
2360  if (simple_oid_list_member(&tabledata_exclude_oids,
2361  tbinfo->dobj.catId.oid))
2362  return;
2363 
2364  /* OK, let's dump it */
2365  tdinfo = (TableDataInfo *) pg_malloc(sizeof(TableDataInfo));
2366 
2367  if (tbinfo->relkind == RELKIND_MATVIEW)
2368  tdinfo->dobj.objType = DO_REFRESH_MATVIEW;
2369  else if (tbinfo->relkind == RELKIND_SEQUENCE)
2370  tdinfo->dobj.objType = DO_SEQUENCE_SET;
2371  else
2372  tdinfo->dobj.objType = DO_TABLE_DATA;
2373 
2374  /*
2375  * Note: use tableoid 0 so that this object won't be mistaken for
2376  * something that pg_depend entries apply to.
2377  */
2378  tdinfo->dobj.catId.tableoid = 0;
2379  tdinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
2380  AssignDumpId(&tdinfo->dobj);
2381  tdinfo->dobj.name = tbinfo->dobj.name;
2382  tdinfo->dobj.namespace = tbinfo->dobj.namespace;
2383  tdinfo->tdtable = tbinfo;
2384  tdinfo->filtercond = NULL; /* might get set later */
2385  addObjectDependency(&tdinfo->dobj, tbinfo->dobj.dumpId);
2386 
2387  tbinfo->dataObj = tdinfo;
2388 }
2389 
2390 /*
2391  * The refresh for a materialized view must be dependent on the refresh for
2392  * any materialized view that this one is dependent on.
2393  *
2394  * This must be called after all the objects are created, but before they are
2395  * sorted.
2396  */
2397 static void
2398 buildMatViewRefreshDependencies(Archive *fout)
2399 {
2400  PQExpBuffer query;
2401  PGresult *res;
2402  int ntups,
2403  i;
2404  int i_classid,
2405  i_objid,
2406  i_refobjid;
2407 
2408  /* No Mat Views before 9.3. */
2409  if (fout->remoteVersion < 90300)
2410  return;
2411 
2412  query = createPQExpBuffer();
2413 
2414  appendPQExpBufferStr(query, "WITH RECURSIVE w AS "
2415  "( "
2416  "SELECT d1.objid, d2.refobjid, c2.relkind AS refrelkind "
2417  "FROM pg_depend d1 "
2418  "JOIN pg_class c1 ON c1.oid = d1.objid "
2419  "AND c1.relkind = " CppAsString2(RELKIND_MATVIEW)
2420  " JOIN pg_rewrite r1 ON r1.ev_class = d1.objid "
2421  "JOIN pg_depend d2 ON d2.classid = 'pg_rewrite'::regclass "
2422  "AND d2.objid = r1.oid "
2423  "AND d2.refobjid <> d1.objid "
2424  "JOIN pg_class c2 ON c2.oid = d2.refobjid "
2425  "AND c2.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
2426  CppAsString2(RELKIND_VIEW) ") "
2427  "WHERE d1.classid = 'pg_class'::regclass "
2428  "UNION "
2429  "SELECT w.objid, d3.refobjid, c3.relkind "
2430  "FROM w "
2431  "JOIN pg_rewrite r3 ON r3.ev_class = w.refobjid "
2432  "JOIN pg_depend d3 ON d3.classid = 'pg_rewrite'::regclass "
2433  "AND d3.objid = r3.oid "
2434  "AND d3.refobjid <> w.refobjid "
2435  "JOIN pg_class c3 ON c3.oid = d3.refobjid "
2436  "AND c3.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
2437  CppAsString2(RELKIND_VIEW) ") "
2438  ") "
2439  "SELECT 'pg_class'::regclass::oid AS classid, objid, refobjid "
2440  "FROM w "
2441  "WHERE refrelkind = " CppAsString2(RELKIND_MATVIEW));
2442 
2443  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
2444 
2445  ntups = PQntuples(res);
2446 
2447  i_classid = PQfnumber(res, "classid");
2448  i_objid = PQfnumber(res, "objid");
2449  i_refobjid = PQfnumber(res, "refobjid");
2450 
2451  for (i = 0; i < ntups; i++)
2452  {
2453  CatalogId objId;
2454  CatalogId refobjId;
2455  DumpableObject *dobj;
2456  DumpableObject *refdobj;
2457  TableInfo *tbinfo;
2458  TableInfo *reftbinfo;
2459 
2460  objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
2461  objId.oid = atooid(PQgetvalue(res, i, i_objid));
2462  refobjId.tableoid = objId.tableoid;
2463  refobjId.oid = atooid(PQgetvalue(res, i, i_refobjid));
2464 
2465  dobj = findObjectByCatalogId(objId);
2466  if (dobj == NULL)
2467  continue;
2468 
2469  Assert(dobj->objType == DO_TABLE);
2470  tbinfo = (TableInfo *) dobj;
2471  Assert(tbinfo->relkind == RELKIND_MATVIEW);
2472  dobj = (DumpableObject *) tbinfo->dataObj;
2473  if (dobj == NULL)
2474  continue;
2475  Assert(dobj->objType == DO_REFRESH_MATVIEW);
2476 
2477  refdobj = findObjectByCatalogId(refobjId);
2478  if (refdobj == NULL)
2479  continue;
2480 
2481  Assert(refdobj->objType == DO_TABLE);
2482  reftbinfo = (TableInfo *) refdobj;
2483  Assert(reftbinfo->relkind == RELKIND_MATVIEW);
2484  refdobj = (DumpableObject *) reftbinfo->dataObj;
2485  if (refdobj == NULL)
2486  continue;
2487  Assert(refdobj->objType == DO_REFRESH_MATVIEW);
2488 
2489  addObjectDependency(dobj, refdobj->dumpId);
2490 
2491  if (!reftbinfo->relispopulated)
2492  tbinfo->relispopulated = false;
2493  }
2494 
2495  PQclear(res);
2496 
2497  destroyPQExpBuffer(query);
2498 }
2499 
2500 /*
2501  * getTableDataFKConstraints -
2502  * add dump-order dependencies reflecting foreign key constraints
2503  *
2504  * This code is executed only in a data-only dump --- in schema+data dumps
2505  * we handle foreign key issues by not creating the FK constraints until
2506  * after the data is loaded. In a data-only dump, however, we want to
2507  * order the table data objects in such a way that a table's referenced
2508  * tables are restored first. (In the presence of circular references or
2509  * self-references this may be impossible; we'll detect and complain about
2510  * that during the dependency sorting step.)
2511  */
2512 static void
2513 getTableDataFKConstraints(void)
2514 {
2515  DumpableObject **dobjs;
2516  int numObjs;
2517  int i;
2518 
2519  /* Search through all the dumpable objects for FK constraints */
2520  getDumpableObjects(&dobjs, &numObjs);
2521  for (i = 0; i < numObjs; i++)
2522  {
2523  if (dobjs[i]->objType == DO_FK_CONSTRAINT)
2524  {
2525  ConstraintInfo *cinfo = (ConstraintInfo *) dobjs[i];
2526  TableInfo *ftable;
2527 
2528  /* Not interesting unless both tables are to be dumped */
2529  if (cinfo->contable == NULL ||
2530  cinfo->contable->dataObj == NULL)
2531  continue;
2532  ftable = findTableByOid(cinfo->confrelid);
2533  if (ftable == NULL ||
2534  ftable->dataObj == NULL)
2535  continue;
2536 
2537  /*
2538  * Okay, make referencing table's TABLE_DATA object depend on the
2539  * referenced table's TABLE_DATA object.
2540  */
2541  addObjectDependency(&cinfo->contable->dataObj->dobj,
2542  ftable->dataObj->dobj.dumpId);
2543  }
2544  }
2545  free(dobjs);
2546 }
2547 
2548 
2549 /*
2550  * guessConstraintInheritance:
2551  * In pre-8.4 databases, we can't tell for certain which constraints
2552  * are inherited. We assume a CHECK constraint is inherited if its name
2553  * matches the name of any constraint in the parent. Originally this code
2554  * tried to compare the expression texts, but that can fail for various
2555  * reasons --- for example, if the parent and child tables are in different
2556  * schemas, reverse-listing of function calls may produce different text
2557  * (schema-qualified or not) depending on search path.
2558  *
2559  * In 8.4 and up we can rely on the conislocal field to decide which
2560  * constraints must be dumped; much safer.
2561  *
2562  * This function assumes all conislocal flags were initialized to true.
2563  * It clears the flag on anything that seems to be inherited.
2564  */
2565 static void
2566 guessConstraintInheritance(TableInfo *tblinfo, int numTables)
2567 {
2568  int i,
2569  j,
2570  k;
2571 
2572  for (i = 0; i < numTables; i++)
2573  {
2574  TableInfo *tbinfo = &(tblinfo[i]);
2575  int numParents;
2576  TableInfo **parents;
2577  TableInfo *parent;
2578 
2579  /* Sequences and views never have parents */
2580  if (tbinfo->relkind == RELKIND_SEQUENCE ||
2581  tbinfo->relkind == RELKIND_VIEW)
2582  continue;
2583 
2584  /* Don't bother computing anything for non-target tables, either */
2585  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
2586  continue;
2587 
2588  numParents = tbinfo->numParents;
2589  parents = tbinfo->parents;
2590 
2591  if (numParents == 0)
2592  continue; /* nothing to see here, move along */
2593 
2594  /* scan for inherited CHECK constraints */
2595  for (j = 0; j < tbinfo->ncheck; j++)
2596  {
2597  ConstraintInfo *constr;
2598 
2599  constr = &(tbinfo->checkexprs[j]);
2600 
2601  for (k = 0; k < numParents; k++)
2602  {
2603  int l;
2604 
2605  parent = parents[k];
2606  for (l = 0; l < parent->ncheck; l++)
2607  {
2608  ConstraintInfo *pconstr = &(parent->checkexprs[l]);
2609 
2610  if (strcmp(pconstr->dobj.name, constr->dobj.name) == 0)
2611  {
2612  constr->conislocal = false;
2613  break;
2614  }
2615  }
2616  if (!constr->conislocal)
2617  break;
2618  }
2619  }
2620  }
2621 }
2622 
2623 
2624 /*
2625  * dumpDatabase:
2626  * dump the database definition
2627  */
2628 static void
2629 dumpDatabase(Archive *fout)
2630 {
2631  DumpOptions *dopt = fout->dopt;
2632  PQExpBuffer dbQry = createPQExpBuffer();
2633  PQExpBuffer delQry = createPQExpBuffer();
2634  PQExpBuffer creaQry = createPQExpBuffer();
2635  PQExpBuffer labelq = createPQExpBuffer();
2636  PGconn *conn = GetConnection(fout);
2637  PGresult *res;
2638  int i_tableoid,
2639  i_oid,
2640  i_datname,
2641  i_dba,
2642  i_encoding,
2643  i_collate,
2644  i_ctype,
2645  i_frozenxid,
2646  i_minmxid,
2647  i_datacl,
2648  i_rdatacl,
2649  i_datistemplate,
2650  i_datconnlimit,
2651  i_tablespace;
2652  CatalogId dbCatId;
2653  DumpId dbDumpId;
2654  const char *datname,
2655  *dba,
2656  *encoding,
2657  *collate,
2658  *ctype,
2659  *datacl,
2660  *rdatacl,
2661  *datistemplate,
2662  *datconnlimit,
2663  *tablespace;
2664  uint32 frozenxid,
2665  minmxid;
2666  char *qdatname;
2667 
2668  if (g_verbose)
2669  write_msg(NULL, "saving database definition\n");
2670 
2671  /* Fetch the database-level properties for this database */
2672  if (fout->remoteVersion >= 90600)
2673  {
2674  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2675  "(%s datdba) AS dba, "
2676  "pg_encoding_to_char(encoding) AS encoding, "
2677  "datcollate, datctype, datfrozenxid, datminmxid, "
2678  "(SELECT array_agg(acl ORDER BY acl::text COLLATE \"C\") FROM ( "
2679  " SELECT unnest(coalesce(datacl,acldefault('d',datdba))) AS acl "
2680  " EXCEPT SELECT unnest(acldefault('d',datdba))) as datacls)"
2681  " AS datacl, "
2682  "(SELECT array_agg(acl ORDER BY acl::text COLLATE \"C\") FROM ( "
2683  " SELECT unnest(acldefault('d',datdba)) AS acl "
2684  " EXCEPT SELECT unnest(coalesce(datacl,acldefault('d',datdba)))) as rdatacls)"
2685  " AS rdatacl, "
2686  "datistemplate, datconnlimit, "
2687  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2688  "shobj_description(oid, 'pg_database') AS description "
2689 
2690  "FROM pg_database "
2691  "WHERE datname = current_database()",
2693  }
2694  else if (fout->remoteVersion >= 90300)
2695  {
2696  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2697  "(%s datdba) AS dba, "
2698  "pg_encoding_to_char(encoding) AS encoding, "
2699  "datcollate, datctype, datfrozenxid, datminmxid, "
2700  "datacl, '' as rdatacl, datistemplate, datconnlimit, "
2701  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2702  "shobj_description(oid, 'pg_database') AS description "
2703 
2704  "FROM pg_database "
2705  "WHERE datname = current_database()",
2707  }
2708  else if (fout->remoteVersion >= 80400)
2709  {
2710  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2711  "(%s datdba) AS dba, "
2712  "pg_encoding_to_char(encoding) AS encoding, "
2713  "datcollate, datctype, datfrozenxid, 0 AS datminmxid, "
2714  "datacl, '' as rdatacl, datistemplate, datconnlimit, "
2715  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2716  "shobj_description(oid, 'pg_database') AS description "
2717 
2718  "FROM pg_database "
2719  "WHERE datname = current_database()",
2721  }
2722  else if (fout->remoteVersion >= 80200)
2723  {
2724  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2725  "(%s datdba) AS dba, "
2726  "pg_encoding_to_char(encoding) AS encoding, "
2727  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2728  "datacl, '' as rdatacl, datistemplate, datconnlimit, "
2729  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2730  "shobj_description(oid, 'pg_database') AS description "
2731 
2732  "FROM pg_database "
2733  "WHERE datname = current_database()",
2735  }
2736  else
2737  {
2738  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2739  "(%s datdba) AS dba, "
2740  "pg_encoding_to_char(encoding) AS encoding, "
2741  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2742  "datacl, '' as rdatacl, datistemplate, "
2743  "-1 as datconnlimit, "
2744  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace "
2745  "FROM pg_database "
2746  "WHERE datname = current_database()",
2748  }
2749 
2750  res = ExecuteSqlQueryForSingleRow(fout, dbQry->data);
2751 
2752  i_tableoid = PQfnumber(res, "tableoid");
2753  i_oid = PQfnumber(res, "oid");
2754  i_datname = PQfnumber(res, "datname");
2755  i_dba = PQfnumber(res, "dba");
2756  i_encoding = PQfnumber(res, "encoding");
2757  i_collate = PQfnumber(res, "datcollate");
2758  i_ctype = PQfnumber(res, "datctype");
2759  i_frozenxid = PQfnumber(res, "datfrozenxid");
2760  i_minmxid = PQfnumber(res, "datminmxid");
2761  i_datacl = PQfnumber(res, "datacl");
2762  i_rdatacl = PQfnumber(res, "rdatacl");
2763  i_datistemplate = PQfnumber(res, "datistemplate");
2764  i_datconnlimit = PQfnumber(res, "datconnlimit");
2765  i_tablespace = PQfnumber(res, "tablespace");
2766 
2767  dbCatId.tableoid = atooid(PQgetvalue(res, 0, i_tableoid));
2768  dbCatId.oid = atooid(PQgetvalue(res, 0, i_oid));
2769  datname = PQgetvalue(res, 0, i_datname);
2770  dba = PQgetvalue(res, 0, i_dba);
2771  encoding = PQgetvalue(res, 0, i_encoding);
2772  collate = PQgetvalue(res, 0, i_collate);
2773  ctype = PQgetvalue(res, 0, i_ctype);
2774  frozenxid = atooid(PQgetvalue(res, 0, i_frozenxid));
2775  minmxid = atooid(PQgetvalue(res, 0, i_minmxid));
2776  datacl = PQgetvalue(res, 0, i_datacl);
2777  rdatacl = PQgetvalue(res, 0, i_rdatacl);
2778  datistemplate = PQgetvalue(res, 0, i_datistemplate);
2779  datconnlimit = PQgetvalue(res, 0, i_datconnlimit);
2780  tablespace = PQgetvalue(res, 0, i_tablespace);
2781 
2782  qdatname = pg_strdup(fmtId(datname));
2783 
2784  /*
2785  * Prepare the CREATE DATABASE command. We must specify encoding, locale,
2786  * and tablespace since those can't be altered later. Other DB properties
2787  * are left to the DATABASE PROPERTIES entry, so that they can be applied
2788  * after reconnecting to the target DB.
2789  */
2790  appendPQExpBuffer(creaQry, "CREATE DATABASE %s WITH TEMPLATE = template0",
2791  qdatname);
2792  if (strlen(encoding) > 0)
2793  {
2794  appendPQExpBufferStr(creaQry, " ENCODING = ");
2795  appendStringLiteralAH(creaQry, encoding, fout);
2796  }
2797  if (strlen(collate) > 0)
2798  {
2799  appendPQExpBufferStr(creaQry, " LC_COLLATE = ");
2800  appendStringLiteralAH(creaQry, collate, fout);
2801  }
2802  if (strlen(ctype) > 0)
2803  {
2804  appendPQExpBufferStr(creaQry, " LC_CTYPE = ");
2805  appendStringLiteralAH(creaQry, ctype, fout);
2806  }
2807 
2808  /*
2809  * Note: looking at dopt->outputNoTablespaces here is completely the wrong
2810  * thing; the decision whether to specify a tablespace should be left till
2811  * pg_restore, so that pg_restore --no-tablespaces applies. Ideally we'd
2812  * label the DATABASE entry with the tablespace and let the normal
2813  * tablespace selection logic work ... but CREATE DATABASE doesn't pay
2814  * attention to default_tablespace, so that won't work.
2815  */
2816  if (strlen(tablespace) > 0 && strcmp(tablespace, "pg_default") != 0 &&
2817  !dopt->outputNoTablespaces)
2818  appendPQExpBuffer(creaQry, " TABLESPACE = %s",
2819  fmtId(tablespace));
2820  appendPQExpBufferStr(creaQry, ";\n");
2821 
2822  appendPQExpBuffer(delQry, "DROP DATABASE %s;\n",
2823  qdatname);
2824 
2825  dbDumpId = createDumpId();
2826 
2827  ArchiveEntry(fout,
2828  dbCatId, /* catalog ID */
2829  dbDumpId, /* dump ID */
2830  ARCHIVE_OPTS(.tag = datname,
2831  .owner = dba,
2832  .description = "DATABASE",
2833  .section = SECTION_PRE_DATA,
2834  .createStmt = creaQry->data,
2835  .dropStmt = delQry->data));
2836 
2837  /* Compute correct tag for archive entry */
2838  appendPQExpBuffer(labelq, "DATABASE %s", qdatname);
2839 
2840  /* Dump DB comment if any */
2841  if (fout->remoteVersion >= 80200)
2842  {
2843  /*
2844  * 8.2 and up keep comments on shared objects in a shared table, so we
2845  * cannot use the dumpComment() code used for other database objects.
2846  * Be careful that the ArchiveEntry parameters match that function.
2847  */
2848  char *comment = PQgetvalue(res, 0, PQfnumber(res, "description"));
2849 
2850  if (comment && *comment && !dopt->no_comments)
2851  {
2852  resetPQExpBuffer(dbQry);
2853 
2854  /*
2855  * Generates warning when loaded into a differently-named
2856  * database.
2857  */
2858  appendPQExpBuffer(dbQry, "COMMENT ON DATABASE %s IS ", qdatname);
2859  appendStringLiteralAH(dbQry, comment, fout);
2860  appendPQExpBufferStr(dbQry, ";\n");
2861 
2862  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2863  ARCHIVE_OPTS(.tag = labelq->data,
2864  .owner = dba,
2865  .description = "COMMENT",
2866  .section = SECTION_NONE,
2867  .createStmt = dbQry->data,
2868  .dropStmt = "",
2869  .deps = &dbDumpId,
2870  .nDeps = 1));
2871  }
2872  }
2873  else
2874  {
2875  dumpComment(fout, "DATABASE", qdatname, NULL, dba,
2876  dbCatId, 0, dbDumpId);
2877  }
2878 
2879  /* Dump DB security label, if enabled */
2880  if (!dopt->no_security_labels && fout->remoteVersion >= 90200)
2881  {
2882  PGresult *shres;
2883  PQExpBuffer seclabelQry;
2884 
2885  seclabelQry = createPQExpBuffer();
2886 
2887  buildShSecLabelQuery(conn, "pg_database", dbCatId.oid, seclabelQry);
2888  shres = ExecuteSqlQuery(fout, seclabelQry->data, PGRES_TUPLES_OK);
2889  resetPQExpBuffer(seclabelQry);
2890  emitShSecLabels(conn, shres, seclabelQry, "DATABASE", datname);
2891  if (seclabelQry->len > 0)
2892  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2893  ARCHIVE_OPTS(.tag = labelq->data,
2894  .owner = dba,
2895  .description = "SECURITY LABEL",
2896  .section = SECTION_NONE,
2897  .createStmt = seclabelQry->data,
2898  .dropStmt = "",
2899  .deps = &dbDumpId,
2900  .nDeps = 1));
2901  destroyPQExpBuffer(seclabelQry);
2902  PQclear(shres);
2903  }
2904 
2905  /*
2906  * Dump ACL if any. Note that we do not support initial privileges
2907  * (pg_init_privs) on databases.
2908  */
2909  dumpACL(fout, dbCatId, dbDumpId, "DATABASE",
2910  qdatname, NULL, NULL,
2911  dba, datacl, rdatacl, "", "");
2912 
2913  /*
2914  * Now construct a DATABASE PROPERTIES archive entry to restore any
2915  * non-default database-level properties. (The reason this must be
2916  * separate is that we cannot put any additional commands into the TOC
2917  * entry that has CREATE DATABASE. pg_restore would execute such a group
2918  * in an implicit transaction block, and the backend won't allow CREATE
2919  * DATABASE in that context.)
2920  */
2921  resetPQExpBuffer(creaQry);
2922  resetPQExpBuffer(delQry);
2923 
2924  if (strlen(datconnlimit) > 0 && strcmp(datconnlimit, "-1") != 0)
2925  appendPQExpBuffer(creaQry, "ALTER DATABASE %s CONNECTION LIMIT = %s;\n",
2926  qdatname, datconnlimit);
2927 
2928  if (strcmp(datistemplate, "t") == 0)
2929  {
2930  appendPQExpBuffer(creaQry, "ALTER DATABASE %s IS_TEMPLATE = true;\n",
2931  qdatname);
2932 
2933  /*
2934  * The backend won't accept DROP DATABASE on a template database. We
2935  * can deal with that by removing the template marking before the DROP
2936  * gets issued. We'd prefer to use ALTER DATABASE IF EXISTS here, but
2937  * since no such command is currently supported, fake it with a direct
2938  * UPDATE on pg_database.
2939  */
2940  appendPQExpBufferStr(delQry, "UPDATE pg_catalog.pg_database "
2941  "SET datistemplate = false WHERE datname = ");
2942  appendStringLiteralAH(delQry, datname, fout);
2943  appendPQExpBufferStr(delQry, ";\n");
2944  }
2945 
2946  /* Add database-specific SET options */
2947  dumpDatabaseConfig(fout, creaQry, datname, dbCatId.oid);
2948 
2949  /*
2950  * We stick this binary-upgrade query into the DATABASE PROPERTIES archive
2951  * entry, too, for lack of a better place.
2952  */
2953  if (dopt->binary_upgrade)
2954  {
2955  appendPQExpBufferStr(creaQry, "\n-- For binary upgrade, set datfrozenxid and datminmxid.\n");
2956  appendPQExpBuffer(creaQry, "UPDATE pg_catalog.pg_database\n"
2957  "SET datfrozenxid = '%u', datminmxid = '%u'\n"
2958  "WHERE datname = ",
2959  frozenxid, minmxid);
2960  appendStringLiteralAH(creaQry, datname, fout);
2961  appendPQExpBufferStr(creaQry, ";\n");
2962  }
2963 
2964  if (creaQry->len > 0)
2965  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2966  ARCHIVE_OPTS(.tag = datname,
2967  .owner = dba,
2968  .description = "DATABASE PROPERTIES",
2969  .section = SECTION_PRE_DATA,
2970  .createStmt = creaQry->data,
2971  .dropStmt = delQry->data,
2972  .deps = &dbDumpId));
2973 
2974  /*
2975  * pg_largeobject comes from the old system intact, so set its
2976  * relfrozenxids and relminmxids.
2977  */
2978  if (dopt->binary_upgrade)
2979  {
2980  PGresult *lo_res;
2981  PQExpBuffer loFrozenQry = createPQExpBuffer();
2982  PQExpBuffer loOutQry = createPQExpBuffer();
2983  int i_relfrozenxid,
2984  i_relminmxid;
2985 
2986  /*
2987  * pg_largeobject
2988  */
2989  if (fout->remoteVersion >= 90300)
2990  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid\n"
2991  "FROM pg_catalog.pg_class\n"
2992  "WHERE oid = %u;\n",
2993  LargeObjectRelationId);
2994  else
2995  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid\n"
2996  "FROM pg_catalog.pg_class\n"
2997  "WHERE oid = %u;\n",
2998  LargeObjectRelationId);
2999 
3000  lo_res = ExecuteSqlQueryForSingleRow(fout, loFrozenQry->data);
3001 
3002  i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
3003  i_relminmxid = PQfnumber(lo_res, "relminmxid");
3004 
3005  appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, set pg_largeobject relfrozenxid and relminmxid\n");
3006  appendPQExpBuffer(loOutQry, "UPDATE pg_catalog.pg_class\n"
3007  "SET relfrozenxid = '%u', relminmxid = '%u'\n"
3008  "WHERE oid = %u;\n",
3009  atooid(PQgetvalue(lo_res, 0, i_relfrozenxid)),
3010  atooid(PQgetvalue(lo_res, 0, i_relminmxid)),
3011  LargeObjectRelationId);
3012  ArchiveEntry(fout, nilCatalogId, createDumpId(),
3013  ARCHIVE_OPTS(.tag = "pg_largeobject",
3014  .description = "pg_largeobject",
3015  .owner = "",
3016  .section = SECTION_PRE_DATA,
3017  .createStmt = loOutQry->data,
3018  .dropStmt = ""));
3019 
3020  PQclear(lo_res);
3021 
3022  destroyPQExpBuffer(loFrozenQry);
3023  destroyPQExpBuffer(loOutQry);
3024  }
3025 
3026  PQclear(res);
3027 
3028  free(qdatname);
3029  destroyPQExpBuffer(dbQry);
3030  destroyPQExpBuffer(delQry);
3031  destroyPQExpBuffer(creaQry);
3032  destroyPQExpBuffer(labelq);
3033 }
3034 
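/*
 * Hypothetical illustration, not part of pg_dump.c: the two archive entries
 * built above for an invented database "mydb".  The DATABASE entry carries
 * only what CREATE DATABASE must fix at creation time; the DATABASE
 * PROPERTIES entry carries the ALTER DATABASE commands applied after
 * reconnecting to the new database:
 *
 *	CREATE DATABASE mydb WITH TEMPLATE = template0 ENCODING = 'UTF8'
 *		LC_COLLATE = 'C' LC_CTYPE = 'C';
 *
 *	ALTER DATABASE mydb CONNECTION LIMIT = 50;
 *	ALTER DATABASE mydb SET work_mem TO '64MB';
 */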
3035 /*
3036  * Collect any database-specific or role-and-database-specific SET options
3037  * for this database, and append them to outbuf.
3038  */
3039 static void
3040 dumpDatabaseConfig(Archive *AH, PQExpBuffer outbuf,
3041  const char *dbname, Oid dboid)
3042 {
3043  PGconn *conn = GetConnection(AH);
3044  PQExpBuffer buf = createPQExpBuffer();
3045  PGresult *res;
3046  int count = 1;
3047 
3048  /*
3049  * First collect database-specific options. Pre-8.4 server versions lack
3050  * unnest(), so we do this the hard way by querying once per subscript.
3051  */
3052  for (;;)
3053  {
3054  if (AH->remoteVersion >= 90000)
3055  printfPQExpBuffer(buf, "SELECT setconfig[%d] FROM pg_db_role_setting "
3056  "WHERE setrole = 0 AND setdatabase = '%u'::oid",
3057  count, dboid);
3058  else
3059  printfPQExpBuffer(buf, "SELECT datconfig[%d] FROM pg_database WHERE oid = '%u'::oid", count, dboid);
3060 
3061  res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);
3062 
3063  if (PQntuples(res) == 1 &&
3064  !PQgetisnull(res, 0, 0))
3065  {
3066  makeAlterConfigCommand(conn, PQgetvalue(res, 0, 0),
3067  "DATABASE", dbname, NULL, NULL,
3068  outbuf);
3069  PQclear(res);
3070  count++;
3071  }
3072  else
3073  {
3074  PQclear(res);
3075  break;
3076  }
3077  }
3078 
3079  /* Now look for role-and-database-specific options */
3080  if (AH->remoteVersion >= 90000)
3081  {
3082  /* Here we can assume we have unnest() */
3083  printfPQExpBuffer(buf, "SELECT rolname, unnest(setconfig) "
3084  "FROM pg_db_role_setting s, pg_roles r "
3085  "WHERE setrole = r.oid AND setdatabase = '%u'::oid",
3086  dboid);
3087 
3088  res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);
3089 
3090  if (PQntuples(res) > 0)
3091  {
3092  int i;
3093 
3094  for (i = 0; i < PQntuples(res); i++)
3095  makeAlterConfigCommand(conn, PQgetvalue(res, i, 1),
3096  "ROLE", PQgetvalue(res, i, 0),
3097  "DATABASE", dbname,
3098  outbuf);
3099  }
3100 
3101  PQclear(res);
3102  }
3103 
3104  destroyPQExpBuffer(buf);
3105 }
3106 
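/*
 * Hypothetical illustration, not part of pg_dump.c: the commands that
 * makeAlterConfigCommand() appends to outbuf, with invented database, role,
 * and setting names:
 *
 *	ALTER DATABASE mydb SET work_mem TO '64MB';
 *	ALTER ROLE alice IN DATABASE mydb SET maintenance_work_mem TO '256MB';
 */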
3107 /*
3108  * dumpEncoding: put the correct encoding into the archive
3109  */
3110 static void
3111 dumpEncoding(Archive *AH)
3112 {
3113  const char *encname = pg_encoding_to_char(AH->encoding);
3114  PQExpBuffer qry = createPQExpBuffer();
3115 
3116  if (g_verbose)
3117  write_msg(NULL, "saving encoding = %s\n", encname);
3118 
3119  appendPQExpBufferStr(qry, "SET client_encoding = ");
3120  appendStringLiteralAH(qry, encname, AH);
3121  appendPQExpBufferStr(qry, ";\n");
3122 
3123  ArchiveEntry(AH, nilCatalogId, createDumpId(),
3124  ARCHIVE_OPTS(.tag = "ENCODING",
3125  .description = "ENCODING",
3126  .owner = "",
3127  .section = SECTION_PRE_DATA,
3128  .createStmt = qry->data,
3129  .dropStmt = ""));
3130 
3131  destroyPQExpBuffer(qry);
3132 }
3133 
3134 
3135 /*
3136  * dumpStdStrings: put the correct escape string behavior into the archive
3137  */
3138 static void
3139 dumpStdStrings(Archive *AH)
3140 {
3141  const char *stdstrings = AH->std_strings ? "on" : "off";
3142  PQExpBuffer qry = createPQExpBuffer();
3143 
3144  if (g_verbose)
3145  write_msg(NULL, "saving standard_conforming_strings = %s\n",
3146  stdstrings);
3147 
3148  appendPQExpBuffer(qry, "SET standard_conforming_strings = '%s';\n",
3149  stdstrings);
3150 
3151  ArchiveEntry(AH, nilCatalogId, createDumpId(),
3152  ARCHIVE_OPTS(.tag = "STDSTRINGS",
3153  .description = "STDSTRINGS",
3154  .owner = "",
3155  .section = SECTION_PRE_DATA,
3156  .createStmt = qry->data,
3157  .dropStmt = ""));
3158 
3159  destroyPQExpBuffer(qry);
3160 }
3161 
3162 /*
3163  * dumpSearchPath: record the active search_path in the archive
3164  */
3165 static void
3166 dumpSearchPath(Archive *AH)
3167 {
3168  PQExpBuffer qry = createPQExpBuffer();
3169  PQExpBuffer path = createPQExpBuffer();
3170  PGresult *res;
3171  char **schemanames = NULL;
3172  int nschemanames = 0;
3173  int i;
3174 
3175  /*
3176  * We use the result of current_schemas(), not the search_path GUC,
3177  * because that might contain wildcards such as "$user", which won't
3178  * necessarily have the same value during restore. Also, this way avoids
3179  * listing schemas that may appear in search_path but not actually exist,
3180  * which seems like a prudent exclusion.
3181  */
3182  res = ExecuteSqlQueryForSingleRow(AH,
3183  "SELECT pg_catalog.current_schemas(false)");
3184 
3185  if (!parsePGArray(PQgetvalue(res, 0, 0), &schemanames, &nschemanames))
3186  exit_horribly(NULL, "could not parse result of current_schemas()\n");
3187 
3188  /*
3189  * We use set_config(), not a simple "SET search_path" command, because
3190  * the latter has less-clean behavior if the search path is empty. While
3191  * that's likely to get fixed at some point, it seems like a good idea to
3192  * be as backwards-compatible as possible in what we put into archives.
3193  */
3194  for (i = 0; i < nschemanames; i++)
3195  {
3196  if (i > 0)
3197  appendPQExpBufferStr(path, ", ");
3198  appendPQExpBufferStr(path, fmtId(schemanames[i]));
3199  }
3200 
3201  appendPQExpBufferStr(qry, "SELECT pg_catalog.set_config('search_path', ");
3202  appendStringLiteralAH(qry, path->data, AH);
3203  appendPQExpBufferStr(qry, ", false);\n");
3204 
3205  if (g_verbose)
3206  write_msg(NULL, "saving search_path = %s\n", path->data);
3207 
3208  ArchiveEntry(AH, nilCatalogId, createDumpId(),
3209  ARCHIVE_OPTS(.tag = "SEARCHPATH",
3210  .description = "SEARCHPATH",
3211  .owner = "",
3212  .section = SECTION_PRE_DATA,
3213  .createStmt = qry->data,
3214  .dropStmt = ""));
3215 
3216  /* Also save it in AH->searchpath, in case we're doing plain text dump */
3217  AH->searchpath = pg_strdup(qry->data);
3218 
3219  if (schemanames)
3220  free(schemanames);
3221  PQclear(res);
3222  destroyPQExpBuffer(qry);
3223  destroyPQExpBuffer(path);
3224 }
3225 
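/*
 * Hypothetical illustration, not part of pg_dump.c: if
 * current_schemas(false) returned {app,public}, the SEARCHPATH entry built
 * above would contain:
 *
 *	SELECT pg_catalog.set_config('search_path', 'app, public', false);
 */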
3226 
3227 /*
3228  * getBlobs:
3229  * Collect schema-level data about large objects
3230  */
3231 static void
3232 getBlobs(Archive *fout)
3233 {
3234  DumpOptions *dopt = fout->dopt;
3235  PQExpBuffer blobQry = createPQExpBuffer();
3236  BlobInfo *binfo;
3237  DumpableObject *bdata;
3238  PGresult *res;
3239  int ntups;
3240  int i;
3241  int i_oid;
3242  int i_lomowner;
3243  int i_lomacl;
3244  int i_rlomacl;
3245  int i_initlomacl;
3246  int i_initrlomacl;
3247 
3248  /* Verbose message */
3249  if (g_verbose)
3250  write_msg(NULL, "reading large objects\n");
3251 
3252  /* Fetch BLOB OIDs, and owner/ACL data if >= 9.0 */
3253  if (fout->remoteVersion >= 90600)
3254  {
3255  PQExpBuffer acl_subquery = createPQExpBuffer();
3256  PQExpBuffer racl_subquery = createPQExpBuffer();
3257  PQExpBuffer init_acl_subquery = createPQExpBuffer();
3258  PQExpBuffer init_racl_subquery = createPQExpBuffer();
3259 
3260  buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
3261  init_racl_subquery, "l.lomacl", "l.lomowner", "'L'",
3262  dopt->binary_upgrade);
3263 
3264  appendPQExpBuffer(blobQry,
3265  "SELECT l.oid, (%s l.lomowner) AS rolname, "
3266  "%s AS lomacl, "
3267  "%s AS rlomacl, "
3268  "%s AS initlomacl, "
3269  "%s AS initrlomacl "
3270  "FROM pg_largeobject_metadata l "
3271  "LEFT JOIN pg_init_privs pip ON "
3272  "(l.oid = pip.objoid "
3273  "AND pip.classoid = 'pg_largeobject'::regclass "
3274  "AND pip.objsubid = 0) ",
3276  acl_subquery->data,
3277  racl_subquery->data,
3278  init_acl_subquery->data,
3279  init_racl_subquery->data);
3280 
3281  destroyPQExpBuffer(acl_subquery);
3282  destroyPQExpBuffer(racl_subquery);
3283  destroyPQExpBuffer(init_acl_subquery);
3284  destroyPQExpBuffer(init_racl_subquery);
3285  }
3286  else if (fout->remoteVersion >= 90000)
3287  appendPQExpBuffer(blobQry,
3288  "SELECT oid, (%s lomowner) AS rolname, lomacl, "
3289  "NULL AS rlomacl, NULL AS initlomacl, "
3290  "NULL AS initrlomacl "
3291  " FROM pg_largeobject_metadata",
3293  else
3294  appendPQExpBufferStr(blobQry,
3295  "SELECT DISTINCT loid AS oid, "
3296  "NULL::name AS rolname, NULL::oid AS lomacl, "
3297  "NULL::oid AS rlomacl, NULL::oid AS initlomacl, "
3298  "NULL::oid AS initrlomacl "
3299  " FROM pg_largeobject");
3300 
3301  res = ExecuteSqlQuery(fout, blobQry->data, PGRES_TUPLES_OK);
3302 
3303  i_oid = PQfnumber(res, "oid");
3304  i_lomowner = PQfnumber(res, "rolname");
3305  i_lomacl = PQfnumber(res, "lomacl");
3306  i_rlomacl = PQfnumber(res, "rlomacl");
3307  i_initlomacl = PQfnumber(res, "initlomacl");
3308  i_initrlomacl = PQfnumber(res, "initrlomacl");
3309 
3310  ntups = PQntuples(res);
3311 
3312  /*
3313  * Each large object has its own BLOB archive entry.
3314  */
3315  binfo = (BlobInfo *) pg_malloc(ntups * sizeof(BlobInfo));
3316 
3317  for (i = 0; i < ntups; i++)
3318  {
3319  binfo[i].dobj.objType = DO_BLOB;
3320  binfo[i].dobj.catId.tableoid = LargeObjectRelationId;
3321  binfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
3322  AssignDumpId(&binfo[i].dobj);
3323 
3324  binfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oid));
3325  binfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_lomowner));
3326  binfo[i].blobacl = pg_strdup(PQgetvalue(res, i, i_lomacl));
3327  binfo[i].rblobacl = pg_strdup(PQgetvalue(res, i, i_rlomacl));
3328  binfo[i].initblobacl = pg_strdup(PQgetvalue(res, i, i_initlomacl));
3329  binfo[i].initrblobacl = pg_strdup(PQgetvalue(res, i, i_initrlomacl));
3330 
3331  if (PQgetisnull(res, i, i_lomacl) &&
3332  PQgetisnull(res, i, i_rlomacl) &&
3333  PQgetisnull(res, i, i_initlomacl) &&
3334  PQgetisnull(res, i, i_initrlomacl))
3335  binfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
3336 
3337  /*
3338  * In binary-upgrade mode for blobs, we do *not* dump out the blob
3339  * data, as it will be copied by pg_upgrade, which simply copies the
3340  * pg_largeobject table. We *do* however dump out anything but the
3341  * data, as pg_upgrade copies just pg_largeobject, but not
3342  * pg_largeobject_metadata, after the dump is restored.
3343  */
3344  if (dopt->binary_upgrade)
3345  binfo[i].dobj.dump &= ~DUMP_COMPONENT_DATA;
3346  }
3347 
3348  /*
3349  * If we have any large objects, a "BLOBS" archive entry is needed. This
3350  * is just a placeholder for sorting; it carries no data now.
3351  */
3352  if (ntups > 0)
3353  {
3354  bdata = (DumpableObject *) pg_malloc(sizeof(DumpableObject));
3355  bdata->objType = DO_BLOB_DATA;
3356  bdata->catId = nilCatalogId;
3357  AssignDumpId(bdata);
3358  bdata->name = pg_strdup("BLOBS");
3359  }
3360 
3361  PQclear(res);
3362  destroyPQExpBuffer(blobQry);
3363 }
3364 
3365 /*
3366  * dumpBlob
3367  *
3368  * dump the definition (metadata) of the given large object
3369  */
3370 static void
3371 dumpBlob(Archive *fout, BlobInfo *binfo)
3372 {
3373  PQExpBuffer cquery = createPQExpBuffer();
3374  PQExpBuffer dquery = createPQExpBuffer();
3375 
3376  appendPQExpBuffer(cquery,
3377  "SELECT pg_catalog.lo_create('%s');\n",
3378  binfo->dobj.name);
3379 
3380  appendPQExpBuffer(dquery,
3381  "SELECT pg_catalog.lo_unlink('%s');\n",
3382  binfo->dobj.name);
3383 
3384  if (binfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
3385  ArchiveEntry(fout, binfo->dobj.catId, binfo->dobj.dumpId,
3386  ARCHIVE_OPTS(.tag = binfo->dobj.name,
3387  .owner = binfo->rolname,
3388  .description = "BLOB",
3389  .section = SECTION_PRE_DATA,
3390  .createStmt = cquery->data,
3391  .dropStmt = dquery->data));
3392 
3393  /* Dump comment if any */
3394  if (binfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3395  dumpComment(fout, "LARGE OBJECT", binfo->dobj.name,
3396  NULL, binfo->rolname,
3397  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3398 
3399  /* Dump security label if any */
3400  if (binfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3401  dumpSecLabel(fout, "LARGE OBJECT", binfo->dobj.name,
3402  NULL, binfo->rolname,
3403  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3404 
3405  /* Dump ACL if any */
3406  if (binfo->blobacl && (binfo->dobj.dump & DUMP_COMPONENT_ACL))
3407  dumpACL(fout, binfo->dobj.catId, binfo->dobj.dumpId, "LARGE OBJECT",
3408  binfo->dobj.name, NULL,
3409  NULL, binfo->rolname, binfo->blobacl, binfo->rblobacl,
3410  binfo->initblobacl, binfo->initrblobacl);
3411 
3412  destroyPQExpBuffer(cquery);
3413  destroyPQExpBuffer(dquery);
3414 }
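
/*
 * To illustrate the entry built above: for a large object whose OID were,
 * say, 16405 (a made-up value), the createStmt and dropStmt would read
 *
 *     SELECT pg_catalog.lo_create('16405');
 *     SELECT pg_catalog.lo_unlink('16405');
 *
 * The comment, security label, and ACL, when present, are emitted as
 * separate archive entries by the calls above.
 */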
3415 
3416 /*
3417  * dumpBlobs:
3418  * dump the data contents of all large objects
3419  */
3420 static int
3421 dumpBlobs(Archive *fout, void *arg)
3422 {
3423  const char *blobQry;
3424  const char *blobFetchQry;
3425  PGconn *conn = GetConnection(fout);
3426  PGresult *res;
3427  char buf[LOBBUFSIZE];
3428  int ntups;
3429  int i;
3430  int cnt;
3431 
3432  if (g_verbose)
3433  write_msg(NULL, "saving large objects\n");
3434 
3435  /*
3436  * Currently, we re-fetch all BLOB OIDs using a cursor. Consider scanning
3437  * the already-in-memory dumpable objects instead...
3438  */
3439  if (fout->remoteVersion >= 90000)
3440  blobQry =
3441  "DECLARE bloboid CURSOR FOR "
3442  "SELECT oid FROM pg_largeobject_metadata ORDER BY 1";
3443  else
3444  blobQry =
3445  "DECLARE bloboid CURSOR FOR "
3446  "SELECT DISTINCT loid FROM pg_largeobject ORDER BY 1";
3447 
3448  ExecuteSqlStatement(fout, blobQry);
3449 
3450  /* Command to fetch from cursor */
3451  blobFetchQry = "FETCH 1000 IN bloboid";
3452 
3453  do
3454  {
3455  /* Do a fetch */
3456  res = ExecuteSqlQuery(fout, blobFetchQry, PGRES_TUPLES_OK);
3457 
3458  /* Process the tuples, if any */
3459  ntups = PQntuples(res);
3460  for (i = 0; i < ntups; i++)
3461  {
3462  Oid blobOid;
3463  int loFd;
3464 
3465  blobOid = atooid(PQgetvalue(res, i, 0));
3466  /* Open the BLOB */
3467  loFd = lo_open(conn, blobOid, INV_READ);
3468  if (loFd == -1)
3469  exit_horribly(NULL, "could not open large object %u: %s",
3470  blobOid, PQerrorMessage(conn));
3471 
3472  StartBlob(fout, blobOid);
3473 
3474  /* Now read it in chunks, sending data to archive */
3475  do
3476  {
3477  cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
3478  if (cnt < 0)
3479  exit_horribly(NULL, "error reading large object %u: %s",
3480  blobOid, PQerrorMessage(conn));
3481 
3482  WriteData(fout, buf, cnt);
3483  } while (cnt > 0);
3484 
3485  lo_close(conn, loFd);
3486 
3487  EndBlob(fout, blobOid);
3488  }
3489 
3490  PQclear(res);
3491  } while (ntups > 0);
3492 
3493  return 1;
3494 }
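
/*
 * A sketch of the SQL exchanged above, assuming a >= 9.0 server; the cursor
 * name and fetch size are the ones hard-coded in this function:
 *
 *     DECLARE bloboid CURSOR FOR
 *         SELECT oid FROM pg_largeobject_metadata ORDER BY 1;
 *     FETCH 1000 IN bloboid;    -- repeated until no rows come back
 *
 * Each returned OID is then opened with lo_open() and streamed into the
 * archive in LOBBUFSIZE-sized chunks via lo_read() and WriteData().
 */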
3495 
3496 /*
3497  * getPolicies
3498  * get information about policies on a dumpable table.
3499  */
3500 void
3501 getPolicies(Archive *fout, TableInfo tblinfo[], int numTables)
3502 {
3503  PQExpBuffer query;
3504  PGresult *res;
3505  PolicyInfo *polinfo;
3506  int i_oid;
3507  int i_tableoid;
3508  int i_polname;
3509  int i_polcmd;
3510  int i_polpermissive;
3511  int i_polroles;
3512  int i_polqual;
3513  int i_polwithcheck;
3514  int i,
3515  j,
3516  ntups;
3517 
3518  if (fout->remoteVersion < 90500)
3519  return;
3520 
3521  query = createPQExpBuffer();
3522 
3523  for (i = 0; i < numTables; i++)
3524  {
3525  TableInfo *tbinfo = &tblinfo[i];
3526 
3527  /* Ignore row security on tables not to be dumped */
3528  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_POLICY))
3529  continue;
3530 
3531  if (g_verbose)
3532  write_msg(NULL, "reading row security enabled for table \"%s.%s\"\n",
3533  tbinfo->dobj.namespace->dobj.name,
3534  tbinfo->dobj.name);
3535 
3536  /*
3537  * Get row security enabled information for the table. We represent
3538  * RLS being enabled on a table by creating a PolicyInfo object with
3539  * null polname.
3540  */
3541  if (tbinfo->rowsec)
3542  {
3543  /*
3544  * Note: use tableoid 0 so that this object won't be mistaken for
3545  * something that pg_depend entries apply to.
3546  */
3547  polinfo = pg_malloc(sizeof(PolicyInfo));
3548  polinfo->dobj.objType = DO_POLICY;
3549  polinfo->dobj.catId.tableoid = 0;
3550  polinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
3551  AssignDumpId(&polinfo->dobj);
3552  polinfo->dobj.namespace = tbinfo->dobj.namespace;
3553  polinfo->dobj.name = pg_strdup(tbinfo->dobj.name);
3554  polinfo->poltable = tbinfo;
3555  polinfo->polname = NULL;
3556  polinfo->polcmd = '\0';
3557  polinfo->polpermissive = 0;
3558  polinfo->polroles = NULL;
3559  polinfo->polqual = NULL;
3560  polinfo->polwithcheck = NULL;
3561  }
3562 
3563  if (g_verbose)
3564  write_msg(NULL, "reading policies for table \"%s.%s\"\n",
3565  tbinfo->dobj.namespace->dobj.name,
3566  tbinfo->dobj.name);
3567 
3568  resetPQExpBuffer(query);
3569 
3570  /* Get the policies for the table. */
3571  if (fout->remoteVersion >= 100000)
3572  appendPQExpBuffer(query,
3573  "SELECT oid, tableoid, pol.polname, pol.polcmd, pol.polpermissive, "
3574  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
3575  " pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
3576  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
3577  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
3578  "FROM pg_catalog.pg_policy pol "
3579  "WHERE polrelid = '%u'",
3580  tbinfo->dobj.catId.oid);
3581  else
3582  appendPQExpBuffer(query,
3583  "SELECT oid, tableoid, pol.polname, pol.polcmd, 't' as polpermissive, "
3584  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
3585  " pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
3586  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
3587  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
3588  "FROM pg_catalog.pg_policy pol "
3589  "WHERE polrelid = '%u'",
3590  tbinfo->dobj.catId.oid);
3591  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3592 
3593  ntups = PQntuples(res);
3594 
3595  if (ntups == 0)
3596  {
3597  /*
3598  * No explicit policies to handle (only the default-deny policy,
3599  * which is handled as part of the table definition). Clean up
3600  * and return.
3601  */
3602  PQclear(res);
3603  continue;
3604  }
3605 
3606  i_oid = PQfnumber(res, "oid");
3607  i_tableoid = PQfnumber(res, "tableoid");
3608  i_polname = PQfnumber(res, "polname");
3609  i_polcmd = PQfnumber(res, "polcmd");
3610  i_polpermissive = PQfnumber(res, "polpermissive");
3611  i_polroles = PQfnumber(res, "polroles");
3612  i_polqual = PQfnumber(res, "polqual");
3613  i_polwithcheck = PQfnumber(res, "polwithcheck");
3614 
3615  polinfo = pg_malloc(ntups * sizeof(PolicyInfo));
3616 
3617  for (j = 0; j < ntups; j++)
3618  {
3619  polinfo[j].dobj.objType = DO_POLICY;
3620  polinfo[j].dobj.catId.tableoid =
3621  atooid(PQgetvalue(res, j, i_tableoid));
3622  polinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
3623  AssignDumpId(&polinfo[j].dobj);
3624  polinfo[j].dobj.namespace = tbinfo->dobj.namespace;
3625  polinfo[j].poltable = tbinfo;
3626  polinfo[j].polname = pg_strdup(PQgetvalue(res, j, i_polname));
3627  polinfo[j].dobj.name = pg_strdup(polinfo[j].polname);
3628 
3629  polinfo[j].polcmd = *(PQgetvalue(res, j, i_polcmd));
3630  polinfo[j].polpermissive = *(PQgetvalue(res, j, i_polpermissive)) == 't';
3631 
3632  if (PQgetisnull(res, j, i_polroles))
3633  polinfo[j].polroles = NULL;
3634  else
3635  polinfo[j].polroles = pg_strdup(PQgetvalue(res, j, i_polroles));
3636 
3637  if (PQgetisnull(res, j, i_polqual))
3638  polinfo[j].polqual = NULL;
3639  else
3640  polinfo[j].polqual = pg_strdup(PQgetvalue(res, j, i_polqual));
3641 
3642  if (PQgetisnull(res, j, i_polwithcheck))
3643  polinfo[j].polwithcheck = NULL;
3644  else
3645  polinfo[j].polwithcheck
3646  = pg_strdup(PQgetvalue(res, j, i_polwithcheck));
3647  }
3648  PQclear(res);
3649  }
3650  destroyPQExpBuffer(query);
3651 }
3652 
3653 /*
3654  * dumpPolicy
3655  * dump the definition of the given policy
3656  */
3657 static void
3658 dumpPolicy(Archive *fout, PolicyInfo *polinfo)
3659 {
3660  DumpOptions *dopt = fout->dopt;
3661  TableInfo *tbinfo = polinfo->poltable;
3662  PQExpBuffer query;
3663  PQExpBuffer delqry;
3664  const char *cmd;
3665  char *tag;
3666 
3667  if (dopt->dataOnly)
3668  return;
3669 
3670  /*
3671  * If polname is NULL, then this record is just indicating that ROW LEVEL
3672  * SECURITY is enabled for the table. Dump as ALTER TABLE <table> ENABLE
3673  * ROW LEVEL SECURITY.
3674  */
3675  if (polinfo->polname == NULL)
3676  {
3677  query = createPQExpBuffer();
3678 
3679  appendPQExpBuffer(query, "ALTER TABLE %s ENABLE ROW LEVEL SECURITY;",
3680  fmtQualifiedDumpable(tbinfo));
3681 
3682  /*
3683  * We must emit the ROW SECURITY object's dependency on its table
3684  * explicitly, because it will not match anything in pg_depend (unlike
3685  * the case for other PolicyInfo objects).
3686  */
3687  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3688  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3689  ARCHIVE_OPTS(.tag = polinfo->dobj.name,
3690  .namespace = polinfo->dobj.namespace->dobj.name,
3691  .owner = tbinfo->rolname,
3692  .description = "ROW SECURITY",
3693  .section = SECTION_POST_DATA,
3694  .createStmt = query->data,
3695  .dropStmt = "",
3696  .deps = &(tbinfo->dobj.dumpId),
3697  .nDeps = 1));
3698 
3699  destroyPQExpBuffer(query);
3700  return;
3701  }
3702 
3703  if (polinfo->polcmd == '*')
3704  cmd = "";
3705  else if (polinfo->polcmd == 'r')
3706  cmd = " FOR SELECT";
3707  else if (polinfo->polcmd == 'a')
3708  cmd = " FOR INSERT";
3709  else if (polinfo->polcmd == 'w')
3710  cmd = " FOR UPDATE";
3711  else if (polinfo->polcmd == 'd')
3712  cmd = " FOR DELETE";
3713  else
3714  {
3715  write_msg(NULL, "unexpected policy command type: %c\n",
3716  polinfo->polcmd);
3717  exit_nicely(1);
3718  }
3719 
3720  query = createPQExpBuffer();
3721  delqry = createPQExpBuffer();
3722 
3723  appendPQExpBuffer(query, "CREATE POLICY %s", fmtId(polinfo->polname));
3724 
3725  appendPQExpBuffer(query, " ON %s%s%s", fmtQualifiedDumpable(tbinfo),
3726  !polinfo->polpermissive ? " AS RESTRICTIVE" : "", cmd);
3727 
3728  if (polinfo->polroles != NULL)
3729  appendPQExpBuffer(query, " TO %s", polinfo->polroles);
3730 
3731  if (polinfo->polqual != NULL)
3732  appendPQExpBuffer(query, " USING (%s)", polinfo->polqual);
3733 
3734  if (polinfo->polwithcheck != NULL)
3735  appendPQExpBuffer(query, " WITH CHECK (%s)", polinfo->polwithcheck);
3736 
3737  appendPQExpBuffer(query, ";\n");
3738 
3739  appendPQExpBuffer(delqry, "DROP POLICY %s", fmtId(polinfo->polname));
3740  appendPQExpBuffer(delqry, " ON %s;\n", fmtQualifiedDumpable(tbinfo));
3741 
3742  tag = psprintf("%s %s", tbinfo->dobj.name, polinfo->dobj.name);
3743 
3744  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3745  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3746  ARCHIVE_OPTS(.tag = tag,
3747  .namespace = polinfo->dobj.namespace->dobj.name,
3748  .owner = tbinfo->rolname,
3749  .description = "POLICY",
3750  .section = SECTION_POST_DATA,
3751  .createStmt = query->data,
3752  .dropStmt = delqry->data));
3753 
3754  free(tag);
3755  destroyPQExpBuffer(query);
3756  destroyPQExpBuffer(delqry);
3757 }
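
/*
 * As an example of the output, a PolicyInfo with polname "p1", polcmd 'r',
 * polroles "alice" and polqual "(owner = CURRENT_USER)" on table public.t
 * (all names hypothetical) would yield roughly
 *
 *     CREATE POLICY p1 ON public.t FOR SELECT TO alice USING ((owner = CURRENT_USER));
 *     DROP POLICY p1 ON public.t;
 *
 * while a PolicyInfo with a NULL polname yields only
 *
 *     ALTER TABLE public.t ENABLE ROW LEVEL SECURITY;
 */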
3758 
3759 /*
3760  * getPublications
3761  * get information about publications
3762  */
3763 void
3764 getPublications(Archive *fout)
3765 {
3766  DumpOptions *dopt = fout->dopt;
3767  PQExpBuffer query;
3768  PGresult *res;
3769  PublicationInfo *pubinfo;
3770  int i_tableoid;
3771  int i_oid;
3772  int i_pubname;
3773  int i_rolname;
3774  int i_puballtables;
3775  int i_pubinsert;
3776  int i_pubupdate;
3777  int i_pubdelete;
3778  int i_pubtruncate;
3779  int i,
3780  ntups;
3781 
3782  if (dopt->no_publications || fout->remoteVersion < 100000)
3783  return;
3784 
3785  query = createPQExpBuffer();
3786 
3787  resetPQExpBuffer(query);
3788 
3789  /* Get the publications. */
3790  if (fout->remoteVersion >= 110000)
3791  appendPQExpBuffer(query,
3792  "SELECT p.tableoid, p.oid, p.pubname, "
3793  "(%s p.pubowner) AS rolname, "
3794  "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, p.pubtruncate "
3795  "FROM pg_publication p",
3796  username_subquery);
3797  else
3798  appendPQExpBuffer(query,
3799  "SELECT p.tableoid, p.oid, p.pubname, "
3800  "(%s p.pubowner) AS rolname, "
3801  "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, false AS pubtruncate "
3802  "FROM pg_publication p",
3803  username_subquery);
3804 
3805  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3806 
3807  ntups = PQntuples(res);
3808 
3809  i_tableoid = PQfnumber(res, "tableoid");
3810  i_oid = PQfnumber(res, "oid");
3811  i_pubname = PQfnumber(res, "pubname");
3812  i_rolname = PQfnumber(res, "rolname");
3813  i_puballtables = PQfnumber(res, "puballtables");
3814  i_pubinsert = PQfnumber(res, "pubinsert");
3815  i_pubupdate = PQfnumber(res, "pubupdate");
3816  i_pubdelete = PQfnumber(res, "pubdelete");
3817  i_pubtruncate = PQfnumber(res, "pubtruncate");
3818 
3819  pubinfo = pg_malloc(ntups * sizeof(PublicationInfo));
3820 
3821  for (i = 0; i < ntups; i++)
3822  {
3823  pubinfo[i].dobj.objType = DO_PUBLICATION;
3824  pubinfo[i].dobj.catId.tableoid =
3825  atooid(PQgetvalue(res, i, i_tableoid));
3826  pubinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
3827  AssignDumpId(&pubinfo[i].dobj);
3828  pubinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_pubname));
3829  pubinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
3830  pubinfo[i].puballtables =
3831  (strcmp(PQgetvalue(res, i, i_puballtables), "t") == 0);
3832  pubinfo[i].pubinsert =
3833  (strcmp(PQgetvalue(res, i, i_pubinsert), "t") == 0);
3834  pubinfo[i].pubupdate =
3835  (strcmp(PQgetvalue(res, i, i_pubupdate), "t") == 0);
3836  pubinfo[i].pubdelete =
3837  (strcmp(PQgetvalue(res, i, i_pubdelete), "t") == 0);
3838  pubinfo[i].pubtruncate =
3839  (strcmp(PQgetvalue(res, i, i_pubtruncate), "t") == 0);
3840 
3841  if (strlen(pubinfo[i].rolname) == 0)
3842  write_msg(NULL, "WARNING: owner of publication \"%s\" appears to be invalid\n",
3843  pubinfo[i].dobj.name);
3844 
3845  /* Decide whether we want to dump it */
3846  selectDumpableObject(&(pubinfo[i].dobj), fout);
3847  }
3848  PQclear(res);
3849 
3850  destroyPQExpBuffer(query);
3851 }
3852 
3853 /*
3854  * dumpPublication
3855  * dump the definition of the given publication
3856  */
3857 static void
3858 dumpPublication(Archive *fout, PublicationInfo *pubinfo)
3859 {
3860  PQExpBuffer delq;
3861  PQExpBuffer query;
3862  char *qpubname;
3863  bool first = true;
3864 
3865  if (!(pubinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3866  return;
3867 
3868  delq = createPQExpBuffer();
3869  query = createPQExpBuffer();
3870 
3871  qpubname = pg_strdup(fmtId(pubinfo->dobj.name));
3872 
3873  appendPQExpBuffer(delq, "DROP PUBLICATION %s;\n",
3874  qpubname);
3875 
3876  appendPQExpBuffer(query, "CREATE PUBLICATION %s",
3877  qpubname);
3878 
3879  if (pubinfo->puballtables)
3880  appendPQExpBufferStr(query, " FOR ALL TABLES");
3881 
3882  appendPQExpBufferStr(query, " WITH (publish = '");
3883  if (pubinfo->pubinsert)
3884  {
3885  appendPQExpBufferStr(query, "insert");
3886  first = false;
3887  }
3888 
3889  if (pubinfo->pubupdate)
3890  {
3891  if (!first)
3892  appendPQExpBufferStr(query, ", ");
3893 
3894  appendPQExpBufferStr(query, "update");
3895  first = false;
3896  }
3897 
3898  if (pubinfo->pubdelete)
3899  {
3900  if (!first)
3901  appendPQExpBufferStr(query, ", ");
3902 
3903  appendPQExpBufferStr(query, "delete");
3904  first = false;
3905  }
3906 
3907  if (pubinfo->pubtruncate)
3908  {
3909  if (!first)
3910  appendPQExpBufferStr(query, ", ");
3911 
3912  appendPQExpBufferStr(query, "truncate");
3913  first = false;
3914  }
3915 
3916  appendPQExpBufferStr(query, "');\n");
3917 
3918  ArchiveEntry(fout, pubinfo->dobj.catId, pubinfo->dobj.dumpId,
3919  ARCHIVE_OPTS(.tag = pubinfo->dobj.name,
3920  .owner = pubinfo->rolname,
3921  .description = "PUBLICATION",
3922  .section = SECTION_POST_DATA,
3923  .createStmt = query->data,
3924  .dropStmt = delq->data));
3925 
3926  if (pubinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3927  dumpComment(fout, "PUBLICATION", qpubname,
3928  NULL, pubinfo->rolname,
3929  pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
3930 
3931  if (pubinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3932  dumpSecLabel(fout, "PUBLICATION", qpubname,
3933  NULL, pubinfo->rolname,
3934  pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
3935 
3936  destroyPQExpBuffer(delq);
3937  destroyPQExpBuffer(query);
3938  free(qpubname);
3939 }
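
/*
 * For instance, a hypothetical publication "mypub" that covers all tables
 * and publishes only inserts and updates would be recreated as
 *
 *     CREATE PUBLICATION mypub FOR ALL TABLES WITH (publish = 'insert, update');
 *
 * with the matching drop statement
 *
 *     DROP PUBLICATION mypub;
 */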
3940 
3941 /*
3942  * getPublicationTables
3943  * get information about publication membership for dumpable tables.
3944  */
3945 void
3946 getPublicationTables(Archive *fout, TableInfo tblinfo[], int numTables)
3947 {
3948  PQExpBuffer query;
3949  PGresult *res;
3950  PublicationRelInfo *pubrinfo;
3951  DumpOptions *dopt = fout->dopt;
3952  int i_tableoid;
3953  int i_oid;
3954  int i_pubname;
3955  int i,
3956  j,
3957  ntups;
3958 
3959  if (dopt->no_publications || fout->remoteVersion < 100000)
3960  return;
3961 
3962  query = createPQExpBuffer();
3963 
3964  for (i = 0; i < numTables; i++)
3965  {
3966  TableInfo *tbinfo = &tblinfo[i];
3967 
3968  /* Only plain tables can be added to publications. */
3969  if (tbinfo->relkind != RELKIND_RELATION)
3970  continue;
3971 
3972  /*
3973  * Ignore publication membership of tables whose definitions are not
3974  * to be dumped.
3975  */
3976  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3977  continue;
3978 
3979  if (g_verbose)
3980  write_msg(NULL, "reading publication membership for table \"%s.%s\"\n",
3981  tbinfo->dobj.namespace->dobj.name,
3982  tbinfo->dobj.name);
3983 
3984  resetPQExpBuffer(query);
3985 
3986  /* Get the publication membership for the table. */
3987  appendPQExpBuffer(query,
3988  "SELECT pr.tableoid, pr.oid, p.pubname "
3989  "FROM pg_publication_rel pr, pg_publication p "
3990  "WHERE pr.prrelid = '%u'"
3991  " AND p.oid = pr.prpubid",
3992  tbinfo->dobj.catId.oid);
3993  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3994 
3995  ntups = PQntuples(res);
3996 
3997  if (ntups == 0)
3998  {
3999  /*
4000  * Table is not a member of any publication. Clean up and return.
4001  */
4002  PQclear(res);
4003  continue;
4004  }
4005 
4006  i_tableoid = PQfnumber(res, "tableoid");
4007  i_oid = PQfnumber(res, "oid");
4008  i_pubname = PQfnumber(res, "pubname");
4009 
4010  pubrinfo = pg_malloc(ntups * sizeof(PublicationRelInfo));
4011 
4012  for (j = 0; j < ntups; j++)
4013  {
4014  pubrinfo[j].dobj.objType = DO_PUBLICATION_REL;
4015  pubrinfo[j].dobj.catId.tableoid =
4016  atooid(PQgetvalue(res, j, i_tableoid));
4017  pubrinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
4018  AssignDumpId(&pubrinfo[j].dobj);
4019  pubrinfo[j].dobj.namespace = tbinfo->dobj.namespace;
4020  pubrinfo[j].dobj.name = tbinfo->dobj.name;
4021  pubrinfo[j].pubname = pg_strdup(PQgetvalue(res, j, i_pubname));
4022  pubrinfo[j].pubtable = tbinfo;
4023 
4024  /* Decide whether we want to dump it */
4025  selectDumpablePublicationTable(&(pubrinfo[j].dobj), fout);
4026  }
4027  PQclear(res);
4028  }
4029  destroyPQExpBuffer(query);
4030 }
4031 
4032 /*
4033  * dumpPublicationTable
4034  * dump the definition of the given publication table mapping
4035  */
4036 static void
4037 dumpPublicationTable(Archive *fout, PublicationRelInfo *pubrinfo)
4038 {
4039  TableInfo *tbinfo = pubrinfo->pubtable;
4040  PQExpBuffer query;
4041  char *tag;
4042 
4043  if (!(pubrinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
4044  return;
4045 
4046  tag = psprintf("%s %s", pubrinfo->pubname, tbinfo->dobj.name);
4047 
4048  query = createPQExpBuffer();
4049 
4050  appendPQExpBuffer(query, "ALTER PUBLICATION %s ADD TABLE ONLY",
4051  fmtId(pubrinfo->pubname));
4052  appendPQExpBuffer(query, " %s;\n",
4053  fmtQualifiedDumpable(tbinfo));
4054 
4055  /*
4056  * There is no point in creating a drop query, as the drop is done by the
4057  * table drop.
4058  */
4059  ArchiveEntry(fout, pubrinfo->dobj.catId, pubrinfo->dobj.dumpId,
4060  ARCHIVE_OPTS(.tag = tag,
4061  .namespace = tbinfo->dobj.namespace->dobj.name,
4062  .description = "PUBLICATION TABLE",
4063  .owner = "",
4064  .section = SECTION_POST_DATA,
4065  .createStmt = query->data,
4066  .dropStmt = ""));
4067 
4068  free(tag);
4069  destroyPQExpBuffer(query);
4070 }
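
/*
 * The generated statement has this shape (publication and table names are
 * hypothetical):
 *
 *     ALTER PUBLICATION mypub ADD TABLE ONLY public.t;
 */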
4071 
4072 /*
4073  * Is the currently connected user a superuser?
4074  */
4075 static bool
4076 is_superuser(Archive *fout)
4077 {
4078  ArchiveHandle *AH = (ArchiveHandle *) fout;
4079  const char *val;
4080 
4081  val = PQparameterStatus(AH->connection, "is_superuser");
4082 
4083  if (val && strcmp(val, "on") == 0)
4084  return true;
4085 
4086  return false;
4087 }
4088 
4089 /*
4090  * getSubscriptions
4091  * get information about subscriptions
4092  */
4093 void
4094 getSubscriptions(Archive *fout)
4095 {
4096  DumpOptions *dopt = fout->dopt;
4097  PQExpBuffer query;
4098  PGresult *res;
4099  SubscriptionInfo *subinfo;
4100  int i_tableoid;
4101  int i_oid;
4102  int i_subname;
4103  int i_rolname;
4104  int i_subconninfo;
4105  int i_subslotname;
4106  int i_subsynccommit;
4107  int i_subpublications;
4108  int i,
4109  ntups;
4110 
4111  if (dopt->no_subscriptions || fout->remoteVersion < 100000)
4112  return;
4113 
4114  if (!is_superuser(fout))
4115  {
4116  int n;
4117 
4118  res = ExecuteSqlQuery(fout,
4119  "SELECT count(*) FROM pg_subscription "
4120  "WHERE subdbid = (SELECT oid FROM pg_database"
4121  " WHERE datname = current_database())",
4122  PGRES_TUPLES_OK);
4123  n = atoi(PQgetvalue(res, 0, 0));
4124  if (n > 0)
4125  write_msg(NULL, "WARNING: subscriptions not dumped because current user is not a superuser\n");
4126  PQclear(res);
4127  return;
4128  }
4129 
4130  query = createPQExpBuffer();
4131 
4132  resetPQExpBuffer(query);
4133 
4134  /* Get the subscriptions in current database. */
4135  appendPQExpBuffer(query,
4136  "SELECT s.tableoid, s.oid, s.subname,"
4137  "(%s s.subowner) AS rolname, "
4138  " s.subconninfo, s.subslotname, s.subsynccommit, "
4139  " s.subpublications "
4140  "FROM pg_subscription s "
4141  "WHERE s.subdbid = (SELECT oid FROM pg_database"
4142  " WHERE datname = current_database())",
4143  username_subquery);
4144  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4145 
4146  ntups = PQntuples(res);
4147 
4148  i_tableoid = PQfnumber(res, "tableoid");
4149  i_oid = PQfnumber(res, "oid");
4150  i_subname = PQfnumber(res, "subname");
4151  i_rolname = PQfnumber(res, "rolname");
4152  i_subconninfo = PQfnumber(res, "subconninfo");
4153  i_subslotname = PQfnumber(res, "subslotname");
4154  i_subsynccommit = PQfnumber(res, "subsynccommit");
4155  i_subpublications = PQfnumber(res, "subpublications");
4156 
4157  subinfo = pg_malloc(ntups * sizeof(SubscriptionInfo));
4158 
4159  for (i = 0; i < ntups; i++)
4160  {
4161  subinfo[i].dobj.objType = DO_SUBSCRIPTION;
4162  subinfo[i].dobj.catId.tableoid =
4163  atooid(PQgetvalue(res, i, i_tableoid));
4164  subinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4165  AssignDumpId(&subinfo[i].dobj);
4166  subinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_subname));
4167  subinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4168  subinfo[i].subconninfo = pg_strdup(PQgetvalue(res, i, i_subconninfo));
4169  if (PQgetisnull(res, i, i_subslotname))
4170  subinfo[i].subslotname = NULL;
4171  else
4172  subinfo[i].subslotname = pg_strdup(PQgetvalue(res, i, i_subslotname));
4173  subinfo[i].subsynccommit =
4174  pg_strdup(PQgetvalue(res, i, i_subsynccommit));
4175  subinfo[i].subpublications =
4176  pg_strdup(PQgetvalue(res, i, i_subpublications));
4177 
4178  if (strlen(subinfo[i].rolname) == 0)
4179  write_msg(NULL, "WARNING: owner of subscription \"%s\" appears to be invalid\n",
4180  subinfo[i].dobj.name);
4181 
4182  /* Decide whether we want to dump it */
4183  selectDumpableObject(&(subinfo[i].dobj), fout);
4184  }
4185  PQclear(res);
4186 
4187  destroyPQExpBuffer(query);
4188 }
4189 
4190 /*
4191  * dumpSubscription
4192  * dump the definition of the given subscription
4193  */
4194 static void
4195 dumpSubscription(Archive *fout, SubscriptionInfo *subinfo)
4196 {
4197  PQExpBuffer delq;
4198  PQExpBuffer query;
4199  PQExpBuffer publications;
4200  char *qsubname;
4201  char **pubnames = NULL;
4202  int npubnames = 0;
4203  int i;
4204 
4205  if (!(subinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
4206  return;
4207 
4208  delq = createPQExpBuffer();
4209  query = createPQExpBuffer();
4210 
4211  qsubname = pg_strdup(fmtId(subinfo->dobj.name));
4212 
4213  appendPQExpBuffer(delq, "DROP SUBSCRIPTION %s;\n",
4214  qsubname);
4215 
4216  appendPQExpBuffer(query, "CREATE SUBSCRIPTION %s CONNECTION ",
4217  qsubname);
4218  appendStringLiteralAH(query, subinfo->subconninfo, fout);
4219 
4220  /* Build list of quoted publications and append them to query. */
4221  if (!parsePGArray(subinfo->subpublications, &pubnames, &npubnames))
4222  {
4223  write_msg(NULL,
4224  "WARNING: could not parse subpublications array\n");
4225  if (pubnames)
4226  free(pubnames);
4227  pubnames = NULL;
4228  npubnames = 0;
4229  }
4230 
4231  publications = createPQExpBuffer();
4232  for (i = 0; i < npubnames; i++)
4233  {
4234  if (i > 0)
4235  appendPQExpBufferStr(publications, ", ");
4236 
4237  appendPQExpBufferStr(publications, fmtId(pubnames[i]));
4238  }
4239 
4240  appendPQExpBuffer(query, " PUBLICATION %s WITH (connect = false, slot_name = ", publications->data);
4241  if (subinfo->subslotname)
4242  appendStringLiteralAH(query, subinfo->subslotname, fout);
4243  else
4244  appendPQExpBufferStr(query, "NONE");
4245 
4246  if (strcmp(subinfo->subsynccommit, "off") != 0)
4247  appendPQExpBuffer(query, ", synchronous_commit = %s", fmtId(subinfo->subsynccommit));
4248 
4249  appendPQExpBufferStr(query, ");\n");
4250 
4251  ArchiveEntry(fout, subinfo->dobj.catId, subinfo->dobj.dumpId,
4252  ARCHIVE_OPTS(.tag = subinfo->dobj.name,
4253  .owner = subinfo->rolname,
4254  .description = "SUBSCRIPTION",
4255  .section = SECTION_POST_DATA,
4256  .createStmt = query->data,
4257  .dropStmt = delq->data));
4258 
4259  if (subinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
4260  dumpComment(fout, "SUBSCRIPTION", qsubname,
4261  NULL, subinfo->rolname,
4262  subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
4263 
4264  if (subinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
4265  dumpSecLabel(fout, "SUBSCRIPTION", qsubname,
4266  NULL, subinfo->rolname,
4267  subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
4268 
4269  destroyPQExpBuffer(publications);
4270  if (pubnames)
4271  free(pubnames);
4272 
4273  destroyPQExpBuffer(delq);
4274  destroyPQExpBuffer(query);
4275  free(qsubname);
4276 }
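
/*
 * Putting the pieces together, a hypothetical subscription "mysub" on
 * publication "mypub" with a non-default synchronous_commit would come out
 * roughly as (connection string invented for illustration)
 *
 *     CREATE SUBSCRIPTION mysub CONNECTION 'host=primary dbname=src'
 *         PUBLICATION mypub WITH (connect = false, slot_name = 'mysub',
 *         synchronous_commit = local);
 *
 * connect = false is emitted unconditionally, so restoring the dump does not
 * immediately try to reach the publisher or create a remote slot.
 */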
4277 
4278 static void
4279 binary_upgrade_set_type_oids_by_type_oid(Archive *fout,
4280  PQExpBuffer upgrade_buffer,
4281  Oid pg_type_oid,
4282  bool force_array_type)
4283 {
4284  PQExpBuffer upgrade_query = createPQExpBuffer();
4285  PGresult *res;
4286  Oid pg_type_array_oid;
4287 
4288  appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type oid\n");
4289  appendPQExpBuffer(upgrade_buffer,
4290  "SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4291  pg_type_oid);
4292 
4293  /* we only support old >= 8.3 for binary upgrades */
4294  appendPQExpBuffer(upgrade_query,
4295  "SELECT typarray "
4296  "FROM pg_catalog.pg_type "
4297  "WHERE oid = '%u'::pg_catalog.oid;",
4298  pg_type_oid);
4299 
4300  res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4301 
4302  pg_type_array_oid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typarray")));
4303 
4304  PQclear(res);
4305 
4306  if (!OidIsValid(pg_type_array_oid) && force_array_type)
4307  {
4308  /*
4309  * If the old version didn't assign an array type, but the new version
4310  * does, we must select an unused type OID to assign. This currently
4311  * only happens for domains, when upgrading pre-v11 to v11 and up.
4312  *
4313  * Note: local state here is kind of ugly, but we must have some,
4314  * since we mustn't choose the same unused OID more than once.
4315  */
4316  static Oid next_possible_free_oid = FirstNormalObjectId;
4317  bool is_dup;
4318 
4319  do
4320  {
4321  ++next_possible_free_oid;
4322  printfPQExpBuffer(upgrade_query,
4323  "SELECT EXISTS(SELECT 1 "
4324  "FROM pg_catalog.pg_type "
4325  "WHERE oid = '%u'::pg_catalog.oid);",
4326  next_possible_free_oid);
4327  res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4328  is_dup = (PQgetvalue(res, 0, 0)[0] == 't');
4329  PQclear(res);
4330  } while (is_dup);
4331 
4332  pg_type_array_oid = next_possible_free_oid;
4333  }
4334 
4335  if (OidIsValid(pg_type_array_oid))
4336  {
4337  appendPQExpBufferStr(upgrade_buffer,
4338  "\n-- For binary upgrade, must preserve pg_type array oid\n");
4339  appendPQExpBuffer(upgrade_buffer,
4340  "SELECT pg_catalog.binary_upgrade_set_next_array_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4341  pg_type_array_oid);
4342  }
4343 
4344  destroyPQExpBuffer(upgrade_query);
4345 }
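
/*
 * The text appended to upgrade_buffer above ends up looking like this
 * (OIDs are placeholders):
 *
 *     -- For binary upgrade, must preserve pg_type oid
 *     SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('16440'::pg_catalog.oid);
 *
 *     -- For binary upgrade, must preserve pg_type array oid
 *     SELECT pg_catalog.binary_upgrade_set_next_array_pg_type_oid('16441'::pg_catalog.oid);
 */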
4346 
4347 static bool
4348 binary_upgrade_set_type_oids_by_rel_oid(Archive *fout,
4349  PQExpBuffer upgrade_buffer,
4350  Oid pg_rel_oid)
4351 {
4352  PQExpBuffer upgrade_query = createPQExpBuffer();
4353  PGresult *upgrade_res;
4354  Oid pg_type_oid;
4355  bool toast_set = false;
4356 
4357  /*
4358  * We only support old >= 8.3 for binary upgrades.
4359  *
4360  * We purposefully ignore toast OIDs for partitioned tables; the reason is
4361  * that versions 10 and 11 have them, but 12 does not, so emitting them
4362  * causes the upgrade to fail.
4363  */
4364  appendPQExpBuffer(upgrade_query,
4365  "SELECT c.reltype AS crel, t.reltype AS trel "
4366  "FROM pg_catalog.pg_class c "
4367  "LEFT JOIN pg_catalog.pg_class t ON "
4368  " (c.reltoastrelid = t.oid AND c.relkind <> '%c') "
4369  "WHERE c.oid = '%u'::pg_catalog.oid;",
4370  RELKIND_PARTITIONED_TABLE, pg_rel_oid);
4371 
4372  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4373 
4374  pg_type_oid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "crel")));
4375 
4376  binary_upgrade_set_type_oids_by_type_oid(fout, upgrade_buffer,
4377  pg_type_oid, false);
4378 
4379  if (!PQgetisnull(upgrade_res, 0, PQfnumber(upgrade_res, "trel")))
4380  {
4381  /* Toast tables do not have pg_type array rows */
4382  Oid pg_type_toast_oid = atooid(PQgetvalue(upgrade_res, 0,
4383  PQfnumber(upgrade_res, "trel")));
4384 
4385  appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type toast oid\n");
4386  appendPQExpBuffer(upgrade_buffer,
4387  "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4388  pg_type_toast_oid);
4389 
4390  toast_set = true;
4391  }
4392 
4393  PQclear(upgrade_res);
4394  destroyPQExpBuffer(upgrade_query);
4395 
4396  return toast_set;
4397 }
4398 
4399 static void
4400 binary_upgrade_set_pg_class_oids(Archive *fout,
4401  PQExpBuffer upgrade_buffer, Oid pg_class_oid,
4402  bool is_index)
4403 {
4404  PQExpBuffer upgrade_query = createPQExpBuffer();
4405  PGresult *upgrade_res;
4406  Oid pg_class_reltoastrelid;
4407  Oid pg_index_indexrelid;
4408 
4409  appendPQExpBuffer(upgrade_query,
4410  "SELECT c.reltoastrelid, i.indexrelid "
4411  "FROM pg_catalog.pg_class c LEFT JOIN "
4412  "pg_catalog.pg_index i ON (c.reltoastrelid = i.indrelid AND i.indisvalid) "
4413  "WHERE c.oid = '%u'::pg_catalog.oid;",
4414  pg_class_oid);
4415 
4416  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4417 
4418  pg_class_reltoastrelid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "reltoastrelid")));
4419  pg_index_indexrelid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "indexrelid")));
4420 
4421  appendPQExpBufferStr(upgrade_buffer,
4422  "\n-- For binary upgrade, must preserve pg_class oids\n");
4423 
4424  if (!is_index)
4425  {
4426  appendPQExpBuffer(upgrade_buffer,
4427  "SELECT pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('%u'::pg_catalog.oid);\n",
4428  pg_class_oid);
4429  /* only tables have toast tables, not indexes */
4430  if (OidIsValid(pg_class_reltoastrelid))
4431  {
4432  /*
4433  * One complexity is that the table definition might not require
4434  * the creation of a TOAST table, and the TOAST table might have
4435  * been created long after table creation, when the table was
4436  * loaded with wide data. By setting the TOAST oid we force
4437  * creation of the TOAST heap and TOAST index by the backend so we
4438  * can cleanly copy the files during binary upgrade.
4439  */
4440 
4441  appendPQExpBuffer(upgrade_buffer,
4442  "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('%u'::pg_catalog.oid);\n",
4443  pg_class_reltoastrelid);
4444 
4445  /* every toast table has an index */
4446  appendPQExpBuffer(upgrade_buffer,
4447  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
4448  pg_index_indexrelid);
4449  }
4450  }
4451  else
4452  appendPQExpBuffer(upgrade_buffer,
4453  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
4454  pg_class_oid);
4455 
4456  appendPQExpBufferChar(upgrade_buffer, '\n');
4457 
4458  PQclear(upgrade_res);
4459  destroyPQExpBuffer(upgrade_query);
4460 }
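
/*
 * For a plain table that has a TOAST table, the buffer therefore gains three
 * calls (OIDs below are placeholders):
 *
 *     -- For binary upgrade, must preserve pg_class oids
 *     SELECT pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('16500'::pg_catalog.oid);
 *     SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('16503'::pg_catalog.oid);
 *     SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('16505'::pg_catalog.oid);
 *
 * whereas an index gets only the single set_next_index_pg_class_oid() call.
 */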
4461 
4462 /*
4463  * If the DumpableObject is a member of an extension, add a suitable
4464  * ALTER EXTENSION ADD command to the creation commands in upgrade_buffer.
4465  *
4466  * For somewhat historical reasons, objname should already be quoted,
4467  * but not objnamespace (if any).
4468  */
4469 static void
4470 binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
4471  DumpableObject *dobj,
4472  const char *objtype,
4473  const char *objname,
4474  const char *objnamespace)
4475 {
4476  DumpableObject *extobj = NULL;
4477  int i;
4478 
4479  if (!dobj->ext_member)
4480  return;
4481 
4482  /*
4483  * Find the parent extension. We could avoid this search if we wanted to
4484  * add a link field to DumpableObject, but the space costs of that would
4485  * be considerable. We assume that member objects could only have a
4486  * direct dependency on their own extension, not any others.
4487  */
4488  for (i = 0; i < dobj->nDeps; i++)
4489  {
4490  extobj = findObjectByDumpId(dobj->dependencies[i]);
4491  if (extobj && extobj->objType == DO_EXTENSION)
4492  break;
4493  extobj = NULL;
4494  }
4495  if (extobj == NULL)
4496  exit_horribly(NULL, "could not find parent extension for %s %s\n",
4497  objtype, objname);
4498 
4499  appendPQExpBufferStr(upgrade_buffer,
4500  "\n-- For binary upgrade, handle extension membership the hard way\n");
4501  appendPQExpBuffer(upgrade_buffer, "ALTER EXTENSION %s ADD %s ",
4502  fmtId(extobj->name),
4503  objtype);
4504  if (objnamespace && *objnamespace)
4505  appendPQExpBuffer(upgrade_buffer, "%s.", fmtId(objnamespace));
4506  appendPQExpBuffer(upgrade_buffer, "%s;\n", objname);
4507 }
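
/*
 * Example output, assuming the caller passes objtype "FUNCTION", objname
 * "f(integer)" and objnamespace "public" for a member of a hypothetical
 * extension "myext":
 *
 *     -- For binary upgrade, handle extension membership the hard way
 *     ALTER EXTENSION myext ADD FUNCTION public.f(integer);
 */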
4508 
4509 /*
4510  * getNamespaces:
4511  * read all namespaces in the system catalogs and return them in the
4512  * NamespaceInfo* structure
4513  *
4514  * numNamespaces is set to the number of namespaces read in
4515  */
4516 NamespaceInfo *
4517 getNamespaces(Archive *fout, int *numNamespaces)
4518 {
4519  DumpOptions *dopt = fout->dopt;
4520  PGresult *res;
4521  int ntups;
4522  int i;
4523  PQExpBuffer query;
4524  NamespaceInfo *nsinfo;
4525  int i_tableoid;
4526  int i_oid;
4527  int i_nspname;
4528  int i_rolname;
4529  int i_nspacl;
4530  int i_rnspacl;
4531  int i_initnspacl;
4532  int i_initrnspacl;
4533 
4534  query = createPQExpBuffer();
4535 
4536  /*
4537  * we fetch all namespaces including system ones, so that every object we
4538  * read in can be linked to a containing namespace.
4539  */
4540  if (fout->remoteVersion >= 90600)
4541  {
4542  PQExpBuffer acl_subquery = createPQExpBuffer();
4543  PQExpBuffer racl_subquery = createPQExpBuffer();
4544  PQExpBuffer init_acl_subquery = createPQExpBuffer();
4545  PQExpBuffer init_racl_subquery = createPQExpBuffer();
4546 
4547  buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
4548  init_racl_subquery, "n.nspacl", "n.nspowner", "'n'",
4549  dopt->binary_upgrade);
4550 
4551  appendPQExpBuffer(query, "SELECT n.tableoid, n.oid, n.nspname, "
4552  "(%s nspowner) AS rolname, "
4553  "%s as nspacl, "
4554  "%s as rnspacl, "
4555  "%s as initnspacl, "
4556  "%s as initrnspacl "
4557  "FROM pg_namespace n "
4558  "LEFT JOIN pg_init_privs pip "
4559  "ON (n.oid = pip.objoid "
4560  "AND pip.classoid = 'pg_namespace'::regclass "
4561  "AND pip.objsubid = 0",
4562  username_subquery,
4563  acl_subquery->data,
4564  racl_subquery->data,
4565  init_acl_subquery->data,
4566  init_racl_subquery->data);
4567 
4568  appendPQExpBuffer(query, ") ");
4569 
4570  destroyPQExpBuffer(acl_subquery);
4571  destroyPQExpBuffer(racl_subquery);
4572  destroyPQExpBuffer(init_acl_subquery);
4573  destroyPQExpBuffer(init_racl_subquery);
4574  }
4575  else
4576  appendPQExpBuffer(query, "SELECT tableoid, oid, nspname, "
4577  "(%s nspowner) AS rolname, "
4578  "nspacl, NULL as rnspacl, "
4579  "NULL AS initnspacl, NULL as initrnspacl "
4580  "FROM pg_namespace",
4581  username_subquery);
4582 
4583  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4584 
4585  ntups = PQntuples(res);
4586 
4587  nsinfo = (NamespaceInfo *) pg_malloc(ntups * sizeof(NamespaceInfo));
4588 
4589  i_tableoid = PQfnumber(res, "tableoid");
4590  i_oid = PQfnumber(res, "oid");
4591  i_nspname = PQfnumber(res, "nspname");
4592  i_rolname = PQfnumber(res, "rolname");
4593  i_nspacl = PQfnumber(res, "nspacl");
4594  i_rnspacl = PQfnumber(res, "rnspacl");
4595  i_initnspacl = PQfnumber(res, "initnspacl");
4596  i_initrnspacl = PQfnumber(res, "initrnspacl");
4597 
4598  for (i = 0; i < ntups; i++)
4599  {
4600  nsinfo[i].dobj.objType = DO_NAMESPACE;
4601  nsinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4602  nsinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4603  AssignDumpId(&nsinfo[i].dobj);
4604  nsinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_nspname));
4605  nsinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4606  nsinfo[i].nspacl = pg_strdup(PQgetvalue(res, i, i_nspacl));
4607  nsinfo[i].rnspacl = pg_strdup(PQgetvalue(res, i, i_rnspacl));
4608  nsinfo[i].initnspacl = pg_strdup(PQgetvalue(res, i, i_initnspacl));
4609  nsinfo[i].initrnspacl = pg_strdup(PQgetvalue(res, i, i_initrnspacl));
4610 
4611  /* Decide whether to dump this namespace */
4612  selectDumpableNamespace(&nsinfo[i], fout);
4613 
4614  /*
4615  * Do not try to dump ACL if the ACL is empty or the default.
4616  *
4617  * This is useful because, for some schemas/objects, the only
4618  * component we are going to try to dump is the ACL; if we can
4619  * remove that, then 'dump' goes to zero/false and we don't consider
4620  * this object for dumping at all later on.
4621  */
4622  if (PQgetisnull(res, i, i_nspacl) && PQgetisnull(res, i, i_rnspacl) &&
4623  PQgetisnull(res, i, i_initnspacl) &&
4624  PQgetisnull(res, i, i_initrnspacl))
4625  nsinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4626 
4627  if (strlen(nsinfo[i].rolname) == 0)
4628  write_msg(NULL, "WARNING: owner of schema \"%s\" appears to be invalid\n",
4629  nsinfo[i].dobj.name);
4630  }
4631 
4632  PQclear(res);
4633  destroyPQExpBuffer(query);
4634 
4635  *numNamespaces = ntups;
4636 
4637  return nsinfo;
4638 }
4639 
4640 /*
4641  * findNamespace:
4642  * given a namespace OID, look up the info read by getNamespaces
4643  */
4644 static NamespaceInfo *
4645 findNamespace(Archive *fout, Oid nsoid)
4646 {
4647  NamespaceInfo *nsinfo;
4648 
4649  nsinfo = findNamespaceByOid(nsoid);
4650  if (nsinfo == NULL)
4651  exit_horribly(NULL, "schema with OID %u does not exist\n", nsoid);
4652  return nsinfo;
4653 }
4654 
4655 /*
4656  * getExtensions:
4657  * read all extensions in the system catalogs and return them in the
4658  * ExtensionInfo* structure
4659  *
4660  * numExtensions is set to the number of extensions read in
4661  */
4662 ExtensionInfo *
4663 getExtensions(Archive *fout, int *numExtensions)
4664 {
4665  DumpOptions *dopt = fout->dopt;
4666  PGresult *res;
4667  int ntups;
4668  int i;
4669  PQExpBuffer query;
4670  ExtensionInfo *extinfo;
4671  int i_tableoid;
4672  int i_oid;
4673  int i_extname;
4674  int i_nspname;
4675  int i_extrelocatable;
4676  int i_extversion;
4677  int i_extconfig;
4678  int i_extcondition;
4679 
4680  /*
4681  * Before 9.1, there are no extensions.
4682  */
4683  if (fout->remoteVersion < 90100)
4684  {
4685  *numExtensions = 0;
4686  return NULL;
4687  }
4688 
4689  query = createPQExpBuffer();
4690 
4691  appendPQExpBufferStr(query, "SELECT x.tableoid, x.oid, "
4692  "x.extname, n.nspname, x.extrelocatable, x.extversion, x.extconfig, x.extcondition "
4693  "FROM pg_extension x "
4694  "JOIN pg_namespace n ON n.oid = x.extnamespace");
4695 
4696  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4697 
4698  ntups = PQntuples(res);
4699 
4700  extinfo = (ExtensionInfo *) pg_malloc(ntups * sizeof(ExtensionInfo));
4701 
4702  i_tableoid = PQfnumber(res, "tableoid");
4703  i_oid = PQfnumber(res, "oid");
4704  i_extname = PQfnumber(res, "extname");
4705  i_nspname = PQfnumber(res, "nspname");
4706  i_extrelocatable = PQfnumber(res, "extrelocatable");
4707  i_extversion = PQfnumber(res, "extversion");
4708  i_extconfig = PQfnumber(res, "extconfig");
4709  i_extcondition = PQfnumber(res, "extcondition");
4710 
4711  for (i = 0; i < ntups; i++)
4712  {
4713  extinfo[i].dobj.objType = DO_EXTENSION;
4714  extinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4715  extinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4716  AssignDumpId(&extinfo[i].dobj);
4717  extinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_extname));
4718  extinfo[i].namespace = pg_strdup(PQgetvalue(res, i, i_nspname));
4719  extinfo[i].relocatable = *(PQgetvalue(res, i, i_extrelocatable)) == 't';
4720  extinfo[i].extversion = pg_strdup(PQgetvalue(res, i, i_extversion));
4721  extinfo[i].extconfig = pg_strdup(PQgetvalue(res, i, i_extconfig));
4722  extinfo[i].extcondition = pg_strdup(PQgetvalue(res, i, i_extcondition));
4723 
4724  /* Decide whether we want to dump it */
4725  selectDumpableExtension(&(extinfo[i]), dopt);
4726  }
4727 
4728  PQclear(res);
4729  destroyPQExpBuffer(query);
4730 
4731  *numExtensions = ntups;
4732 
4733  return extinfo;
4734 }
4735 
4736 /*
4737  * getTypes:
4738  * read all types in the system catalogs and return them in the
4739  * TypeInfo* structure
4740  *
4741  * numTypes is set to the number of types read in
4742  *
4743  * NB: this must run after getFuncs() because we assume we can do
4744  * findFuncByOid().
4745  */
4746 TypeInfo *
4747 getTypes(Archive *fout, int *numTypes)
4748 {
4749  DumpOptions *dopt = fout->dopt;
4750  PGresult *res;
4751  int ntups;
4752  int i;
4753  PQExpBuffer query = createPQExpBuffer();
4754  TypeInfo *tyinfo;
4755  ShellTypeInfo *stinfo;
4756  int i_tableoid;
4757  int i_oid;
4758  int i_typname;
4759  int i_typnamespace;
4760  int i_typacl;
4761  int i_rtypacl;
4762  int i_inittypacl;
4763  int i_initrtypacl;
4764  int i_rolname;
4765  int i_typelem;
4766  int i_typrelid;
4767  int i_typrelkind;
4768  int i_typtype;
4769  int i_typisdefined;
4770  int i_isarray;
4771 
4772  /*
4773  * we include even the built-in types because those may be used as array
4774  * elements by user-defined types
4775  *
4776  * we filter out the built-in types when we dump out the types
4777  *
4778  * same approach for undefined (shell) types and array types
4779  *
4780  * Note: as of 8.3 we can reliably detect whether a type is an
4781  * auto-generated array type by checking the element type's typarray.
4782  * (Before that the test is capable of generating false positives.) We
4783  * still check for name beginning with '_', though, so as to avoid the
4784  * cost of the subselect probe for all standard types. This would have to
4785  * be revisited if the backend ever allows renaming of array types.
4786  */
4787 
4788  if (fout->remoteVersion >= 90600)
4789  {
4790  PQExpBuffer acl_subquery = createPQExpBuffer();
4791  PQExpBuffer racl_subquery = createPQExpBuffer();
4792  PQExpBuffer initacl_subquery = createPQExpBuffer();
4793  PQExpBuffer initracl_subquery = createPQExpBuffer();
4794 
4795  buildACLQueries(acl_subquery, racl_subquery, initacl_subquery,
4796  initracl_subquery, "t.typacl", "t.typowner", "'T'",
4797  dopt->binary_upgrade);
4798 
4799  appendPQExpBuffer(query, "SELECT t.tableoid, t.oid, t.typname, "
4800  "t.typnamespace, "
4801  "%s AS typacl, "
4802  "%s AS rtypacl, "
4803  "%s AS inittypacl, "
4804  "%s AS initrtypacl, "
4805  "(%s t.typowner) AS rolname, "
4806  "t.typelem, t.typrelid, "
4807  "CASE WHEN t.typrelid = 0 THEN ' '::\"char\" "
4808  "ELSE (SELECT relkind FROM pg_class WHERE oid = t.typrelid) END AS typrelkind, "
4809  "t.typtype, t.typisdefined, "
4810  "t.typname[0] = '_' AND t.typelem != 0 AND "
4811  "(SELECT typarray FROM pg_type te WHERE oid = t.typelem) = t.oid AS isarray "
4812  "FROM pg_type t "
4813  "LEFT JOIN pg_init_privs pip ON "
4814  "(t.oid = pip.objoid "
4815  "AND pip.classoid = 'pg_type'::regclass "
4816  "AND pip.objsubid = 0) ",
4817  acl_subquery->data,
4818  racl_subquery->data,
4819  initacl_subquery->data,
4820  initracl_subquery->data,
4821  username_subquery);
4822 
4823  destroyPQExpBuffer(acl_subquery);
4824  destroyPQExpBuffer(racl_subquery);
4825  destroyPQExpBuffer(initacl_subquery);
4826  destroyPQExpBuffer(initracl_subquery);
4827  }
4828  else if (fout->remoteVersion >= 90200)
4829  {
4830  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4831  "typnamespace, typacl, NULL as rtypacl, "
4832  "NULL AS inittypacl, NULL AS initrtypacl, "
4833  "(%s typowner) AS rolname, "
4834  "typelem, typrelid, "
4835  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4836  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4837  "typtype, typisdefined, "
4838  "typname[0] = '_' AND typelem != 0 AND "
4839  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
4840  "FROM pg_type",
4841  username_subquery);
4842  }
4843  else if (fout->remoteVersion >= 80300)
4844  {
4845  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4846  "typnamespace, NULL AS typacl, NULL as rtypacl, "
4847  "NULL AS inittypacl, NULL AS initrtypacl, "
4848  "(%s typowner) AS rolname, "
4849  "typelem, typrelid, "
4850  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4851  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4852  "typtype, typisdefined, "
4853  "typname[0] = '_' AND typelem != 0 AND "
4854  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
4855  "FROM pg_type",
4856  username_subquery);
4857  }
4858  else
4859  {
4860  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4861  "typnamespace, NULL AS typacl, NULL as rtypacl, "
4862  "NULL AS inittypacl, NULL AS initrtypacl, "
4863  "(%s typowner) AS rolname, "
4864  "typelem, typrelid, "
4865  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4866  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4867  "typtype, typisdefined, "
4868  "typname[0] = '_' AND typelem != 0 AS isarray "
4869  "FROM pg_type",
4870  username_subquery);
4871  }
4872 
4873  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4874 
4875  ntups = PQntuples(res);
4876 
4877  tyinfo = (TypeInfo *) pg_malloc(ntups * sizeof(TypeInfo));
4878 
4879  i_tableoid = PQfnumber(res, "tableoid");
4880  i_oid = PQfnumber(res, "oid");
4881  i_typname = PQfnumber(res, "typname");
4882  i_typnamespace = PQfnumber(res, "typnamespace");
4883  i_typacl = PQfnumber(res, "typacl");
4884  i_rtypacl = PQfnumber(res, "rtypacl");
4885  i_inittypacl = PQfnumber(res, "inittypacl");
4886  i_initrtypacl = PQfnumber(res, "initrtypacl");
4887  i_rolname = PQfnumber(res, "rolname");
4888  i_typelem = PQfnumber(res, "typelem");
4889  i_typrelid = PQfnumber(res, "typrelid");
4890  i_typrelkind = PQfnumber(res, "typrelkind");
4891  i_typtype = PQfnumber(res, "typtype");
4892  i_typisdefined = PQfnumber(res, "typisdefined");
4893  i_isarray = PQfnumber(res, "isarray");
4894 
4895  for (i = 0; i < ntups; i++)
4896  {
4897  tyinfo[i].dobj.objType = DO_TYPE;
4898  tyinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4899  tyinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4900  AssignDumpId(&tyinfo[i].dobj);
4901  tyinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_typname));
4902  tyinfo[i].dobj.namespace =
4903  findNamespace(fout,
4904  atooid(PQgetvalue(res, i, i_typnamespace)));
4905  tyinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4906  tyinfo[i].typacl = pg_strdup(PQgetvalue(res, i, i_typacl));
4907  tyinfo[i].rtypacl = pg_strdup(PQgetvalue(res, i, i_rtypacl));
4908  tyinfo[i].inittypacl = pg_strdup(PQgetvalue(res, i, i_inittypacl));
4909  tyinfo[i].initrtypacl = pg_strdup(PQgetvalue(res, i, i_initrtypacl));
4910  tyinfo[i].typelem = atooid(PQgetvalue(res, i, i_typelem));
4911  tyinfo[i].typrelid = atooid(PQgetvalue(res, i, i_typrelid));
4912  tyinfo[i].typrelkind = *PQgetvalue(res, i, i_typrelkind);
4913  tyinfo[i].typtype = *PQgetvalue(res, i, i_typtype);
4914  tyinfo[i].shellType = NULL;
4915 
4916  if (strcmp(PQgetvalue(res, i, i_typisdefined), "t") == 0)
4917  tyinfo[i].isDefined = true;
4918  else
4919  tyinfo[i].isDefined = false;
4920 
4921  if (strcmp(PQgetvalue(res, i, i_isarray), "t") == 0)
4922  tyinfo[i].isArray = true;
4923  else
4924  tyinfo[i].isArray = false;
4925 
4926  /* Decide whether we want to dump it */
4927  selectDumpableType(&tyinfo[i], fout);
4928 
4929  /* Do not try to dump ACL if no ACL exists. */
4930  if (PQgetisnull(res, i, i_typacl) && PQgetisnull(res, i, i_rtypacl) &&
4931  PQgetisnull(res, i, i_inittypacl) &&
4932  PQgetisnull(res, i, i_initrtypacl))
4933  tyinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4934 
4935  /*
4936  * If it's a domain, fetch info about its constraints, if any
4937  */
4938  tyinfo[i].nDomChecks = 0;
4939  tyinfo[i].domChecks = NULL;
4940  if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
4941  tyinfo[i].typtype == TYPTYPE_DOMAIN)
4942  getDomainConstraints(fout, &(tyinfo[i]));
4943 
4944  /*
4945  * If it's a base type, make a DumpableObject representing a shell
4946  * definition of the type. We will need to dump that ahead of the I/O
4947  * functions for the type. Similarly, range types need a shell
4948  * definition in case they have a canonicalize function.
4949  *
4950  * Note: the shell type doesn't have a catId. You might think it
4951  * should copy the base type's catId, but then it might capture the
4952  * pg_depend entries for the type, which we don't want.
4953  */
4954  if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
4955  (tyinfo[i].typtype == TYPTYPE_BASE ||
4956  tyinfo[i].typtype == TYPTYPE_RANGE))
4957  {
4958  stinfo = (ShellTypeInfo *) pg_malloc(sizeof(ShellTypeInfo));
4959  stinfo->dobj.objType = DO_SHELL_TYPE;
4960  stinfo->dobj.catId = nilCatalogId;
4961  AssignDumpId(&stinfo->dobj);
4962  stinfo->dobj.name = pg_strdup(tyinfo[i].dobj.name);
4963  stinfo->dobj.namespace = tyinfo[i].dobj.namespace;
4964  stinfo->baseType = &(tyinfo[i]);
4965  tyinfo[i].shellType = stinfo;
4966 
4967  /*
4968  * Initially mark the shell type as not to be dumped. We'll only
4969  * dump it if the I/O or canonicalize functions need to be dumped;
4970  * this is taken care of while sorting dependencies.
4971  */
4972  stinfo->dobj.dump = DUMP_COMPONENT_NONE;
4973  }
4974 
4975  if (strlen(tyinfo[i].rolname) == 0)
4976  write_msg(NULL, "WARNING: owner of data type \"%s\" appears to be invalid\n",
4977  tyinfo[i].dobj.name);
4978  }
4979 
4980  *numTypes = ntups;
4981 
4982  PQclear(res);
4983 
4984  destroyPQExpBuffer(query);
4985 
4986  return tyinfo;
4987 }
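
/*
 * Restating the isarray test embedded in the queries above: on a >= 8.3
 * server a row is treated as an auto-generated array type only when its
 * name starts with '_' and its element type's typarray points back at it,
 * i.e. something along the lines of (illustrative only, not a query pg_dump
 * itself issues):
 *
 *     SELECT t.typname
 *     FROM pg_type t
 *     WHERE t.typname[0] = '_' AND t.typelem != 0
 *       AND (SELECT typarray FROM pg_type te WHERE te.oid = t.typelem) = t.oid;
 */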
4988 
4989 /*
4990  * getOperators:
4991  * read all operators in the system catalogs and return them in the
4992  * OprInfo* structure
4993  *
4994  * numOprs is set to the number of operators read in
4995  */
4996 OprInfo *
4997 getOperators(Archive *fout, int *numOprs)
4998 {
4999  PGresult *res;
5000  int ntups;
5001  int i;
5002  PQExpBuffer query = createPQExpBuffer();
5003  OprInfo *oprinfo;
5004  int i_tableoid;
5005  int i_oid;
5006  int i_oprname;
5007  int i_oprnamespace;
5008  int i_rolname;
5009  int i_oprkind;
5010  int i_oprcode;
5011 
5012  /*
5013  * find all operators, including builtin operators; we filter out
5014  * system-defined operators at dump-out time.
5015  */
5016 
5017  appendPQExpBuffer(query, "SELECT tableoid, oid, oprname, "
5018  "oprnamespace, "
5019  "(%s oprowner) AS rolname, "
5020  "oprkind, "
5021  "oprcode::oid AS oprcode "
5022  "FROM pg_operator",
5023  username_subquery);
5024 
5025  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5026 
5027  ntups = PQntuples(res);
5028  *numOprs = ntups;
5029 
5030  oprinfo = (OprInfo *) pg_malloc(ntups * sizeof(OprInfo));
5031 
5032  i_tableoid = PQfnumber(res, "tableoid");
5033  i_oid = PQfnumber(res, "oid");
5034  i_oprname = PQfnumber(res, "oprname");
5035  i_oprnamespace = PQfnumber(res, "oprnamespace");
5036  i_rolname = PQfnumber(res, "rolname");
5037  i_oprkind = PQfnumber(res, "oprkind");
5038  i_oprcode = PQfnumber(res, "oprcode");
5039 
5040  for (i = 0; i < ntups; i++)
5041  {
5042  oprinfo[i].dobj.objType = DO_OPERATOR;
5043  oprinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5044  oprinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5045  AssignDumpId(&oprinfo[i].dobj);
5046  oprinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oprname));
5047  oprinfo[i].dobj.namespace =
5048  findNamespace(fout,
5049  atooid(PQgetvalue(res, i, i_oprnamespace)));
5050  oprinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5051  oprinfo[i].oprkind = (PQgetvalue(res, i, i_oprkind))[0];
5052  oprinfo[i].oprcode = atooid(PQgetvalue(res, i, i_oprcode));
5053 
5054  /* Decide whether we want to dump it */
5055  selectDumpableObject(&(oprinfo[i].dobj), fout);
5056 
5057  /* Operators do not currently have ACLs. */
5058  oprinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5059 
5060  if (strlen(oprinfo[i].rolname) == 0)
5061  write_msg(NULL, "WARNING: owner of operator \"%s\" appears to be invalid\n",
5062  oprinfo[i].dobj.name);
5063  }
5064 
5065  PQclear(res);
5066 
5067  destroyPQExpBuffer(query);
5068 
5069  return oprinfo;
5070 }
5071 
5072 /*
5073  * getCollations:
5074  * read all collations in the system catalogs and return them in the
5075  * CollInfo* structure
5076  *
5077  * numCollations is set to the number of collations read in
5078  */
5079 CollInfo *
5080 getCollations(Archive *fout, int *numCollations)
5081 {
5082  PGresult *res;
5083  int ntups;
5084  int i;
5085  PQExpBuffer query;
5086  CollInfo *collinfo;
5087  int i_tableoid;
5088  int i_oid;
5089  int i_collname;
5090  int i_collnamespace;
5091  int i_rolname;
5092 
5093  /* Collations didn't exist pre-9.1 */
5094  if (fout->remoteVersion < 90100)
5095  {
5096  *numCollations = 0;
5097  return NULL;
5098  }
5099 
5100  query = createPQExpBuffer();
5101 
5102  /*
5103  * find all collations, including builtin collations; we filter out
5104  * system-defined collations at dump-out time.
5105  */
5106 
5107  appendPQExpBuffer(query, "SELECT tableoid, oid, collname, "
5108  "collnamespace, "
5109  "(%s collowner) AS rolname "
5110  "FROM pg_collation",
5111  username_subquery);
5112 
5113  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5114 
5115  ntups = PQntuples(res);
5116  *numCollations = ntups;
5117 
5118  collinfo = (CollInfo *) pg_malloc(ntups * sizeof(CollInfo));
5119 
5120  i_tableoid = PQfnumber(res, "tableoid");
5121  i_oid = PQfnumber(res, "oid");
5122  i_collname = PQfnumber(res, "collname");
5123  i_collnamespace = PQfnumber(res, "collnamespace");
5124  i_rolname = PQfnumber(res, "rolname");
5125 
5126  for (i = 0; i < ntups; i++)
5127  {
5128  collinfo[i].dobj.objType = DO_COLLATION;
5129  collinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5130  collinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5131  AssignDumpId(&collinfo[i].dobj);
5132  collinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_collname));
5133  collinfo[i].dobj.namespace =
5134  findNamespace(fout,
5135  atooid(PQgetvalue(res, i, i_collnamespace)));
5136  collinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5137 
5138  /* Decide whether we want to dump it */
5139  selectDumpableObject(&(collinfo[i].dobj), fout);
5140 
5141  /* Collations do not currently have ACLs. */
5142  collinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5143  }
5144 
5145  PQclear(res);
5146 
5147  destroyPQExpBuffer(query);
5148 
5149  return collinfo;
5150 }
5151 
5152 /*
5153  * getConversions:
5154  * read all conversions in the system catalogs and return them in the
5155  * ConvInfo* structure
5156  *
5157  * numConversions is set to the number of conversions read in
5158  */
5159 ConvInfo *
5160 getConversions(Archive *fout, int *numConversions)
5161 {
5162  PGresult *res;
5163  int ntups;
5164  int i;
5165  PQExpBuffer query;
5166  ConvInfo *convinfo;
5167  int i_tableoid;
5168  int i_oid;
5169  int i_conname;
5170  int i_connamespace;
5171  int i_rolname;
5172 
5173  query = createPQExpBuffer();
5174 
5175  /*
5176  * find all conversions, including builtin conversions; we filter out
5177  * system-defined conversions at dump-out time.
5178  */
5179 
5180  appendPQExpBuffer(query, "SELECT tableoid, oid, conname, "
5181  "connamespace, "
5182  "(%s conowner) AS rolname "
5183  "FROM pg_conversion",
5184  username_subquery);
5185 
5186  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5187 
5188  ntups = PQntuples(res);
5189  *numConversions = ntups;
5190 
5191  convinfo = (ConvInfo *) pg_malloc(ntups * sizeof(ConvInfo));
5192 
5193  i_tableoid = PQfnumber(res, "tableoid");
5194  i_oid = PQfnumber(res, "oid");
5195  i_conname = PQfnumber(res, "conname");
5196  i_connamespace = PQfnumber(res, "connamespace");
5197  i_rolname = PQfnumber(res, "rolname");
5198 
5199  for (i = 0; i < ntups; i++)
5200  {
5201  convinfo[i].dobj.objType = DO_CONVERSION;
5202  convinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5203  convinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5204  AssignDumpId(&convinfo[i].dobj);
5205  convinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_conname));
5206  convinfo[i].dobj.namespace =
5207  findNamespace(fout,
5208  atooid(PQgetvalue(res, i, i_connamespace)));
5209  convinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5210 
5211  /* Decide whether we want to dump it */
5212  selectDumpableObject(&(convinfo[i].dobj), fout);
5213 
5214  /* Conversions do not currently have ACLs. */
5215  convinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5216  }
5217 
5218  PQclear(res);
5219 
5220  destroyPQExpBuffer(query);
5221 
5222  return convinfo;
5223 }
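Each of these collectors clears the ACL bit with dobj.dump &= ~DUMP_COMPONENT_ACL while leaving the other component bits intact. A minimal sketch of how such a mask is typically consumed later, assuming the DUMP_COMPONENT_DEFINITION and DUMP_COMPONENT_COMMENT flags from pg_dump.h; report_requested_components() is a hypothetical illustration, not one of pg_dump's dump routines:

/* Hypothetical consumer of the dump-component bitmask (illustration only). */
static void
report_requested_components(const ConvInfo *conv)
{
	/* CREATE CONVERSION would be emitted only if the definition bit survived. */
	if (conv->dobj.dump & DUMP_COMPONENT_DEFINITION)
		write_msg(NULL, "would dump definition of \"%s\"\n", conv->dobj.name);

	/* Comments are a separate component and can be toggled independently. */
	if (conv->dobj.dump & DUMP_COMPONENT_COMMENT)
		write_msg(NULL, "would dump comment on \"%s\"\n", conv->dobj.name);

	/* The ACL bit was cleared in getConversions(), so this branch never fires. */
	if (conv->dobj.dump & DUMP_COMPONENT_ACL)
		write_msg(NULL, "would dump ACL of \"%s\"\n", conv->dobj.name);
}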
5224 
5225 /*
5226  * getAccessMethods:
5227  * read all user-defined access methods in the system catalogs and return
5228  * them in the AccessMethodInfo* structure
5229  *
5230  * numAccessMethods is set to the number of access methods read in
5231  */
5232 AccessMethodInfo *
5233 getAccessMethods(Archive *fout, int *numAccessMethods)
5234 {
5235  PGresult *res;
5236  int ntups;
5237  int i;
5238  PQExpBuffer query;
5239  AccessMethodInfo *aminfo;
5240  int i_tableoid;
5241  int i_oid;
5242  int i_amname;
5243  int i_amhandler;
5244  int i_amtype;
5245 
5246  /* Before 9.6, there are no user-defined access methods */
5247  if (fout->remoteVersion < 90600)
5248  {
5249  *numAccessMethods = 0;
5250  return NULL;
5251  }
5252 
5253  query = createPQExpBuffer();
5254 
5255  /* Select all access methods from pg_am table */
5256  appendPQExpBuffer(query, "SELECT tableoid, oid, amname, amtype, "
5257  "amhandler::pg_catalog.regproc AS amhandler "
5258  "FROM pg_am");
5259 
5260  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5261 
5262  ntups = PQntuples(res);
5263  *numAccessMethods = ntups;
5264 
5265  aminfo = (AccessMethodInfo *) pg_malloc(ntups * sizeof(AccessMethodInfo));
5266 
5267  i_tableoid = PQfnumber(res, "tableoid");
5268  i_oid = PQfnumber(res, "oid");
5269  i_amname = PQfnumber(res, "amname");
5270  i_amhandler = PQfnumber(res, "amhandler");
5271  i_amtype = PQfnumber(res, "amtype");
5272 
5273  for (i = 0; i < ntups; i++)
5274  {
5275  aminfo[i].dobj.objType = DO_ACCESS_METHOD;
5276  aminfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5277  aminfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5278  AssignDumpId(&aminfo[i].dobj);
5279  aminfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_amname));
5280  aminfo[i].dobj.namespace = NULL;
5281  aminfo[i].amhandler = pg_strdup(PQgetvalue(res, i, i_amhandler));
5282  aminfo[i].amtype = *(PQgetvalue(res, i, i_amtype));
5283 
5284  /* Decide whether we want to dump it */
5285  selectDumpableAccessMethod(&(aminfo[i]), fout);
5286 
5287  /* Access methods do not currently have ACLs. */
5288  aminfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5289  }
5290 
5291  PQclear(res);
5292 
5293  destroyPQExpBuffer(query);
5294 
5295  return aminfo;
5296 }
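The amtype byte stored above distinguishes the kinds of access methods; pg_am_d.h spells these out as AMTYPE_INDEX ('i') and, on servers with table access methods, AMTYPE_TABLE ('t'). A minimal sketch of mapping that byte to the keyword a CREATE ACCESS METHOD ... TYPE ... HANDLER ... statement would need; am_type_keyword() is illustrative, not pg_dump's actual dump routine:

#include "catalog/pg_am_d.h"

/* Illustrative mapping from the stored amtype byte to the SQL keyword. */
static const char *
am_type_keyword(char amtype)
{
	switch (amtype)
	{
		case AMTYPE_INDEX:
			return "INDEX";
		case AMTYPE_TABLE:
			return "TABLE";
		default:
			return NULL;		/* unrecognized access method type */
	}
}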
5297 
5298 
5299 /*
5300  * getOpclasses:
5301  * read all opclasses in the system catalogs and return them in the
5302  * OpclassInfo* structure
5303  *
5304  * numOpclasses is set to the number of opclasses read in
5305  */
5306 OpclassInfo *