pg_dump.c
1 /*-------------------------------------------------------------------------
2  *
3  * pg_dump.c
4  * pg_dump is a utility for dumping out a postgres database
5  * into a script file.
6  *
7  * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994, Regents of the University of California
9  *
10  * pg_dump will read the system catalogs in a database and dump out a
11  * script that reproduces the schema in terms of SQL that is understood
12  * by PostgreSQL
13  *
14  * Note that pg_dump runs in a transaction-snapshot mode transaction,
15  * so it sees a consistent snapshot of the database including system
16  * catalogs. However, it relies in part on various specialized backend
17  * functions like pg_get_indexdef(), and those things tend to look at
18  * the currently committed state. So it is possible to get a 'cache
19  * lookup failed' error if someone performs DDL changes while a dump is
20  * happening. The window for this sort of thing is from the acquisition
21  * of the transaction snapshot to getSchemaData() (when pg_dump acquires
22  * AccessShareLock on every table it intends to dump). It isn't very large,
23  * but it can happen.
24  *
25  * http://archives.postgresql.org/pgsql-bugs/2010-02/msg00187.php
26  *
27  * IDENTIFICATION
28  * src/bin/pg_dump/pg_dump.c
29  *
30  *-------------------------------------------------------------------------
31  */
32 #include "postgres_fe.h"
33 
34 #include <unistd.h>
35 #include <ctype.h>
36 #ifdef HAVE_TERMIOS_H
37 #include <termios.h>
38 #endif
39 
40 #include "getopt_long.h"
41 
42 #include "access/attnum.h"
43 #include "access/sysattr.h"
44 #include "access/transam.h"
45 #include "catalog/pg_aggregate_d.h"
46 #include "catalog/pg_am_d.h"
47 #include "catalog/pg_attribute_d.h"
48 #include "catalog/pg_cast_d.h"
49 #include "catalog/pg_class_d.h"
50 #include "catalog/pg_default_acl_d.h"
51 #include "catalog/pg_largeobject_d.h"
52 #include "catalog/pg_largeobject_metadata_d.h"
53 #include "catalog/pg_proc_d.h"
54 #include "catalog/pg_trigger_d.h"
55 #include "catalog/pg_type_d.h"
56 #include "libpq/libpq-fs.h"
57 
58 #include "dumputils.h"
59 #include "parallel.h"
60 #include "pg_backup_db.h"
61 #include "pg_backup_utils.h"
62 #include "pg_dump.h"
63 #include "fe_utils/connect.h"
64 #include "fe_utils/string_utils.h"
65 
66 
67 typedef struct
68 {
69  const char *descr; /* comment for an object */
70  Oid classoid; /* object class (catalog OID) */
71  Oid objoid; /* object OID */
72  int objsubid; /* subobject (table column #) */
73 } CommentItem;
74 
75 typedef struct
76 {
77  const char *provider; /* label provider of this security label */
78  const char *label; /* security label for an object */
79  Oid classoid; /* object class (catalog OID) */
80  Oid objoid; /* object OID */
81  int objsubid; /* subobject (table column #) */
82 } SecLabelItem;
83 
84 typedef enum OidOptions
85 {
86  zeroAsOpaque = 1,
87  zeroAsAny = 2,
88  zeroAsStar = 4,
89  zeroAsNone = 8
90 } OidOptions;
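/*
 * Illustrative note (not in the original file): these flag bits are
 * consulted by getFormattedTypeName() when a type OID is invalid, choosing
 * what to emit instead -- e.g. zeroAsOpaque yields the legacy "opaque"
 * name, zeroAsAny yields "'any'".
 */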
91 
92 /* global decls */
93 bool g_verbose; /* User wants verbose narration of our
94  * activities. */
95 static bool dosync = true; /* Issue fsync() to make dump durable on disk. */
96 
97 /* subquery used to convert user ID (eg, datdba) to user name */
98 static const char *username_subquery;
99 
100 /*
101  * For 8.0 and earlier servers, this is pulled from pg_database; for 8.1+ we use
102  * FirstNormalObjectId - 1.
103  */
104 static Oid g_last_builtin_oid; /* value of the last builtin oid */
105 
106 /* The specified names/patterns should match at least one entity */
107 static int strict_names = 0;
108 
109 /*
110  * Object inclusion/exclusion lists
111  *
112  * The string lists record the patterns given by command-line switches,
113  * which we then convert to lists of OIDs of matching objects.
114  */
115 static SimpleStringList schema_include_patterns = {NULL, NULL};
116 static SimpleOidList schema_include_oids = {NULL, NULL};
117 static SimpleStringList schema_exclude_patterns = {NULL, NULL};
118 static SimpleOidList schema_exclude_oids = {NULL, NULL};
119 
120 static SimpleStringList table_include_patterns = {NULL, NULL};
121 static SimpleOidList table_include_oids = {NULL, NULL};
122 static SimpleStringList table_exclude_patterns = {NULL, NULL};
123 static SimpleOidList table_exclude_oids = {NULL, NULL};
124 static SimpleStringList tabledata_exclude_patterns = {NULL, NULL};
125 static SimpleOidList tabledata_exclude_oids = {NULL, NULL};
126 
127 
128 char g_opaque_type[10]; /* name for the opaque type */
129 
130 /* placeholders for the delimiters for comments */
131 char g_comment_start[10];
132 char g_comment_end[10];
133 
134 static const CatalogId nilCatalogId = {0, 0};
135 
136 /*
137  * Macro for producing quoted, schema-qualified name of a dumpable object.
138  * Note implicit dependence on "fout"; we should get rid of that argument.
139  */
140 #define fmtQualifiedDumpable(obj) \
141  fmtQualifiedId(fout->remoteVersion, \
142  (obj)->dobj.namespace->dobj.name, \
143  (obj)->dobj.name)
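/*
 * Example (illustrative only): for a table "tab" in schema "my schema",
 * fmtQualifiedDumpable() yields "my schema".tab; fmtQualifiedId()
 * double-quotes each component only when quoting is actually required.
 */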
144 
145 static void help(const char *progname);
146 static void setup_connection(Archive *AH,
147  const char *dumpencoding, const char *dumpsnapshot,
148  char *use_role);
149 static ArchiveFormat parseArchiveFormat(const char *format, ArchiveMode *mode);
150 static void expand_schema_name_patterns(Archive *fout,
151  SimpleStringList *patterns,
152  SimpleOidList *oids,
153  bool strict_names);
154 static void expand_table_name_patterns(Archive *fout,
155  SimpleStringList *patterns,
156  SimpleOidList *oids,
157  bool strict_names);
158 static NamespaceInfo *findNamespace(Archive *fout, Oid nsoid);
159 static void dumpTableData(Archive *fout, TableDataInfo *tdinfo);
160 static void refreshMatViewData(Archive *fout, TableDataInfo *tdinfo);
161 static void guessConstraintInheritance(TableInfo *tblinfo, int numTables);
162 static void dumpComment(Archive *fout, const char *type, const char *name,
163  const char *namespace, const char *owner,
164  CatalogId catalogId, int subid, DumpId dumpId);
165 static int findComments(Archive *fout, Oid classoid, Oid objoid,
166  CommentItem **items);
167 static int collectComments(Archive *fout, CommentItem **items);
168 static void dumpSecLabel(Archive *fout, const char *type, const char *name,
169  const char *namespace, const char *owner,
170  CatalogId catalogId, int subid, DumpId dumpId);
171 static int findSecLabels(Archive *fout, Oid classoid, Oid objoid,
172  SecLabelItem **items);
173 static int collectSecLabels(Archive *fout, SecLabelItem **items);
174 static void dumpDumpableObject(Archive *fout, DumpableObject *dobj);
175 static void dumpNamespace(Archive *fout, NamespaceInfo *nspinfo);
176 static void dumpExtension(Archive *fout, ExtensionInfo *extinfo);
177 static void dumpType(Archive *fout, TypeInfo *tyinfo);
178 static void dumpBaseType(Archive *fout, TypeInfo *tyinfo);
179 static void dumpEnumType(Archive *fout, TypeInfo *tyinfo);
180 static void dumpRangeType(Archive *fout, TypeInfo *tyinfo);
181 static void dumpUndefinedType(Archive *fout, TypeInfo *tyinfo);
182 static void dumpDomain(Archive *fout, TypeInfo *tyinfo);
183 static void dumpCompositeType(Archive *fout, TypeInfo *tyinfo);
184 static void dumpCompositeTypeColComments(Archive *fout, TypeInfo *tyinfo);
185 static void dumpShellType(Archive *fout, ShellTypeInfo *stinfo);
186 static void dumpProcLang(Archive *fout, ProcLangInfo *plang);
187 static void dumpFunc(Archive *fout, FuncInfo *finfo);
188 static void dumpCast(Archive *fout, CastInfo *cast);
189 static void dumpTransform(Archive *fout, TransformInfo *transform);
190 static void dumpOpr(Archive *fout, OprInfo *oprinfo);
191 static void dumpAccessMethod(Archive *fout, AccessMethodInfo *oprinfo);
192 static void dumpOpclass(Archive *fout, OpclassInfo *opcinfo);
193 static void dumpOpfamily(Archive *fout, OpfamilyInfo *opfinfo);
194 static void dumpCollation(Archive *fout, CollInfo *collinfo);
195 static void dumpConversion(Archive *fout, ConvInfo *convinfo);
196 static void dumpRule(Archive *fout, RuleInfo *rinfo);
197 static void dumpAgg(Archive *fout, AggInfo *agginfo);
198 static void dumpTrigger(Archive *fout, TriggerInfo *tginfo);
199 static void dumpEventTrigger(Archive *fout, EventTriggerInfo *evtinfo);
200 static void dumpTable(Archive *fout, TableInfo *tbinfo);
201 static void dumpTableSchema(Archive *fout, TableInfo *tbinfo);
202 static void dumpAttrDef(Archive *fout, AttrDefInfo *adinfo);
203 static void dumpSequence(Archive *fout, TableInfo *tbinfo);
204 static void dumpSequenceData(Archive *fout, TableDataInfo *tdinfo);
205 static void dumpIndex(Archive *fout, IndxInfo *indxinfo);
206 static void dumpIndexAttach(Archive *fout, IndexAttachInfo *attachinfo);
207 static void dumpStatisticsExt(Archive *fout, StatsExtInfo *statsextinfo);
208 static void dumpConstraint(Archive *fout, ConstraintInfo *coninfo);
209 static void dumpTableConstraintComment(Archive *fout, ConstraintInfo *coninfo);
210 static void dumpTSParser(Archive *fout, TSParserInfo *prsinfo);
211 static void dumpTSDictionary(Archive *fout, TSDictInfo *dictinfo);
212 static void dumpTSTemplate(Archive *fout, TSTemplateInfo *tmplinfo);
213 static void dumpTSConfig(Archive *fout, TSConfigInfo *cfginfo);
214 static void dumpForeignDataWrapper(Archive *fout, FdwInfo *fdwinfo);
215 static void dumpForeignServer(Archive *fout, ForeignServerInfo *srvinfo);
216 static void dumpUserMappings(Archive *fout,
217  const char *servername, const char *namespace,
218  const char *owner, CatalogId catalogId, DumpId dumpId);
219 static void dumpDefaultACL(Archive *fout, DefaultACLInfo *daclinfo);
220 
221 static void dumpACL(Archive *fout, CatalogId objCatId, DumpId objDumpId,
222  const char *type, const char *name, const char *subname,
223  const char *nspname, const char *owner,
224  const char *acls, const char *racls,
225  const char *initacls, const char *initracls);
226 
227 static void getDependencies(Archive *fout);
228 static void BuildArchiveDependencies(Archive *fout);
229 static void findDumpableDependencies(ArchiveHandle *AH, DumpableObject *dobj,
230  DumpId **dependencies, int *nDeps, int *allocDeps);
231 
232 static DumpableObject *createBoundaryObjects(void);
233 static void addBoundaryDependencies(DumpableObject **dobjs, int numObjs,
234  DumpableObject *boundaryObjs);
235 
236 static void getDomainConstraints(Archive *fout, TypeInfo *tyinfo);
237 static void getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, bool oids, char relkind);
238 static void makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo, bool oids);
239 static void buildMatViewRefreshDependencies(Archive *fout);
240 static void getTableDataFKConstraints(void);
241 static char *format_function_arguments(FuncInfo *finfo, char *funcargs,
242  bool is_agg);
243 static char *format_function_arguments_old(Archive *fout,
244  FuncInfo *finfo, int nallargs,
245  char **allargtypes,
246  char **argmodes,
247  char **argnames);
248 static char *format_function_signature(Archive *fout,
249  FuncInfo *finfo, bool honor_quotes);
250 static char *convertRegProcReference(Archive *fout,
251  const char *proc);
252 static char *getFormattedOperatorName(Archive *fout, const char *oproid);
253 static char *convertTSFunction(Archive *fout, Oid funcOid);
254 static Oid findLastBuiltinOid_V71(Archive *fout);
255 static char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts);
256 static void getBlobs(Archive *fout);
257 static void dumpBlob(Archive *fout, BlobInfo *binfo);
258 static int dumpBlobs(Archive *fout, void *arg);
259 static void dumpPolicy(Archive *fout, PolicyInfo *polinfo);
260 static void dumpPublication(Archive *fout, PublicationInfo *pubinfo);
261 static void dumpPublicationTable(Archive *fout, PublicationRelInfo *pubrinfo);
262 static void dumpSubscription(Archive *fout, SubscriptionInfo *subinfo);
263 static void dumpDatabase(Archive *AH);
264 static void dumpDatabaseConfig(Archive *AH, PQExpBuffer outbuf,
265  const char *dbname, Oid dboid);
266 static void dumpEncoding(Archive *AH);
267 static void dumpStdStrings(Archive *AH);
268 static void dumpSearchPath(Archive *AH);
269 static void binary_upgrade_set_type_oids_by_type_oid(Archive *fout,
270  PQExpBuffer upgrade_buffer,
271  Oid pg_type_oid,
272  bool force_array_type);
273 static void binary_upgrade_set_type_oids_by_rel_oid(Archive *fout,
274  PQExpBuffer upgrade_buffer, Oid pg_rel_oid);
275 static void binary_upgrade_set_pg_class_oids(Archive *fout,
276  PQExpBuffer upgrade_buffer,
277  Oid pg_class_oid, bool is_index);
278 static void binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
279  DumpableObject *dobj,
280  const char *objtype,
281  const char *objname,
282  const char *objnamespace);
283 static const char *getAttrName(int attrnum, TableInfo *tblInfo);
284 static const char *fmtCopyColumnList(const TableInfo *ti, PQExpBuffer buffer);
285 static bool nonemptyReloptions(const char *reloptions);
286 static void appendReloptionsArrayAH(PQExpBuffer buffer, const char *reloptions,
287  const char *prefix, Archive *fout);
288 static char *get_synchronized_snapshot(Archive *fout);
289 static void setupDumpWorker(Archive *AHX);
290 static TableInfo *getRootTableInfo(TableInfo *tbinfo);
291 
292 
293 int
294 main(int argc, char **argv)
295 {
296  int c;
297  const char *filename = NULL;
298  const char *format = "p";
299  TableInfo *tblinfo;
300  int numTables;
301  DumpableObject **dobjs;
302  int numObjs;
303  DumpableObject *boundaryObjs;
304  int i;
305  int optindex;
306  RestoreOptions *ropt;
307  Archive *fout; /* the script file */
308  const char *dumpencoding = NULL;
309  const char *dumpsnapshot = NULL;
310  char *use_role = NULL;
311  int numWorkers = 1;
312  trivalue prompt_password = TRI_DEFAULT;
313  int compressLevel = -1;
314  int plainText = 0;
315  ArchiveFormat archiveFormat = archUnknown;
316  ArchiveMode archiveMode;
317 
318  static DumpOptions dopt;
319 
320  static struct option long_options[] = {
321  {"data-only", no_argument, NULL, 'a'},
322  {"blobs", no_argument, NULL, 'b'},
323  {"no-blobs", no_argument, NULL, 'B'},
324  {"clean", no_argument, NULL, 'c'},
325  {"create", no_argument, NULL, 'C'},
326  {"dbname", required_argument, NULL, 'd'},
327  {"file", required_argument, NULL, 'f'},
328  {"format", required_argument, NULL, 'F'},
329  {"host", required_argument, NULL, 'h'},
330  {"jobs", required_argument, NULL, 'j'},
331  {"no-reconnect", no_argument, NULL, 'R'},
332  {"oids", no_argument, NULL, 'o'},
333  {"no-owner", no_argument, NULL, 'O'},
334  {"port", required_argument, NULL, 'p'},
335  {"schema", required_argument, NULL, 'n'},
336  {"exclude-schema", required_argument, NULL, 'N'},
337  {"schema-only", no_argument, NULL, 's'},
338  {"superuser", required_argument, NULL, 'S'},
339  {"table", required_argument, NULL, 't'},
340  {"exclude-table", required_argument, NULL, 'T'},
341  {"no-password", no_argument, NULL, 'w'},
342  {"password", no_argument, NULL, 'W'},
343  {"username", required_argument, NULL, 'U'},
344  {"verbose", no_argument, NULL, 'v'},
345  {"no-privileges", no_argument, NULL, 'x'},
346  {"no-acl", no_argument, NULL, 'x'},
347  {"compress", required_argument, NULL, 'Z'},
348  {"encoding", required_argument, NULL, 'E'},
349  {"help", no_argument, NULL, '?'},
350  {"version", no_argument, NULL, 'V'},
351 
352  /*
353  * the following options don't have an equivalent short option letter
354  */
355  {"attribute-inserts", no_argument, &dopt.column_inserts, 1},
356  {"binary-upgrade", no_argument, &dopt.binary_upgrade, 1},
357  {"column-inserts", no_argument, &dopt.column_inserts, 1},
358  {"disable-dollar-quoting", no_argument, &dopt.disable_dollar_quoting, 1},
359  {"disable-triggers", no_argument, &dopt.disable_triggers, 1},
360  {"enable-row-security", no_argument, &dopt.enable_row_security, 1},
361  {"exclude-table-data", required_argument, NULL, 4},
362  {"if-exists", no_argument, &dopt.if_exists, 1},
363  {"inserts", no_argument, &dopt.dump_inserts, 1},
364  {"lock-wait-timeout", required_argument, NULL, 2},
365  {"no-tablespaces", no_argument, &dopt.outputNoTablespaces, 1},
366  {"quote-all-identifiers", no_argument, &quote_all_identifiers, 1},
367  {"load-via-partition-root", no_argument, &dopt.load_via_partition_root, 1},
368  {"role", required_argument, NULL, 3},
369  {"section", required_argument, NULL, 5},
370  {"serializable-deferrable", no_argument, &dopt.serializable_deferrable, 1},
371  {"snapshot", required_argument, NULL, 6},
372  {"strict-names", no_argument, &strict_names, 1},
373  {"use-set-session-authorization", no_argument, &dopt.use_setsessauth, 1},
374  {"no-comments", no_argument, &dopt.no_comments, 1},
375  {"no-publications", no_argument, &dopt.no_publications, 1},
376  {"no-security-labels", no_argument, &dopt.no_security_labels, 1},
377  {"no-synchronized-snapshots", no_argument, &dopt.no_synchronized_snapshots, 1},
378  {"no-unlogged-table-data", no_argument, &dopt.no_unlogged_table_data, 1},
379  {"no-subscriptions", no_argument, &dopt.no_subscriptions, 1},
380  {"no-sync", no_argument, NULL, 7},
381 
382  {NULL, 0, NULL, 0}
383  };
384 
385  set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_dump"));
386 
387  /*
388  * Initialize what we need for parallel execution, especially for thread
389  * support on Windows.
390  */
391  init_parallel_dump_utils();
392 
393  g_verbose = false;
394 
395  strcpy(g_comment_start, "-- ");
396  g_comment_end[0] = '\0';
397  strcpy(g_opaque_type, "opaque");
398 
399  progname = get_progname(argv[0]);
400 
401  if (argc > 1)
402  {
403  if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
404  {
405  help(progname);
406  exit_nicely(0);
407  }
408  if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
409  {
410  puts("pg_dump (PostgreSQL) " PG_VERSION);
411  exit_nicely(0);
412  }
413  }
414 
415  InitDumpOptions(&dopt);
416 
417  while ((c = getopt_long(argc, argv, "abBcCd:E:f:F:h:j:n:N:oOp:RsS:t:T:U:vwWxZ:",
418  long_options, &optindex)) != -1)
419  {
420  switch (c)
421  {
422  case 'a': /* Dump data only */
423  dopt.dataOnly = true;
424  break;
425 
426  case 'b': /* Dump blobs */
427  dopt.outputBlobs = true;
428  break;
429 
430  case 'B': /* Don't dump blobs */
431  dopt.dontOutputBlobs = true;
432  break;
433 
434  case 'c': /* clean (i.e., drop) schema prior to create */
435  dopt.outputClean = 1;
436  break;
437 
438  case 'C': /* Create DB */
439  dopt.outputCreateDB = 1;
440  break;
441 
442  case 'd': /* database name */
443  dopt.dbname = pg_strdup(optarg);
444  break;
445 
446  case 'E': /* Dump encoding */
447  dumpencoding = pg_strdup(optarg);
448  break;
449 
450  case 'f':
451  filename = pg_strdup(optarg);
452  break;
453 
454  case 'F':
455  format = pg_strdup(optarg);
456  break;
457 
458  case 'h': /* server host */
459  dopt.pghost = pg_strdup(optarg);
460  break;
461 
462  case 'j': /* number of dump jobs */
463  numWorkers = atoi(optarg);
464  break;
465 
466  case 'n': /* include schema(s) */
467  simple_string_list_append(&schema_include_patterns, optarg);
468  dopt.include_everything = false;
469  break;
470 
471  case 'N': /* exclude schema(s) */
472  simple_string_list_append(&schema_exclude_patterns, optarg);
473  break;
474 
475  case 'o': /* Dump oids */
476  dopt.oids = true;
477  break;
478 
479  case 'O': /* Don't reconnect to match owner */
480  dopt.outputNoOwner = 1;
481  break;
482 
483  case 'p': /* server port */
484  dopt.pgport = pg_strdup(optarg);
485  break;
486 
487  case 'R':
488  /* no-op, still accepted for backwards compatibility */
489  break;
490 
491  case 's': /* dump schema only */
492  dopt.schemaOnly = true;
493  break;
494 
495  case 'S': /* Username for superuser in plain text output */
496  dopt.outputSuperuser = pg_strdup(optarg);
497  break;
498 
499  case 't': /* include table(s) */
500  simple_string_list_append(&table_include_patterns, optarg);
501  dopt.include_everything = false;
502  break;
503 
504  case 'T': /* exclude table(s) */
505  simple_string_list_append(&table_exclude_patterns, optarg);
506  break;
507 
508  case 'U':
509  dopt.username = pg_strdup(optarg);
510  break;
511 
512  case 'v': /* verbose */
513  g_verbose = true;
514  break;
515 
516  case 'w':
517  prompt_password = TRI_NO;
518  break;
519 
520  case 'W':
521  prompt_password = TRI_YES;
522  break;
523 
524  case 'x': /* skip ACL dump */
525  dopt.aclsSkip = true;
526  break;
527 
528  case 'Z': /* Compression Level */
529  compressLevel = atoi(optarg);
530  if (compressLevel < 0 || compressLevel > 9)
531  {
532  write_msg(NULL, "compression level must be in range 0..9\n");
533  exit_nicely(1);
534  }
535  break;
536 
537  case 0:
538  /* This covers the long options. */
539  break;
540 
541  case 2: /* lock-wait-timeout */
542  dopt.lockWaitTimeout = pg_strdup(optarg);
543  break;
544 
545  case 3: /* SET ROLE */
546  use_role = pg_strdup(optarg);
547  break;
548 
549  case 4: /* exclude table(s) data */
550  simple_string_list_append(&tabledata_exclude_patterns, optarg);
551  break;
552 
553  case 5: /* section */
554  set_dump_section(optarg, &dopt.dumpSections);
555  break;
556 
557  case 6: /* snapshot */
558  dumpsnapshot = pg_strdup(optarg);
559  break;
560 
561  case 7: /* no-sync */
562  dosync = false;
563  break;
564 
565  default:
566  fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
567  exit_nicely(1);
568  }
569  }
570 
571  /*
572  * Non-option argument specifies database name as long as it wasn't
573  * already specified with -d / --dbname
574  */
575  if (optind < argc && dopt.dbname == NULL)
576  dopt.dbname = argv[optind++];
577 
578  /* Complain if any arguments remain */
579  if (optind < argc)
580  {
581  fprintf(stderr, _("%s: too many command-line arguments (first is \"%s\")\n"),
582  progname, argv[optind]);
583  fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
584  progname);
585  exit_nicely(1);
586  }
587 
588  /* --column-inserts implies --inserts */
589  if (dopt.column_inserts)
590  dopt.dump_inserts = 1;
591 
592  /*
593  * Binary upgrade mode implies dumping sequence data even in schema-only
594  * mode. This is not exposed as a separate option, but kept separate
595  * internally for clarity.
596  */
597  if (dopt.binary_upgrade)
598  dopt.sequence_data = 1;
599 
600  if (dopt.dataOnly && dopt.schemaOnly)
601  {
602  write_msg(NULL, "options -s/--schema-only and -a/--data-only cannot be used together\n");
603  exit_nicely(1);
604  }
605 
606  if (dopt.dataOnly && dopt.outputClean)
607  {
608  write_msg(NULL, "options -c/--clean and -a/--data-only cannot be used together\n");
609  exit_nicely(1);
610  }
611 
612  if (dopt.dump_inserts && dopt.oids)
613  {
614  write_msg(NULL, "options --inserts/--column-inserts and -o/--oids cannot be used together\n");
615  write_msg(NULL, "(The INSERT command cannot set OIDs.)\n");
616  exit_nicely(1);
617  }
618 
619  if (dopt.if_exists && !dopt.outputClean)
620  exit_horribly(NULL, "option --if-exists requires option -c/--clean\n");
621 
622  /* Identify archive format to emit */
623  archiveFormat = parseArchiveFormat(format, &archiveMode);
624 
625  /* archiveFormat specific setup */
626  if (archiveFormat == archNull)
627  plainText = 1;
628 
629  /* Custom and directory formats are compressed by default, others not */
630  if (compressLevel == -1)
631  {
632 #ifdef HAVE_LIBZ
633  if (archiveFormat == archCustom || archiveFormat == archDirectory)
634  compressLevel = Z_DEFAULT_COMPRESSION;
635  else
636 #endif
637  compressLevel = 0;
638  }
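/*
 * Note (editorial, hedged): Z_DEFAULT_COMPRESSION is -1, which lets zlib
 * pick its own default level (currently 6); all other cases fall back to
 * 0, i.e. no compression unless -Z was given explicitly.
 */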
639 
640 #ifndef HAVE_LIBZ
641  if (compressLevel != 0)
642  write_msg(NULL, "WARNING: requested compression not available in this "
643  "installation -- archive will be uncompressed\n");
644  compressLevel = 0;
645 #endif
646 
647  /*
648  * If emitting an archive format, we always want to emit a DATABASE item,
649  * in case --create is specified at pg_restore time.
650  */
651  if (!plainText)
652  dopt.outputCreateDB = 1;
653 
654  /*
655  * On Windows we can only have at most MAXIMUM_WAIT_OBJECTS (= 64 usually)
656  * parallel jobs because that's the maximum limit for the
657  * WaitForMultipleObjects() call.
658  */
659  if (numWorkers <= 0
660 #ifdef WIN32
661  || numWorkers > MAXIMUM_WAIT_OBJECTS
662 #endif
663  )
664  exit_horribly(NULL, "invalid number of parallel jobs\n");
665 
666  /* Parallel backup only in the directory archive format so far */
667  if (archiveFormat != archDirectory && numWorkers > 1)
668  exit_horribly(NULL, "parallel backup only supported by the directory format\n");
669 
670  /* Open the output file */
671  fout = CreateArchive(filename, archiveFormat, compressLevel, dosync,
672  archiveMode, setupDumpWorker);
673 
674  /* Make dump options accessible right away */
675  SetArchiveOptions(fout, &dopt, NULL);
676 
677  /* Register the cleanup hook */
678  on_exit_close_archive(fout);
679 
680  /* Let the archiver know how noisy to be */
681  fout->verbose = g_verbose;
682 
683  /*
684  * We allow the server to be back to 8.0, and up to any minor release of
685  * our own major version. (See also version check in pg_dumpall.c.)
686  */
687  fout->minRemoteVersion = 80000;
688  fout->maxRemoteVersion = (PG_VERSION_NUM / 100) * 100 + 99;
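/*
 * Example: a pg_dump built from an 11.x tree (PG_VERSION_NUM between
 * 110000 and 110099) computes maxRemoteVersion = 110099, so any 11.x
 * server is accepted.
 */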
689 
690  fout->numWorkers = numWorkers;
691 
692  /*
693  * Open the database using the Archiver, so it knows about it. Errors mean
694  * death.
695  */
696  ConnectDatabase(fout, dopt.dbname, dopt.pghost, dopt.pgport, dopt.username, prompt_password);
697  setup_connection(fout, dumpencoding, dumpsnapshot, use_role);
698 
699  /*
700  * Disable security label support if server version < v9.1.x (prevents
701  * access to nonexistent pg_seclabel catalog)
702  */
703  if (fout->remoteVersion < 90100)
704  dopt.no_security_labels = 1;
705 
706  /*
707  * On hot standbys, never try to dump unlogged table data, since it will
708  * just throw an error.
709  */
710  if (fout->isStandby)
711  dopt.no_unlogged_table_data = true;
712 
713  /* Select the appropriate subquery to convert user IDs to names */
714  if (fout->remoteVersion >= 80100)
715  username_subquery = "SELECT rolname FROM pg_catalog.pg_roles WHERE oid =";
716  else
717  username_subquery = "SELECT usename FROM pg_catalog.pg_user WHERE usesysid =";
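/*
 * The selected subquery is later interpolated into catalog queries, e.g.
 * as "(%s datdba)" in dumpDatabase(), to resolve an owner OID column into
 * a role name.
 */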
718 
719  /* check the version for the synchronized snapshots feature */
720  if (numWorkers > 1 && fout->remoteVersion < 90200
721  && !dopt.no_synchronized_snapshots)
722  exit_horribly(NULL,
723  "Synchronized snapshots are not supported by this server version.\n"
724  "Run with --no-synchronized-snapshots instead if you do not need\n"
725  "synchronized snapshots.\n");
726 
727  /* check the version when a snapshot is explicitly specified by user */
728  if (dumpsnapshot && fout->remoteVersion < 90200)
729  exit_horribly(NULL,
730  "Exported snapshots are not supported by this server version.\n");
731 
732  /*
733  * Find the last built-in OID, if needed (prior to 8.1)
734  *
735  * With 8.1 and above, we can just use FirstNormalObjectId - 1.
736  */
737  if (fout->remoteVersion < 80100)
738  g_last_builtin_oid = findLastBuiltinOid_V71(fout);
739  else
740  g_last_builtin_oid = FirstNormalObjectId - 1;
741 
742  if (g_verbose)
743  write_msg(NULL, "last built-in OID is %u\n", g_last_builtin_oid);
744 
745  /* Expand schema selection patterns into OID lists */
746  if (schema_include_patterns.head != NULL)
747  {
748  expand_schema_name_patterns(fout, &schema_include_patterns,
749  &schema_include_oids,
750  strict_names);
751  if (schema_include_oids.head == NULL)
752  exit_horribly(NULL, "no matching schemas were found\n");
753  }
754  expand_schema_name_patterns(fout, &schema_exclude_patterns,
755  &schema_exclude_oids,
756  false);
757  /* non-matching exclusion patterns aren't an error */
758 
759  /* Expand table selection patterns into OID lists */
760  if (table_include_patterns.head != NULL)
761  {
762  expand_table_name_patterns(fout, &table_include_patterns,
763  &table_include_oids,
764  strict_names);
765  if (table_include_oids.head == NULL)
766  exit_horribly(NULL, "no matching tables were found\n");
767  }
768  expand_table_name_patterns(fout, &table_exclude_patterns,
769  &table_exclude_oids,
770  false);
771 
772  expand_table_name_patterns(fout, &tabledata_exclude_patterns,
773  &tabledata_exclude_oids,
774  false);
775 
776  /* non-matching exclusion patterns aren't an error */
777 
778  /*
779  * Dumping blobs is the default for dumps where an inclusion switch is not
780  * used (an "include everything" dump). -B can be used to exclude blobs
781  * from those dumps. -b can be used to include blobs even when an
782  * inclusion switch is used.
783  *
784  * -s means "schema only" and blobs are data, not schema, so we never
785  * include blobs when -s is used.
786  */
787  if (dopt.include_everything && !dopt.schemaOnly && !dopt.dontOutputBlobs)
788  dopt.outputBlobs = true;
789 
790  /*
791  * Now scan the database and create DumpableObject structs for all the
792  * objects we intend to dump.
793  */
794  tblinfo = getSchemaData(fout, &numTables);
795 
796  if (fout->remoteVersion < 80400)
797  guessConstraintInheritance(tblinfo, numTables);
798 
799  if (!dopt.schemaOnly)
800  {
801  getTableData(&dopt, tblinfo, numTables, dopt.oids, 0);
802  buildMatViewRefreshDependencies(fout);
803  if (dopt.dataOnly)
804  getTableDataFKConstraints();
805  }
806 
807  if (dopt.schemaOnly && dopt.sequence_data)
808  getTableData(&dopt, tblinfo, numTables, dopt.oids, RELKIND_SEQUENCE);
809 
810  /*
811  * In binary-upgrade mode, we do not have to worry about the actual blob
812  * data or the associated metadata that resides in the pg_largeobject and
813  * pg_largeobject_metadata tables, respectively.
814  *
815  * However, we do need to collect blob information as there may be
816  * comments or other information on blobs that we do need to dump out.
817  */
818  if (dopt.outputBlobs || dopt.binary_upgrade)
819  getBlobs(fout);
820 
821  /*
822  * Collect dependency data to assist in ordering the objects.
823  */
824  getDependencies(fout);
825 
826  /* Lastly, create dummy objects to represent the section boundaries */
827  boundaryObjs = createBoundaryObjects();
828 
829  /* Get pointers to all the known DumpableObjects */
830  getDumpableObjects(&dobjs, &numObjs);
831 
832  /*
833  * Add dummy dependencies to enforce the dump section ordering.
834  */
835  addBoundaryDependencies(dobjs, numObjs, boundaryObjs);
836 
837  /*
838  * Sort the objects into a safe dump order (no forward references).
839  *
840  * We rely on dependency information to help us determine a safe order, so
841  * the initial sort is mostly for cosmetic purposes: we sort by name to
842  * ensure that logically identical schemas will dump identically.
843  */
844  sortDumpableObjectsByTypeName(dobjs, numObjs);
845 
846  /* If we do a parallel dump, we want the largest tables to go first */
847  if (archiveFormat == archDirectory && numWorkers > 1)
848  sortDataAndIndexObjectsBySize(dobjs, numObjs);
849 
850  sortDumpableObjects(dobjs, numObjs,
851  boundaryObjs[0].dumpId, boundaryObjs[1].dumpId);
852 
853  /*
854  * Create archive TOC entries for all the objects to be dumped, in a safe
855  * order.
856  */
857 
858  /* First the special ENCODING, STDSTRINGS, and SEARCHPATH entries. */
859  dumpEncoding(fout);
860  dumpStdStrings(fout);
861  dumpSearchPath(fout);
862 
863  /* The database items are always next, unless we don't want them at all */
864  if (dopt.outputCreateDB)
865  dumpDatabase(fout);
866 
867  /* Now the rearrangeable objects. */
868  for (i = 0; i < numObjs; i++)
869  dumpDumpableObject(fout, dobjs[i]);
870 
871  /*
872  * Set up options info to ensure we dump what we want.
873  */
874  ropt = NewRestoreOptions();
875  ropt->filename = filename;
876 
877  /* if you change this list, see dumpOptionsFromRestoreOptions */
878  ropt->dropSchema = dopt.outputClean;
879  ropt->dataOnly = dopt.dataOnly;
880  ropt->schemaOnly = dopt.schemaOnly;
881  ropt->if_exists = dopt.if_exists;
882  ropt->column_inserts = dopt.column_inserts;
883  ropt->dumpSections = dopt.dumpSections;
884  ropt->aclsSkip = dopt.aclsSkip;
885  ropt->superuser = dopt.outputSuperuser;
886  ropt->createDB = dopt.outputCreateDB;
887  ropt->noOwner = dopt.outputNoOwner;
888  ropt->noTablespace = dopt.outputNoTablespaces;
889  ropt->disable_triggers = dopt.disable_triggers;
890  ropt->use_setsessauth = dopt.use_setsessauth;
891  ropt->disable_dollar_quoting = dopt.disable_dollar_quoting;
892  ropt->dump_inserts = dopt.dump_inserts;
893  ropt->no_comments = dopt.no_comments;
894  ropt->no_publications = dopt.no_publications;
895  ropt->no_security_labels = dopt.no_security_labels;
896  ropt->no_subscriptions = dopt.no_subscriptions;
897  ropt->lockWaitTimeout = dopt.lockWaitTimeout;
898  ropt->include_everything = dopt.include_everything;
899  ropt->enable_row_security = dopt.enable_row_security;
900  ropt->sequence_data = dopt.sequence_data;
901  ropt->binary_upgrade = dopt.binary_upgrade;
902 
903  if (compressLevel == -1)
904  ropt->compression = 0;
905  else
906  ropt->compression = compressLevel;
907 
908  ropt->suppressDumpWarnings = true; /* We've already shown them */
909 
910  SetArchiveOptions(fout, &dopt, ropt);
911 
912  /* Mark which entries should be output */
913  ProcessArchiveRestoreOptions(fout);
914 
915  /*
916  * The archive's TOC entries are now marked as to which ones will actually
917  * be output, so we can set up their dependency lists properly. This isn't
918  * necessary for plain-text output, though.
919  */
920  if (!plainText)
921  BuildArchiveDependencies(fout);
922 
923  /*
924  * And finally we can do the actual output.
925  *
926  * Note: for non-plain-text output formats, the output file is written
927  * inside CloseArchive(). This is, um, bizarre; but not worth changing
928  * right now.
929  */
930  if (plainText)
931  RestoreArchive(fout);
932 
933  CloseArchive(fout);
934 
935  exit_nicely(0);
936 }
937 
938 
939 static void
940 help(const char *progname)
941 {
942  printf(_("%s dumps a database as a text file or to other formats.\n\n"), progname);
943  printf(_("Usage:\n"));
944  printf(_(" %s [OPTION]... [DBNAME]\n"), progname);
945 
946  printf(_("\nGeneral options:\n"));
947  printf(_(" -f, --file=FILENAME output file or directory name\n"));
948  printf(_(" -F, --format=c|d|t|p output file format (custom, directory, tar,\n"
949  " plain text (default))\n"));
950  printf(_(" -j, --jobs=NUM use this many parallel jobs to dump\n"));
951  printf(_(" -v, --verbose verbose mode\n"));
952  printf(_(" -V, --version output version information, then exit\n"));
953  printf(_(" -Z, --compress=0-9 compression level for compressed formats\n"));
954  printf(_(" --lock-wait-timeout=TIMEOUT fail after waiting TIMEOUT for a table lock\n"));
955  printf(_(" --no-sync do not wait for changes to be written safely to disk\n"));
956  printf(_(" -?, --help show this help, then exit\n"));
957 
958  printf(_("\nOptions controlling the output content:\n"));
959  printf(_(" -a, --data-only dump only the data, not the schema\n"));
960  printf(_(" -b, --blobs include large objects in dump\n"));
961  printf(_(" -B, --no-blobs exclude large objects in dump\n"));
962  printf(_(" -c, --clean clean (drop) database objects before recreating\n"));
963  printf(_(" -C, --create include commands to create database in dump\n"));
964  printf(_(" -E, --encoding=ENCODING dump the data in encoding ENCODING\n"));
965  printf(_(" -n, --schema=SCHEMA dump the named schema(s) only\n"));
966  printf(_(" -N, --exclude-schema=SCHEMA do NOT dump the named schema(s)\n"));
967  printf(_(" -o, --oids include OIDs in dump\n"));
968  printf(_(" -O, --no-owner skip restoration of object ownership in\n"
969  " plain-text format\n"));
970  printf(_(" -s, --schema-only dump only the schema, no data\n"));
971  printf(_(" -S, --superuser=NAME superuser user name to use in plain-text format\n"));
972  printf(_(" -t, --table=TABLE dump the named table(s) only\n"));
973  printf(_(" -T, --exclude-table=TABLE do NOT dump the named table(s)\n"));
974  printf(_(" -x, --no-privileges do not dump privileges (grant/revoke)\n"));
975  printf(_(" --binary-upgrade for use by upgrade utilities only\n"));
976  printf(_(" --column-inserts dump data as INSERT commands with column names\n"));
977  printf(_(" --disable-dollar-quoting disable dollar quoting, use SQL standard quoting\n"));
978  printf(_(" --disable-triggers disable triggers during data-only restore\n"));
979  printf(_(" --enable-row-security enable row security (dump only content user has\n"
980  " access to)\n"));
981  printf(_(" --exclude-table-data=TABLE do NOT dump data for the named table(s)\n"));
982  printf(_(" --if-exists use IF EXISTS when dropping objects\n"));
983  printf(_(" --inserts dump data as INSERT commands, rather than COPY\n"));
984  printf(_(" --no-comments do not dump comments\n"));
985  printf(_(" --no-publications do not dump publications\n"));
986  printf(_(" --no-security-labels do not dump security label assignments\n"));
987  printf(_(" --no-subscriptions do not dump subscriptions\n"));
988  printf(_(" --no-synchronized-snapshots do not use synchronized snapshots in parallel jobs\n"));
989  printf(_(" --no-tablespaces do not dump tablespace assignments\n"));
990  printf(_(" --no-unlogged-table-data do not dump unlogged table data\n"));
991  printf(_(" --quote-all-identifiers quote all identifiers, even if not key words\n"));
992  printf(_(" --load-via-partition-root load partitions via the root table\n"));
993  printf(_(" --section=SECTION dump named section (pre-data, data, or post-data)\n"));
994  printf(_(" --serializable-deferrable wait until the dump can run without anomalies\n"));
995  printf(_(" --snapshot=SNAPSHOT use given snapshot for the dump\n"));
996  printf(_(" --strict-names require table and/or schema include patterns to\n"
997  " match at least one entity each\n"));
998  printf(_(" --use-set-session-authorization\n"
999  " use SET SESSION AUTHORIZATION commands instead of\n"
1000  " ALTER OWNER commands to set ownership\n"));
1001 
1002  printf(_("\nConnection options:\n"));
1003  printf(_(" -d, --dbname=DBNAME database to dump\n"));
1004  printf(_(" -h, --host=HOSTNAME database server host or socket directory\n"));
1005  printf(_(" -p, --port=PORT database server port number\n"));
1006  printf(_(" -U, --username=NAME connect as specified database user\n"));
1007  printf(_(" -w, --no-password never prompt for password\n"));
1008  printf(_(" -W, --password force password prompt (should happen automatically)\n"));
1009  printf(_(" --role=ROLENAME do SET ROLE before dump\n"));
1010 
1011  printf(_("\nIf no database name is supplied, then the PGDATABASE environment\n"
1012  "variable value is used.\n\n"));
1013  printf(_("Report bugs to <pgsql-bugs@postgresql.org>.\n"));
1014 }
1015 
1016 static void
1017 setup_connection(Archive *AH, const char *dumpencoding,
1018  const char *dumpsnapshot, char *use_role)
1019 {
1020  DumpOptions *dopt = AH->dopt;
1021  PGconn *conn = GetConnection(AH);
1022  const char *std_strings;
1023 
1024  PQclear(ExecuteSqlQueryForSingleRow(AH, ALWAYS_SECURE_SEARCH_PATH_SQL));
1025 
1026  /*
1027  * Set the client encoding if requested.
1028  */
1029  if (dumpencoding)
1030  {
1031  if (PQsetClientEncoding(conn, dumpencoding) < 0)
1032  exit_horribly(NULL, "invalid client encoding \"%s\" specified\n",
1033  dumpencoding);
1034  }
1035 
1036  /*
1037  * Get the active encoding and the standard_conforming_strings setting, so
1038  * we know how to escape strings.
1039  */
1040  AH->encoding = PQclientEncoding(conn);
1041 
1042  std_strings = PQparameterStatus(conn, "standard_conforming_strings");
1043  AH->std_strings = (std_strings && strcmp(std_strings, "on") == 0);
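/*
 * Editorial note: std_strings records whether the server treats
 * backslashes in ordinary string literals literally
 * (standard_conforming_strings = on) or as escapes; the archiver consults
 * it when emitting string constants.
 */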
1044 
1045  /*
1046  * Set the role if requested. In a parallel dump worker, we'll be passed
1047  * use_role == NULL, but AH->use_role is already set (if user specified it
1048  * originally) and we should use that.
1049  */
1050  if (!use_role && AH->use_role)
1051  use_role = AH->use_role;
1052 
1053  /* Set the role if requested */
1054  if (use_role && AH->remoteVersion >= 80100)
1055  {
1056  PQExpBuffer query = createPQExpBuffer();
1057 
1058  appendPQExpBuffer(query, "SET ROLE %s", fmtId(use_role));
1059  ExecuteSqlStatement(AH, query->data);
1060  destroyPQExpBuffer(query);
1061 
1062  /* save it for possible later use by parallel workers */
1063  if (!AH->use_role)
1064  AH->use_role = pg_strdup(use_role);
1065  }
1066 
1067  /* Set the datestyle to ISO to ensure the dump's portability */
1068  ExecuteSqlStatement(AH, "SET DATESTYLE = ISO");
1069 
1070  /* Likewise, avoid using sql_standard intervalstyle */
1071  if (AH->remoteVersion >= 80400)
1072  ExecuteSqlStatement(AH, "SET INTERVALSTYLE = POSTGRES");
1073 
1074  /*
1075  * Set extra_float_digits so that we can dump float data exactly (given
1076  * correctly implemented float I/O code, anyway)
1077  */
1078  if (AH->remoteVersion >= 90000)
1079  ExecuteSqlStatement(AH, "SET extra_float_digits TO 3");
1080  else
1081  ExecuteSqlStatement(AH, "SET extra_float_digits TO 2");
1082 
1083  /*
1084  * If synchronized scanning is supported, disable it, to prevent
1085  * unpredictable changes in row ordering across a dump and reload.
1086  */
1087  if (AH->remoteVersion >= 80300)
1088  ExecuteSqlStatement(AH, "SET synchronize_seqscans TO off");
1089 
1090  /*
1091  * Disable timeouts if supported.
1092  */
1093  ExecuteSqlStatement(AH, "SET statement_timeout = 0");
1094  if (AH->remoteVersion >= 90300)
1095  ExecuteSqlStatement(AH, "SET lock_timeout = 0");
1096  if (AH->remoteVersion >= 90600)
1097  ExecuteSqlStatement(AH, "SET idle_in_transaction_session_timeout = 0");
1098 
1099  /*
1100  * Quote all identifiers, if requested.
1101  */
1102  if (quote_all_identifiers && AH->remoteVersion >= 90100)
1103  ExecuteSqlStatement(AH, "SET quote_all_identifiers = true");
1104 
1105  /*
1106  * Adjust row-security mode, if supported.
1107  */
1108  if (AH->remoteVersion >= 90500)
1109  {
1110  if (dopt->enable_row_security)
1111  ExecuteSqlStatement(AH, "SET row_security = on");
1112  else
1113  ExecuteSqlStatement(AH, "SET row_security = off");
1114  }
1115 
1116  /*
1117  * Start transaction-snapshot mode transaction to dump consistent data.
1118  */
1119  ExecuteSqlStatement(AH, "BEGIN");
1120  if (AH->remoteVersion >= 90100)
1121  {
1122  /*
1123  * To support the combination of serializable_deferrable with the jobs
1124  * option we use REPEATABLE READ for the worker connections that are
1125  * passed a snapshot. As long as the snapshot is acquired in a
1126  * SERIALIZABLE, READ ONLY, DEFERRABLE transaction, its use within a
1127  * REPEATABLE READ transaction provides the appropriate integrity
1128  * guarantees. This is a kluge, but safe for back-patching.
1129  */
1130  if (dopt->serializable_deferrable && AH->sync_snapshot_id == NULL)
1131  ExecuteSqlStatement(AH,
1132  "SET TRANSACTION ISOLATION LEVEL "
1133  "SERIALIZABLE, READ ONLY, DEFERRABLE");
1134  else
1135  ExecuteSqlStatement(AH,
1136  "SET TRANSACTION ISOLATION LEVEL "
1137  "REPEATABLE READ, READ ONLY");
1138  }
1139  else
1140  {
1141  ExecuteSqlStatement(AH,
1142  "SET TRANSACTION ISOLATION LEVEL "
1143  "SERIALIZABLE, READ ONLY");
1144  }
1145 
1146  /*
1147  * If user specified a snapshot to use, select that. In a parallel dump
1148  * worker, we'll be passed dumpsnapshot == NULL, but AH->sync_snapshot_id
1149  * is already set (if the server can handle it) and we should use that.
1150  */
1151  if (dumpsnapshot)
1152  AH->sync_snapshot_id = pg_strdup(dumpsnapshot);
1153 
1154  if (AH->sync_snapshot_id)
1155  {
1156  PQExpBuffer query = createPQExpBuffer();
1157 
1158  appendPQExpBuffer(query, "SET TRANSACTION SNAPSHOT ");
1159  appendStringLiteralConn(query, AH->sync_snapshot_id, conn);
1160  ExecuteSqlStatement(AH, query->data);
1161  destroyPQExpBuffer(query);
1162  }
1163  else if (AH->numWorkers > 1 &&
1164  AH->remoteVersion >= 90200 &&
1165  !dopt->no_synchronized_snapshots)
1166  {
1167  if (AH->isStandby && AH->remoteVersion < 100000)
1168  exit_horribly(NULL,
1169  "Synchronized snapshots on standby servers are not supported by this server version.\n"
1170  "Run with --no-synchronized-snapshots instead if you do not need\n"
1171  "synchronized snapshots.\n");
1172 
1173 
1174  AH->sync_snapshot_id = get_synchronized_snapshot(AH);
1175  }
1176 }
1177 
1178 /* Set up connection for a parallel worker process */
1179 static void
1180 setupDumpWorker(Archive *AH)
1181 {
1182  /*
1183  * We want to re-select all the same values the master connection is
1184  * using. We'll have inherited directly-usable values in
1185  * AH->sync_snapshot_id and AH->use_role, but we need to translate the
1186  * inherited encoding value back to a string to pass to setup_connection.
1187  */
1188  setup_connection(AH,
1189  pg_encoding_to_char(AH->encoding),
1190  NULL,
1191  NULL);
1192 }
1193 
1194 static char *
1195 get_synchronized_snapshot(Archive *fout)
1196 {
1197  char *query = "SELECT pg_catalog.pg_export_snapshot()";
1198  char *result;
1199  PGresult *res;
1200 
1201  res = ExecuteSqlQueryForSingleRow(fout, query);
1202  result = pg_strdup(PQgetvalue(res, 0, 0));
1203  PQclear(res);
1204 
1205  return result;
1206 }
1207 
1208 static ArchiveFormat
1209 parseArchiveFormat(const char *format, ArchiveMode *mode)
1210 {
1211  ArchiveFormat archiveFormat;
1212 
1213  *mode = archModeWrite;
1214 
1215  if (pg_strcasecmp(format, "a") == 0 || pg_strcasecmp(format, "append") == 0)
1216  {
1217  /* This is used by pg_dumpall, and is not documented */
1218  archiveFormat = archNull;
1219  *mode = archModeAppend;
1220  }
1221  else if (pg_strcasecmp(format, "c") == 0)
1222  archiveFormat = archCustom;
1223  else if (pg_strcasecmp(format, "custom") == 0)
1224  archiveFormat = archCustom;
1225  else if (pg_strcasecmp(format, "d") == 0)
1226  archiveFormat = archDirectory;
1227  else if (pg_strcasecmp(format, "directory") == 0)
1228  archiveFormat = archDirectory;
1229  else if (pg_strcasecmp(format, "p") == 0)
1230  archiveFormat = archNull;
1231  else if (pg_strcasecmp(format, "plain") == 0)
1232  archiveFormat = archNull;
1233  else if (pg_strcasecmp(format, "t") == 0)
1234  archiveFormat = archTar;
1235  else if (pg_strcasecmp(format, "tar") == 0)
1236  archiveFormat = archTar;
1237  else
1238  exit_horribly(NULL, "invalid output format \"%s\" specified\n", format);
1239  return archiveFormat;
1240 }
1241 
1242 /*
1243  * Find the OIDs of all schemas matching the given list of patterns,
1244  * and append them to the given OID list.
1245  */
1246 static void
1247 expand_schema_name_patterns(Archive *fout,
1248  SimpleStringList *patterns,
1249  SimpleOidList *oids,
1250  bool strict_names)
1251 {
1252  PQExpBuffer query;
1253  PGresult *res;
1254  SimpleStringListCell *cell;
1255  int i;
1256 
1257  if (patterns->head == NULL)
1258  return; /* nothing to do */
1259 
1260  query = createPQExpBuffer();
1261 
1262  /*
1263  * The loop below runs multiple SELECTs, which might sometimes result in
1264  * duplicate entries in the OID list, but we don't care.
1265  */
1266 
1267  for (cell = patterns->head; cell; cell = cell->next)
1268  {
1269  appendPQExpBuffer(query,
1270  "SELECT oid FROM pg_catalog.pg_namespace n\n");
1271  processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1272  false, NULL, "n.nspname", NULL, NULL);
1273 
1274  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1275  if (strict_names && PQntuples(res) == 0)
1276  exit_horribly(NULL, "no matching schemas were found for pattern \"%s\"\n", cell->val);
1277 
1278  for (i = 0; i < PQntuples(res); i++)
1279  {
1280  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1281  }
1282 
1283  PQclear(res);
1284  resetPQExpBuffer(query);
1285  }
1286 
1287  destroyPQExpBuffer(query);
1288 }
1289 
1290 /*
1291  * Find the OIDs of all tables matching the given list of patterns,
1292  * and append them to the given OID list.
1293  */
1294 static void
1295 expand_table_name_patterns(Archive *fout,
1296  SimpleStringList *patterns, SimpleOidList *oids,
1297  bool strict_names)
1298 {
1299  PQExpBuffer query;
1300  PGresult *res;
1301  SimpleStringListCell *cell;
1302  int i;
1303 
1304  if (patterns->head == NULL)
1305  return; /* nothing to do */
1306 
1307  query = createPQExpBuffer();
1308 
1309  /*
1310  * this might sometimes result in duplicate entries in the OID list, but
1311  * we don't care.
1312  */
1313 
1314  for (cell = patterns->head; cell; cell = cell->next)
1315  {
1316  /*
1317  * Query must remain ABSOLUTELY devoid of unqualified names. This
1318  * would be unnecessary given a pg_table_is_visible() variant taking a
1319  * search_path argument.
1320  */
1321  appendPQExpBuffer(query,
1322  "SELECT c.oid"
1323  "\nFROM pg_catalog.pg_class c"
1324  "\n LEFT JOIN pg_catalog.pg_namespace n"
1325  "\n ON n.oid OPERATOR(pg_catalog.=) c.relnamespace"
1326  "\nWHERE c.relkind OPERATOR(pg_catalog.=) ANY"
1327  "\n (array['%c', '%c', '%c', '%c', '%c', '%c'])\n",
1328  RELKIND_RELATION, RELKIND_SEQUENCE, RELKIND_VIEW,
1329  RELKIND_MATVIEW, RELKIND_FOREIGN_TABLE,
1330  RELKIND_PARTITIONED_TABLE);
1331  processSQLNamePattern(GetConnection(fout), query, cell->val, true,
1332  false, "n.nspname", "c.relname", NULL,
1333  "pg_catalog.pg_table_is_visible(c.oid)");
1334 
1335  ExecuteSqlStatement(fout, "RESET search_path");
1336  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1337  PQclear(ExecuteSqlQueryForSingleRow(fout,
1338  ALWAYS_SECURE_SEARCH_PATH_SQL));
1339  if (strict_names && PQntuples(res) == 0)
1340  exit_horribly(NULL, "no matching tables were found for pattern \"%s\"\n", cell->val);
1341 
1342  for (i = 0; i < PQntuples(res); i++)
1343  {
1344  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1345  }
1346 
1347  PQclear(res);
1348  resetPQExpBuffer(query);
1349  }
1350 
1351  destroyPQExpBuffer(query);
1352 }
1353 
1354 /*
1355  * checkExtensionMembership
1356  * Determine whether object is an extension member, and if so,
1357  * record an appropriate dependency and set the object's dump flag.
1358  *
1359  * It's important to call this for each object that could be an extension
1360  * member. Generally, we integrate this with determining the object's
1361  * to-be-dumped-ness, since extension membership overrides other rules for that.
1362  *
1363  * Returns true if object is an extension member, else false.
1364  */
1365 static bool
1366 checkExtensionMembership(DumpableObject *dobj, Archive *fout)
1367 {
1368  ExtensionInfo *ext = findOwningExtension(dobj->catId);
1369 
1370  if (ext == NULL)
1371  return false;
1372 
1373  dobj->ext_member = true;
1374 
1375  /* Record dependency so that getDependencies needn't deal with that */
1376  addObjectDependency(dobj, ext->dobj.dumpId);
1377 
1378  /*
1379  * In 9.6 and above, mark the member object to have any non-initial ACL,
1380  * policies, and security labels dumped.
1381  *
1382  * Note that any initial ACLs (see pg_init_privs) will be removed when we
1383  * extract the information about the object. We don't provide support for
1384  * initial policies and security labels and it seems unlikely for those to
1385  * ever exist, but we may have to revisit this later.
1386  *
1387  * Prior to 9.6, we do not include any extension member components.
1388  *
1389  * In binary upgrades, we still dump all components of the members
1390  * individually, since the idea is to exactly reproduce the database
1391  * contents rather than replace the extension contents with something
1392  * different.
1393  */
1394  if (fout->dopt->binary_upgrade)
1395  dobj->dump = ext->dobj.dump;
1396  else
1397  {
1398  if (fout->remoteVersion < 90600)
1399  dobj->dump = DUMP_COMPONENT_NONE;
1400  else
1401  dobj->dump = ext->dobj.dump_contains & (DUMP_COMPONENT_ACL |
1402  DUMP_COMPONENT_SECLABEL |
1403  DUMP_COMPONENT_POLICY);
1404  }
1405 
1406  return true;
1407 }
1408 
1409 /*
1410  * selectDumpableNamespace: policy-setting subroutine
1411  * Mark a namespace as to be dumped or not
1412  */
1413 static void
1414 selectDumpableNamespace(NamespaceInfo *nsinfo, Archive *fout)
1415 {
1416  /*
1417  * If specific tables are being dumped, do not dump any complete
1418  * namespaces. If specific namespaces are being dumped, dump just those
1419  * namespaces. Otherwise, dump all non-system namespaces.
1420  */
1421  if (table_include_oids.head != NULL)
1422  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1423  else if (schema_include_oids.head != NULL)
1424  nsinfo->dobj.dump_contains = nsinfo->dobj.dump =
1425  simple_oid_list_member(&schema_include_oids,
1426  nsinfo->dobj.catId.oid) ?
1427  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1428  else if (fout->remoteVersion >= 90600 &&
1429  strcmp(nsinfo->dobj.name, "pg_catalog") == 0)
1430  {
1431  /*
1432  * In 9.6 and above, we dump out any ACLs defined in pg_catalog, if
1433  * they are interesting (and not the original ACLs which were set at
1434  * initdb time, see pg_init_privs).
1435  */
1436  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
1437  }
1438  else if (strncmp(nsinfo->dobj.name, "pg_", 3) == 0 ||
1439  strcmp(nsinfo->dobj.name, "information_schema") == 0)
1440  {
1441  /* Other system schemas don't get dumped */
1442  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1443  }
1444  else if (strcmp(nsinfo->dobj.name, "public") == 0)
1445  {
1446  /*
1447  * The public schema is a strange beast that sits in a sort of
1448  * no-mans-land between being a system object and a user object. We
1449  * don't want to dump creation or comment commands for it, because
1450  * that complicates matters for non-superuser use of pg_dump. But we
1451  * should dump any ACL changes that have occurred for it, and of
1452  * course we should dump contained objects.
1453  */
1454  nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
1455  nsinfo->dobj.dump_contains = DUMP_COMPONENT_ALL;
1456  }
1457  else
1458  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ALL;
1459 
1460  /*
1461  * In any case, a namespace can be excluded by an exclusion switch
1462  */
1463  if (nsinfo->dobj.dump_contains &&
1464  simple_oid_list_member(&schema_exclude_oids,
1465  nsinfo->dobj.catId.oid))
1466  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1467 
1468  /*
1469  * If the schema belongs to an extension, allow extension membership to
1470  * override the dump decision for the schema itself. However, this does
1471  * not change dump_contains, so this won't change what we do with objects
1472  * within the schema. (If they belong to the extension, they'll get
1473  * suppressed by it, otherwise not.)
1474  */
1475  (void) checkExtensionMembership(&nsinfo->dobj, fout);
1476 }
1477 
1478 /*
1479  * selectDumpableTable: policy-setting subroutine
1480  * Mark a table as to be dumped or not
1481  */
1482 static void
1483 selectDumpableTable(TableInfo *tbinfo, Archive *fout)
1484 {
1485  if (checkExtensionMembership(&tbinfo->dobj, fout))
1486  return; /* extension membership overrides all else */
1487 
1488  /*
1489  * If specific tables are being dumped, dump just those tables; else, dump
1490  * according to the parent namespace's dump flag.
1491  */
1492  if (table_include_oids.head != NULL)
1493  tbinfo->dobj.dump = simple_oid_list_member(&table_include_oids,
1494  tbinfo->dobj.catId.oid) ?
1495  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1496  else
1497  tbinfo->dobj.dump = tbinfo->dobj.namespace->dobj.dump_contains;
1498 
1499  /*
1500  * In any case, a table can be excluded by an exclusion switch
1501  */
1502  if (tbinfo->dobj.dump &&
1503  simple_oid_list_member(&table_exclude_oids,
1504  tbinfo->dobj.catId.oid))
1505  tbinfo->dobj.dump = DUMP_COMPONENT_NONE;
1506 }
1507 
1508 /*
1509  * selectDumpableType: policy-setting subroutine
1510  * Mark a type as to be dumped or not
1511  *
1512  * If it's a table's rowtype or an autogenerated array type, we also apply a
1513  * special type code to facilitate sorting into the desired order. (We don't
1514  * want to consider those to be ordinary types because that would bring tables
1515  * up into the datatype part of the dump order.) We still set the object's
1516  * dump flag; that's not going to cause the dummy type to be dumped, but we
1517  * need it so that casts involving such types will be dumped correctly -- see
1518  * dumpCast. This means the flag should be set the same as for the underlying
1519  * object (the table or base type).
1520  */
1521 static void
1522 selectDumpableType(TypeInfo *tyinfo, Archive *fout)
1523 {
1524  /* skip complex types, except for standalone composite types */
1525  if (OidIsValid(tyinfo->typrelid) &&
1526  tyinfo->typrelkind != RELKIND_COMPOSITE_TYPE)
1527  {
1528  TableInfo *tytable = findTableByOid(tyinfo->typrelid);
1529 
1530  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1531  if (tytable != NULL)
1532  tyinfo->dobj.dump = tytable->dobj.dump;
1533  else
1534  tyinfo->dobj.dump = DUMP_COMPONENT_NONE;
1535  return;
1536  }
1537 
1538  /* skip auto-generated array types */
1539  if (tyinfo->isArray)
1540  {
1541  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1542 
1543  /*
1544  * Fall through to set the dump flag; we assume that the subsequent
1545  * rules will do the same thing as they would for the array's base
1546  * type. (We cannot reliably look up the base type here, since
1547  * getTypes may not have processed it yet.)
1548  */
1549  }
1550 
1551  if (checkExtensionMembership(&tyinfo->dobj, fout))
1552  return; /* extension membership overrides all else */
1553 
1554  /* Dump based on if the contents of the namespace are being dumped */
1555  tyinfo->dobj.dump = tyinfo->dobj.namespace->dobj.dump_contains;
1556 }
1557 
1558 /*
1559  * selectDumpableDefaultACL: policy-setting subroutine
1560  * Mark a default ACL as to be dumped or not
1561  *
1562  * For per-schema default ACLs, dump if the schema is to be dumped.
1563  * Otherwise dump if we are dumping "everything". Note that dataOnly
1564  * and aclsSkip are checked separately.
1565  */
1566 static void
1567 selectDumpableDefaultACL(DefaultACLInfo *dinfo, DumpOptions *dopt)
1568 {
1569  /* Default ACLs can't be extension members */
1570 
1571  if (dinfo->dobj.namespace)
1572  /* default ACLs are considered part of the namespace */
1573  dinfo->dobj.dump = dinfo->dobj.namespace->dobj.dump_contains;
1574  else
1575  dinfo->dobj.dump = dopt->include_everything ?
1576  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1577 }
1578 
1579 /*
1580  * selectDumpableCast: policy-setting subroutine
1581  * Mark a cast as to be dumped or not
1582  *
1583  * Casts do not belong to any particular namespace (since they haven't got
1584  * names), nor do they have identifiable owners. To distinguish user-defined
1585  * casts from built-in ones, we must resort to checking whether the cast's
1586  * OID is in the range reserved for initdb.
1587  */
1588 static void
1589 selectDumpableCast(CastInfo *cast, Archive *fout)
1590 {
1591  if (checkExtensionMembership(&cast->dobj, fout))
1592  return; /* extension membership overrides all else */
1593 
1594  /*
1595  * This would be DUMP_COMPONENT_ACL for from-initdb casts, but they do not
1596  * support ACLs currently.
1597  */
1598  if (cast->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1599  cast->dobj.dump = DUMP_COMPONENT_NONE;
1600  else
1601  cast->dobj.dump = fout->dopt->include_everything ?
1602  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1603 }
1604 
1605 /*
1606  * selectDumpableProcLang: policy-setting subroutine
1607  * Mark a procedural language as to be dumped or not
1608  *
1609  * Procedural languages do not belong to any particular namespace. To
1610  * identify built-in languages, we must resort to checking whether the
1611  * language's OID is in the range reserved for initdb.
1612  */
1613 static void
1614 selectDumpableProcLang(ProcLangInfo *plang, Archive *fout)
1615 {
1616  if (checkExtensionMembership(&plang->dobj, fout))
1617  return; /* extension membership overrides all else */
1618 
1619  /*
1620  * Only include procedural languages when we are dumping everything.
1621  *
1622  * For from-initdb procedural languages, only include ACLs, as we do for
1623  * the pg_catalog namespace. We need this because procedural languages do
1624  * not live in any namespace.
1625  */
1626  if (!fout->dopt->include_everything)
1627  plang->dobj.dump = DUMP_COMPONENT_NONE;
1628  else
1629  {
1630  if (plang->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1631  plang->dobj.dump = fout->remoteVersion < 90600 ?
1632  DUMP_COMPONENT_NONE : DUMP_COMPONENT_ACL;
1633  else
1634  plang->dobj.dump = DUMP_COMPONENT_ALL;
1635  }
1636 }
1637 
1638 /*
1639  * selectDumpableAccessMethod: policy-setting subroutine
1640  * Mark an access method as to be dumped or not
1641  *
1642  * Access methods do not belong to any particular namespace. To identify
1643  * built-in access methods, we must resort to checking whether the
1644  * method's OID is in the range reserved for initdb.
1645  */
1646 static void
1647 selectDumpableAccessMethod(AccessMethodInfo *method, Archive *fout)
1648 {
1649  if (checkExtensionMembership(&method->dobj, fout))
1650  return; /* extension membership overrides all else */
1651 
1652  /*
1653  * This would be DUMP_COMPONENT_ACL for from-initdb access methods, but
1654  * they do not support ACLs currently.
1655  */
1656  if (method->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1657  method->dobj.dump = DUMP_COMPONENT_NONE;
1658  else
1659  method->dobj.dump = fout->dopt->include_everything ?
1660  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1661 }
1662 
1663 /*
1664  * selectDumpableExtension: policy-setting subroutine
1665  * Mark an extension as to be dumped or not
1666  *
1667  * Built-in extensions should be skipped except for checking ACLs, since we
1668  * assume those will already be installed in the target database. We identify
1669  * such extensions by their having OIDs in the range reserved for initdb.
1670  * We dump all user-added extensions by default, or none of them if
1671  * include_everything is false (i.e., a --schema or --table switch was given).
1672  */
1673 static void
1674 selectDumpableExtension(ExtensionInfo *extinfo, DumpOptions *dopt)
1675 {
1676  /*
1677  * Use DUMP_COMPONENT_ACL for built-in extensions, to allow users to
1678  * change permissions on their member objects, if they wish to, and have
1679  * those changes preserved.
1680  */
1681  if (extinfo->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1682  extinfo->dobj.dump = extinfo->dobj.dump_contains = DUMP_COMPONENT_ACL;
1683  else
1684  extinfo->dobj.dump = extinfo->dobj.dump_contains =
1685  dopt->include_everything ? DUMP_COMPONENT_ALL :
1686  DUMP_COMPONENT_NONE;
1687 }
1688 
1689 /*
1690  * selectDumpablePublicationTable: policy-setting subroutine
1691  * Mark a publication table as to be dumped or not
1692  *
1693  * Publication tables have schemas, but those are ignored in decision making,
1694  * because publications are only dumped when we are dumping everything.
1695  */
1696 static void
1697 selectDumpablePublicationTable(DumpableObject *dobj, Archive *fout)
1698 {
1699  if (checkExtensionMembership(dobj, fout))
1700  return; /* extension membership overrides all else */
1701 
1702  dobj->dump = fout->dopt->include_everything ?
1703  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1704 }
1705 
1706 /*
1707  * selectDumpableObject: policy-setting subroutine
1708  * Mark a generic dumpable object as to be dumped or not
1709  *
1710  * Use this only for object types without a special-case routine above.
1711  */
1712 static void
1713 selectDumpableObject(DumpableObject *dobj, Archive *fout)
1714 {
1715  if (checkExtensionMembership(dobj, fout))
1716  return; /* extension membership overrides all else */
1717 
1718  /*
1719  * Default policy is to dump if parent namespace is dumpable, or for
1720  * non-namespace-associated items, dump if we're dumping "everything".
1721  */
1722  if (dobj->namespace)
1723  dobj->dump = dobj->namespace->dobj.dump_contains;
1724  else
1725  dobj->dump = fout->dopt->include_everything ?
1726  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1727 }
1728 
1729 /*
1730  * Dump a table's contents for loading using the COPY command
1731  * - this routine is called by the Archiver when it wants the table
1732  * to be dumped.
1733  */
1734 
1735 static int
1736 dumpTableData_copy(Archive *fout, void *dcontext)
1737 {
1738  TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
1739  TableInfo *tbinfo = tdinfo->tdtable;
1740  const char *classname = tbinfo->dobj.name;
1741  const bool hasoids = tbinfo->hasoids;
1742  const bool oids = tdinfo->oids;
1743  PQExpBuffer q = createPQExpBuffer();
1744 
1745  /*
1746  * Note: can't use getThreadLocalPQExpBuffer() here, we're calling fmtId
1747  * which uses it already.
1748  */
1749  PQExpBuffer clistBuf = createPQExpBuffer();
1750  PGconn *conn = GetConnection(fout);
1751  PGresult *res;
1752  int ret;
1753  char *copybuf;
1754  const char *column_list;
1755 
1756  if (g_verbose)
1757  write_msg(NULL, "dumping contents of table \"%s.%s\"\n",
1758  tbinfo->dobj.namespace->dobj.name, classname);
1759 
1760  /*
1761  * Specify the column list explicitly so that we have no possibility of
1762  * retrieving data in the wrong column order. (The default column
1763  * ordering of COPY will not be what we want in certain corner cases
1764  * involving ADD COLUMN and inheritance.)
1765  */
1766  column_list = fmtCopyColumnList(tbinfo, clistBuf);
1767 
1768  if (oids && hasoids)
1769  {
1770  appendPQExpBuffer(q, "COPY %s %s WITH OIDS TO stdout;",
1771  fmtQualifiedDumpable(tbinfo),
1772  column_list);
1773  }
1774  else if (tdinfo->filtercond)
1775  {
1776  /* Note: this syntax is only supported in 8.2 and up */
1777  appendPQExpBufferStr(q, "COPY (SELECT ");
1778  /* klugery to get rid of parens in column list */
1779  if (strlen(column_list) > 2)
1780  {
1781  appendPQExpBufferStr(q, column_list + 1);
1782  q->data[q->len - 1] = ' ';
1783  }
1784  else
1785  appendPQExpBufferStr(q, "* ");
1786  appendPQExpBuffer(q, "FROM %s %s) TO stdout;",
1787  fmtQualifiedDumpable(tbinfo),
1788  tdinfo->filtercond);
1789  }
1790  else
1791  {
1792  appendPQExpBuffer(q, "COPY %s %s TO stdout;",
1793  fmtQualifiedDumpable(tbinfo),
1794  column_list);
1795  }
1796  res = ExecuteSqlQuery(fout, q->data, PGRES_COPY_OUT);
1797  PQclear(res);
1798  destroyPQExpBuffer(clistBuf);
1799 
1800  for (;;)
1801  {
1802  ret = PQgetCopyData(conn, &copybuf, 0);
1803 
1804  if (ret < 0)
1805  break; /* done or error */
1806 
1807  if (copybuf)
1808  {
1809  WriteData(fout, copybuf, ret);
1810  PQfreemem(copybuf);
1811  }
1812 
1813  /* ----------
1814  * THROTTLE:
1815  *
1816  * There was considerable discussion in late July, 2000 regarding
1817  * slowing down pg_dump when backing up large tables. Users with both
1818  * slow & fast (multi-processor) machines experienced performance
1819  * degradation when doing a backup.
1820  *
1821  * Initial attempts based on sleeping for a number of ms for each ms
1822  * of work were deemed too complex, then a simple 'sleep in each loop'
1823  * implementation was suggested. The latter failed because the loop
1824  * was too tight. Finally, the following was implemented:
1825  *
1826  * If throttle is non-zero, then
1827  * See how long since the last sleep.
1828  * Work out how long to sleep (based on ratio).
1829  * If sleep is more than 100ms, then
1830  * sleep
1831  * reset timer
1832  * EndIf
1833  * EndIf
1834  *
1835  * where the throttle value was the number of ms to sleep per ms of
1836  * work. The calculation was done in each loop.
1837  *
1838  * Most of the hard work is done in the backend, and this solution
1839  * still did not work particularly well: on slow machines, the ratio
1840  * was 50:1, and on medium paced machines, 1:1, and on fast
1841  * multi-processor machines, it had little or no effect, for reasons
1842  * that were unclear.
1843  *
1844  * Further discussion ensued, and the proposal was dropped.
1845  *
1846  * For those people who want this feature, it can be implemented using
1847  * gettimeofday in each loop, calculating the time since last sleep,
1848  * multiplying that by the sleep ratio, then if the result is more
1849  * than a preset 'minimum sleep time' (say 100ms), call the 'select'
1850  * function to sleep for a subsecond period ie.
1851  *
1852  * select(0, NULL, NULL, NULL, &tvi);
1853  *
1854  * This will return after the interval specified in the structure tvi.
1855  * Finally, call gettimeofday again to save the 'last sleep time'.
1856  * ----------
1857  */
1858  }
1859  archprintf(fout, "\\.\n\n\n");
1860 
1861  if (ret == -2)
1862  {
1863  /* copy data transfer failed */
1864  write_msg(NULL, "Dumping the contents of table \"%s\" failed: PQgetCopyData() failed.\n", classname);
1865  write_msg(NULL, "Error message from server: %s", PQerrorMessage(conn));
1866  write_msg(NULL, "The command was: %s\n", q->data);
1867  exit_nicely(1);
1868  }
1869 
1870  /* Check command status and return to normal libpq state */
1871  res = PQgetResult(conn);
1872  if (PQresultStatus(res) != PGRES_COMMAND_OK)
1873  {
1874  write_msg(NULL, "Dumping the contents of table \"%s\" failed: PQgetResult() failed.\n", classname);
1875  write_msg(NULL, "Error message from server: %s", PQerrorMessage(conn));
1876  write_msg(NULL, "The command was: %s\n", q->data);
1877  exit_nicely(1);
1878  }
1879  PQclear(res);
1880 
1881  /* Do this to ensure we've pumped libpq back to idle state */
1882  if (PQgetResult(conn) != NULL)
1883  write_msg(NULL, "WARNING: unexpected extra results during COPY of table \"%s\"\n",
1884  classname);
1885 
1886  destroyPQExpBuffer(q);
1887  return 1;
1888 }
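/*
 * Editor's illustration (not part of pg_dump.c): a minimal sketch of the
 * throttling scheme described in the THROTTLE comment above, assuming a
 * hypothetical "throttle_ratio" setting (ms of sleep per ms of work) and the
 * POSIX gettimeofday()/select() calls mentioned there.  pg_dump itself
 * implements no such throttle; the proposal was dropped.
 */
#ifdef THROTTLE_SKETCH
#include <sys/time.h>
#include <sys/select.h>

static struct timeval last_sleep_time;	/* zero until the first sleep */

static void
maybe_throttle(double throttle_ratio)
{
	struct timeval now;
	long		work_ms;
	long		sleep_ms;

	if (throttle_ratio <= 0.0)
		return;

	/* See how long since the last sleep */
	gettimeofday(&now, NULL);
	work_ms = (now.tv_sec - last_sleep_time.tv_sec) * 1000L +
		(now.tv_usec - last_sleep_time.tv_usec) / 1000L;

	/* Work out how long to sleep, based on the ratio */
	sleep_ms = (long) (work_ms * throttle_ratio);

	if (sleep_ms > 100)			/* preset minimum sleep time of 100ms */
	{
		struct timeval tvi;

		tvi.tv_sec = sleep_ms / 1000L;
		tvi.tv_usec = (sleep_ms % 1000L) * 1000L;
		select(0, NULL, NULL, NULL, &tvi);	/* sub-second sleep */
		gettimeofday(&last_sleep_time, NULL);	/* reset timer */
	}
}
#endif							/* THROTTLE_SKETCH */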
1889 
1890 /*
1891  * Dump table data using INSERT commands.
1892  *
1893  * Caution: when we restore from an archive file direct to database, the
1894  * INSERT commands emitted by this function have to be parsed by
1895  * pg_backup_db.c's ExecuteSimpleCommands(), which will not handle comments,
1896  * E'' strings, or dollar-quoted strings. So don't emit anything like that.
1897  */
1898 static int
1899 dumpTableData_insert(Archive *fout, void *dcontext)
1900 {
1901  TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
1902  TableInfo *tbinfo = tdinfo->tdtable;
1903  DumpOptions *dopt = fout->dopt;
1904  PQExpBuffer q = createPQExpBuffer();
1905  PQExpBuffer insertStmt = NULL;
1906  PGresult *res;
1907  int tuple;
1908  int nfields;
1909  int field;
1910 
1911  appendPQExpBuffer(q, "DECLARE _pg_dump_cursor CURSOR FOR "
1912  "SELECT * FROM ONLY %s",
1913  fmtQualifiedDumpable(tbinfo));
1914  if (tdinfo->filtercond)
1915  appendPQExpBuffer(q, " %s", tdinfo->filtercond);
1916 
1917  ExecuteSqlStatement(fout, q->data);
1918 
1919  while (1)
1920  {
1921  res = ExecuteSqlQuery(fout, "FETCH 100 FROM _pg_dump_cursor",
1922  PGRES_TUPLES_OK);
1923  nfields = PQnfields(res);
1924  for (tuple = 0; tuple < PQntuples(res); tuple++)
1925  {
1926  /*
1927  * First time through, we build as much of the INSERT statement as
1928  * possible in "insertStmt", which we can then just print for each
1929  * line. If the table happens to have zero columns then this will
1930  * be a complete statement, otherwise it will end in "VALUES(" and
1931  * be ready to have the row's column values appended.
1932  */
1933  if (insertStmt == NULL)
1934  {
1935  TableInfo *targettab;
1936 
1937  insertStmt = createPQExpBuffer();
1938 
1939  /*
1940  * When load-via-partition-root is set, get the root table
1941  * name for the partition table, so that we can reload data
1942  * through the root table.
1943  */
1944  if (dopt->load_via_partition_root && tbinfo->ispartition)
1945  targettab = getRootTableInfo(tbinfo);
1946  else
1947  targettab = tbinfo;
1948 
1949  appendPQExpBuffer(insertStmt, "INSERT INTO %s ",
1950  fmtQualifiedDumpable(targettab));
1951 
1952  /* corner case for zero-column table */
1953  if (nfields == 0)
1954  {
1955  appendPQExpBufferStr(insertStmt, "DEFAULT VALUES;\n");
1956  }
1957  else
1958  {
1959  /* append the list of column names if required */
1960  if (dopt->column_inserts)
1961  {
1962  appendPQExpBufferChar(insertStmt, '(');
1963  for (field = 0; field < nfields; field++)
1964  {
1965  if (field > 0)
1966  appendPQExpBufferStr(insertStmt, ", ");
1967  appendPQExpBufferStr(insertStmt,
1968  fmtId(PQfname(res, field)));
1969  }
1970  appendPQExpBufferStr(insertStmt, ") ");
1971  }
1972 
1973  if (tbinfo->needs_override)
1974  appendPQExpBufferStr(insertStmt, "OVERRIDING SYSTEM VALUE ");
1975 
1976  appendPQExpBufferStr(insertStmt, "VALUES (");
1977  }
1978  }
1979 
1980  archputs(insertStmt->data, fout);
1981 
1982  /* if it is zero-column table then we're done */
1983  if (nfields == 0)
1984  continue;
1985 
1986  for (field = 0; field < nfields; field++)
1987  {
1988  if (field > 0)
1989  archputs(", ", fout);
1990  if (PQgetisnull(res, tuple, field))
1991  {
1992  archputs("NULL", fout);
1993  continue;
1994  }
1995 
1996  /* XXX This code is partially duplicated in ruleutils.c */
1997  switch (PQftype(res, field))
1998  {
1999  case INT2OID:
2000  case INT4OID:
2001  case INT8OID:
2002  case OIDOID:
2003  case FLOAT4OID:
2004  case FLOAT8OID:
2005  case NUMERICOID:
2006  {
2007  /*
2008  * These types are printed without quotes unless
2009  * they contain values that aren't accepted by the
2010  * scanner unquoted (e.g., 'NaN'). Note that
2011  * strtod() and friends might accept NaN, so we
2012  * can't use that to test.
2013  *
2014  * In reality we only need to defend against
2015  * infinity and NaN, so we need not get too crazy
2016  * about pattern matching here.
2017  */
2018  const char *s = PQgetvalue(res, tuple, field);
2019 
2020  if (strspn(s, "0123456789 +-eE.") == strlen(s))
2021  archputs(s, fout);
2022  else
2023  archprintf(fout, "'%s'", s);
2024  }
2025  break;
2026 
2027  case BITOID:
2028  case VARBITOID:
2029  archprintf(fout, "B'%s'",
2030  PQgetvalue(res, tuple, field));
2031  break;
2032 
2033  case BOOLOID:
2034  if (strcmp(PQgetvalue(res, tuple, field), "t") == 0)
2035  archputs("true", fout);
2036  else
2037  archputs("false", fout);
2038  break;
2039 
2040  default:
2041  /* All other types are printed as string literals. */
2042  resetPQExpBuffer(q);
2043  appendStringLiteralAH(q,
2044  PQgetvalue(res, tuple, field),
2045  fout);
2046  archputs(q->data, fout);
2047  break;
2048  }
2049  }
2050  archputs(");\n", fout);
2051  }
2052 
2053  if (PQntuples(res) <= 0)
2054  {
2055  PQclear(res);
2056  break;
2057  }
2058  PQclear(res);
2059  }
2060 
2061  archputs("\n\n", fout);
2062 
2063  ExecuteSqlStatement(fout, "CLOSE _pg_dump_cursor");
2064 
2065  destroyPQExpBuffer(q);
2066  if (insertStmt != NULL)
2067  destroyPQExpBuffer(insertStmt);
2068 
2069  return 1;
2070 }
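/*
 * Editor's note (illustration, not emitted by this file): for a hypothetical
 * table public.t1(id int, val text), dumpTableData_insert() with
 * --column-inserts produces lines such as
 *
 *     INSERT INTO public.t1 (id, val) VALUES (1, 'one');
 *     INSERT INTO public.t1 (id, val) VALUES (2, NULL);
 *
 * i.e. only plain single-quoted literals, per the caution above about
 * ExecuteSimpleCommands() during direct-to-database restore.
 */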
2071 
2072 /*
2073  * getRootTableInfo:
2074  * get the root TableInfo for the given partition table.
2075  */
2076 static TableInfo *
2077 getRootTableInfo(TableInfo *tbinfo)
2078 {
2079  TableInfo *parentTbinfo;
2080 
2081  Assert(tbinfo->ispartition);
2082  Assert(tbinfo->numParents == 1);
2083 
2084  parentTbinfo = tbinfo->parents[0];
2085  while (parentTbinfo->ispartition)
2086  {
2087  Assert(parentTbinfo->numParents == 1);
2088  parentTbinfo = parentTbinfo->parents[0];
2089  }
2090 
2091  return parentTbinfo;
2092 }
2093 
2094 /*
2095  * dumpTableData -
2096  * dump the contents of a single table
2097  *
2098  * Actually, this just makes an ArchiveEntry for the table contents.
2099  */
2100 static void
2101 dumpTableData(Archive *fout, TableDataInfo *tdinfo)
2102 {
2103  DumpOptions *dopt = fout->dopt;
2104  TableInfo *tbinfo = tdinfo->tdtable;
2105  PQExpBuffer copyBuf = createPQExpBuffer();
2106  PQExpBuffer clistBuf = createPQExpBuffer();
2107  DataDumperPtr dumpFn;
2108  char *copyStmt;
2109  const char *copyFrom;
2110 
2111  if (!dopt->dump_inserts)
2112  {
2113  /* Dump/restore using COPY */
2114  dumpFn = dumpTableData_copy;
2115 
2116  /*
2117  * When load-via-partition-root is set, get the root table name for
2118  * the partition table, so that we can reload data through the root
2119  * table.
2120  */
2121  if (dopt->load_via_partition_root && tbinfo->ispartition)
2122  {
2123  TableInfo *parentTbinfo;
2124 
2125  parentTbinfo = getRootTableInfo(tbinfo);
2126  copyFrom = fmtQualifiedDumpable(parentTbinfo);
2127  }
2128  else
2129  copyFrom = fmtQualifiedDumpable(tbinfo);
2130 
2131  /* must use 2 steps here 'cause fmtId is nonreentrant */
2132  appendPQExpBuffer(copyBuf, "COPY %s ",
2133  copyFrom);
2134  appendPQExpBuffer(copyBuf, "%s %sFROM stdin;\n",
2135  fmtCopyColumnList(tbinfo, clistBuf),
2136  (tdinfo->oids && tbinfo->hasoids) ? "WITH OIDS " : "");
2137  copyStmt = copyBuf->data;
2138  }
2139  else
2140  {
2141  /* Restore using INSERT */
2142  dumpFn = dumpTableData_insert;
2143  copyStmt = NULL;
2144  }
2145 
2146  /*
2147  * Note: although the TableDataInfo is a full DumpableObject, we treat its
2148  * dependency on its table as "special" and pass it to ArchiveEntry now.
2149  * See comments for BuildArchiveDependencies.
2150  */
2151  if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2152  ArchiveEntry(fout, tdinfo->dobj.catId, tdinfo->dobj.dumpId,
2153  tbinfo->dobj.name, tbinfo->dobj.namespace->dobj.name,
2154  NULL, tbinfo->rolname,
2155  false, "TABLE DATA", SECTION_DATA,
2156  "", "", copyStmt,
2157  &(tbinfo->dobj.dumpId), 1,
2158  dumpFn, tdinfo);
2159 
2160  destroyPQExpBuffer(copyBuf);
2161  destroyPQExpBuffer(clistBuf);
2162 }
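/*
 * Editor's note (illustration): for the COPY path, the copyStmt stored in
 * the archive entry for a hypothetical table public.t1(id, val) would read
 * roughly
 *
 *     COPY public.t1 (id, val) FROM stdin;
 *
 * with "WITH OIDS " inserted before FROM when both --oids and a table that
 * has OIDs are involved; dumpTableData_copy() later supplies the matching
 * data stream.
 */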
2163 
2164 /*
2165  * refreshMatViewData -
2166  * load or refresh the contents of a single materialized view
2167  *
2168  * Actually, this just makes an ArchiveEntry for the REFRESH MATERIALIZED VIEW
2169  * statement.
2170  */
2171 static void
2172 refreshMatViewData(Archive *fout, TableDataInfo *tdinfo)
2173 {
2174  TableInfo *tbinfo = tdinfo->tdtable;
2175  PQExpBuffer q;
2176 
2177  /* If the materialized view is not flagged as populated, skip this. */
2178  if (!tbinfo->relispopulated)
2179  return;
2180 
2181  q = createPQExpBuffer();
2182 
2183  appendPQExpBuffer(q, "REFRESH MATERIALIZED VIEW %s;\n",
2184  fmtQualifiedDumpable(tbinfo));
2185 
2186  if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2187  ArchiveEntry(fout,
2188  tdinfo->dobj.catId, /* catalog ID */
2189  tdinfo->dobj.dumpId, /* dump ID */
2190  tbinfo->dobj.name, /* Name */
2191  tbinfo->dobj.namespace->dobj.name, /* Namespace */
2192  NULL, /* Tablespace */
2193  tbinfo->rolname, /* Owner */
2194  false, /* with oids */
2195  "MATERIALIZED VIEW DATA", /* Desc */
2196  SECTION_POST_DATA, /* Section */
2197  q->data, /* Create */
2198  "", /* Del */
2199  NULL, /* Copy */
2200  tdinfo->dobj.dependencies, /* Deps */
2201  tdinfo->dobj.nDeps, /* # Deps */
2202  NULL, /* Dumper */
2203  NULL); /* Dumper Arg */
2204 
2205  destroyPQExpBuffer(q);
2206 }
2207 
2208 /*
2209  * getTableData -
2210  * set up dumpable objects representing the contents of tables
2211  */
2212 static void
2213 getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, bool oids, char relkind)
2214 {
2215  int i;
2216 
2217  for (i = 0; i < numTables; i++)
2218  {
2219  if (tblinfo[i].dobj.dump & DUMP_COMPONENT_DATA &&
2220  (!relkind || tblinfo[i].relkind == relkind))
2221  makeTableDataInfo(dopt, &(tblinfo[i]), oids);
2222  }
2223 }
2224 
2225 /*
2226  * Make a dumpable object for the data of this specific table
2227  *
2228  * Note: we make a TableDataInfo if and only if we are going to dump the
2229  * table data; the "dump" flag in such objects isn't used.
2230  */
2231 static void
2232 makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo, bool oids)
2233 {
2234  TableDataInfo *tdinfo;
2235 
2236  /*
2237  * Nothing to do if we already decided to dump the table. This will
2238  * happen for "config" tables.
2239  */
2240  if (tbinfo->dataObj != NULL)
2241  return;
2242 
2243  /* Skip VIEWs (no data to dump) */
2244  if (tbinfo->relkind == RELKIND_VIEW)
2245  return;
2246  /* Skip FOREIGN TABLEs (no data to dump) */
2247  if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2248  return;
2249  /* Skip partitioned tables (data in partitions) */
2250  if (tbinfo->relkind == RELKIND_PARTITIONED_TABLE)
2251  return;
2252 
2253  /* Don't dump data in unlogged tables, if so requested */
2254  if (tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED &&
2255  dopt->no_unlogged_table_data)
2256  return;
2257 
2258  /* Check that the data is not explicitly excluded */
2259  if (simple_oid_list_member(&tabledata_exclude_oids,
2260  tbinfo->dobj.catId.oid))
2261  return;
2262 
2263  /* OK, let's dump it */
2264  tdinfo = (TableDataInfo *) pg_malloc(sizeof(TableDataInfo));
2265 
2266  if (tbinfo->relkind == RELKIND_MATVIEW)
2267  tdinfo->dobj.objType = DO_REFRESH_MATVIEW;
2268  else if (tbinfo->relkind == RELKIND_SEQUENCE)
2269  tdinfo->dobj.objType = DO_SEQUENCE_SET;
2270  else
2271  tdinfo->dobj.objType = DO_TABLE_DATA;
2272 
2273  /*
2274  * Note: use tableoid 0 so that this object won't be mistaken for
2275  * something that pg_depend entries apply to.
2276  */
2277  tdinfo->dobj.catId.tableoid = 0;
2278  tdinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
2279  AssignDumpId(&tdinfo->dobj);
2280  tdinfo->dobj.name = tbinfo->dobj.name;
2281  tdinfo->dobj.namespace = tbinfo->dobj.namespace;
2282  tdinfo->tdtable = tbinfo;
2283  tdinfo->oids = oids;
2284  tdinfo->filtercond = NULL; /* might get set later */
2285  addObjectDependency(&tdinfo->dobj, tbinfo->dobj.dumpId);
2286 
2287  tbinfo->dataObj = tdinfo;
2288 }
2289 
2290 /*
2291  * The refresh for a materialized view must be dependent on the refresh for
2292  * any materialized view that this one is dependent on.
2293  *
2294  * This must be called after all the objects are created, but before they are
2295  * sorted.
2296  */
2297 static void
2298 buildMatViewRefreshDependencies(Archive *fout)
2299 {
2300  PQExpBuffer query;
2301  PGresult *res;
2302  int ntups,
2303  i;
2304  int i_classid,
2305  i_objid,
2306  i_refobjid;
2307 
2308  /* No Mat Views before 9.3. */
2309  if (fout->remoteVersion < 90300)
2310  return;
2311 
2312  query = createPQExpBuffer();
2313 
2314  appendPQExpBufferStr(query, "WITH RECURSIVE w AS "
2315  "( "
2316  "SELECT d1.objid, d2.refobjid, c2.relkind AS refrelkind "
2317  "FROM pg_depend d1 "
2318  "JOIN pg_class c1 ON c1.oid = d1.objid "
2319  "AND c1.relkind = " CppAsString2(RELKIND_MATVIEW)
2320  " JOIN pg_rewrite r1 ON r1.ev_class = d1.objid "
2321  "JOIN pg_depend d2 ON d2.classid = 'pg_rewrite'::regclass "
2322  "AND d2.objid = r1.oid "
2323  "AND d2.refobjid <> d1.objid "
2324  "JOIN pg_class c2 ON c2.oid = d2.refobjid "
2325  "AND c2.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
2326  CppAsString2(RELKIND_VIEW) ") "
2327  "WHERE d1.classid = 'pg_class'::regclass "
2328  "UNION "
2329  "SELECT w.objid, d3.refobjid, c3.relkind "
2330  "FROM w "
2331  "JOIN pg_rewrite r3 ON r3.ev_class = w.refobjid "
2332  "JOIN pg_depend d3 ON d3.classid = 'pg_rewrite'::regclass "
2333  "AND d3.objid = r3.oid "
2334  "AND d3.refobjid <> w.refobjid "
2335  "JOIN pg_class c3 ON c3.oid = d3.refobjid "
2336  "AND c3.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
2337  CppAsString2(RELKIND_VIEW) ") "
2338  ") "
2339  "SELECT 'pg_class'::regclass::oid AS classid, objid, refobjid "
2340  "FROM w "
2341  "WHERE refrelkind = " CppAsString2(RELKIND_MATVIEW));
2342 
2343  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
2344 
2345  ntups = PQntuples(res);
2346 
2347  i_classid = PQfnumber(res, "classid");
2348  i_objid = PQfnumber(res, "objid");
2349  i_refobjid = PQfnumber(res, "refobjid");
2350 
2351  for (i = 0; i < ntups; i++)
2352  {
2353  CatalogId objId;
2354  CatalogId refobjId;
2355  DumpableObject *dobj;
2356  DumpableObject *refdobj;
2357  TableInfo *tbinfo;
2358  TableInfo *reftbinfo;
2359 
2360  objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
2361  objId.oid = atooid(PQgetvalue(res, i, i_objid));
2362  refobjId.tableoid = objId.tableoid;
2363  refobjId.oid = atooid(PQgetvalue(res, i, i_refobjid));
2364 
2365  dobj = findObjectByCatalogId(objId);
2366  if (dobj == NULL)
2367  continue;
2368 
2369  Assert(dobj->objType == DO_TABLE);
2370  tbinfo = (TableInfo *) dobj;
2371  Assert(tbinfo->relkind == RELKIND_MATVIEW);
2372  dobj = (DumpableObject *) tbinfo->dataObj;
2373  if (dobj == NULL)
2374  continue;
2375  Assert(dobj->objType == DO_REFRESH_MATVIEW);
2376 
2377  refdobj = findObjectByCatalogId(refobjId);
2378  if (refdobj == NULL)
2379  continue;
2380 
2381  Assert(refdobj->objType == DO_TABLE);
2382  reftbinfo = (TableInfo *) refdobj;
2383  Assert(reftbinfo->relkind == RELKIND_MATVIEW);
2384  refdobj = (DumpableObject *) reftbinfo->dataObj;
2385  if (refdobj == NULL)
2386  continue;
2387  Assert(refdobj->objType == DO_REFRESH_MATVIEW);
2388 
2389  addObjectDependency(dobj, refdobj->dumpId);
2390 
2391  if (!reftbinfo->relispopulated)
2392  tbinfo->relispopulated = false;
2393  }
2394 
2395  PQclear(res);
2396 
2397  destroyPQExpBuffer(query);
2398 }
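/*
 * Editor's note (illustration): if a hypothetical materialized view mv2
 * selects from materialized view mv1, directly or through an intermediate
 * regular view, the recursive query above returns a row linking mv2 to mv1.
 * The loop then makes mv2's REFRESH entry depend on mv1's, so mv1 is
 * refreshed first; and if mv1 is not populated, mv2 is treated as
 * unpopulated as well.
 */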
2399 
2400 /*
2401  * getTableDataFKConstraints -
2402  * add dump-order dependencies reflecting foreign key constraints
2403  *
2404  * This code is executed only in a data-only dump --- in schema+data dumps
2405  * we handle foreign key issues by not creating the FK constraints until
2406  * after the data is loaded. In a data-only dump, however, we want to
2407  * order the table data objects in such a way that a table's referenced
2408  * tables are restored first. (In the presence of circular references or
2409  * self-references this may be impossible; we'll detect and complain about
2410  * that during the dependency sorting step.)
2411  */
2412 static void
2413 getTableDataFKConstraints(void)
2414 {
2415  DumpableObject **dobjs;
2416  int numObjs;
2417  int i;
2418 
2419  /* Search through all the dumpable objects for FK constraints */
2420  getDumpableObjects(&dobjs, &numObjs);
2421  for (i = 0; i < numObjs; i++)
2422  {
2423  if (dobjs[i]->objType == DO_FK_CONSTRAINT)
2424  {
2425  ConstraintInfo *cinfo = (ConstraintInfo *) dobjs[i];
2426  TableInfo *ftable;
2427 
2428  /* Not interesting unless both tables are to be dumped */
2429  if (cinfo->contable == NULL ||
2430  cinfo->contable->dataObj == NULL)
2431  continue;
2432  ftable = findTableByOid(cinfo->confrelid);
2433  if (ftable == NULL ||
2434  ftable->dataObj == NULL)
2435  continue;
2436 
2437  /*
2438  * Okay, make referencing table's TABLE_DATA object depend on the
2439  * referenced table's TABLE_DATA object.
2440  */
2441  addObjectDependency(&cinfo->contable->dataObj->dobj,
2442  ftable->dataObj->dobj.dumpId);
2443  }
2444  }
2445  free(dobjs);
2446 }
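/*
 * Editor's note (illustration): with a hypothetical schema in which
 * orders.customer_id REFERENCES customers(id), a data-only dump gives the
 * "orders" TABLE DATA object a dependency on the "customers" TABLE DATA
 * object, so the customers rows are restored before the orders rows and the
 * foreign key is never violated during the load.
 */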
2447 
2448 
2449 /*
2450  * guessConstraintInheritance:
2451  * In pre-8.4 databases, we can't tell for certain which constraints
2452  * are inherited. We assume a CHECK constraint is inherited if its name
2453  * matches the name of any constraint in the parent. Originally this code
2454  * tried to compare the expression texts, but that can fail for various
2455  * reasons --- for example, if the parent and child tables are in different
2456  * schemas, reverse-listing of function calls may produce different text
2457  * (schema-qualified or not) depending on search path.
2458  *
2459  * In 8.4 and up we can rely on the conislocal field to decide which
2460  * constraints must be dumped; much safer.
2461  *
2462  * This function assumes all conislocal flags were initialized to true.
2463  * It clears the flag on anything that seems to be inherited.
2464  */
2465 static void
2466 guessConstraintInheritance(TableInfo *tblinfo, int numTables)
2467 {
2468  int i,
2469  j,
2470  k;
2471 
2472  for (i = 0; i < numTables; i++)
2473  {
2474  TableInfo *tbinfo = &(tblinfo[i]);
2475  int numParents;
2476  TableInfo **parents;
2477  TableInfo *parent;
2478 
2479  /* Sequences and views never have parents */
2480  if (tbinfo->relkind == RELKIND_SEQUENCE ||
2481  tbinfo->relkind == RELKIND_VIEW)
2482  continue;
2483 
2484  /* Don't bother computing anything for non-target tables, either */
2485  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
2486  continue;
2487 
2488  numParents = tbinfo->numParents;
2489  parents = tbinfo->parents;
2490 
2491  if (numParents == 0)
2492  continue; /* nothing to see here, move along */
2493 
2494  /* scan for inherited CHECK constraints */
2495  for (j = 0; j < tbinfo->ncheck; j++)
2496  {
2497  ConstraintInfo *constr;
2498 
2499  constr = &(tbinfo->checkexprs[j]);
2500 
2501  for (k = 0; k < numParents; k++)
2502  {
2503  int l;
2504 
2505  parent = parents[k];
2506  for (l = 0; l < parent->ncheck; l++)
2507  {
2508  ConstraintInfo *pconstr = &(parent->checkexprs[l]);
2509 
2510  if (strcmp(pconstr->dobj.name, constr->dobj.name) == 0)
2511  {
2512  constr->conislocal = false;
2513  break;
2514  }
2515  }
2516  if (!constr->conislocal)
2517  break;
2518  }
2519  }
2520  }
2521 }
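/*
 * Editor's note (illustration): e.g. if a pre-8.4 parent table p has a CHECK
 * constraint named "p_a_check" and its child c shows a constraint with the
 * same name, the loop above clears conislocal on c's copy, so the constraint
 * is not dumped a second time for the child.
 */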
2522 
2523 
2524 /*
2525  * dumpDatabase:
2526  * dump the database definition
2527  */
2528 static void
2529 dumpDatabase(Archive *fout)
2530 {
2531  DumpOptions *dopt = fout->dopt;
2532  PQExpBuffer dbQry = createPQExpBuffer();
2533  PQExpBuffer delQry = createPQExpBuffer();
2534  PQExpBuffer creaQry = createPQExpBuffer();
2535  PQExpBuffer labelq = createPQExpBuffer();
2536  PGconn *conn = GetConnection(fout);
2537  PGresult *res;
2538  int i_tableoid,
2539  i_oid,
2540  i_datname,
2541  i_dba,
2542  i_encoding,
2543  i_collate,
2544  i_ctype,
2545  i_frozenxid,
2546  i_minmxid,
2547  i_datacl,
2548  i_rdatacl,
2549  i_datistemplate,
2550  i_datconnlimit,
2551  i_tablespace;
2552  CatalogId dbCatId;
2553  DumpId dbDumpId;
2554  const char *datname,
2555  *dba,
2556  *encoding,
2557  *collate,
2558  *ctype,
2559  *datacl,
2560  *rdatacl,
2561  *datistemplate,
2562  *datconnlimit,
2563  *tablespace;
2564  uint32 frozenxid,
2565  minmxid;
2566  char *qdatname;
2567 
2568  if (g_verbose)
2569  write_msg(NULL, "saving database definition\n");
2570 
2571  /* Fetch the database-level properties for this database */
2572  if (fout->remoteVersion >= 90600)
2573  {
2574  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2575  "(%s datdba) AS dba, "
2576  "pg_encoding_to_char(encoding) AS encoding, "
2577  "datcollate, datctype, datfrozenxid, datminmxid, "
2578  "(SELECT array_agg(acl ORDER BY acl::text COLLATE \"C\") FROM ( "
2579  " SELECT unnest(coalesce(datacl,acldefault('d',datdba))) AS acl "
2580  " EXCEPT SELECT unnest(acldefault('d',datdba))) as datacls)"
2581  " AS datacl, "
2582  "(SELECT array_agg(acl ORDER BY acl::text COLLATE \"C\") FROM ( "
2583  " SELECT unnest(acldefault('d',datdba)) AS acl "
2584  " EXCEPT SELECT unnest(coalesce(datacl,acldefault('d',datdba)))) as rdatacls)"
2585  " AS rdatacl, "
2586  "datistemplate, datconnlimit, "
2587  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2588  "shobj_description(oid, 'pg_database') AS description "
2589 
2590  "FROM pg_database "
2591  "WHERE datname = current_database()",
2592  username_subquery);
2593  }
2594  else if (fout->remoteVersion >= 90300)
2595  {
2596  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2597  "(%s datdba) AS dba, "
2598  "pg_encoding_to_char(encoding) AS encoding, "
2599  "datcollate, datctype, datfrozenxid, datminmxid, "
2600  "datacl, '' as rdatacl, datistemplate, datconnlimit, "
2601  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2602  "shobj_description(oid, 'pg_database') AS description "
2603 
2604  "FROM pg_database "
2605  "WHERE datname = current_database()",
2606  username_subquery);
2607  }
2608  else if (fout->remoteVersion >= 80400)
2609  {
2610  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2611  "(%s datdba) AS dba, "
2612  "pg_encoding_to_char(encoding) AS encoding, "
2613  "datcollate, datctype, datfrozenxid, 0 AS datminmxid, "
2614  "datacl, '' as rdatacl, datistemplate, datconnlimit, "
2615  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2616  "shobj_description(oid, 'pg_database') AS description "
2617 
2618  "FROM pg_database "
2619  "WHERE datname = current_database()",
2620  username_subquery);
2621  }
2622  else if (fout->remoteVersion >= 80200)
2623  {
2624  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2625  "(%s datdba) AS dba, "
2626  "pg_encoding_to_char(encoding) AS encoding, "
2627  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2628  "datacl, '' as rdatacl, datistemplate, datconnlimit, "
2629  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2630  "shobj_description(oid, 'pg_database') AS description "
2631 
2632  "FROM pg_database "
2633  "WHERE datname = current_database()",
2634  username_subquery);
2635  }
2636  else
2637  {
2638  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2639  "(%s datdba) AS dba, "
2640  "pg_encoding_to_char(encoding) AS encoding, "
2641  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2642  "datacl, '' as rdatacl, datistemplate, "
2643  "-1 as datconnlimit, "
2644  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace "
2645  "FROM pg_database "
2646  "WHERE datname = current_database()",
2647  username_subquery);
2648  }
2649 
2650  res = ExecuteSqlQueryForSingleRow(fout, dbQry->data);
2651 
2652  i_tableoid = PQfnumber(res, "tableoid");
2653  i_oid = PQfnumber(res, "oid");
2654  i_datname = PQfnumber(res, "datname");
2655  i_dba = PQfnumber(res, "dba");
2656  i_encoding = PQfnumber(res, "encoding");
2657  i_collate = PQfnumber(res, "datcollate");
2658  i_ctype = PQfnumber(res, "datctype");
2659  i_frozenxid = PQfnumber(res, "datfrozenxid");
2660  i_minmxid = PQfnumber(res, "datminmxid");
2661  i_datacl = PQfnumber(res, "datacl");
2662  i_rdatacl = PQfnumber(res, "rdatacl");
2663  i_datistemplate = PQfnumber(res, "datistemplate");
2664  i_datconnlimit = PQfnumber(res, "datconnlimit");
2665  i_tablespace = PQfnumber(res, "tablespace");
2666 
2667  dbCatId.tableoid = atooid(PQgetvalue(res, 0, i_tableoid));
2668  dbCatId.oid = atooid(PQgetvalue(res, 0, i_oid));
2669  datname = PQgetvalue(res, 0, i_datname);
2670  dba = PQgetvalue(res, 0, i_dba);
2671  encoding = PQgetvalue(res, 0, i_encoding);
2672  collate = PQgetvalue(res, 0, i_collate);
2673  ctype = PQgetvalue(res, 0, i_ctype);
2674  frozenxid = atooid(PQgetvalue(res, 0, i_frozenxid));
2675  minmxid = atooid(PQgetvalue(res, 0, i_minmxid));
2676  datacl = PQgetvalue(res, 0, i_datacl);
2677  rdatacl = PQgetvalue(res, 0, i_rdatacl);
2678  datistemplate = PQgetvalue(res, 0, i_datistemplate);
2679  datconnlimit = PQgetvalue(res, 0, i_datconnlimit);
2680  tablespace = PQgetvalue(res, 0, i_tablespace);
2681 
2682  qdatname = pg_strdup(fmtId(datname));
2683 
2684  /*
2685  * Prepare the CREATE DATABASE command. We must specify encoding, locale,
2686  * and tablespace since those can't be altered later. Other DB properties
2687  * are left to the DATABASE PROPERTIES entry, so that they can be applied
2688  * after reconnecting to the target DB.
2689  */
2690  appendPQExpBuffer(creaQry, "CREATE DATABASE %s WITH TEMPLATE = template0",
2691  qdatname);
2692  if (strlen(encoding) > 0)
2693  {
2694  appendPQExpBufferStr(creaQry, " ENCODING = ");
2695  appendStringLiteralAH(creaQry, encoding, fout);
2696  }
2697  if (strlen(collate) > 0)
2698  {
2699  appendPQExpBufferStr(creaQry, " LC_COLLATE = ");
2700  appendStringLiteralAH(creaQry, collate, fout);
2701  }
2702  if (strlen(ctype) > 0)
2703  {
2704  appendPQExpBufferStr(creaQry, " LC_CTYPE = ");
2705  appendStringLiteralAH(creaQry, ctype, fout);
2706  }
2707 
2708  /*
2709  * Note: looking at dopt->outputNoTablespaces here is completely the wrong
2710  * thing; the decision whether to specify a tablespace should be left till
2711  * pg_restore, so that pg_restore --no-tablespaces applies. Ideally we'd
2712  * label the DATABASE entry with the tablespace and let the normal
2713  * tablespace selection logic work ... but CREATE DATABASE doesn't pay
2714  * attention to default_tablespace, so that won't work.
2715  */
2716  if (strlen(tablespace) > 0 && strcmp(tablespace, "pg_default") != 0 &&
2717  !dopt->outputNoTablespaces)
2718  appendPQExpBuffer(creaQry, " TABLESPACE = %s",
2719  fmtId(tablespace));
2720  appendPQExpBufferStr(creaQry, ";\n");
2721 
2722  appendPQExpBuffer(delQry, "DROP DATABASE %s;\n",
2723  qdatname);
2724 
2725  dbDumpId = createDumpId();
2726 
2727  ArchiveEntry(fout,
2728  dbCatId, /* catalog ID */
2729  dbDumpId, /* dump ID */
2730  datname, /* Name */
2731  NULL, /* Namespace */
2732  NULL, /* Tablespace */
2733  dba, /* Owner */
2734  false, /* with oids */
2735  "DATABASE", /* Desc */
2736  SECTION_PRE_DATA, /* Section */
2737  creaQry->data, /* Create */
2738  delQry->data, /* Del */
2739  NULL, /* Copy */
2740  NULL, /* Deps */
2741  0, /* # Deps */
2742  NULL, /* Dumper */
2743  NULL); /* Dumper Arg */
2744 
2745  /* Compute correct tag for archive entry */
2746  appendPQExpBuffer(labelq, "DATABASE %s", qdatname);
2747 
2748  /* Dump DB comment if any */
2749  if (fout->remoteVersion >= 80200)
2750  {
2751  /*
2752  * 8.2 and up keep comments on shared objects in a shared table, so we
2753  * cannot use the dumpComment() code used for other database objects.
2754  * Be careful that the ArchiveEntry parameters match that function.
2755  */
2756  char *comment = PQgetvalue(res, 0, PQfnumber(res, "description"));
2757 
2758  if (comment && *comment && !dopt->no_comments)
2759  {
2760  resetPQExpBuffer(dbQry);
2761 
2762  /*
2763  * Generates warning when loaded into a differently-named
2764  * database.
2765  */
2766  appendPQExpBuffer(dbQry, "COMMENT ON DATABASE %s IS ", qdatname);
2767  appendStringLiteralAH(dbQry, comment, fout);
2768  appendPQExpBufferStr(dbQry, ";\n");
2769 
2770  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2771  labelq->data, NULL, NULL, dba,
2772  false, "COMMENT", SECTION_NONE,
2773  dbQry->data, "", NULL,
2774  &(dbDumpId), 1,
2775  NULL, NULL);
2776  }
2777  }
2778  else
2779  {
2780  dumpComment(fout, "DATABASE", qdatname, NULL, dba,
2781  dbCatId, 0, dbDumpId);
2782  }
2783 
2784  /* Dump DB security label, if enabled */
2785  if (!dopt->no_security_labels && fout->remoteVersion >= 90200)
2786  {
2787  PGresult *shres;
2788  PQExpBuffer seclabelQry;
2789 
2790  seclabelQry = createPQExpBuffer();
2791 
2792  buildShSecLabelQuery(conn, "pg_database", dbCatId.oid, seclabelQry);
2793  shres = ExecuteSqlQuery(fout, seclabelQry->data, PGRES_TUPLES_OK);
2794  resetPQExpBuffer(seclabelQry);
2795  emitShSecLabels(conn, shres, seclabelQry, "DATABASE", datname);
2796  if (seclabelQry->len > 0)
2797  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2798  labelq->data, NULL, NULL, dba,
2799  false, "SECURITY LABEL", SECTION_NONE,
2800  seclabelQry->data, "", NULL,
2801  &(dbDumpId), 1,
2802  NULL, NULL);
2803  destroyPQExpBuffer(seclabelQry);
2804  PQclear(shres);
2805  }
2806 
2807  /*
2808  * Dump ACL if any. Note that we do not support initial privileges
2809  * (pg_init_privs) on databases.
2810  */
2811  dumpACL(fout, dbCatId, dbDumpId, "DATABASE",
2812  qdatname, NULL, NULL,
2813  dba, datacl, rdatacl, "", "");
2814 
2815  /*
2816  * Now construct a DATABASE PROPERTIES archive entry to restore any
2817  * non-default database-level properties. (The reason this must be
2818  * separate is that we cannot put any additional commands into the TOC
2819  * entry that has CREATE DATABASE. pg_restore would execute such a group
2820  * in an implicit transaction block, and the backend won't allow CREATE
2821  * DATABASE in that context.)
2822  */
2823  resetPQExpBuffer(creaQry);
2824  resetPQExpBuffer(delQry);
2825 
2826  if (strlen(datconnlimit) > 0 && strcmp(datconnlimit, "-1") != 0)
2827  appendPQExpBuffer(creaQry, "ALTER DATABASE %s CONNECTION LIMIT = %s;\n",
2828  qdatname, datconnlimit);
2829 
2830  if (strcmp(datistemplate, "t") == 0)
2831  {
2832  appendPQExpBuffer(creaQry, "ALTER DATABASE %s IS_TEMPLATE = true;\n",
2833  qdatname);
2834 
2835  /*
2836  * The backend won't accept DROP DATABASE on a template database. We
2837  * can deal with that by removing the template marking before the DROP
2838  * gets issued. We'd prefer to use ALTER DATABASE IF EXISTS here, but
2839  * since no such command is currently supported, fake it with a direct
2840  * UPDATE on pg_database.
2841  */
2842  appendPQExpBufferStr(delQry, "UPDATE pg_catalog.pg_database "
2843  "SET datistemplate = false WHERE datname = ");
2844  appendStringLiteralAH(delQry, datname, fout);
2845  appendPQExpBufferStr(delQry, ";\n");
2846  }
2847 
2848  /* Add database-specific SET options */
2849  dumpDatabaseConfig(fout, creaQry, datname, dbCatId.oid);
2850 
2851  /*
2852  * We stick this binary-upgrade query into the DATABASE PROPERTIES archive
2853  * entry, too, for lack of a better place.
2854  */
2855  if (dopt->binary_upgrade)
2856  {
2857  appendPQExpBufferStr(creaQry, "\n-- For binary upgrade, set datfrozenxid and datminmxid.\n");
2858  appendPQExpBuffer(creaQry, "UPDATE pg_catalog.pg_database\n"
2859  "SET datfrozenxid = '%u', datminmxid = '%u'\n"
2860  "WHERE datname = ",
2861  frozenxid, minmxid);
2862  appendStringLiteralAH(creaQry, datname, fout);
2863  appendPQExpBufferStr(creaQry, ";\n");
2864  }
2865 
2866  if (creaQry->len > 0)
2867  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2868  datname, NULL, NULL, dba,
2869  false, "DATABASE PROPERTIES", SECTION_PRE_DATA,
2870  creaQry->data, delQry->data, NULL,
2871  &(dbDumpId), 1,
2872  NULL, NULL);
2873 
2874  /*
2875  * pg_largeobject and pg_largeobject_metadata come from the old system
2876  * intact, so set their relfrozenxids and relminmxids.
2877  */
2878  if (dopt->binary_upgrade)
2879  {
2880  PGresult *lo_res;
2881  PQExpBuffer loFrozenQry = createPQExpBuffer();
2882  PQExpBuffer loOutQry = createPQExpBuffer();
2883  int i_relfrozenxid,
2884  i_relminmxid;
2885 
2886  /*
2887  * pg_largeobject
2888  */
2889  if (fout->remoteVersion >= 90300)
2890  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid\n"
2891  "FROM pg_catalog.pg_class\n"
2892  "WHERE oid = %u;\n",
2893  LargeObjectRelationId);
2894  else
2895  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid\n"
2896  "FROM pg_catalog.pg_class\n"
2897  "WHERE oid = %u;\n",
2898  LargeObjectRelationId);
2899 
2900  lo_res = ExecuteSqlQueryForSingleRow(fout, loFrozenQry->data);
2901 
2902  i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
2903  i_relminmxid = PQfnumber(lo_res, "relminmxid");
2904 
2905  appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, set pg_largeobject relfrozenxid and relminmxid\n");
2906  appendPQExpBuffer(loOutQry, "UPDATE pg_catalog.pg_class\n"
2907  "SET relfrozenxid = '%u', relminmxid = '%u'\n"
2908  "WHERE oid = %u;\n",
2909  atooid(PQgetvalue(lo_res, 0, i_relfrozenxid)),
2910  atooid(PQgetvalue(lo_res, 0, i_relminmxid)),
2911  LargeObjectRelationId);
2912  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2913  "pg_largeobject", NULL, NULL, "",
2914  false, "pg_largeobject", SECTION_PRE_DATA,
2915  loOutQry->data, "", NULL,
2916  NULL, 0,
2917  NULL, NULL);
2918 
2919  PQclear(lo_res);
2920 
2921  /*
2922  * pg_largeobject_metadata
2923  */
2924  if (fout->remoteVersion >= 90000)
2925  {
2926  resetPQExpBuffer(loFrozenQry);
2927  resetPQExpBuffer(loOutQry);
2928 
2929  if (fout->remoteVersion >= 90300)
2930  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid\n"
2931  "FROM pg_catalog.pg_class\n"
2932  "WHERE oid = %u;\n",
2933  LargeObjectMetadataRelationId);
2934  else
2935  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid\n"
2936  "FROM pg_catalog.pg_class\n"
2937  "WHERE oid = %u;\n",
2938  LargeObjectMetadataRelationId);
2939 
2940  lo_res = ExecuteSqlQueryForSingleRow(fout, loFrozenQry->data);
2941 
2942  i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
2943  i_relminmxid = PQfnumber(lo_res, "relminmxid");
2944 
2945  appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, set pg_largeobject_metadata relfrozenxid and relminmxid\n");
2946  appendPQExpBuffer(loOutQry, "UPDATE pg_catalog.pg_class\n"
2947  "SET relfrozenxid = '%u', relminmxid = '%u'\n"
2948  "WHERE oid = %u;\n",
2949  atooid(PQgetvalue(lo_res, 0, i_relfrozenxid)),
2950  atooid(PQgetvalue(lo_res, 0, i_relminmxid)),
2951  LargeObjectMetadataRelationId);
2952  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2953  "pg_largeobject_metadata", NULL, NULL, "",
2954  false, "pg_largeobject_metadata", SECTION_PRE_DATA,
2955  loOutQry->data, "", NULL,
2956  NULL, 0,
2957  NULL, NULL);
2958 
2959  PQclear(lo_res);
2960  }
2961 
2962  destroyPQExpBuffer(loFrozenQry);
2963  destroyPQExpBuffer(loOutQry);
2964  }
2965 
2966  PQclear(res);
2967 
2968  free(qdatname);
2969  destroyPQExpBuffer(dbQry);
2970  destroyPQExpBuffer(delQry);
2971  destroyPQExpBuffer(creaQry);
2972  destroyPQExpBuffer(labelq);
2973 }
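/*
 * Editor's note (illustration): for a hypothetical database "mydb", the
 * DATABASE entry built above would contain something like
 *
 *     CREATE DATABASE mydb WITH TEMPLATE = template0 ENCODING = 'UTF8'
 *         LC_COLLATE = 'en_US.UTF-8' LC_CTYPE = 'en_US.UTF-8';
 *
 * while CONNECTION LIMIT, IS_TEMPLATE, and ALTER DATABASE ... SET options
 * land in the separate DATABASE PROPERTIES entry, for the transaction-block
 * reason explained above.
 */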
2974 
2975 /*
2976  * Collect any database-specific or role-and-database-specific SET options
2977  * for this database, and append them to outbuf.
2978  */
2979 static void
2980 dumpDatabaseConfig(Archive *AH, PQExpBuffer outbuf,
2981  const char *dbname, Oid dboid)
2982 {
2983  PGconn *conn = GetConnection(AH);
2984  PQExpBuffer buf = createPQExpBuffer();
2985  PGresult *res;
2986  int count = 1;
2987 
2988  /*
2989  * First collect database-specific options. Pre-8.4 server versions lack
2990  * unnest(), so we do this the hard way by querying once per subscript.
2991  */
2992  for (;;)
2993  {
2994  if (AH->remoteVersion >= 90000)
2995  printfPQExpBuffer(buf, "SELECT setconfig[%d] FROM pg_db_role_setting "
2996  "WHERE setrole = 0 AND setdatabase = '%u'::oid",
2997  count, dboid);
2998  else
2999  printfPQExpBuffer(buf, "SELECT datconfig[%d] FROM pg_database WHERE oid = '%u'::oid", count, dboid);
3000 
3001  res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);
3002 
3003  if (PQntuples(res) == 1 &&
3004  !PQgetisnull(res, 0, 0))
3005  {
3006  makeAlterConfigCommand(conn, PQgetvalue(res, 0, 0),
3007  "DATABASE", dbname, NULL, NULL,
3008  outbuf);
3009  PQclear(res);
3010  count++;
3011  }
3012  else
3013  {
3014  PQclear(res);
3015  break;
3016  }
3017  }
3018 
3019  /* Now look for role-and-database-specific options */
3020  if (AH->remoteVersion >= 90000)
3021  {
3022  /* Here we can assume we have unnest() */
3023  printfPQExpBuffer(buf, "SELECT rolname, unnest(setconfig) "
3024  "FROM pg_db_role_setting s, pg_roles r "
3025  "WHERE setrole = r.oid AND setdatabase = '%u'::oid",
3026  dboid);
3027 
3028  res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);
3029 
3030  if (PQntuples(res) > 0)
3031  {
3032  int i;
3033 
3034  for (i = 0; i < PQntuples(res); i++)
3035  makeAlterConfigCommand(conn, PQgetvalue(res, i, 1),
3036  "ROLE", PQgetvalue(res, i, 0),
3037  "DATABASE", dbname,
3038  outbuf);
3039  }
3040 
3041  PQclear(res);
3042  }
3043 
3044  destroyPQExpBuffer(buf);
3045 }
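/*
 * Editor's note (illustration): the commands appended to outbuf by
 * makeAlterConfigCommand() look, for hypothetical settings, like
 *
 *     ALTER DATABASE mydb SET work_mem TO '64MB';
 *     ALTER ROLE alice IN DATABASE mydb SET maintenance_work_mem TO '1GB';
 *
 * one command per array element found in pg_db_role_setting (or, pre-9.0,
 * in pg_database.datconfig).
 */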
3046 
3047 /*
3048  * dumpEncoding: put the correct encoding into the archive
3049  */
3050 static void
3051 dumpEncoding(Archive *AH)
3052 {
3053  const char *encname = pg_encoding_to_char(AH->encoding);
3054  PQExpBuffer qry = createPQExpBuffer();
3055 
3056  if (g_verbose)
3057  write_msg(NULL, "saving encoding = %s\n", encname);
3058 
3059  appendPQExpBufferStr(qry, "SET client_encoding = ");
3060  appendStringLiteralAH(qry, encname, AH);
3061  appendPQExpBufferStr(qry, ";\n");
3062 
3063  ArchiveEntry(AH, nilCatalogId, createDumpId(),
3064  "ENCODING", NULL, NULL, "",
3065  false, "ENCODING", SECTION_PRE_DATA,
3066  qry->data, "", NULL,
3067  NULL, 0,
3068  NULL, NULL);
3069 
3070  destroyPQExpBuffer(qry);
3071 }
3072 
3073 
3074 /*
3075  * dumpStdStrings: put the correct escape string behavior into the archive
3076  */
3077 static void
3078 dumpStdStrings(Archive *AH)
3079 {
3080  const char *stdstrings = AH->std_strings ? "on" : "off";
3081  PQExpBuffer qry = createPQExpBuffer();
3082 
3083  if (g_verbose)
3084  write_msg(NULL, "saving standard_conforming_strings = %s\n",
3085  stdstrings);
3086 
3087  appendPQExpBuffer(qry, "SET standard_conforming_strings = '%s';\n",
3088  stdstrings);
3089 
3090  ArchiveEntry(AH, nilCatalogId, createDumpId(),
3091  "STDSTRINGS", NULL, NULL, "",
3092  false, "STDSTRINGS", SECTION_PRE_DATA,
3093  qry->data, "", NULL,
3094  NULL, 0,
3095  NULL, NULL);
3096 
3097  destroyPQExpBuffer(qry);
3098 }
3099 
3100 /*
3101  * dumpSearchPath: record the active search_path in the archive
3102  */
3103 static void
3104 dumpSearchPath(Archive *AH)
3105 {
3106  PQExpBuffer qry = createPQExpBuffer();
3107  PQExpBuffer path = createPQExpBuffer();
3108  PGresult *res;
3109  char **schemanames = NULL;
3110  int nschemanames = 0;
3111  int i;
3112 
3113  /*
3114  * We use the result of current_schemas(), not the search_path GUC,
3115  * because that might contain wildcards such as "$user", which won't
3116  * necessarily have the same value during restore. Also, this way avoids
3117  * listing schemas that may appear in search_path but not actually exist,
3118  * which seems like a prudent exclusion.
3119  */
3120  res = ExecuteSqlQueryForSingleRow(AH,
3121  "SELECT pg_catalog.current_schemas(false)");
3122 
3123  if (!parsePGArray(PQgetvalue(res, 0, 0), &schemanames, &nschemanames))
3124  exit_horribly(NULL, "could not parse result of current_schemas()\n");
3125 
3126  /*
3127  * We use set_config(), not a simple "SET search_path" command, because
3128  * the latter has less-clean behavior if the search path is empty. While
3129  * that's likely to get fixed at some point, it seems like a good idea to
3130  * be as backwards-compatible as possible in what we put into archives.
3131  */
3132  for (i = 0; i < nschemanames; i++)
3133  {
3134  if (i > 0)
3135  appendPQExpBufferStr(path, ", ");
3136  appendPQExpBufferStr(path, fmtId(schemanames[i]));
3137  }
3138 
3139  appendPQExpBufferStr(qry, "SELECT pg_catalog.set_config('search_path', ");
3140  appendStringLiteralAH(qry, path->data, AH);
3141  appendPQExpBufferStr(qry, ", false);\n");
3142 
3143  if (g_verbose)
3144  write_msg(NULL, "saving search_path = %s\n", path->data);
3145 
3146  ArchiveEntry(AH, nilCatalogId, createDumpId(),
3147  "SEARCHPATH", NULL, NULL, "",
3148  false, "SEARCHPATH", SECTION_PRE_DATA,
3149  qry->data, "", NULL,
3150  NULL, 0,
3151  NULL, NULL);
3152 
3153  /* Also save it in AH->searchpath, in case we're doing plain text dump */
3154  AH->searchpath = pg_strdup(qry->data);
3155 
3156  if (schemanames)
3157  free(schemanames);
3158  PQclear(res);
3159  destroyPQExpBuffer(qry);
3160  destroyPQExpBuffer(path);
3161 }
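/*
 * Editor's note (illustration): the SEARCHPATH entry typically holds a
 * single line such as
 *
 *     SELECT pg_catalog.set_config('search_path', 'public', false);
 *
 * i.e. the schemas reported by current_schemas(false), each quoted with
 * fmtId() and joined with ", ".
 */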
3162 
3163 
3164 /*
3165  * getBlobs:
3166  * Collect schema-level data about large objects
3167  */
3168 static void
3169 getBlobs(Archive *fout)
3170 {
3171  DumpOptions *dopt = fout->dopt;
3172  PQExpBuffer blobQry = createPQExpBuffer();
3173  BlobInfo *binfo;
3174  DumpableObject *bdata;
3175  PGresult *res;
3176  int ntups;
3177  int i;
3178  int i_oid;
3179  int i_lomowner;
3180  int i_lomacl;
3181  int i_rlomacl;
3182  int i_initlomacl;
3183  int i_initrlomacl;
3184 
3185  /* Verbose message */
3186  if (g_verbose)
3187  write_msg(NULL, "reading large objects\n");
3188 
3189  /* Fetch BLOB OIDs, and owner/ACL data if >= 9.0 */
3190  if (fout->remoteVersion >= 90600)
3191  {
3192  PQExpBuffer acl_subquery = createPQExpBuffer();
3193  PQExpBuffer racl_subquery = createPQExpBuffer();
3194  PQExpBuffer init_acl_subquery = createPQExpBuffer();
3195  PQExpBuffer init_racl_subquery = createPQExpBuffer();
3196 
3197  buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
3198  init_racl_subquery, "l.lomacl", "l.lomowner", "'L'",
3199  dopt->binary_upgrade);
3200 
3201  appendPQExpBuffer(blobQry,
3202  "SELECT l.oid, (%s l.lomowner) AS rolname, "
3203  "%s AS lomacl, "
3204  "%s AS rlomacl, "
3205  "%s AS initlomacl, "
3206  "%s AS initrlomacl "
3207  "FROM pg_largeobject_metadata l "
3208  "LEFT JOIN pg_init_privs pip ON "
3209  "(l.oid = pip.objoid "
3210  "AND pip.classoid = 'pg_largeobject'::regclass "
3211  "AND pip.objsubid = 0) ",
3212  username_subquery,
3213  acl_subquery->data,
3214  racl_subquery->data,
3215  init_acl_subquery->data,
3216  init_racl_subquery->data);
3217 
3218  destroyPQExpBuffer(acl_subquery);
3219  destroyPQExpBuffer(racl_subquery);
3220  destroyPQExpBuffer(init_acl_subquery);
3221  destroyPQExpBuffer(init_racl_subquery);
3222  }
3223  else if (fout->remoteVersion >= 90000)
3224  appendPQExpBuffer(blobQry,
3225  "SELECT oid, (%s lomowner) AS rolname, lomacl, "
3226  "NULL AS rlomacl, NULL AS initlomacl, "
3227  "NULL AS initrlomacl "
3228  " FROM pg_largeobject_metadata",
3229  username_subquery);
3230  else
3231  appendPQExpBufferStr(blobQry,
3232  "SELECT DISTINCT loid AS oid, "
3233  "NULL::name AS rolname, NULL::oid AS lomacl, "
3234  "NULL::oid AS rlomacl, NULL::oid AS initlomacl, "
3235  "NULL::oid AS initrlomacl "
3236  " FROM pg_largeobject");
3237 
3238  res = ExecuteSqlQuery(fout, blobQry->data, PGRES_TUPLES_OK);
3239 
3240  i_oid = PQfnumber(res, "oid");
3241  i_lomowner = PQfnumber(res, "rolname");
3242  i_lomacl = PQfnumber(res, "lomacl");
3243  i_rlomacl = PQfnumber(res, "rlomacl");
3244  i_initlomacl = PQfnumber(res, "initlomacl");
3245  i_initrlomacl = PQfnumber(res, "initrlomacl");
3246 
3247  ntups = PQntuples(res);
3248 
3249  /*
3250  * Each large object has its own BLOB archive entry.
3251  */
3252  binfo = (BlobInfo *) pg_malloc(ntups * sizeof(BlobInfo));
3253 
3254  for (i = 0; i < ntups; i++)
3255  {
3256  binfo[i].dobj.objType = DO_BLOB;
3257  binfo[i].dobj.catId.tableoid = LargeObjectRelationId;
3258  binfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
3259  AssignDumpId(&binfo[i].dobj);
3260 
3261  binfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oid));
3262  binfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_lomowner));
3263  binfo[i].blobacl = pg_strdup(PQgetvalue(res, i, i_lomacl));
3264  binfo[i].rblobacl = pg_strdup(PQgetvalue(res, i, i_rlomacl));
3265  binfo[i].initblobacl = pg_strdup(PQgetvalue(res, i, i_initlomacl));
3266  binfo[i].initrblobacl = pg_strdup(PQgetvalue(res, i, i_initrlomacl));
3267 
3268  if (PQgetisnull(res, i, i_lomacl) &&
3269  PQgetisnull(res, i, i_rlomacl) &&
3270  PQgetisnull(res, i, i_initlomacl) &&
3271  PQgetisnull(res, i, i_initrlomacl))
3272  binfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
3273 
3274  /*
3275  * In binary-upgrade mode for blobs, we do *not* dump out the data or
3276  * the ACLs, should any exist. The data and ACL (if any) will be
3277  * copied by pg_upgrade, which simply copies the pg_largeobject and
3278  * pg_largeobject_metadata tables.
3279  *
3280  * We *do* dump out the definition of the blob because we need that to
3281  * make the restoration of the comments, and anything else, work since
3282  * pg_upgrade copies the files behind pg_largeobject and
3283  * pg_largeobject_metadata after the dump is restored.
3284  */
3285  if (dopt->binary_upgrade)
3286  binfo[i].dobj.dump &= ~(DUMP_COMPONENT_DATA | DUMP_COMPONENT_ACL);
3287  }
3288 
3289  /*
3290  * If we have any large objects, a "BLOBS" archive entry is needed. This
3291  * is just a placeholder for sorting; it carries no data now.
3292  */
3293  if (ntups > 0)
3294  {
3295  bdata = (DumpableObject *) pg_malloc(sizeof(DumpableObject));
3296  bdata->objType = DO_BLOB_DATA;
3297  bdata->catId = nilCatalogId;
3298  AssignDumpId(bdata);
3299  bdata->name = pg_strdup("BLOBS");
3300  }
3301 
3302  PQclear(res);
3303  destroyPQExpBuffer(blobQry);
3304 }
3305 
3306 /*
3307  * dumpBlob
3308  *
3309  * dump the definition (metadata) of the given large object
3310  */
3311 static void
3312 dumpBlob(Archive *fout, BlobInfo *binfo)
3313 {
3314  PQExpBuffer cquery = createPQExpBuffer();
3315  PQExpBuffer dquery = createPQExpBuffer();
3316 
3317  appendPQExpBuffer(cquery,
3318  "SELECT pg_catalog.lo_create('%s');\n",
3319  binfo->dobj.name);
3320 
3321  appendPQExpBuffer(dquery,
3322  "SELECT pg_catalog.lo_unlink('%s');\n",
3323  binfo->dobj.name);
3324 
3325  if (binfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
3326  ArchiveEntry(fout, binfo->dobj.catId, binfo->dobj.dumpId,
3327  binfo->dobj.name,
3328  NULL, NULL,
3329  binfo->rolname, false,
3330  "BLOB", SECTION_PRE_DATA,
3331  cquery->data, dquery->data, NULL,
3332  NULL, 0,
3333  NULL, NULL);
3334 
3335  /* Dump comment if any */
3336  if (binfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3337  dumpComment(fout, "LARGE OBJECT", binfo->dobj.name,
3338  NULL, binfo->rolname,
3339  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3340 
3341  /* Dump security label if any */
3342  if (binfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3343  dumpSecLabel(fout, "LARGE OBJECT", binfo->dobj.name,
3344  NULL, binfo->rolname,
3345  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3346 
3347  /* Dump ACL if any */
3348  if (binfo->blobacl && (binfo->dobj.dump & DUMP_COMPONENT_ACL))
3349  dumpACL(fout, binfo->dobj.catId, binfo->dobj.dumpId, "LARGE OBJECT",
3350  binfo->dobj.name, NULL,
3351  NULL, binfo->rolname, binfo->blobacl, binfo->rblobacl,
3352  binfo->initblobacl, binfo->initrblobacl);
3353 
3354  destroyPQExpBuffer(cquery);
3355  destroyPQExpBuffer(dquery);
3356 }
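/*
 * Editor's note (illustration): for a large object with OID 16500, the BLOB
 * entry created above amounts to
 *
 *     SELECT pg_catalog.lo_create('16500');   -- create statement
 *     SELECT pg_catalog.lo_unlink('16500');   -- drop statement
 *
 * with the object's data supplied later by the BLOBS entry via dumpBlobs().
 */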
3357 
3358 /*
3359  * dumpBlobs:
3360  * dump the data contents of all large objects
3361  */
3362 static int
3363 dumpBlobs(Archive *fout, void *arg)
3364 {
3365  const char *blobQry;
3366  const char *blobFetchQry;
3367  PGconn *conn = GetConnection(fout);
3368  PGresult *res;
3369  char buf[LOBBUFSIZE];
3370  int ntups;
3371  int i;
3372  int cnt;
3373 
3374  if (g_verbose)
3375  write_msg(NULL, "saving large objects\n");
3376 
3377  /*
3378  * Currently, we re-fetch all BLOB OIDs using a cursor. Consider scanning
3379  * the already-in-memory dumpable objects instead...
3380  */
3381  if (fout->remoteVersion >= 90000)
3382  blobQry = "DECLARE bloboid CURSOR FOR SELECT oid FROM pg_largeobject_metadata";
3383  else
3384  blobQry = "DECLARE bloboid CURSOR FOR SELECT DISTINCT loid FROM pg_largeobject";
3385 
3386  ExecuteSqlStatement(fout, blobQry);
3387 
3388  /* Command to fetch from cursor */
3389  blobFetchQry = "FETCH 1000 IN bloboid";
3390 
3391  do
3392  {
3393  /* Do a fetch */
3394  res = ExecuteSqlQuery(fout, blobFetchQry, PGRES_TUPLES_OK);
3395 
3396  /* Process the tuples, if any */
3397  ntups = PQntuples(res);
3398  for (i = 0; i < ntups; i++)
3399  {
3400  Oid blobOid;
3401  int loFd;
3402 
3403  blobOid = atooid(PQgetvalue(res, i, 0));
3404  /* Open the BLOB */
3405  loFd = lo_open(conn, blobOid, INV_READ);
3406  if (loFd == -1)
3407  exit_horribly(NULL, "could not open large object %u: %s",
3408  blobOid, PQerrorMessage(conn));
3409 
3410  StartBlob(fout, blobOid);
3411 
3412  /* Now read it in chunks, sending data to archive */
3413  do
3414  {
3415  cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
3416  if (cnt < 0)
3417  exit_horribly(NULL, "error reading large object %u: %s",
3418  blobOid, PQerrorMessage(conn));
3419 
3420  WriteData(fout, buf, cnt);
3421  } while (cnt > 0);
3422 
3423  lo_close(conn, loFd);
3424 
3425  EndBlob(fout, blobOid);
3426  }
3427 
3428  PQclear(res);
3429  } while (ntups > 0);
3430 
3431  return 1;
3432 }
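 /*
  * Sketch of the fetch loop above in SQL terms (server >= 9.0):
  *
  *   DECLARE bloboid CURSOR FOR SELECT oid FROM pg_largeobject_metadata;
  *   FETCH 1000 IN bloboid;   -- repeated until zero rows are returned
  *
  * Each returned OID is then streamed to the archive via lo_open()/lo_read()
  * in LOBBUFSIZE-sized chunks.
  */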
3433 
3434 /*
3435  * getPolicies
3436  * get information about policies on a dumpable table.
3437  */
3438 void
3439 getPolicies(Archive *fout, TableInfo tblinfo[], int numTables)
3440 {
3441  PQExpBuffer query;
3442  PGresult *res;
3443  PolicyInfo *polinfo;
3444  int i_oid;
3445  int i_tableoid;
3446  int i_polname;
3447  int i_polcmd;
3448  int i_polpermissive;
3449  int i_polroles;
3450  int i_polqual;
3451  int i_polwithcheck;
3452  int i,
3453  j,
3454  ntups;
3455 
3456  if (fout->remoteVersion < 90500)
3457  return;
3458 
3459  query = createPQExpBuffer();
3460 
3461  for (i = 0; i < numTables; i++)
3462  {
3463  TableInfo *tbinfo = &tblinfo[i];
3464 
3465  /* Ignore row security on tables not to be dumped */
3466  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_POLICY))
3467  continue;
3468 
3469  if (g_verbose)
3470  write_msg(NULL, "reading row security enabled for table \"%s.%s\"\n",
3471  tbinfo->dobj.namespace->dobj.name,
3472  tbinfo->dobj.name);
3473 
3474  /*
 3475  * Get row security enabled information for the table. We represent
 3476  * RLS being enabled on a table by creating a PolicyInfo object with an
 3477  * empty policy.
3478  */
3479  if (tbinfo->rowsec)
3480  {
3481  /*
3482  * Note: use tableoid 0 so that this object won't be mistaken for
3483  * something that pg_depend entries apply to.
3484  */
3485  polinfo = pg_malloc(sizeof(PolicyInfo));
3486  polinfo->dobj.objType = DO_POLICY;
3487  polinfo->dobj.catId.tableoid = 0;
3488  polinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
3489  AssignDumpId(&polinfo->dobj);
3490  polinfo->dobj.namespace = tbinfo->dobj.namespace;
3491  polinfo->dobj.name = pg_strdup(tbinfo->dobj.name);
3492  polinfo->poltable = tbinfo;
3493  polinfo->polname = NULL;
3494  polinfo->polcmd = '\0';
3495  polinfo->polpermissive = 0;
3496  polinfo->polroles = NULL;
3497  polinfo->polqual = NULL;
3498  polinfo->polwithcheck = NULL;
3499  }
3500 
3501  if (g_verbose)
3502  write_msg(NULL, "reading policies for table \"%s.%s\"\n",
3503  tbinfo->dobj.namespace->dobj.name,
3504  tbinfo->dobj.name);
3505 
3506  resetPQExpBuffer(query);
3507 
3508  /* Get the policies for the table. */
3509  if (fout->remoteVersion >= 100000)
3510  appendPQExpBuffer(query,
3511  "SELECT oid, tableoid, pol.polname, pol.polcmd, pol.polpermissive, "
3512  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
3513  " pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
3514  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
3515  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
3516  "FROM pg_catalog.pg_policy pol "
3517  "WHERE polrelid = '%u'",
3518  tbinfo->dobj.catId.oid);
3519  else
3520  appendPQExpBuffer(query,
3521  "SELECT oid, tableoid, pol.polname, pol.polcmd, 't' as polpermissive, "
3522  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
3523  " pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
3524  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
3525  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
3526  "FROM pg_catalog.pg_policy pol "
3527  "WHERE polrelid = '%u'",
3528  tbinfo->dobj.catId.oid);
3529  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3530 
3531  ntups = PQntuples(res);
3532 
3533  if (ntups == 0)
3534  {
3535  /*
3536  * No explicit policies to handle (only the default-deny policy,
3537  * which is handled as part of the table definition). Clean up
3538  * and return.
3539  */
3540  PQclear(res);
3541  continue;
3542  }
3543 
3544  i_oid = PQfnumber(res, "oid");
3545  i_tableoid = PQfnumber(res, "tableoid");
3546  i_polname = PQfnumber(res, "polname");
3547  i_polcmd = PQfnumber(res, "polcmd");
3548  i_polpermissive = PQfnumber(res, "polpermissive");
3549  i_polroles = PQfnumber(res, "polroles");
3550  i_polqual = PQfnumber(res, "polqual");
3551  i_polwithcheck = PQfnumber(res, "polwithcheck");
3552 
3553  polinfo = pg_malloc(ntups * sizeof(PolicyInfo));
3554 
3555  for (j = 0; j < ntups; j++)
3556  {
3557  polinfo[j].dobj.objType = DO_POLICY;
3558  polinfo[j].dobj.catId.tableoid =
3559  atooid(PQgetvalue(res, j, i_tableoid));
3560  polinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
3561  AssignDumpId(&polinfo[j].dobj);
3562  polinfo[j].dobj.namespace = tbinfo->dobj.namespace;
3563  polinfo[j].poltable = tbinfo;
3564  polinfo[j].polname = pg_strdup(PQgetvalue(res, j, i_polname));
3565  polinfo[j].dobj.name = pg_strdup(polinfo[j].polname);
3566 
3567  polinfo[j].polcmd = *(PQgetvalue(res, j, i_polcmd));
3568  polinfo[j].polpermissive = *(PQgetvalue(res, j, i_polpermissive)) == 't';
3569 
3570  if (PQgetisnull(res, j, i_polroles))
3571  polinfo[j].polroles = NULL;
3572  else
3573  polinfo[j].polroles = pg_strdup(PQgetvalue(res, j, i_polroles));
3574 
3575  if (PQgetisnull(res, j, i_polqual))
3576  polinfo[j].polqual = NULL;
3577  else
3578  polinfo[j].polqual = pg_strdup(PQgetvalue(res, j, i_polqual));
3579 
3580  if (PQgetisnull(res, j, i_polwithcheck))
3581  polinfo[j].polwithcheck = NULL;
3582  else
3583  polinfo[j].polwithcheck
3584  = pg_strdup(PQgetvalue(res, j, i_polwithcheck));
3585  }
3586  PQclear(res);
3587  }
3588  destroyPQExpBuffer(query);
3589 }
3590 
3591 /*
3592  * dumpPolicy
3593  * dump the definition of the given policy
3594  */
 3595 static void
 3596 dumpPolicy(Archive *fout, PolicyInfo *polinfo)
 3597 {
3598  DumpOptions *dopt = fout->dopt;
3599  TableInfo *tbinfo = polinfo->poltable;
3600  PQExpBuffer query;
3601  PQExpBuffer delqry;
3602  const char *cmd;
3603  char *tag;
3604 
3605  if (dopt->dataOnly)
3606  return;
3607 
3608  /*
3609  * If polname is NULL, then this record is just indicating that ROW LEVEL
3610  * SECURITY is enabled for the table. Dump as ALTER TABLE <table> ENABLE
3611  * ROW LEVEL SECURITY.
3612  */
3613  if (polinfo->polname == NULL)
3614  {
3615  query = createPQExpBuffer();
3616 
3617  appendPQExpBuffer(query, "ALTER TABLE %s ENABLE ROW LEVEL SECURITY;",
3618  fmtQualifiedDumpable(polinfo));
3619 
3620  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3621  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3622  polinfo->dobj.name,
3623  polinfo->dobj.namespace->dobj.name,
3624  NULL,
3625  tbinfo->rolname, false,
3626  "ROW SECURITY", SECTION_POST_DATA,
3627  query->data, "", NULL,
3628  NULL, 0,
3629  NULL, NULL);
3630 
3631  destroyPQExpBuffer(query);
3632  return;
3633  }
3634 
3635  if (polinfo->polcmd == '*')
3636  cmd = "";
3637  else if (polinfo->polcmd == 'r')
3638  cmd = " FOR SELECT";
3639  else if (polinfo->polcmd == 'a')
3640  cmd = " FOR INSERT";
3641  else if (polinfo->polcmd == 'w')
3642  cmd = " FOR UPDATE";
3643  else if (polinfo->polcmd == 'd')
3644  cmd = " FOR DELETE";
3645  else
3646  {
3647  write_msg(NULL, "unexpected policy command type: %c\n",
3648  polinfo->polcmd);
3649  exit_nicely(1);
3650  }
3651 
3652  query = createPQExpBuffer();
3653  delqry = createPQExpBuffer();
3654 
3655  appendPQExpBuffer(query, "CREATE POLICY %s", fmtId(polinfo->polname));
3656 
3657  appendPQExpBuffer(query, " ON %s%s%s", fmtQualifiedDumpable(tbinfo),
3658  !polinfo->polpermissive ? " AS RESTRICTIVE" : "", cmd);
3659 
3660  if (polinfo->polroles != NULL)
3661  appendPQExpBuffer(query, " TO %s", polinfo->polroles);
3662 
3663  if (polinfo->polqual != NULL)
3664  appendPQExpBuffer(query, " USING (%s)", polinfo->polqual);
3665 
3666  if (polinfo->polwithcheck != NULL)
3667  appendPQExpBuffer(query, " WITH CHECK (%s)", polinfo->polwithcheck);
3668 
3669  appendPQExpBuffer(query, ";\n");
3670 
3671  appendPQExpBuffer(delqry, "DROP POLICY %s", fmtId(polinfo->polname));
3672  appendPQExpBuffer(delqry, " ON %s;\n", fmtQualifiedDumpable(tbinfo));
3673 
3674  tag = psprintf("%s %s", tbinfo->dobj.name, polinfo->dobj.name);
3675 
3676  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3677  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3678  tag,
3679  polinfo->dobj.namespace->dobj.name,
3680  NULL,
3681  tbinfo->rolname, false,
3682  "POLICY", SECTION_POST_DATA,
3683  query->data, delqry->data, NULL,
3684  NULL, 0,
3685  NULL, NULL);
3686 
3687  free(tag);
3688  destroyPQExpBuffer(query);
3689  destroyPQExpBuffer(delqry);
3690 }
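 /*
  * Illustrative sketch (hypothetical policy, table, and role names): for a
  * permissive SELECT policy "p1" on "public.accounts" granted to "staff",
  * the CREATE and DROP commands built above would look roughly like:
  *
  *   CREATE POLICY p1 ON public.accounts FOR SELECT TO staff
  *       USING ((owner = CURRENT_USER));
  *   DROP POLICY p1 ON public.accounts;
  *
  * A PolicyInfo row with a NULL polname instead produces only
  *   ALTER TABLE public.accounts ENABLE ROW LEVEL SECURITY;
  */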
3691 
3692 /*
3693  * getPublications
3694  * get information about publications
3695  */
 3696 void
 3697 getPublications(Archive *fout)
 3698 {
3699  DumpOptions *dopt = fout->dopt;
3700  PQExpBuffer query;
3701  PGresult *res;
3702  PublicationInfo *pubinfo;
3703  int i_tableoid;
3704  int i_oid;
3705  int i_pubname;
3706  int i_rolname;
3707  int i_puballtables;
3708  int i_pubinsert;
3709  int i_pubupdate;
3710  int i_pubdelete;
3711  int i_pubtruncate;
3712  int i,
3713  ntups;
3714 
3715  if (dopt->no_publications || fout->remoteVersion < 100000)
3716  return;
3717 
3718  query = createPQExpBuffer();
3719 
3720  resetPQExpBuffer(query);
3721 
3722  /* Get the publications. */
3723  if (fout->remoteVersion >= 110000)
3724  appendPQExpBuffer(query,
3725  "SELECT p.tableoid, p.oid, p.pubname, "
3726  "(%s p.pubowner) AS rolname, "
3727  "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, p.pubtruncate "
 3728  "FROM pg_publication p",
 3729  username_subquery);
 3730  else
3731  appendPQExpBuffer(query,
3732  "SELECT p.tableoid, p.oid, p.pubname, "
3733  "(%s p.pubowner) AS rolname, "
3734  "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, false AS pubtruncate "
 3735  "FROM pg_publication p",
 3736  username_subquery);
 3737 
3738  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3739 
3740  ntups = PQntuples(res);
3741 
3742  i_tableoid = PQfnumber(res, "tableoid");
3743  i_oid = PQfnumber(res, "oid");
3744  i_pubname = PQfnumber(res, "pubname");
3745  i_rolname = PQfnumber(res, "rolname");
3746  i_puballtables = PQfnumber(res, "puballtables");
3747  i_pubinsert = PQfnumber(res, "pubinsert");
3748  i_pubupdate = PQfnumber(res, "pubupdate");
3749  i_pubdelete = PQfnumber(res, "pubdelete");
3750  i_pubtruncate = PQfnumber(res, "pubtruncate");
3751 
3752  pubinfo = pg_malloc(ntups * sizeof(PublicationInfo));
3753 
3754  for (i = 0; i < ntups; i++)
3755  {
3756  pubinfo[i].dobj.objType = DO_PUBLICATION;
3757  pubinfo[i].dobj.catId.tableoid =
3758  atooid(PQgetvalue(res, i, i_tableoid));
3759  pubinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
3760  AssignDumpId(&pubinfo[i].dobj);
3761  pubinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_pubname));
3762  pubinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
3763  pubinfo[i].puballtables =
3764  (strcmp(PQgetvalue(res, i, i_puballtables), "t") == 0);
3765  pubinfo[i].pubinsert =
3766  (strcmp(PQgetvalue(res, i, i_pubinsert), "t") == 0);
3767  pubinfo[i].pubupdate =
3768  (strcmp(PQgetvalue(res, i, i_pubupdate), "t") == 0);
3769  pubinfo[i].pubdelete =
3770  (strcmp(PQgetvalue(res, i, i_pubdelete), "t") == 0);
3771  pubinfo[i].pubtruncate =
3772  (strcmp(PQgetvalue(res, i, i_pubtruncate), "t") == 0);
3773 
3774  if (strlen(pubinfo[i].rolname) == 0)
3775  write_msg(NULL, "WARNING: owner of publication \"%s\" appears to be invalid\n",
3776  pubinfo[i].dobj.name);
3777 
3778  /* Decide whether we want to dump it */
3779  selectDumpableObject(&(pubinfo[i].dobj), fout);
3780  }
3781  PQclear(res);
3782 
3783  destroyPQExpBuffer(query);
3784 }
3785 
3786 /*
3787  * dumpPublication
3788  * dump the definition of the given publication
3789  */
 3790 static void
 3791 dumpPublication(Archive *fout, PublicationInfo *pubinfo)
 3792 {
3793  PQExpBuffer delq;
3794  PQExpBuffer query;
3795  char *qpubname;
3796  bool first = true;
3797 
3798  if (!(pubinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3799  return;
3800 
3801  delq = createPQExpBuffer();
3802  query = createPQExpBuffer();
3803 
3804  qpubname = pg_strdup(fmtId(pubinfo->dobj.name));
3805 
3806  appendPQExpBuffer(delq, "DROP PUBLICATION %s;\n",
3807  qpubname);
3808 
3809  appendPQExpBuffer(query, "CREATE PUBLICATION %s",
3810  qpubname);
3811 
3812  if (pubinfo->puballtables)
3813  appendPQExpBufferStr(query, " FOR ALL TABLES");
3814 
3815  appendPQExpBufferStr(query, " WITH (publish = '");
3816  if (pubinfo->pubinsert)
3817  {
3818  appendPQExpBufferStr(query, "insert");
3819  first = false;
3820  }
3821 
3822  if (pubinfo->pubupdate)
3823  {
3824  if (!first)
3825  appendPQExpBufferStr(query, ", ");
3826 
3827  appendPQExpBufferStr(query, "update");
3828  first = false;
3829  }
3830 
3831  if (pubinfo->pubdelete)
3832  {
3833  if (!first)
3834  appendPQExpBufferStr(query, ", ");
3835 
3836  appendPQExpBufferStr(query, "delete");
3837  first = false;
3838  }
3839 
3840  if (pubinfo->pubtruncate)
3841  {
3842  if (!first)
3843  appendPQExpBufferStr(query, ", ");
3844 
3845  appendPQExpBufferStr(query, "truncate");
3846  first = false;
3847  }
3848 
3849  appendPQExpBufferStr(query, "');\n");
3850 
3851  ArchiveEntry(fout, pubinfo->dobj.catId, pubinfo->dobj.dumpId,
3852  pubinfo->dobj.name,
3853  NULL,
3854  NULL,
3855  pubinfo->rolname, false,
3856  "PUBLICATION", SECTION_POST_DATA,
3857  query->data, delq->data, NULL,
3858  NULL, 0,
3859  NULL, NULL);
3860 
3861  if (pubinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3862  dumpComment(fout, "PUBLICATION", qpubname,
3863  NULL, pubinfo->rolname,
3864  pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
3865 
3866  if (pubinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3867  dumpSecLabel(fout, "PUBLICATION", qpubname,
3868  NULL, pubinfo->rolname,
3869  pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
3870 
3871  destroyPQExpBuffer(delq);
3872  destroyPQExpBuffer(query);
3873  free(qpubname);
3874 }
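 /*
  * Illustrative sketch (hypothetical publication name): for a publication
  * "mypub" covering all tables with insert/update/delete enabled but not
  * truncate, the command assembled above would be roughly:
  *
  *   CREATE PUBLICATION mypub FOR ALL TABLES
  *       WITH (publish = 'insert, update, delete');
  */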
3875 
3876 /*
3877  * getPublicationTables
3878  * get information about publication membership for dumpable tables.
3879  */
 3880 void
 3881 getPublicationTables(Archive *fout, TableInfo tblinfo[], int numTables)
 3882 {
3883  PQExpBuffer query;
3884  PGresult *res;
3885  PublicationRelInfo *pubrinfo;
3886  int i_tableoid;
3887  int i_oid;
3888  int i_pubname;
3889  int i,
3890  j,
3891  ntups;
3892 
3893  if (fout->remoteVersion < 100000)
3894  return;
3895 
3896  query = createPQExpBuffer();
3897 
3898  for (i = 0; i < numTables; i++)
3899  {
3900  TableInfo *tbinfo = &tblinfo[i];
3901 
 3902  /* Only plain tables can be added to publications. */
3903  if (tbinfo->relkind != RELKIND_RELATION)
3904  continue;
3905 
3906  /*
3907  * Ignore publication membership of tables whose definitions are not
3908  * to be dumped.
3909  */
3910  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3911  continue;
3912 
3913  if (g_verbose)
3914  write_msg(NULL, "reading publication membership for table \"%s.%s\"\n",
3915  tbinfo->dobj.namespace->dobj.name,
3916  tbinfo->dobj.name);
3917 
3918  resetPQExpBuffer(query);
3919 
3920  /* Get the publication membership for the table. */
3921  appendPQExpBuffer(query,
3922  "SELECT pr.tableoid, pr.oid, p.pubname "
3923  "FROM pg_publication_rel pr, pg_publication p "
3924  "WHERE pr.prrelid = '%u'"
3925  " AND p.oid = pr.prpubid",
3926  tbinfo->dobj.catId.oid);
3927  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3928 
3929  ntups = PQntuples(res);
3930 
3931  if (ntups == 0)
3932  {
3933  /*
 3934  * Table is not a member of any publication. Clean up and return.
3935  */
3936  PQclear(res);
3937  continue;
3938  }
3939 
3940  i_tableoid = PQfnumber(res, "tableoid");
3941  i_oid = PQfnumber(res, "oid");
3942  i_pubname = PQfnumber(res, "pubname");
3943 
3944  pubrinfo = pg_malloc(ntups * sizeof(PublicationRelInfo));
3945 
3946  for (j = 0; j < ntups; j++)
3947  {
3948  pubrinfo[j].dobj.objType = DO_PUBLICATION_REL;
3949  pubrinfo[j].dobj.catId.tableoid =
3950  atooid(PQgetvalue(res, j, i_tableoid));
3951  pubrinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
3952  AssignDumpId(&pubrinfo[j].dobj);
3953  pubrinfo[j].dobj.namespace = tbinfo->dobj.namespace;
3954  pubrinfo[j].dobj.name = tbinfo->dobj.name;
3955  pubrinfo[j].pubname = pg_strdup(PQgetvalue(res, j, i_pubname));
3956  pubrinfo[j].pubtable = tbinfo;
3957 
3958  /* Decide whether we want to dump it */
3959  selectDumpablePublicationTable(&(pubrinfo[j].dobj), fout);
3960  }
3961  PQclear(res);
3962  }
3963  destroyPQExpBuffer(query);
3964 }
3965 
3966 /*
3967  * dumpPublicationTable
3968  * dump the definition of the given publication table mapping
3969  */
 3970 static void
 3971 dumpPublicationTable(Archive *fout, PublicationRelInfo *pubrinfo)
 3972 {
3973  TableInfo *tbinfo = pubrinfo->pubtable;
3974  PQExpBuffer query;
3975  char *tag;
3976 
3977  if (!(pubrinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3978  return;
3979 
3980  tag = psprintf("%s %s", pubrinfo->pubname, tbinfo->dobj.name);
3981 
3982  query = createPQExpBuffer();
3983 
3984  appendPQExpBuffer(query, "ALTER PUBLICATION %s ADD TABLE ONLY",
3985  fmtId(pubrinfo->pubname));
3986  appendPQExpBuffer(query, " %s;\n",
3987  fmtQualifiedDumpable(tbinfo));
3988 
3989  /*
 3990  * There is no point in creating a drop query, as the drop is done by the
 3991  * table drop.
3992  */
3993  ArchiveEntry(fout, pubrinfo->dobj.catId, pubrinfo->dobj.dumpId,
3994  tag,
3995  tbinfo->dobj.namespace->dobj.name,
3996  NULL,
3997  "", false,
3998  "PUBLICATION TABLE", SECTION_POST_DATA,
3999  query->data, "", NULL,
4000  NULL, 0,
4001  NULL, NULL);
4002 
4003  free(tag);
4004  destroyPQExpBuffer(query);
4005 }
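 /*
  * Illustrative sketch (hypothetical names): for table "public.accounts"
  * belonging to publication "mypub", the command emitted above is roughly:
  *
  *   ALTER PUBLICATION mypub ADD TABLE ONLY public.accounts;
  */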
4006 
4007 /*
4008  * Is the currently connected user a superuser?
4009  */
 4010 static bool
 4011 is_superuser(Archive *fout)
 4012 {
4013  ArchiveHandle *AH = (ArchiveHandle *) fout;
4014  const char *val;
4015 
4016  val = PQparameterStatus(AH->connection, "is_superuser");
4017 
4018  if (val && strcmp(val, "on") == 0)
4019  return true;
4020 
4021  return false;
4022 }
4023 
4024 /*
4025  * getSubscriptions
4026  * get information about subscriptions
4027  */
 4028 void
 4029 getSubscriptions(Archive *fout)
 4030 {
4031  DumpOptions *dopt = fout->dopt;
4032  PQExpBuffer query;
4033  PGresult *res;
4034  SubscriptionInfo *subinfo;
4035  int i_tableoid;
4036  int i_oid;
4037  int i_subname;
4038  int i_rolname;
4039  int i_subconninfo;
4040  int i_subslotname;
4041  int i_subsynccommit;
4042  int i_subpublications;
4043  int i,
4044  ntups;
4045 
4046  if (dopt->no_subscriptions || fout->remoteVersion < 100000)
4047  return;
4048 
4049  if (!is_superuser(fout))
4050  {
4051  int n;
4052 
4053  res = ExecuteSqlQuery(fout,
4054  "SELECT count(*) FROM pg_subscription "
4055  "WHERE subdbid = (SELECT oid FROM pg_database"
4056  " WHERE datname = current_database())",
4057  PGRES_TUPLES_OK);
4058  n = atoi(PQgetvalue(res, 0, 0));
4059  if (n > 0)
4060  write_msg(NULL, "WARNING: subscriptions not dumped because current user is not a superuser\n");
4061  PQclear(res);
4062  return;
4063  }
4064 
4065  query = createPQExpBuffer();
4066 
4067  resetPQExpBuffer(query);
4068 
 4069  /* Get the subscriptions in the current database. */
4070  appendPQExpBuffer(query,
4071  "SELECT s.tableoid, s.oid, s.subname,"
4072  "(%s s.subowner) AS rolname, "
4073  " s.subconninfo, s.subslotname, s.subsynccommit, "
4074  " s.subpublications "
4075  "FROM pg_subscription s "
4076  "WHERE s.subdbid = (SELECT oid FROM pg_database"
 4077  " WHERE datname = current_database())",
 4078  username_subquery);
 4079  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4080 
4081  ntups = PQntuples(res);
4082 
4083  i_tableoid = PQfnumber(res, "tableoid");
4084  i_oid = PQfnumber(res, "oid");
4085  i_subname = PQfnumber(res, "subname");
4086  i_rolname = PQfnumber(res, "rolname");
4087  i_subconninfo = PQfnumber(res, "subconninfo");
4088  i_subslotname = PQfnumber(res, "subslotname");
4089  i_subsynccommit = PQfnumber(res, "subsynccommit");
4090  i_subpublications = PQfnumber(res, "subpublications");
4091 
4092  subinfo = pg_malloc(ntups * sizeof(SubscriptionInfo));
4093 
4094  for (i = 0; i < ntups; i++)
4095  {
4096  subinfo[i].dobj.objType = DO_SUBSCRIPTION;
4097  subinfo[i].dobj.catId.tableoid =
4098  atooid(PQgetvalue(res, i, i_tableoid));
4099  subinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4100  AssignDumpId(&subinfo[i].dobj);
4101  subinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_subname));
4102  subinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4103  subinfo[i].subconninfo = pg_strdup(PQgetvalue(res, i, i_subconninfo));
4104  if (PQgetisnull(res, i, i_subslotname))
4105  subinfo[i].subslotname = NULL;
4106  else
4107  subinfo[i].subslotname = pg_strdup(PQgetvalue(res, i, i_subslotname));
4108  subinfo[i].subsynccommit =
4109  pg_strdup(PQgetvalue(res, i, i_subsynccommit));
4110  subinfo[i].subpublications =
4111  pg_strdup(PQgetvalue(res, i, i_subpublications));
4112 
4113  if (strlen(subinfo[i].rolname) == 0)
4114  write_msg(NULL, "WARNING: owner of subscription \"%s\" appears to be invalid\n",
4115  subinfo[i].dobj.name);
4116 
4117  /* Decide whether we want to dump it */
4118  selectDumpableObject(&(subinfo[i].dobj), fout);
4119  }
4120  PQclear(res);
4121 
4122  destroyPQExpBuffer(query);
4123 }
4124 
4125 /*
4126  * dumpSubscription
4127  * dump the definition of the given subscription
4128  */
 4129 static void
 4130 dumpSubscription(Archive *fout, SubscriptionInfo *subinfo)
 4131 {
4132  PQExpBuffer delq;
4133  PQExpBuffer query;
4134  PQExpBuffer publications;
4135  char *qsubname;
4136  char **pubnames = NULL;
4137  int npubnames = 0;
4138  int i;
4139 
4140  if (!(subinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
4141  return;
4142 
4143  delq = createPQExpBuffer();
4144  query = createPQExpBuffer();
4145 
4146  qsubname = pg_strdup(fmtId(subinfo->dobj.name));
4147 
4148  appendPQExpBuffer(delq, "DROP SUBSCRIPTION %s;\n",
4149  qsubname);
4150 
4151  appendPQExpBuffer(query, "CREATE SUBSCRIPTION %s CONNECTION ",
4152  qsubname);
4153  appendStringLiteralAH(query, subinfo->subconninfo, fout);
4154 
4155  /* Build list of quoted publications and append them to query. */
4156  if (!parsePGArray(subinfo->subpublications, &pubnames, &npubnames))
4157  {
4158  write_msg(NULL,
4159  "WARNING: could not parse subpublications array\n");
4160  if (pubnames)
4161  free(pubnames);
4162  pubnames = NULL;
4163  npubnames = 0;
4164  }
4165 
4166  publications = createPQExpBuffer();
4167  for (i = 0; i < npubnames; i++)
4168  {
4169  if (i > 0)
4170  appendPQExpBufferStr(publications, ", ");
4171 
4172  appendPQExpBufferStr(publications, fmtId(pubnames[i]));
4173  }
4174 
4175  appendPQExpBuffer(query, " PUBLICATION %s WITH (connect = false, slot_name = ", publications->data);
4176  if (subinfo->subslotname)
4177  appendStringLiteralAH(query, subinfo->subslotname, fout);
4178  else
4179  appendPQExpBufferStr(query, "NONE");
4180 
4181  if (strcmp(subinfo->subsynccommit, "off") != 0)
4182  appendPQExpBuffer(query, ", synchronous_commit = %s", fmtId(subinfo->subsynccommit));
4183 
4184  appendPQExpBufferStr(query, ");\n");
4185 
4186  ArchiveEntry(fout, subinfo->dobj.catId, subinfo->dobj.dumpId,
4187  subinfo->dobj.name,
4188  NULL,
4189  NULL,
4190  subinfo->rolname, false,
4191  "SUBSCRIPTION", SECTION_POST_DATA,
4192  query->data, delq->data, NULL,
4193  NULL, 0,
4194  NULL, NULL);
4195 
4196  if (subinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
4197  dumpComment(fout, "SUBSCRIPTION", qsubname,
4198  NULL, subinfo->rolname,
4199  subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
4200 
4201  if (subinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
4202  dumpSecLabel(fout, "SUBSCRIPTION", qsubname,
4203  NULL, subinfo->rolname,
4204  subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
4205 
4206  destroyPQExpBuffer(publications);
4207  if (pubnames)
4208  free(pubnames);
4209 
4210  destroyPQExpBuffer(delq);
4211  destroyPQExpBuffer(query);
4212  free(qsubname);
4213 }
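 /*
  * Illustrative sketch (hypothetical names and connection string): for a
  * subscription "mysub" to publication "mypub", the command assembled above
  * would look roughly like:
  *
  *   CREATE SUBSCRIPTION mysub CONNECTION 'host=primary dbname=app'
  *       PUBLICATION mypub WITH (connect = false, slot_name = 'mysub');
  *
  * Note that the query built above always includes connect = false and
  * preserves the slot name (or NONE) from the source server.
  */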
4214 
 4215 static void
 4216 binary_upgrade_set_type_oids_by_type_oid(Archive *fout,
 4217  PQExpBuffer upgrade_buffer,
4218  Oid pg_type_oid,
4219  bool force_array_type)
4220 {
4221  PQExpBuffer upgrade_query = createPQExpBuffer();
4222  PGresult *res;
4223  Oid pg_type_array_oid;
4224 
4225  appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type oid\n");
4226  appendPQExpBuffer(upgrade_buffer,
4227  "SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4228  pg_type_oid);
4229 
4230  /* we only support old >= 8.3 for binary upgrades */
4231  appendPQExpBuffer(upgrade_query,
4232  "SELECT typarray "
4233  "FROM pg_catalog.pg_type "
4234  "WHERE oid = '%u'::pg_catalog.oid;",
4235  pg_type_oid);
4236 
4237  res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4238 
4239  pg_type_array_oid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typarray")));
4240 
4241  PQclear(res);
4242 
4243  if (!OidIsValid(pg_type_array_oid) && force_array_type)
4244  {
4245  /*
4246  * If the old version didn't assign an array type, but the new version
4247  * does, we must select an unused type OID to assign. This currently
4248  * only happens for domains, when upgrading pre-v11 to v11 and up.
4249  *
4250  * Note: local state here is kind of ugly, but we must have some,
4251  * since we mustn't choose the same unused OID more than once.
4252  */
4253  static Oid next_possible_free_oid = FirstNormalObjectId;
4254  bool is_dup;
4255 
4256  do
4257  {
4258  ++next_possible_free_oid;
4259  printfPQExpBuffer(upgrade_query,
4260  "SELECT EXISTS(SELECT 1 "
4261  "FROM pg_catalog.pg_type "
4262  "WHERE oid = '%u'::pg_catalog.oid);",
4263  next_possible_free_oid);
4264  res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4265  is_dup = (PQgetvalue(res, 0, 0)[0] == 't');
4266  PQclear(res);
4267  } while (is_dup);
4268 
4269  pg_type_array_oid = next_possible_free_oid;
4270  }
4271 
4272  if (OidIsValid(pg_type_array_oid))
4273  {
4274  appendPQExpBufferStr(upgrade_buffer,
4275  "\n-- For binary upgrade, must preserve pg_type array oid\n");
4276  appendPQExpBuffer(upgrade_buffer,
4277  "SELECT pg_catalog.binary_upgrade_set_next_array_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4278  pg_type_array_oid);
4279  }
4280 
4281  destroyPQExpBuffer(upgrade_query);
4282 }
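 /*
  * Illustrative sketch (hypothetical OIDs): for a type with OID 16393 whose
  * array type is 16394, the commands appended to upgrade_buffer above are:
  *
  *   -- For binary upgrade, must preserve pg_type oid
  *   SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('16393'::pg_catalog.oid);
  *
  *   -- For binary upgrade, must preserve pg_type array oid
  *   SELECT pg_catalog.binary_upgrade_set_next_array_pg_type_oid('16394'::pg_catalog.oid);
  */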
4283 
 4284 static bool
 4285 binary_upgrade_set_type_oids_by_rel_oid(Archive *fout,
 4286  PQExpBuffer upgrade_buffer,
4287  Oid pg_rel_oid)
4288 {
4289  PQExpBuffer upgrade_query = createPQExpBuffer();
4290  PGresult *upgrade_res;
4291  Oid pg_type_oid;
4292  bool toast_set = false;
4293 
4294  /* we only support old >= 8.3 for binary upgrades */
4295  appendPQExpBuffer(upgrade_query,
4296  "SELECT c.reltype AS crel, t.reltype AS trel "
4297  "FROM pg_catalog.pg_class c "
4298  "LEFT JOIN pg_catalog.pg_class t ON "
4299  " (c.reltoastrelid = t.oid) "
4300  "WHERE c.oid = '%u'::pg_catalog.oid;",
4301  pg_rel_oid);
4302 
4303  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4304 
4305  pg_type_oid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "crel")));
4306 
4307  binary_upgrade_set_type_oids_by_type_oid(fout, upgrade_buffer,
4308  pg_type_oid, false);
4309 
4310  if (!PQgetisnull(upgrade_res, 0, PQfnumber(upgrade_res, "trel")))
4311  {
4312  /* Toast tables do not have pg_type array rows */
4313  Oid pg_type_toast_oid = atooid(PQgetvalue(upgrade_res, 0,
4314  PQfnumber(upgrade_res, "trel")));
4315 
4316  appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type toast oid\n");
4317  appendPQExpBuffer(upgrade_buffer,
4318  "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4319  pg_type_toast_oid);
4320 
4321  toast_set = true;
4322  }
4323 
4324  PQclear(upgrade_res);
4325  destroyPQExpBuffer(upgrade_query);
4326 
4327  return toast_set;
4328 }
4329 
 4330 static void
 4331 binary_upgrade_set_pg_class_oids(Archive *fout,
 4332  PQExpBuffer upgrade_buffer, Oid pg_class_oid,
4333  bool is_index)
4334 {
4335  PQExpBuffer upgrade_query = createPQExpBuffer();
4336  PGresult *upgrade_res;
4337  Oid pg_class_reltoastrelid;
4338  Oid pg_index_indexrelid;
4339 
4340  appendPQExpBuffer(upgrade_query,
4341  "SELECT c.reltoastrelid, i.indexrelid "
4342  "FROM pg_catalog.pg_class c LEFT JOIN "
4343  "pg_catalog.pg_index i ON (c.reltoastrelid = i.indrelid AND i.indisvalid) "
4344  "WHERE c.oid = '%u'::pg_catalog.oid;",
4345  pg_class_oid);
4346 
4347  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4348 
4349  pg_class_reltoastrelid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "reltoastrelid")));
4350  pg_index_indexrelid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "indexrelid")));
4351 
4352  appendPQExpBufferStr(upgrade_buffer,
4353  "\n-- For binary upgrade, must preserve pg_class oids\n");
4354 
4355  if (!is_index)
4356  {
4357  appendPQExpBuffer(upgrade_buffer,
4358  "SELECT pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('%u'::pg_catalog.oid);\n",
4359  pg_class_oid);
4360  /* only tables have toast tables, not indexes */
4361  if (OidIsValid(pg_class_reltoastrelid))
4362  {
4363  /*
4364  * One complexity is that the table definition might not require
4365  * the creation of a TOAST table, and the TOAST table might have
4366  * been created long after table creation, when the table was
4367  * loaded with wide data. By setting the TOAST oid we force
4368  * creation of the TOAST heap and TOAST index by the backend so we
4369  * can cleanly copy the files during binary upgrade.
4370  */
4371 
4372  appendPQExpBuffer(upgrade_buffer,
4373  "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('%u'::pg_catalog.oid);\n",
4374  pg_class_reltoastrelid);
4375 
4376  /* every toast table has an index */
4377  appendPQExpBuffer(upgrade_buffer,
4378  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
4379  pg_index_indexrelid);
4380  }
4381  }
4382  else
4383  appendPQExpBuffer(upgrade_buffer,
4384  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
4385  pg_class_oid);
4386 
4387  appendPQExpBufferChar(upgrade_buffer, '\n');
4388 
4389  PQclear(upgrade_res);
4390  destroyPQExpBuffer(upgrade_query);
4391 }
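 /*
  * Illustrative sketch (hypothetical OIDs): for a table with pg_class OID
  * 16402 whose TOAST table is 16405 with TOAST index 16407, the commands
  * appended above are roughly:
  *
  *   -- For binary upgrade, must preserve pg_class oids
  *   SELECT pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('16402'::pg_catalog.oid);
  *   SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('16405'::pg_catalog.oid);
  *   SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('16407'::pg_catalog.oid);
  */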
4392 
4393 /*
4394  * If the DumpableObject is a member of an extension, add a suitable
4395  * ALTER EXTENSION ADD command to the creation commands in upgrade_buffer.
4396  *
4397  * For somewhat historical reasons, objname should already be quoted,
4398  * but not objnamespace (if any).
4399  */
 4400 static void
 4401 binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
 4402  DumpableObject *dobj,
4403  const char *objtype,
4404  const char *objname,
4405  const char *objnamespace)
4406 {
4407  DumpableObject *extobj = NULL;
4408  int i;
4409 
4410  if (!dobj->ext_member)
4411  return;
4412 
4413  /*
4414  * Find the parent extension. We could avoid this search if we wanted to
4415  * add a link field to DumpableObject, but the space costs of that would
4416  * be considerable. We assume that member objects could only have a
4417  * direct dependency on their own extension, not any others.
4418  */
4419  for (i = 0; i < dobj->nDeps; i++)
4420  {
4421  extobj = findObjectByDumpId(dobj->dependencies[i]);
4422  if (extobj && extobj->objType == DO_EXTENSION)
4423  break;
4424  extobj = NULL;
4425  }
4426  if (extobj == NULL)
4427  exit_horribly(NULL, "could not find parent extension for %s %s\n",
4428  objtype, objname);
4429 
4430  appendPQExpBufferStr(upgrade_buffer,
4431  "\n-- For binary upgrade, handle extension membership the hard way\n");
4432  appendPQExpBuffer(upgrade_buffer, "ALTER EXTENSION %s ADD %s ",
4433  fmtId(extobj->name),
4434  objtype);
4435  if (objnamespace && *objnamespace)
4436  appendPQExpBuffer(upgrade_buffer, "%s.", fmtId(objnamespace));
4437  appendPQExpBuffer(upgrade_buffer, "%s;\n", objname);
4438 }
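 /*
  * Illustrative sketch (hypothetical extension and type names): for a type
  * "public.mytype" that is a member of extension "myext", the command
  * appended above is:
  *
  *   -- For binary upgrade, handle extension membership the hard way
  *   ALTER EXTENSION myext ADD TYPE public.mytype;
  */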
4439 
4440 /*
4441  * getNamespaces:
4442  * read all namespaces in the system catalogs and return them in the
4443  * NamespaceInfo* structure
4444  *
4445  * numNamespaces is set to the number of namespaces read in
4446  */
 4447 NamespaceInfo *
 4448 getNamespaces(Archive *fout, int *numNamespaces)
 4449 {
4450  DumpOptions *dopt = fout->dopt;
4451  PGresult *res;
4452  int ntups;
4453  int i;
4454  PQExpBuffer query;
4455  NamespaceInfo *nsinfo;
4456  int i_tableoid;
4457  int i_oid;
4458  int i_nspname;
4459  int i_rolname;
4460  int i_nspacl;
4461  int i_rnspacl;
4462  int i_initnspacl;
4463  int i_initrnspacl;
4464 
4465  query = createPQExpBuffer();
4466 
4467  /*
4468  * we fetch all namespaces including system ones, so that every object we
4469  * read in can be linked to a containing namespace.
4470  */
4471  if (fout->remoteVersion >= 90600)
4472  {
4473  PQExpBuffer acl_subquery = createPQExpBuffer();
4474  PQExpBuffer racl_subquery = createPQExpBuffer();
4475  PQExpBuffer init_acl_subquery = createPQExpBuffer();
4476  PQExpBuffer init_racl_subquery = createPQExpBuffer();
4477 
4478  buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
4479  init_racl_subquery, "n.nspacl", "n.nspowner", "'n'",
4480  dopt->binary_upgrade);
4481 
4482  appendPQExpBuffer(query, "SELECT n.tableoid, n.oid, n.nspname, "
4483  "(%s nspowner) AS rolname, "
4484  "%s as nspacl, "
4485  "%s as rnspacl, "
4486  "%s as initnspacl, "
4487  "%s as initrnspacl "
4488  "FROM pg_namespace n "
4489  "LEFT JOIN pg_init_privs pip "
4490  "ON (n.oid = pip.objoid "
4491  "AND pip.classoid = 'pg_namespace'::regclass "
 4492  "AND pip.objsubid = 0",
 4493  username_subquery,
 4494  acl_subquery->data,
4495  racl_subquery->data,
4496  init_acl_subquery->data,
4497  init_racl_subquery->data);
4498 
4499  appendPQExpBuffer(query, ") ");
4500 
4501  destroyPQExpBuffer(acl_subquery);
4502  destroyPQExpBuffer(racl_subquery);
4503  destroyPQExpBuffer(init_acl_subquery);
4504  destroyPQExpBuffer(init_racl_subquery);
4505  }
4506  else
4507  appendPQExpBuffer(query, "SELECT tableoid, oid, nspname, "
4508  "(%s nspowner) AS rolname, "
4509  "nspacl, NULL as rnspacl, "
4510  "NULL AS initnspacl, NULL as initrnspacl "
 4511  "FROM pg_namespace",
 4512  username_subquery);
 4513 
4514  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4515 
4516  ntups = PQntuples(res);
4517 
4518  nsinfo = (NamespaceInfo *) pg_malloc(ntups * sizeof(NamespaceInfo));
4519 
4520  i_tableoid = PQfnumber(res, "tableoid");
4521  i_oid = PQfnumber(res, "oid");
4522  i_nspname = PQfnumber(res, "nspname");
4523  i_rolname = PQfnumber(res, "rolname");
4524  i_nspacl = PQfnumber(res, "nspacl");
4525  i_rnspacl = PQfnumber(res, "rnspacl");
4526  i_initnspacl = PQfnumber(res, "initnspacl");
4527  i_initrnspacl = PQfnumber(res, "initrnspacl");
4528 
4529  for (i = 0; i < ntups; i++)
4530  {
4531  nsinfo[i].dobj.objType = DO_NAMESPACE;
4532  nsinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4533  nsinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4534  AssignDumpId(&nsinfo[i].dobj);
4535  nsinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_nspname));
4536  nsinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4537  nsinfo[i].nspacl = pg_strdup(PQgetvalue(res, i, i_nspacl));
4538  nsinfo[i].rnspacl = pg_strdup(PQgetvalue(res, i, i_rnspacl));
4539  nsinfo[i].initnspacl = pg_strdup(PQgetvalue(res, i, i_initnspacl));
4540  nsinfo[i].initrnspacl = pg_strdup(PQgetvalue(res, i, i_initrnspacl));
4541 
4542  /* Decide whether to dump this namespace */
4543  selectDumpableNamespace(&nsinfo[i], fout);
4544 
4545  /*
4546  * Do not try to dump ACL if the ACL is empty or the default.
4547  *
 4548  * This is useful because, for some schemas/objects, the only
 4549  * component we are going to try to dump is the ACL; if we can
 4550  * remove that, then 'dump' goes to zero/false and we don't consider
 4551  * this object for dumping at all later on.
4552  */
4553  if (PQgetisnull(res, i, i_nspacl) && PQgetisnull(res, i, i_rnspacl) &&
4554  PQgetisnull(res, i, i_initnspacl) &&
4555  PQgetisnull(res, i, i_initrnspacl))
4556  nsinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4557 
4558  if (strlen(nsinfo[i].rolname) == 0)
4559  write_msg(NULL, "WARNING: owner of schema \"%s\" appears to be invalid\n",
4560  nsinfo[i].dobj.name);
4561  }
4562 
4563  PQclear(res);
4564  destroyPQExpBuffer(query);
4565 
4566  *numNamespaces = ntups;
4567 
4568  return nsinfo;
4569 }
4570 
4571 /*
4572  * findNamespace:
4573  * given a namespace OID, look up the info read by getNamespaces
4574  */
 4575 static NamespaceInfo *
 4576 findNamespace(Archive *fout, Oid nsoid)
 4577 {
4578  NamespaceInfo *nsinfo;
4579 
4580  nsinfo = findNamespaceByOid(nsoid);
4581  if (nsinfo == NULL)
4582  exit_horribly(NULL, "schema with OID %u does not exist\n", nsoid);
4583  return nsinfo;
4584 }
4585 
4586 /*
4587  * getExtensions:
4588  * read all extensions in the system catalogs and return them in the
4589  * ExtensionInfo* structure
4590  *
4591  * numExtensions is set to the number of extensions read in
4592  */
 4593 ExtensionInfo *
 4594 getExtensions(Archive *fout, int *numExtensions)
 4595 {
4596  DumpOptions *dopt = fout->dopt;
4597  PGresult *res;
4598  int ntups;
4599  int i;
4600  PQExpBuffer query;
4601  ExtensionInfo *extinfo;
4602  int i_tableoid;
4603  int i_oid;
4604  int i_extname;
4605  int i_nspname;
4606  int i_extrelocatable;
4607  int i_extversion;
4608  int i_extconfig;
4609  int i_extcondition;
4610 
4611  /*
4612  * Before 9.1, there are no extensions.
4613  */
4614  if (fout->remoteVersion < 90100)
4615  {
4616  *numExtensions = 0;
4617  return NULL;
4618  }
4619 
4620  query = createPQExpBuffer();
4621 
4622  appendPQExpBufferStr(query, "SELECT x.tableoid, x.oid, "
4623  "x.extname, n.nspname, x.extrelocatable, x.extversion, x.extconfig, x.extcondition "
4624  "FROM pg_extension x "
4625  "JOIN pg_namespace n ON n.oid = x.extnamespace");
4626 
4627  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4628 
4629  ntups = PQntuples(res);
4630 
4631  extinfo = (ExtensionInfo *) pg_malloc(ntups * sizeof(ExtensionInfo));
4632 
4633  i_tableoid = PQfnumber(res, "tableoid");
4634  i_oid = PQfnumber(res, "oid");
4635  i_extname = PQfnumber(res, "extname");
4636  i_nspname = PQfnumber(res, "nspname");
4637  i_extrelocatable = PQfnumber(res, "extrelocatable");
4638  i_extversion = PQfnumber(res, "extversion");
4639  i_extconfig = PQfnumber(res, "extconfig");
4640  i_extcondition = PQfnumber(res, "extcondition");
4641 
4642  for (i = 0; i < ntups; i++)
4643  {
4644  extinfo[i].dobj.objType = DO_EXTENSION;
4645  extinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4646  extinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4647  AssignDumpId(&extinfo[i].dobj);
4648  extinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_extname));
4649  extinfo[i].namespace = pg_strdup(PQgetvalue(res, i, i_nspname));
4650  extinfo[i].relocatable = *(PQgetvalue(res, i, i_extrelocatable)) == 't';
4651  extinfo[i].extversion = pg_strdup(PQgetvalue(res, i, i_extversion));
4652  extinfo[i].extconfig = pg_strdup(PQgetvalue(res, i, i_extconfig));
4653  extinfo[i].extcondition = pg_strdup(PQgetvalue(res, i, i_extcondition));
4654 
4655  /* Decide whether we want to dump it */
4656  selectDumpableExtension(&(extinfo[i]), dopt);
4657  }
4658 
4659  PQclear(res);
4660  destroyPQExpBuffer(query);
4661 
4662  *numExtensions = ntups;
4663 
4664  return extinfo;
4665 }
4666 
4667 /*
4668  * getTypes:
4669  * read all types in the system catalogs and return them in the
4670  * TypeInfo* structure
4671  *
4672  * numTypes is set to the number of types read in
4673  *
4674  * NB: this must run after getFuncs() because we assume we can do
4675  * findFuncByOid().
4676  */
 4677 TypeInfo *
 4678 getTypes(Archive *fout, int *numTypes)
 4679 {
4680  DumpOptions *dopt = fout->dopt;
4681  PGresult *res;
4682  int ntups;
4683  int i;
4684  PQExpBuffer query = createPQExpBuffer();
4685  TypeInfo *tyinfo;
4686  ShellTypeInfo *stinfo;
4687  int i_tableoid;
4688  int i_oid;
4689  int i_typname;
4690  int i_typnamespace;
4691  int i_typacl;
4692  int i_rtypacl;
4693  int i_inittypacl;
4694  int i_initrtypacl;
4695  int i_rolname;
4696  int i_typelem;
4697  int i_typrelid;
4698  int i_typrelkind;
4699  int i_typtype;
4700  int i_typisdefined;
4701  int i_isarray;
4702 
4703  /*
4704  * we include even the built-in types because those may be used as array
4705  * elements by user-defined types
4706  *
4707  * we filter out the built-in types when we dump out the types
4708  *
4709  * same approach for undefined (shell) types and array types
4710  *
4711  * Note: as of 8.3 we can reliably detect whether a type is an
4712  * auto-generated array type by checking the element type's typarray.
4713  * (Before that the test is capable of generating false positives.) We
4714  * still check for name beginning with '_', though, so as to avoid the
4715  * cost of the subselect probe for all standard types. This would have to
4716  * be revisited if the backend ever allows renaming of array types.
4717  */
4718 
4719  if (fout->remoteVersion >= 90600)
4720  {
4721  PQExpBuffer acl_subquery = createPQExpBuffer();
4722  PQExpBuffer racl_subquery = createPQExpBuffer();
4723  PQExpBuffer initacl_subquery = createPQExpBuffer();
4724  PQExpBuffer initracl_subquery = createPQExpBuffer();
4725 
4726  buildACLQueries(acl_subquery, racl_subquery, initacl_subquery,
4727  initracl_subquery, "t.typacl", "t.typowner", "'T'",
4728  dopt->binary_upgrade);
4729 
4730  appendPQExpBuffer(query, "SELECT t.tableoid, t.oid, t.typname, "
4731  "t.typnamespace, "
4732  "%s AS typacl, "
4733  "%s AS rtypacl, "
4734  "%s AS inittypacl, "
4735  "%s AS initrtypacl, "
4736  "(%s t.typowner) AS rolname, "
4737  "t.typelem, t.typrelid, "
4738  "CASE WHEN t.typrelid = 0 THEN ' '::\"char\" "
4739  "ELSE (SELECT relkind FROM pg_class WHERE oid = t.typrelid) END AS typrelkind, "
4740  "t.typtype, t.typisdefined, "
4741  "t.typname[0] = '_' AND t.typelem != 0 AND "
4742  "(SELECT typarray FROM pg_type te WHERE oid = t.typelem) = t.oid AS isarray "
4743  "FROM pg_type t "
4744  "LEFT JOIN pg_init_privs pip ON "
4745  "(t.oid = pip.objoid "
4746  "AND pip.classoid = 'pg_type'::regclass "
4747  "AND pip.objsubid = 0) ",
4748  acl_subquery->data,
4749  racl_subquery->data,
4750  initacl_subquery->data,
 4751  initracl_subquery->data,
 4752  username_subquery);
 4753 
4754  destroyPQExpBuffer(acl_subquery);
4755  destroyPQExpBuffer(racl_subquery);
4756  destroyPQExpBuffer(initacl_subquery);
4757  destroyPQExpBuffer(initracl_subquery);
4758  }
4759  else if (fout->remoteVersion >= 90200)
4760  {
4761  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4762  "typnamespace, typacl, NULL as rtypacl, "
4763  "NULL AS inittypacl, NULL AS initrtypacl, "
4764  "(%s typowner) AS rolname, "
4765  "typelem, typrelid, "
4766  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4767  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4768  "typtype, typisdefined, "
4769  "typname[0] = '_' AND typelem != 0 AND "
4770  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
 4771  "FROM pg_type",
 4772  username_subquery);
 4773  }
4774  else if (fout->remoteVersion >= 80300)
4775  {
4776  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4777  "typnamespace, NULL AS typacl, NULL as rtypacl, "
4778  "NULL AS inittypacl, NULL AS initrtypacl, "
4779  "(%s typowner) AS rolname, "
4780  "typelem, typrelid, "
4781  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4782  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4783  "typtype, typisdefined, "
4784  "typname[0] = '_' AND typelem != 0 AND "
4785  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
 4786  "FROM pg_type",
 4787  username_subquery);
 4788  }
4789  else
4790  {
4791  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4792  "typnamespace, NULL AS typacl, NULL as rtypacl, "
4793  "NULL AS inittypacl, NULL AS initrtypacl, "
4794  "(%s typowner) AS rolname, "
4795  "typelem, typrelid, "
4796  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4797  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4798  "typtype, typisdefined, "
4799  "typname[0] = '_' AND typelem != 0 AS isarray "
 4800  "FROM pg_type",
 4801  username_subquery);
 4802  }
4803 
4804  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4805 
4806  ntups = PQntuples(res);
4807 
4808  tyinfo = (TypeInfo *) pg_malloc(ntups * sizeof(TypeInfo));
4809 
4810  i_tableoid = PQfnumber(res, "tableoid");
4811  i_oid = PQfnumber(res, "oid");
4812  i_typname = PQfnumber(res, "typname");
4813  i_typnamespace = PQfnumber(res, "typnamespace");
4814  i_typacl = PQfnumber(res, "typacl");
4815  i_rtypacl = PQfnumber(res, "rtypacl");
4816  i_inittypacl = PQfnumber(res, "inittypacl");
4817  i_initrtypacl = PQfnumber(res, "initrtypacl");
4818  i_rolname = PQfnumber(res, "rolname");
4819  i_typelem = PQfnumber(res, "typelem");
4820  i_typrelid = PQfnumber(res, "typrelid");
4821  i_typrelkind = PQfnumber(res, "typrelkind");
4822  i_typtype = PQfnumber(res, "typtype");
4823  i_typisdefined = PQfnumber(res, "typisdefined");
4824  i_isarray = PQfnumber(res, "isarray");
4825 
4826  for (i = 0; i < ntups; i++)
4827  {
4828  tyinfo[i].dobj.objType = DO_TYPE;
4829  tyinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4830  tyinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4831  AssignDumpId(&tyinfo[i].dobj);
4832  tyinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_typname));
4833  tyinfo[i].dobj.namespace =
4834  findNamespace(fout,
4835  atooid(PQgetvalue(res, i, i_typnamespace)));
4836  tyinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4837  tyinfo[i].typacl = pg_strdup(PQgetvalue(res, i, i_typacl));
4838  tyinfo[i].rtypacl = pg_strdup(PQgetvalue(res, i, i_rtypacl));
4839  tyinfo[i].inittypacl = pg_strdup(PQgetvalue(res, i, i_inittypacl));
4840  tyinfo[i].initrtypacl = pg_strdup(PQgetvalue(res, i, i_initrtypacl));
4841  tyinfo[i].typelem = atooid(PQgetvalue(res, i, i_typelem));
4842  tyinfo[i].typrelid = atooid(PQgetvalue(res, i, i_typrelid));
4843  tyinfo[i].typrelkind = *PQgetvalue(res, i, i_typrelkind);
4844  tyinfo[i].typtype = *PQgetvalue(res, i, i_typtype);
4845  tyinfo[i].shellType = NULL;
4846 
4847  if (strcmp(PQgetvalue(res, i, i_typisdefined), "t") == 0)
4848  tyinfo[i].isDefined = true;
4849  else
4850  tyinfo[i].isDefined = false;
4851 
4852  if (strcmp(PQgetvalue(res, i, i_isarray), "t") == 0)
4853  tyinfo[i].isArray = true;
4854  else
4855  tyinfo[i].isArray = false;
4856 
4857  /* Decide whether we want to dump it */
4858  selectDumpableType(&tyinfo[i], fout);
4859 
4860  /* Do not try to dump ACL if no ACL exists. */
4861  if (PQgetisnull(res, i, i_typacl) && PQgetisnull(res, i, i_rtypacl) &&
4862  PQgetisnull(res, i, i_inittypacl) &&
4863  PQgetisnull(res, i, i_initrtypacl))
4864  tyinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4865 
4866  /*
4867  * If it's a domain, fetch info about its constraints, if any
4868  */
4869  tyinfo[i].nDomChecks = 0;
4870  tyinfo[i].domChecks = NULL;
4871  if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
4872  tyinfo[i].typtype == TYPTYPE_DOMAIN)
4873  getDomainConstraints(fout, &(tyinfo[i]));
4874 
4875  /*
4876  * If it's a base type, make a DumpableObject representing a shell
4877  * definition of the type. We will need to dump that ahead of the I/O
4878  * functions for the type. Similarly, range types need a shell
4879  * definition in case they have a canonicalize function.
4880  *
4881  * Note: the shell type doesn't have a catId. You might think it
4882  * should copy the base type's catId, but then it might capture the
4883  * pg_depend entries for the type, which we don't want.
4884  */
4885  if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
4886  (tyinfo[i].typtype == TYPTYPE_BASE ||
4887  tyinfo[i].typtype == TYPTYPE_RANGE))
4888  {
4889  stinfo = (ShellTypeInfo *) pg_malloc(sizeof(ShellTypeInfo));
4890  stinfo->dobj.objType = DO_SHELL_TYPE;
4891  stinfo->dobj.catId = nilCatalogId;
4892  AssignDumpId(&stinfo->dobj);
4893  stinfo->dobj.name = pg_strdup(tyinfo[i].dobj.name);
4894  stinfo->dobj.namespace = tyinfo[i].dobj.namespace;
4895  stinfo->baseType = &(tyinfo[i]);
4896  tyinfo[i].shellType = stinfo;
4897 
4898  /*
4899  * Initially mark the shell type as not to be dumped. We'll only
4900  * dump it if the I/O or canonicalize functions need to be dumped;
4901  * this is taken care of while sorting dependencies.
4902  */
4903  stinfo->dobj.dump = DUMP_COMPONENT_NONE;
4904  }
4905 
4906  if (strlen(tyinfo[i].rolname) == 0)
4907  write_msg(NULL, "WARNING: owner of data type \"%s\" appears to be invalid\n",
4908  tyinfo[i].dobj.name);
4909  }
4910 
4911  *numTypes = ntups;
4912 
4913  PQclear(res);
4914 
4915  destroyPQExpBuffer(query);
4916 
4917  return tyinfo;
4918 }
4919 
4920 /*
4921  * getOperators:
4922  * read all operators in the system catalogs and return them in the
4923  * OprInfo* structure
4924  *
4925  * numOprs is set to the number of operators read in
4926  */
4927 OprInfo *
4928 getOperators(Archive *fout, int *numOprs)
4929 {
4930  PGresult *res;
4931  int ntups;
4932  int i;
4933  PQExpBuffer query = createPQExpBuffer();
4934  OprInfo *oprinfo;
4935  int i_tableoid;
4936  int i_oid;
4937  int i_oprname;
4938  int i_oprnamespace;
4939  int i_rolname;
4940  int i_oprkind;
4941  int i_oprcode;
4942 
4943  /*
4944  * find all operators, including builtin operators; we filter out
4945  * system-defined operators at dump-out time.
4946  */
4947 
4948  appendPQExpBuffer(query, "SELECT tableoid, oid, oprname, "
4949  "oprnamespace, "
4950  "(%s oprowner) AS rolname, "
4951  "oprkind, "
4952  "oprcode::oid AS oprcode "
 4953  "FROM pg_operator",
 4954  username_subquery);
 4955 
4956  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4957 
4958  ntups = PQntuples(res);
4959  *numOprs = ntups;
4960 
4961  oprinfo = (OprInfo *) pg_malloc(ntups * sizeof(OprInfo));
4962 
4963  i_tableoid = PQfnumber(res, "tableoid");
4964  i_oid = PQfnumber(res, "oid");
4965  i_oprname = PQfnumber(res, "oprname");
4966  i_oprnamespace = PQfnumber(res, "oprnamespace");
4967  i_rolname = PQfnumber(res, "rolname");
4968  i_oprkind = PQfnumber(res, "oprkind");
4969  i_oprcode = PQfnumber(res, "oprcode");
4970 
4971  for (i = 0; i < ntups; i++)
4972  {
4973  oprinfo[i].dobj.objType = DO_OPERATOR;
4974  oprinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4975  oprinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4976  AssignDumpId(&oprinfo[i].dobj);
4977  oprinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oprname));
4978  oprinfo[i].dobj.namespace =
4979  findNamespace(fout,
4980  atooid(PQgetvalue(res, i, i_oprnamespace)));
4981  oprinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4982  oprinfo[i].oprkind = (PQgetvalue(res, i, i_oprkind))[0];
4983  oprinfo[i].oprcode = atooid(PQgetvalue(res, i, i_oprcode));
4984 
4985  /* Decide whether we want to dump it */
4986  selectDumpableObject(&(oprinfo[i].dobj), fout);
4987 
4988  /* Operators do not currently have ACLs. */
4989  oprinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4990 
4991  if (strlen(oprinfo[i].rolname) == 0)
4992  write_msg(NULL, "WARNING: owner of operator \"%s\" appears to be invalid\n",
4993  oprinfo[i].dobj.name);
4994  }
4995 
4996  PQclear(res);
4997 
4998  destroyPQExpBuffer(query);
4999 
5000  return oprinfo;
5001 }
5002 
5003 /*
5004  * getCollations:
5005  * read all collations in the system catalogs and return them in the
5006  * CollInfo* structure
5007  *
5008  * numCollations is set to the number of collations read in
5009  */
 5010 CollInfo *
 5011 getCollations(Archive *fout, int *numCollations)
 5012 {
5013  PGresult *res;
5014  int ntups;
5015  int i;
5016  PQExpBuffer query;
5017  CollInfo *collinfo;
5018  int i_tableoid;
5019  int i_oid;
5020  int i_collname;
5021  int i_collnamespace;
5022  int i_rolname;
5023 
5024  /* Collations didn't exist pre-9.1 */
5025  if (fout->remoteVersion < 90100)
5026  {
5027  *numCollations = 0;
5028  return NULL;
5029  }
5030 
5031  query = createPQExpBuffer();
5032 
5033  /*
5034  * find all collations, including builtin collations; we filter out
5035  * system-defined collations at dump-out time.
5036  */
5037 
5038  appendPQExpBuffer(query, "SELECT tableoid, oid, collname, "
5039  "collnamespace, "
5040  "(%s collowner) AS rolname "
 5041  "FROM pg_collation",
 5042  username_subquery);
 5043 
5044  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5045 
5046  ntups = PQntuples(res);
5047  *numCollations = ntups;
5048 
5049  collinfo = (CollInfo *) pg_malloc(ntups * sizeof(CollInfo));
5050 
5051  i_tableoid = PQfnumber(res, "tableoid");
5052  i_oid = PQfnumber(res, "oid");
5053  i_collname = PQfnumber(res, "collname");
5054  i_collnamespace = PQfnumber(res, "collnamespace");
5055  i_rolname = PQfnumber(res, "rolname");
5056 
5057  for (i = 0; i < ntups; i++)
5058  {
5059  collinfo[i].dobj.objType = DO_COLLATION;
5060  collinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5061  collinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5062  AssignDumpId(&collinfo[i].dobj);
5063  collinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_collname));
5064  collinfo[i].dobj.namespace =
5065  findNamespace(fout,
5066  atooid(PQgetvalue(res, i, i_collnamespace)));
5067  collinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5068 
5069  /* Decide whether we want to dump it */
5070  selectDumpableObject(&(collinfo[i].dobj), fout);
5071 
5072  /* Collations do not currently have ACLs. */
5073  collinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5074  }
5075 
5076  PQclear(res);
5077 
5078  destroyPQExpBuffer(query);
5079 
5080  return collinfo;
5081 }
5082 
5083 /*
5084  * getConversions:
5085  * read all conversions in the system catalogs and return them in the
5086  * ConvInfo* structure
5087  *
5088  * numConversions is set to the number of conversions read in
5089  */
5090 ConvInfo *
5091 getConversions(Archive *fout, int *numConversions)
5092 {
5093  PGresult *res;
5094  int ntups;
5095  int i;
5096  PQExpBuffer query;
5097  ConvInfo *convinfo;
5098  int i_tableoid;
5099  int i_oid;
5100  int i_conname;
5101  int i_connamespace;
5102  int i_rolname;
5103 
5104  query = createPQExpBuffer();
5105 
5106  /*
5107  * find all conversions, including builtin conversions; we filter out
5108  * system-defined conversions at dump-out time.
5109  */
5110 
5111  appendPQExpBuffer(query, "SELECT tableoid, oid, conname, "
5112  "connamespace, "
5113  "(%s conowner) AS rolname "
 5114  "FROM pg_conversion",
 5115  username_subquery);
 5116 
5117  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5118 
5119  ntups = PQntuples(res);
5120  *numConversions = ntups;
5121 
5122  convinfo = (ConvInfo *) pg_malloc(ntups * sizeof(ConvInfo));
5123 
5124  i_tableoid = PQfnumber(res, "tableoid");
5125  i_oid = PQfnumber(res, "oid");
5126  i_conname = PQfnumber(res, "conname");
5127  i_connamespace = PQfnumber(res, "connamespace");
5128  i_rolname = PQfnumber(res, "rolname");
5129 
5130  for (i = 0; i < ntups; i++)
5131  {
5132  convinfo[i].dobj.objType = DO_CONVERSION;
5133  convinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5134  convinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5135  AssignDumpId(&convinfo[i].dobj);
5136  convinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_conname));
5137  convinfo[i].dobj.namespace =
5138  findNamespace(fout,
5139  atooid(PQgetvalue(res, i, i_connamespace)));
5140  convinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5141 
5142  /* Decide whether we want to dump it */
5143  selectDumpableObject(&(convinfo[i].dobj), fout);
5144 
5145  /* Conversions do not currently have ACLs. */
5146  convinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5147  }
5148 
5149  PQclear(res);
5150 
5151  destroyPQExpBuffer(query);
5152 
5153  return convinfo;
5154 }
5155 
5156 /*
5157  * getAccessMethods:
5158  * read all user-defined access methods in the system catalogs and return
5159  * them in the AccessMethodInfo* structure
5160  *
5161  * numAccessMethods is set to the number of access methods read in
 5162  */
 5163 AccessMethodInfo *
 5164 getAccessMethods(Archive *fout, int *numAccessMethods)
5165 {
5166  PGresult *res;
5167  int ntups;
5168  int i;
5169  PQExpBuffer query;
5170  AccessMethodInfo *aminfo;
5171  int i_tableoid;
5172  int i_oid;
5173  int i_amname;
5174  int i_amhandler;
5175  int i_amtype;
5176 
5177  /* Before 9.6, there are no user-defined access methods */
5178  if (fout->remoteVersion < 90600)
5179  {
5180  *numAccessMethods = 0;
5181  return NULL;
5182  }
5183 
5184  query = createPQExpBuffer();
5185 
5186  /* Select all access methods from pg_am table */
5187  appendPQExpBuffer(query, "SELECT tableoid, oid, amname, amtype, "
5188  "amhandler::pg_catalog.regproc AS amhandler "
5189  "FROM pg_am");
5190 
5191  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5192 
5193  ntups = PQntuples(res);
5194  *numAccessMethods = ntups;
5195 
5196  aminfo = (AccessMethodInfo *) pg_malloc(ntups * sizeof(AccessMethodInfo));
5197 
5198  i_tableoid = PQfnumber(res, "tableoid");
5199  i_oid = PQfnumber(res, "oid");
5200  i_amname = PQfnumber(res, "amname");
5201  i_amhandler = PQfnumber(res, "amhandler");
5202  i_amtype = PQfnumber(res, "amtype");
5203 
5204  for (i = 0; i < ntups; i++)
5205  {
5206  aminfo[i].dobj.objType = DO_ACCESS_METHOD;
5207  aminfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5208  aminfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5209  AssignDumpId(&aminfo[i].dobj);
5210  aminfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_amname));
5211  aminfo[i].dobj.namespace = NULL;
5212  aminfo[i].amhandler = pg_strdup(PQgetvalue(res, i, i_amhandler));
5213  aminfo[i].amtype = *(PQgetvalue(res, i, i_amtype));
5214 
5215  /* Decide whether we want to dump it */
5216  selectDumpableAccessMethod(&(aminfo[i]), fout);
5217 
5218  /* Access methods do not currently have ACLs. */
5219  aminfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5220  }
5221 
5222  PQclear(res);
5223 
5224  destroyPQExpBuffer(query);
5225 
5226  return aminfo;
5227 }
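
Two details of getAccessMethods() are worth noting: servers older than 9.6 have no user-defined access methods, so the function returns NULL with *numAccessMethods set to 0, and the per-object dump mask is a bitmask from which the ACL component is cleared with dump &= ~DUMP_COMPONENT_ACL, since access methods carry no ACLs. The following small sketch shows that bit-clearing idiom in isolation; the SKETCH_* flag names and values are illustrative assumptions, not copied from pg_dump.h.

    /*
     * Sketch of the component-bitmask idiom used in the loops above.
     * The flag values are stand-ins for the DUMP_COMPONENT_* bits.
     */
    #include <stdio.h>

    #define SKETCH_COMPONENT_DEFINITION (1 << 0)
    #define SKETCH_COMPONENT_ACL        (1 << 4)

    int
    main(void)
    {
        unsigned int dump = SKETCH_COMPONENT_DEFINITION | SKETCH_COMPONENT_ACL;

        dump &= ~SKETCH_COMPONENT_ACL;  /* same shape as dump &= ~DUMP_COMPONENT_ACL */

        printf("definition still requested: %d\n",
               (dump & SKETCH_COMPONENT_DEFINITION) != 0);
        printf("ACL still requested:        %d\n",
               (dump & SKETCH_COMPONENT_ACL) != 0);
        return 0;
    }
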
5228 
5229 
5230 /*
5231  * getOpclasses:
5232  * read all opclasses in the system catalogs and return them in the
5233  * OpclassInfo* structure
5234  *
5235  * numOpclasses is set to the number of opclasses read in
5236  */
5237 OpclassInfo *
5238 getOpclasses(Archive *fout, int *numOpclasses)
5239 {
5240  PGresult *res;
5241  int ntups;
5242  int i;
5243  PQExpBuffer query = createPQExpBuffer();
5244  OpclassInfo *opcinfo;
5245  int i_tableoid;
5246  int i_oid;
5247  int i_opcname;
5248  int i_opcnamespace;
5249  int i_rolname;
5250 
5251  /*
5252  * find all opclasses, including builtin opclasses; we filter out
5253  * system-defined opclasses at dump-out time.
5254  */
5255 
5256  appendPQExpBuffer(query, "SELECT tableoid, oid, opcname, "
5257  "opcnamespace, "
5258  "(%s opcowner) AS rolname "
5259  "FROM pg_opclass",
5260  username_subquery);
5261 
5262  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5263 
5264  ntups = PQntuples(res);
5265  *numOpclasses = ntups;
5266 
5267  opcinfo = (OpclassInfo *) pg_malloc(ntups * sizeof(OpclassInfo));
5268 
5269  i_tableoid = PQfnumber(res, "tableoid");
5270  i_oid = PQfnumber(res, "oid");
5271  i_opcname = PQfnumber(res, "opcname");
5272  i_opcnamespace = PQfnumber(res, "opcnamespace");
5273  i_rolname = PQfnumber(res, "rolname");
5274 
5275  for (i = 0; i < ntups; i++)
5276  {
5277  opcinfo[i].dobj.objType = DO_OPCLASS;
5278  opcinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5279  opcinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5280  AssignDumpId(&opcinfo[i].dobj);
5281  opcinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opcname));
5282  opcinfo[i].dobj.namespace =
5283  findNamespace(fout,
5284  atooid(PQgetvalue(res, i, i_opcnamespace)));
5285  opcinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5286 
5287  /* Decide whether we want to dump it */
5288  selectDumpableObject(&(opcinfo[i].dobj), fout);
5289 
5290  /* Op Classes do not currently have ACLs. */
5291  opcinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5292 
5293  if (strlen(opcinfo[i].rolname) == 0)
5294  write_msg(NULL, "WARNING: owner of operator class \"%s\" appears to be invalid\n",
5295  opcinfo[i].dobj.name);
5296  }
5297 
5298  PQclear(res);
5299 
5300  destroyPQExpBuffer(query);
5301 
5302  return opcinfo;
5303 }
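
In the query above, the %s placeholder inside "(%s opcowner) AS rolname" is filled with username_subquery, the owner-OID-to-role-name subquery declared near the top of this file, so rolname comes back empty when the owning role can no longer be resolved; that is what triggers the "appears to be invalid" warning in the loop. Below is a self-contained sketch of how that query text gets assembled; the subquery string is an assumption modeled on the username_subquery comment, since the real text is chosen at connection time based on the server version.

    /*
     * Sketch: fill the "(%s opcowner) AS rolname" placeholder.  The
     * subquery literal here is an assumed example, not the value pg_dump
     * necessarily uses for every server version.
     */
    #include <stdio.h>

    int
    main(void)
    {
        const char *username_subquery =
            "SELECT rolname FROM pg_catalog.pg_roles WHERE oid =";  /* assumed */
        char        query[512];

        snprintf(query, sizeof(query),
                 "SELECT tableoid, oid, opcname, opcnamespace, "
                 "(%s opcowner) AS rolname "
                 "FROM pg_opclass",
                 username_subquery);

        puts(query);    /* shows the scalar subquery that yields rolname */
        return 0;
    }
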
5304 
5305 /*
5306  * getOpfamilies:
5307  * read all opfamilies in the system catalogs and return them in the
5308  * OpfamilyInfo* structure
5309  *
5310  * numOpfamilies is set to the number of opfamilies read in
5311  */
5312 OpfamilyInfo *
5313 getOpfamilies(Archive *fout, int *numOpfamilies)
5314 {
5315  PGresult *res;
5316  int ntups;
5317  int i;
5318  PQExpBuffer query;
5319  OpfamilyInfo *opfinfo;
5320  int i_tableoid;
5321  int i_oid;
5322  int i_opfname;
5323  int i_opfnamespace;
5324  int i_rolname;
5325 
5326  /* Before 8.3, there is no separate concept of opfamilies */
5327  if (fout->remoteVersion < 80300)
5328  {
5329  *numOpfamilies = 0;
5330  return NULL;
5331  }
5332 
5333  query = createPQExpBuffer();
5334 
5335  /*
5336  * find all opfamilies, including builtin opfamilies; we filter out