pg_dump.c
1 /*-------------------------------------------------------------------------
2  *
3  * pg_dump.c
4  * pg_dump is a utility for dumping out a postgres database
5  * into a script file.
6  *
7  * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994, Regents of the University of California
9  *
10  * pg_dump will read the system catalogs in a database and dump out a
11  * script that reproduces the schema in terms of SQL that is understood
12  * by PostgreSQL
13  *
14  * Note that pg_dump runs in a transaction-snapshot mode transaction,
15  * so it sees a consistent snapshot of the database including system
16  * catalogs. However, it relies in part on various specialized backend
17  * functions like pg_get_indexdef(), and those things tend to look at
18  * the currently committed state. So it is possible to get a 'cache
19  * lookup failed' error if someone performs DDL changes while a dump is
20  * happening. The window for this sort of thing is from the acquisition
21  * of the transaction snapshot to getSchemaData() (when pg_dump acquires
22  * AccessShareLock on every table it intends to dump). It isn't very large,
23  * but it can happen.
24  *
25  * http://archives.postgresql.org/pgsql-bugs/2010-02/msg00187.php
26  *
27  * IDENTIFICATION
28  * src/bin/pg_dump/pg_dump.c
29  *
30  *-------------------------------------------------------------------------
31  */
32 #include "postgres_fe.h"
33 
34 #include <unistd.h>
35 #include <ctype.h>
36 #ifdef HAVE_TERMIOS_H
37 #include <termios.h>
38 #endif
39 
40 #include "getopt_long.h"
41 
42 #include "access/attnum.h"
43 #include "access/sysattr.h"
44 #include "access/transam.h"
45 #include "catalog/pg_aggregate.h"
46 #include "catalog/pg_am.h"
47 #include "catalog/pg_attribute.h"
48 #include "catalog/pg_cast.h"
49 #include "catalog/pg_class.h"
50 #include "catalog/pg_default_acl.h"
51 #include "catalog/pg_largeobject.h"
52 #include "catalog/pg_largeobject_metadata.h"
53 #include "catalog/pg_proc.h"
54 #include "catalog/pg_trigger.h"
55 #include "catalog/pg_type.h"
56 #include "libpq/libpq-fs.h"
57 
58 #include "dumputils.h"
59 #include "parallel.h"
60 #include "pg_backup_db.h"
61 #include "pg_backup_utils.h"
62 #include "pg_dump.h"
63 #include "fe_utils/string_utils.h"
64 
65 
66 typedef struct
67 {
68  const char *descr; /* comment for an object */
69  Oid classoid; /* object class (catalog OID) */
70  Oid objoid; /* object OID */
71  int objsubid; /* subobject (table column #) */
72 } CommentItem;
73 
74 typedef struct
75 {
76  const char *provider; /* label provider of this security label */
77  const char *label; /* security label for an object */
78  Oid classoid; /* object class (catalog OID) */
79  Oid objoid; /* object OID */
80  int objsubid; /* subobject (table column #) */
81 } SecLabelItem;
82 
83 typedef enum OidOptions
84 {
85  zeroAsOpaque = 1,
86  zeroAsAny = 2,
87  zeroAsStar = 4,
88  zeroAsNone = 8
89 } OidOptions;
90 
91 /* global decls */
92 bool g_verbose; /* User wants verbose narration of our
93  * activities. */
94 static bool dosync = true; /* Issue fsync() to make dump durable on disk. */
95 
96 /* subquery used to convert user ID (eg, datdba) to user name */
97 static const char *username_subquery;
98 
99 /*
100  * For 8.0 and earlier servers, this is pulled from pg_database; for 8.1
101  * and later we use FirstNormalObjectId - 1.
102  */
103 static Oid g_last_builtin_oid; /* value of the last builtin oid */
104 
105 /* The specified names/patterns should match at least one entity */
106 static int strict_names = 0;
107 
108 /*
109  * Object inclusion/exclusion lists
110  *
111  * The string lists record the patterns given by command-line switches,
112  * which we then convert to lists of OIDs of matching objects.
113  */
114 static SimpleStringList schema_include_patterns = {NULL, NULL};
115 static SimpleOidList schema_include_oids = {NULL, NULL};
116 static SimpleStringList schema_exclude_patterns = {NULL, NULL};
117 static SimpleOidList schema_exclude_oids = {NULL, NULL};
118 
119 static SimpleStringList table_include_patterns = {NULL, NULL};
120 static SimpleOidList table_include_oids = {NULL, NULL};
121 static SimpleStringList table_exclude_patterns = {NULL, NULL};
122 static SimpleOidList table_exclude_oids = {NULL, NULL};
123 static SimpleStringList tabledata_exclude_patterns = {NULL, NULL};
124 static SimpleOidList tabledata_exclude_oids = {NULL, NULL};
125 
126 
127 char g_opaque_type[10]; /* name for the opaque type */
128 
129 /* placeholders for the delimiters for comments */
130 char g_comment_start[10];
131 char g_comment_end[10];
132 
133 static const CatalogId nilCatalogId = {0, 0};
134 
135 static void help(const char *progname);
136 static void setup_connection(Archive *AH,
137  const char *dumpencoding, const char *dumpsnapshot,
138  char *use_role);
139 static ArchiveFormat parseArchiveFormat(const char *format, ArchiveMode *mode);
140 static void expand_schema_name_patterns(Archive *fout,
141  SimpleStringList *patterns,
142  SimpleOidList *oids,
143  bool strict_names);
144 static void expand_table_name_patterns(Archive *fout,
145  SimpleStringList *patterns,
146  SimpleOidList *oids,
147  bool strict_names);
148 static NamespaceInfo *findNamespace(Archive *fout, Oid nsoid);
149 static void dumpTableData(Archive *fout, TableDataInfo *tdinfo);
150 static void refreshMatViewData(Archive *fout, TableDataInfo *tdinfo);
151 static void guessConstraintInheritance(TableInfo *tblinfo, int numTables);
152 static void dumpComment(Archive *fout, const char *target,
153  const char *namespace, const char *owner,
154  CatalogId catalogId, int subid, DumpId dumpId);
155 static int findComments(Archive *fout, Oid classoid, Oid objoid,
156  CommentItem **items);
157 static int collectComments(Archive *fout, CommentItem **items);
158 static void dumpSecLabel(Archive *fout, const char *target,
159  const char *namespace, const char *owner,
160  CatalogId catalogId, int subid, DumpId dumpId);
161 static int findSecLabels(Archive *fout, Oid classoid, Oid objoid,
162  SecLabelItem **items);
163 static int collectSecLabels(Archive *fout, SecLabelItem **items);
164 static void dumpDumpableObject(Archive *fout, DumpableObject *dobj);
165 static void dumpNamespace(Archive *fout, NamespaceInfo *nspinfo);
166 static void dumpExtension(Archive *fout, ExtensionInfo *extinfo);
167 static void dumpType(Archive *fout, TypeInfo *tyinfo);
168 static void dumpBaseType(Archive *fout, TypeInfo *tyinfo);
169 static void dumpEnumType(Archive *fout, TypeInfo *tyinfo);
170 static void dumpRangeType(Archive *fout, TypeInfo *tyinfo);
171 static void dumpUndefinedType(Archive *fout, TypeInfo *tyinfo);
172 static void dumpDomain(Archive *fout, TypeInfo *tyinfo);
173 static void dumpCompositeType(Archive *fout, TypeInfo *tyinfo);
174 static void dumpCompositeTypeColComments(Archive *fout, TypeInfo *tyinfo);
175 static void dumpShellType(Archive *fout, ShellTypeInfo *stinfo);
176 static void dumpProcLang(Archive *fout, ProcLangInfo *plang);
177 static void dumpFunc(Archive *fout, FuncInfo *finfo);
178 static void dumpCast(Archive *fout, CastInfo *cast);
179 static void dumpTransform(Archive *fout, TransformInfo *transform);
180 static void dumpOpr(Archive *fout, OprInfo *oprinfo);
181 static void dumpAccessMethod(Archive *fout, AccessMethodInfo *oprinfo);
182 static void dumpOpclass(Archive *fout, OpclassInfo *opcinfo);
183 static void dumpOpfamily(Archive *fout, OpfamilyInfo *opfinfo);
184 static void dumpCollation(Archive *fout, CollInfo *collinfo);
185 static void dumpConversion(Archive *fout, ConvInfo *convinfo);
186 static void dumpRule(Archive *fout, RuleInfo *rinfo);
187 static void dumpAgg(Archive *fout, AggInfo *agginfo);
188 static void dumpTrigger(Archive *fout, TriggerInfo *tginfo);
189 static void dumpEventTrigger(Archive *fout, EventTriggerInfo *evtinfo);
190 static void dumpTable(Archive *fout, TableInfo *tbinfo);
191 static void dumpTableSchema(Archive *fout, TableInfo *tbinfo);
192 static void dumpAttrDef(Archive *fout, AttrDefInfo *adinfo);
193 static void dumpSequence(Archive *fout, TableInfo *tbinfo);
194 static void dumpSequenceData(Archive *fout, TableDataInfo *tdinfo);
195 static void dumpIndex(Archive *fout, IndxInfo *indxinfo);
196 static void dumpStatisticsExt(Archive *fout, StatsExtInfo *statsextinfo);
197 static void dumpConstraint(Archive *fout, ConstraintInfo *coninfo);
198 static void dumpTableConstraintComment(Archive *fout, ConstraintInfo *coninfo);
199 static void dumpTSParser(Archive *fout, TSParserInfo *prsinfo);
200 static void dumpTSDictionary(Archive *fout, TSDictInfo *dictinfo);
201 static void dumpTSTemplate(Archive *fout, TSTemplateInfo *tmplinfo);
202 static void dumpTSConfig(Archive *fout, TSConfigInfo *cfginfo);
203 static void dumpForeignDataWrapper(Archive *fout, FdwInfo *fdwinfo);
204 static void dumpForeignServer(Archive *fout, ForeignServerInfo *srvinfo);
205 static void dumpUserMappings(Archive *fout,
206  const char *servername, const char *namespace,
207  const char *owner, CatalogId catalogId, DumpId dumpId);
208 static void dumpDefaultACL(Archive *fout, DefaultACLInfo *daclinfo);
209 
210 static void dumpACL(Archive *fout, CatalogId objCatId, DumpId objDumpId,
211  const char *type, const char *name, const char *subname,
212  const char *tag, const char *nspname, const char *owner,
213  const char *acls, const char *racls,
214  const char *initacls, const char *initracls);
215 
216 static void getDependencies(Archive *fout);
217 static void BuildArchiveDependencies(Archive *fout);
218 static void findDumpableDependencies(ArchiveHandle *AH, DumpableObject *dobj,
219  DumpId **dependencies, int *nDeps, int *allocDeps);
220 
221 static DumpableObject *createBoundaryObjects(void);
222 static void addBoundaryDependencies(DumpableObject **dobjs, int numObjs,
223  DumpableObject *boundaryObjs);
224 
225 static void getDomainConstraints(Archive *fout, TypeInfo *tyinfo);
226 static void getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, bool oids, char relkind);
227 static void makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo, bool oids);
228 static void buildMatViewRefreshDependencies(Archive *fout);
229 static void getTableDataFKConstraints(void);
230 static char *format_function_arguments(FuncInfo *finfo, char *funcargs,
231  bool is_agg);
232 static char *format_function_arguments_old(Archive *fout,
233  FuncInfo *finfo, int nallargs,
234  char **allargtypes,
235  char **argmodes,
236  char **argnames);
237 static char *format_function_signature(Archive *fout,
238  FuncInfo *finfo, bool honor_quotes);
239 static char *convertRegProcReference(Archive *fout,
240  const char *proc);
241 static char *convertOperatorReference(Archive *fout, const char *opr);
242 static char *convertTSFunction(Archive *fout, Oid funcOid);
243 static Oid findLastBuiltinOid_V71(Archive *fout, const char *);
244 static void selectSourceSchema(Archive *fout, const char *schemaName);
245 static char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts);
246 static void getBlobs(Archive *fout);
247 static void dumpBlob(Archive *fout, BlobInfo *binfo);
248 static int dumpBlobs(Archive *fout, void *arg);
249 static void dumpPolicy(Archive *fout, PolicyInfo *polinfo);
250 static void dumpPublication(Archive *fout, PublicationInfo *pubinfo);
251 static void dumpPublicationTable(Archive *fout, PublicationRelInfo *pubrinfo);
252 static void dumpSubscription(Archive *fout, SubscriptionInfo *subinfo);
253 static void dumpDatabase(Archive *AH);
254 static void dumpEncoding(Archive *AH);
255 static void dumpStdStrings(Archive *AH);
256 static void binary_upgrade_set_type_oids_by_type_oid(Archive *fout,
257  PQExpBuffer upgrade_buffer,
258  Oid pg_type_oid,
259  bool force_array_type);
260 static bool binary_upgrade_set_type_oids_by_rel_oid(Archive *fout,
261  PQExpBuffer upgrade_buffer, Oid pg_rel_oid);
262 static void binary_upgrade_set_pg_class_oids(Archive *fout,
263  PQExpBuffer upgrade_buffer,
264  Oid pg_class_oid, bool is_index);
265 static void binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
266  DumpableObject *dobj,
267  const char *objlabel);
268 static const char *getAttrName(int attrnum, TableInfo *tblInfo);
269 static const char *fmtCopyColumnList(const TableInfo *ti, PQExpBuffer buffer);
270 static bool nonemptyReloptions(const char *reloptions);
271 static void appendReloptionsArrayAH(PQExpBuffer buffer, const char *reloptions,
272  const char *prefix, Archive *fout);
273 static char *get_synchronized_snapshot(Archive *fout);
274 static void setupDumpWorker(Archive *AHX);
275 static TableInfo *getRootTableInfo(TableInfo *tbinfo);
276 
277 
278 int
279 main(int argc, char **argv)
280 {
281  int c;
282  const char *filename = NULL;
283  const char *format = "p";
284  TableInfo *tblinfo;
285  int numTables;
286  DumpableObject **dobjs;
287  int numObjs;
288  DumpableObject *boundaryObjs;
289  int i;
290  int optindex;
291  RestoreOptions *ropt;
292  Archive *fout; /* the script file */
293  const char *dumpencoding = NULL;
294  const char *dumpsnapshot = NULL;
295  char *use_role = NULL;
296  int numWorkers = 1;
297  trivalue prompt_password = TRI_DEFAULT;
298  int compressLevel = -1;
299  int plainText = 0;
300  ArchiveFormat archiveFormat = archUnknown;
301  ArchiveMode archiveMode;
302 
303  static DumpOptions dopt;
304 
305  static struct option long_options[] = {
306  {"data-only", no_argument, NULL, 'a'},
307  {"blobs", no_argument, NULL, 'b'},
308  {"no-blobs", no_argument, NULL, 'B'},
309  {"clean", no_argument, NULL, 'c'},
310  {"create", no_argument, NULL, 'C'},
311  {"dbname", required_argument, NULL, 'd'},
312  {"file", required_argument, NULL, 'f'},
313  {"format", required_argument, NULL, 'F'},
314  {"host", required_argument, NULL, 'h'},
315  {"jobs", 1, NULL, 'j'},
316  {"no-reconnect", no_argument, NULL, 'R'},
317  {"oids", no_argument, NULL, 'o'},
318  {"no-owner", no_argument, NULL, 'O'},
319  {"port", required_argument, NULL, 'p'},
320  {"schema", required_argument, NULL, 'n'},
321  {"exclude-schema", required_argument, NULL, 'N'},
322  {"schema-only", no_argument, NULL, 's'},
323  {"superuser", required_argument, NULL, 'S'},
324  {"table", required_argument, NULL, 't'},
325  {"exclude-table", required_argument, NULL, 'T'},
326  {"no-password", no_argument, NULL, 'w'},
327  {"password", no_argument, NULL, 'W'},
328  {"username", required_argument, NULL, 'U'},
329  {"verbose", no_argument, NULL, 'v'},
330  {"no-privileges", no_argument, NULL, 'x'},
331  {"no-acl", no_argument, NULL, 'x'},
332  {"compress", required_argument, NULL, 'Z'},
333  {"encoding", required_argument, NULL, 'E'},
334  {"help", no_argument, NULL, '?'},
335  {"version", no_argument, NULL, 'V'},
336 
337  /*
338  * the following options don't have an equivalent short option letter
339  */
340  {"attribute-inserts", no_argument, &dopt.column_inserts, 1},
341  {"binary-upgrade", no_argument, &dopt.binary_upgrade, 1},
342  {"column-inserts", no_argument, &dopt.column_inserts, 1},
343  {"disable-dollar-quoting", no_argument, &dopt.disable_dollar_quoting, 1},
344  {"disable-triggers", no_argument, &dopt.disable_triggers, 1},
345  {"enable-row-security", no_argument, &dopt.enable_row_security, 1},
346  {"exclude-table-data", required_argument, NULL, 4},
347  {"if-exists", no_argument, &dopt.if_exists, 1},
348  {"inserts", no_argument, &dopt.dump_inserts, 1},
349  {"lock-wait-timeout", required_argument, NULL, 2},
350  {"no-tablespaces", no_argument, &dopt.outputNoTablespaces, 1},
351  {"quote-all-identifiers", no_argument, &quote_all_identifiers, 1},
352  {"load-via-partition-root", no_argument, &dopt.load_via_partition_root, 1},
353  {"role", required_argument, NULL, 3},
354  {"section", required_argument, NULL, 5},
355  {"serializable-deferrable", no_argument, &dopt.serializable_deferrable, 1},
356  {"snapshot", required_argument, NULL, 6},
357  {"strict-names", no_argument, &strict_names, 1},
358  {"use-set-session-authorization", no_argument, &dopt.use_setsessauth, 1},
359  {"no-publications", no_argument, &dopt.no_publications, 1},
360  {"no-security-labels", no_argument, &dopt.no_security_labels, 1},
361  {"no-synchronized-snapshots", no_argument, &dopt.no_synchronized_snapshots, 1},
362  {"no-unlogged-table-data", no_argument, &dopt.no_unlogged_table_data, 1},
363  {"no-subscriptions", no_argument, &dopt.no_subscriptions, 1},
364  {"no-sync", no_argument, NULL, 7},
365 
366  {NULL, 0, NULL, 0}
367  };
368 
369  set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_dump"));
370 
371  /*
372  * Initialize what we need for parallel execution, especially for thread
373  * support on Windows.
374  */
375  init_parallel_dump_utils();
376 
377  g_verbose = false;
378 
379  strcpy(g_comment_start, "-- ");
380  g_comment_end[0] = '\0';
381  strcpy(g_opaque_type, "opaque");
382 
383  progname = get_progname(argv[0]);
384 
385  if (argc > 1)
386  {
387  if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
388  {
389  help(progname);
390  exit_nicely(0);
391  }
392  if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
393  {
394  puts("pg_dump (PostgreSQL) " PG_VERSION);
395  exit_nicely(0);
396  }
397  }
398 
399  InitDumpOptions(&dopt);
400 
401  while ((c = getopt_long(argc, argv, "abBcCd:E:f:F:h:j:n:N:oOp:RsS:t:T:U:vwWxZ:",
402  long_options, &optindex)) != -1)
403  {
404  switch (c)
405  {
406  case 'a': /* Dump data only */
407  dopt.dataOnly = true;
408  break;
409 
410  case 'b': /* Dump blobs */
411  dopt.outputBlobs = true;
412  break;
413 
414  case 'B': /* Don't dump blobs */
415  dopt.dontOutputBlobs = true;
416  break;
417 
418  case 'c': /* clean (i.e., drop) schema prior to create */
419  dopt.outputClean = 1;
420  break;
421 
422  case 'C': /* Create DB */
423  dopt.outputCreateDB = 1;
424  break;
425 
426  case 'd': /* database name */
427  dopt.dbname = pg_strdup(optarg);
428  break;
429 
430  case 'E': /* Dump encoding */
431  dumpencoding = pg_strdup(optarg);
432  break;
433 
434  case 'f':
435  filename = pg_strdup(optarg);
436  break;
437 
438  case 'F':
439  format = pg_strdup(optarg);
440  break;
441 
442  case 'h': /* server host */
443  dopt.pghost = pg_strdup(optarg);
444  break;
445 
446  case 'j': /* number of dump jobs */
447  numWorkers = atoi(optarg);
448  break;
449 
450  case 'n': /* include schema(s) */
451  simple_string_list_append(&schema_include_patterns, optarg);
452  dopt.include_everything = false;
453  break;
454 
455  case 'N': /* exclude schema(s) */
456  simple_string_list_append(&schema_exclude_patterns, optarg);
457  break;
458 
459  case 'o': /* Dump oids */
460  dopt.oids = true;
461  break;
462 
463  case 'O': /* Don't reconnect to match owner */
464  dopt.outputNoOwner = 1;
465  break;
466 
467  case 'p': /* server port */
468  dopt.pgport = pg_strdup(optarg);
469  break;
470 
471  case 'R':
472  /* no-op, still accepted for backwards compatibility */
473  break;
474 
475  case 's': /* dump schema only */
476  dopt.schemaOnly = true;
477  break;
478 
479  case 'S': /* Username for superuser in plain text output */
480  dopt.outputSuperuser = pg_strdup(optarg);
481  break;
482 
483  case 't': /* include table(s) */
484  simple_string_list_append(&table_include_patterns, optarg);
485  dopt.include_everything = false;
486  break;
487 
488  case 'T': /* exclude table(s) */
489  simple_string_list_append(&table_exclude_patterns, optarg);
490  break;
491 
492  case 'U':
493  dopt.username = pg_strdup(optarg);
494  break;
495 
496  case 'v': /* verbose */
497  g_verbose = true;
498  break;
499 
500  case 'w':
501  prompt_password = TRI_NO;
502  break;
503 
504  case 'W':
505  prompt_password = TRI_YES;
506  break;
507 
508  case 'x': /* skip ACL dump */
509  dopt.aclsSkip = true;
510  break;
511 
512  case 'Z': /* Compression Level */
513  compressLevel = atoi(optarg);
514  if (compressLevel < 0 || compressLevel > 9)
515  {
516  write_msg(NULL, "compression level must be in range 0..9\n");
517  exit_nicely(1);
518  }
519  break;
520 
521  case 0:
522  /* This covers the long options. */
523  break;
524 
525  case 2: /* lock-wait-timeout */
526  dopt.lockWaitTimeout = pg_strdup(optarg);
527  break;
528 
529  case 3: /* SET ROLE */
530  use_role = pg_strdup(optarg);
531  break;
532 
533  case 4: /* exclude table(s) data */
534  simple_string_list_append(&tabledata_exclude_patterns, optarg);
535  break;
536 
537  case 5: /* section */
538  set_dump_section(optarg, &dopt.dumpSections);
539  break;
540 
541  case 6: /* snapshot */
542  dumpsnapshot = pg_strdup(optarg);
543  break;
544 
545  case 7: /* no-sync */
546  dosync = false;
547  break;
548 
549  default:
550  fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
551  exit_nicely(1);
552  }
553  }
554 
555  /*
556  * Non-option argument specifies database name as long as it wasn't
557  * already specified with -d / --dbname
558  */
559  if (optind < argc && dopt.dbname == NULL)
560  dopt.dbname = argv[optind++];
561 
562  /* Complain if any arguments remain */
563  if (optind < argc)
564  {
565  fprintf(stderr, _("%s: too many command-line arguments (first is \"%s\")\n"),
566  progname, argv[optind]);
567  fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
568  progname);
569  exit_nicely(1);
570  }
571 
572  /* --column-inserts implies --inserts */
573  if (dopt.column_inserts)
574  dopt.dump_inserts = 1;
575 
576  /*
577  * Binary upgrade mode implies dumping sequence data even in schema-only
578  * mode. This is not exposed as a separate option, but kept separate
579  * internally for clarity.
580  */
581  if (dopt.binary_upgrade)
582  dopt.sequence_data = 1;
583 
584  if (dopt.dataOnly && dopt.schemaOnly)
585  {
586  write_msg(NULL, "options -s/--schema-only and -a/--data-only cannot be used together\n");
587  exit_nicely(1);
588  }
589 
590  if (dopt.dataOnly && dopt.outputClean)
591  {
592  write_msg(NULL, "options -c/--clean and -a/--data-only cannot be used together\n");
593  exit_nicely(1);
594  }
595 
596  if (dopt.dump_inserts && dopt.oids)
597  {
598  write_msg(NULL, "options --inserts/--column-inserts and -o/--oids cannot be used together\n");
599  write_msg(NULL, "(The INSERT command cannot set OIDs.)\n");
600  exit_nicely(1);
601  }
602 
603  if (dopt.if_exists && !dopt.outputClean)
604  exit_horribly(NULL, "option --if-exists requires option -c/--clean\n");
605 
606  /* Identify archive format to emit */
607  archiveFormat = parseArchiveFormat(format, &archiveMode);
608 
609  /* archiveFormat specific setup */
610  if (archiveFormat == archNull)
611  plainText = 1;
612 
613  /* Custom and directory formats are compressed by default, others not */
614  if (compressLevel == -1)
615  {
616 #ifdef HAVE_LIBZ
617  if (archiveFormat == archCustom || archiveFormat == archDirectory)
618  compressLevel = Z_DEFAULT_COMPRESSION;
619  else
620 #endif
621  compressLevel = 0;
622  }
623 
624 #ifndef HAVE_LIBZ
625  if (compressLevel != 0)
626  write_msg(NULL, "WARNING: requested compression not available in this "
627  "installation -- archive will be uncompressed\n");
628  compressLevel = 0;
629 #endif
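/*
 * Illustrative invocations showing how these format/compression defaults
 * play out (file and database names are hypothetical, not from this file):
 *
 *   pg_dump -Fp mydb > mydb.sql        -- plain text; never compressed here
 *   pg_dump -Fc -f mydb.dump mydb      -- custom format; compressed by default
 *                                         when built with zlib
 *   pg_dump -Fd -j 4 -f dumpdir mydb   -- directory format; allows -j workers
 *   pg_dump -Fc -Z 0 -f mydb.dump mydb -- explicitly disable compression
 */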
630 
631  /*
632  * On Windows we can only have at most MAXIMUM_WAIT_OBJECTS (= 64 usually)
633  * parallel jobs because that's the maximum limit for the
634  * WaitForMultipleObjects() call.
635  */
636  if (numWorkers <= 0
637 #ifdef WIN32
638  || numWorkers > MAXIMUM_WAIT_OBJECTS
639 #endif
640  )
641  exit_horribly(NULL, "invalid number of parallel jobs\n");
642 
643  /* Parallel backup only in the directory archive format so far */
644  if (archiveFormat != archDirectory && numWorkers > 1)
645  exit_horribly(NULL, "parallel backup only supported by the directory format\n");
646 
647  /* Open the output file */
648  fout = CreateArchive(filename, archiveFormat, compressLevel, dosync,
649  archiveMode, setupDumpWorker);
650 
651  /* Make dump options accessible right away */
652  SetArchiveOptions(fout, &dopt, NULL);
653 
654  /* Register the cleanup hook */
655  on_exit_close_archive(fout);
656 
657  /* Let the archiver know how noisy to be */
658  fout->verbose = g_verbose;
659 
660  /*
661  * We allow the server to be back to 8.0, and up to any minor release of
662  * our own major version. (See also version check in pg_dumpall.c.)
663  */
664  fout->minRemoteVersion = 80000;
665  fout->maxRemoteVersion = (PG_VERSION_NUM / 100) * 100 + 99;
666 
667  fout->numWorkers = numWorkers;
668 
669  /*
670  * Open the database using the Archiver, so it knows about it. Errors mean
671  * death.
672  */
673  ConnectDatabase(fout, dopt.dbname, dopt.pghost, dopt.pgport, dopt.username, prompt_password);
674  setup_connection(fout, dumpencoding, dumpsnapshot, use_role);
675 
676  /*
677  * Disable security label support if server version < v9.1.x (prevents
678  * access to nonexistent pg_seclabel catalog)
679  */
680  if (fout->remoteVersion < 90100)
681  dopt.no_security_labels = 1;
682 
683  /*
684  * On hot standbys, never try to dump unlogged table data, since it will
685  * just throw an error.
686  */
687  if (fout->isStandby)
688  dopt.no_unlogged_table_data = true;
689 
690  /* Select the appropriate subquery to convert user IDs to names */
691  if (fout->remoteVersion >= 80100)
692  username_subquery = "SELECT rolname FROM pg_catalog.pg_roles WHERE oid =";
693  else
694  username_subquery = "SELECT usename FROM pg_catalog.pg_user WHERE usesysid =";
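/*
 * Illustrative completion of the fragment above (hypothetical caller): the
 * callers append an OID column from the catalog row being dumped, e.g.
 *   (SELECT rolname FROM pg_catalog.pg_roles WHERE oid = datdba) AS dba
 */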
695 
696  /* check the version for the synchronized snapshots feature */
697  if (numWorkers > 1 && fout->remoteVersion < 90200
698  && !dopt.no_synchronized_snapshots)
699  exit_horribly(NULL,
700  "Synchronized snapshots are not supported by this server version.\n"
701  "Run with --no-synchronized-snapshots instead if you do not need\n"
702  "synchronized snapshots.\n");
703 
704  /* check the version when a snapshot is explicitly specified by user */
705  if (dumpsnapshot && fout->remoteVersion < 90200)
706  exit_horribly(NULL,
707  "Exported snapshots are not supported by this server version.\n");
708 
709  /*
710  * Find the last built-in OID, if needed (prior to 8.1)
711  *
712  * With 8.1 and above, we can just use FirstNormalObjectId - 1.
713  */
714  if (fout->remoteVersion < 80100)
715  g_last_builtin_oid = findLastBuiltinOid_V71(fout,
716  PQdb(GetConnection(fout)));
717  else
718  g_last_builtin_oid = FirstNormalObjectId - 1;
719 
720  if (g_verbose)
721  write_msg(NULL, "last built-in OID is %u\n", g_last_builtin_oid);
722 
723  /* Expand schema selection patterns into OID lists */
724  if (schema_include_patterns.head != NULL)
725  {
726  expand_schema_name_patterns(fout, &schema_include_patterns,
727  &schema_include_oids,
728  strict_names);
729  if (schema_include_oids.head == NULL)
730  exit_horribly(NULL, "no matching schemas were found\n");
731  }
732  expand_schema_name_patterns(fout, &schema_exclude_patterns,
733  &schema_exclude_oids,
734  false);
735  /* non-matching exclusion patterns aren't an error */
736 
737  /* Expand table selection patterns into OID lists */
738  if (table_include_patterns.head != NULL)
739  {
740  expand_table_name_patterns(fout, &table_include_patterns,
741  &table_include_oids,
742  strict_names);
743  if (table_include_oids.head == NULL)
744  exit_horribly(NULL, "no matching tables were found\n");
745  }
746  expand_table_name_patterns(fout, &table_exclude_patterns,
747  &table_exclude_oids,
748  false);
749 
750  expand_table_name_patterns(fout, &tabledata_exclude_patterns,
751  &tabledata_exclude_oids,
752  false);
753 
754  /* non-matching exclusion patterns aren't an error */
755 
756  /*
757  * Dumping blobs is the default for dumps where an inclusion switch is not
758  * used (an "include everything" dump). -B can be used to exclude blobs
759  * from those dumps. -b can be used to include blobs even when an
760  * inclusion switch is used.
761  *
762  * -s means "schema only" and blobs are data, not schema, so we never
763  * include blobs when -s is used.
764  */
765  if (dopt.include_everything && !dopt.schemaOnly && !dopt.dontOutputBlobs)
766  dopt.outputBlobs = true;
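 /*
  * Illustrative effect (hypothetical command lines): "pg_dump mydb" includes
  * blobs; "pg_dump -n myschema mydb" excludes them unless -b is also given;
  * "pg_dump -B mydb" suppresses them even for an "include everything" dump.
  */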
767 
768  /*
769  * Now scan the database and create DumpableObject structs for all the
770  * objects we intend to dump.
771  */
772  tblinfo = getSchemaData(fout, &numTables);
773 
774  if (fout->remoteVersion < 80400)
775  guessConstraintInheritance(tblinfo, numTables);
776 
777  if (!dopt.schemaOnly)
778  {
779  getTableData(&dopt, tblinfo, numTables, dopt.oids, 0);
780  buildMatViewRefreshDependencies(fout);
781  if (dopt.dataOnly)
782  getTableDataFKConstraints();
783  }
784 
785  if (dopt.schemaOnly && dopt.sequence_data)
786  getTableData(&dopt, tblinfo, numTables, dopt.oids, RELKIND_SEQUENCE);
787 
788  /*
789  * In binary-upgrade mode, we do not have to worry about the actual blob
790  * data or the associated metadata that resides in the pg_largeobject and
791  * pg_largeobject_metadata tables, respectively.
792  *
793  * However, we do need to collect blob information as there may be
794  * comments or other information on blobs that we do need to dump out.
795  */
796  if (dopt.outputBlobs || dopt.binary_upgrade)
797  getBlobs(fout);
798 
799  /*
800  * Collect dependency data to assist in ordering the objects.
801  */
802  getDependencies(fout);
803 
804  /* Lastly, create dummy objects to represent the section boundaries */
805  boundaryObjs = createBoundaryObjects();
806 
807  /* Get pointers to all the known DumpableObjects */
808  getDumpableObjects(&dobjs, &numObjs);
809 
810  /*
811  * Add dummy dependencies to enforce the dump section ordering.
812  */
813  addBoundaryDependencies(dobjs, numObjs, boundaryObjs);
814 
815  /*
816  * Sort the objects into a safe dump order (no forward references).
817  *
818  * We rely on dependency information to help us determine a safe order, so
819  * the initial sort is mostly for cosmetic purposes: we sort by name to
820  * ensure that logically identical schemas will dump identically.
821  */
822  sortDumpableObjectsByTypeName(dobjs, numObjs);
823 
824  /* If we do a parallel dump, we want the largest tables to go first */
825  if (archiveFormat == archDirectory && numWorkers > 1)
826  sortDataAndIndexObjectsBySize(dobjs, numObjs);
827 
828  sortDumpableObjects(dobjs, numObjs,
829  boundaryObjs[0].dumpId, boundaryObjs[1].dumpId);
830 
831  /*
832  * Create archive TOC entries for all the objects to be dumped, in a safe
833  * order.
834  */
835 
836  /* First the special ENCODING and STDSTRINGS entries. */
837  dumpEncoding(fout);
838  dumpStdStrings(fout);
839 
840  /* The database item is always next, unless we don't want it at all */
841  if (dopt.include_everything && !dopt.dataOnly)
842  dumpDatabase(fout);
843 
844  /* Now the rearrangeable objects. */
845  for (i = 0; i < numObjs; i++)
846  dumpDumpableObject(fout, dobjs[i]);
847 
848  /*
849  * Set up options info to ensure we dump what we want.
850  */
851  ropt = NewRestoreOptions();
852  ropt->filename = filename;
853 
854  /* if you change this list, see dumpOptionsFromRestoreOptions */
855  ropt->dropSchema = dopt.outputClean;
856  ropt->dataOnly = dopt.dataOnly;
857  ropt->schemaOnly = dopt.schemaOnly;
858  ropt->if_exists = dopt.if_exists;
859  ropt->column_inserts = dopt.column_inserts;
860  ropt->dumpSections = dopt.dumpSections;
861  ropt->aclsSkip = dopt.aclsSkip;
862  ropt->superuser = dopt.outputSuperuser;
863  ropt->createDB = dopt.outputCreateDB;
864  ropt->noOwner = dopt.outputNoOwner;
865  ropt->noTablespace = dopt.outputNoTablespaces;
866  ropt->disable_triggers = dopt.disable_triggers;
867  ropt->use_setsessauth = dopt.use_setsessauth;
868  ropt->disable_dollar_quoting = dopt.disable_dollar_quoting;
869  ropt->dump_inserts = dopt.dump_inserts;
870  ropt->no_publications = dopt.no_publications;
871  ropt->no_security_labels = dopt.no_security_labels;
872  ropt->no_subscriptions = dopt.no_subscriptions;
873  ropt->lockWaitTimeout = dopt.lockWaitTimeout;
874  ropt->include_everything = dopt.include_everything;
875  ropt->enable_row_security = dopt.enable_row_security;
876  ropt->sequence_data = dopt.sequence_data;
877  ropt->binary_upgrade = dopt.binary_upgrade;
878 
879  if (compressLevel == -1)
880  ropt->compression = 0;
881  else
882  ropt->compression = compressLevel;
883 
884  ropt->suppressDumpWarnings = true; /* We've already shown them */
885 
886  SetArchiveOptions(fout, &dopt, ropt);
887 
888  /* Mark which entries should be output */
889  ProcessArchiveRestoreOptions(fout);
890 
891  /*
892  * The archive's TOC entries are now marked as to which ones will actually
893  * be output, so we can set up their dependency lists properly. This isn't
894  * necessary for plain-text output, though.
895  */
896  if (!plainText)
897  BuildArchiveDependencies(fout);
898 
899  /*
900  * And finally we can do the actual output.
901  *
902  * Note: for non-plain-text output formats, the output file is written
903  * inside CloseArchive(). This is, um, bizarre; but not worth changing
904  * right now.
905  */
906  if (plainText)
907  RestoreArchive(fout);
908 
909  CloseArchive(fout);
910 
911  exit_nicely(0);
912 }
913 
914 
915 static void
916 help(const char *progname)
917 {
918  printf(_("%s dumps a database as a text file or to other formats.\n\n"), progname);
919  printf(_("Usage:\n"));
920  printf(_(" %s [OPTION]... [DBNAME]\n"), progname);
921 
922  printf(_("\nGeneral options:\n"));
923  printf(_(" -f, --file=FILENAME output file or directory name\n"));
924  printf(_(" -F, --format=c|d|t|p output file format (custom, directory, tar,\n"
925  " plain text (default))\n"));
926  printf(_(" -j, --jobs=NUM use this many parallel jobs to dump\n"));
927  printf(_(" -v, --verbose verbose mode\n"));
928  printf(_(" -V, --version output version information, then exit\n"));
929  printf(_(" -Z, --compress=0-9 compression level for compressed formats\n"));
930  printf(_(" --lock-wait-timeout=TIMEOUT fail after waiting TIMEOUT for a table lock\n"));
931  printf(_(" --no-sync do not wait for changes to be written safely to disk\n"));
932  printf(_(" -?, --help show this help, then exit\n"));
933 
934  printf(_("\nOptions controlling the output content:\n"));
935  printf(_(" -a, --data-only dump only the data, not the schema\n"));
936  printf(_(" -b, --blobs include large objects in dump\n"));
937  printf(_(" -B, --no-blobs exclude large objects in dump\n"));
938  printf(_(" -c, --clean clean (drop) database objects before recreating\n"));
939  printf(_(" -C, --create include commands to create database in dump\n"));
940  printf(_(" -E, --encoding=ENCODING dump the data in encoding ENCODING\n"));
941  printf(_(" -n, --schema=SCHEMA dump the named schema(s) only\n"));
942  printf(_(" -N, --exclude-schema=SCHEMA do NOT dump the named schema(s)\n"));
943  printf(_(" -o, --oids include OIDs in dump\n"));
944  printf(_(" -O, --no-owner skip restoration of object ownership in\n"
945  " plain-text format\n"));
946  printf(_(" -s, --schema-only dump only the schema, no data\n"));
947  printf(_(" -S, --superuser=NAME superuser user name to use in plain-text format\n"));
948  printf(_(" -t, --table=TABLE dump the named table(s) only\n"));
949  printf(_(" -T, --exclude-table=TABLE do NOT dump the named table(s)\n"));
950  printf(_(" -x, --no-privileges do not dump privileges (grant/revoke)\n"));
951  printf(_(" --binary-upgrade for use by upgrade utilities only\n"));
952  printf(_(" --column-inserts dump data as INSERT commands with column names\n"));
953  printf(_(" --disable-dollar-quoting disable dollar quoting, use SQL standard quoting\n"));
954  printf(_(" --disable-triggers disable triggers during data-only restore\n"));
955  printf(_(" --enable-row-security enable row security (dump only content user has\n"
956  " access to)\n"));
957  printf(_(" --exclude-table-data=TABLE do NOT dump data for the named table(s)\n"));
958  printf(_(" --if-exists use IF EXISTS when dropping objects\n"));
959  printf(_(" --inserts dump data as INSERT commands, rather than COPY\n"));
960  printf(_(" --no-publications do not dump publications\n"));
961  printf(_(" --no-security-labels do not dump security label assignments\n"));
962  printf(_(" --no-subscriptions do not dump subscriptions\n"));
963  printf(_(" --no-synchronized-snapshots do not use synchronized snapshots in parallel jobs\n"));
964  printf(_(" --no-tablespaces do not dump tablespace assignments\n"));
965  printf(_(" --no-unlogged-table-data do not dump unlogged table data\n"));
966  printf(_(" --quote-all-identifiers quote all identifiers, even if not key words\n"));
967  printf(_(" --load-via-partition-root load partitions via the root table\n"));
968  printf(_(" --section=SECTION dump named section (pre-data, data, or post-data)\n"));
969  printf(_(" --serializable-deferrable wait until the dump can run without anomalies\n"));
970  printf(_(" --snapshot=SNAPSHOT use given snapshot for the dump\n"));
971  printf(_(" --strict-names require table and/or schema include patterns to\n"
972  " match at least one entity each\n"));
973  printf(_(" --use-set-session-authorization\n"
974  " use SET SESSION AUTHORIZATION commands instead of\n"
975  " ALTER OWNER commands to set ownership\n"));
976 
977  printf(_("\nConnection options:\n"));
978  printf(_(" -d, --dbname=DBNAME database to dump\n"));
979  printf(_(" -h, --host=HOSTNAME database server host or socket directory\n"));
980  printf(_(" -p, --port=PORT database server port number\n"));
981  printf(_(" -U, --username=NAME connect as specified database user\n"));
982  printf(_(" -w, --no-password never prompt for password\n"));
983  printf(_(" -W, --password force password prompt (should happen automatically)\n"));
984  printf(_(" --role=ROLENAME do SET ROLE before dump\n"));
985 
986  printf(_("\nIf no database name is supplied, then the PGDATABASE environment\n"
987  "variable value is used.\n\n"));
988  printf(_("Report bugs to <pgsql-bugs@postgresql.org>.\n"));
989 }
990 
991 static void
992 setup_connection(Archive *AH, const char *dumpencoding,
993  const char *dumpsnapshot, char *use_role)
994 {
995  DumpOptions *dopt = AH->dopt;
996  PGconn *conn = GetConnection(AH);
997  const char *std_strings;
998 
999  /*
1000  * Set the client encoding if requested.
1001  */
1002  if (dumpencoding)
1003  {
1004  if (PQsetClientEncoding(conn, dumpencoding) < 0)
1005  exit_horribly(NULL, "invalid client encoding \"%s\" specified\n",
1006  dumpencoding);
1007  }
1008 
1009  /*
1010  * Get the active encoding and the standard_conforming_strings setting, so
1011  * we know how to escape strings.
1012  */
1013  AH->encoding = PQclientEncoding(conn);
1014 
1015  std_strings = PQparameterStatus(conn, "standard_conforming_strings");
1016  AH->std_strings = (std_strings && strcmp(std_strings, "on") == 0);
1017 
1018  /*
1019  * Set the role if requested. In a parallel dump worker, we'll be passed
1020  * use_role == NULL, but AH->use_role is already set (if user specified it
1021  * originally) and we should use that.
1022  */
1023  if (!use_role && AH->use_role)
1024  use_role = AH->use_role;
1025 
1026  /* Set the role if requested */
1027  if (use_role && AH->remoteVersion >= 80100)
1028  {
1029  PQExpBuffer query = createPQExpBuffer();
1030 
1031  appendPQExpBuffer(query, "SET ROLE %s", fmtId(use_role));
1032  ExecuteSqlStatement(AH, query->data);
1033  destroyPQExpBuffer(query);
1034 
1035  /* save it for possible later use by parallel workers */
1036  if (!AH->use_role)
1037  AH->use_role = pg_strdup(use_role);
1038  }
1039 
1040  /* Set the datestyle to ISO to ensure the dump's portability */
1041  ExecuteSqlStatement(AH, "SET DATESTYLE = ISO");
1042 
1043  /* Likewise, avoid using sql_standard intervalstyle */
1044  if (AH->remoteVersion >= 80400)
1045  ExecuteSqlStatement(AH, "SET INTERVALSTYLE = POSTGRES");
1046 
1047  /*
1048  * Set extra_float_digits so that we can dump float data exactly (given
1049  * correctly implemented float I/O code, anyway)
1050  */
1051  if (AH->remoteVersion >= 90000)
1052  ExecuteSqlStatement(AH, "SET extra_float_digits TO 3");
1053  else
1054  ExecuteSqlStatement(AH, "SET extra_float_digits TO 2");
1055 
1056  /*
1057  * If synchronized scanning is supported, disable it, to prevent
1058  * unpredictable changes in row ordering across a dump and reload.
1059  */
1060  if (AH->remoteVersion >= 80300)
1061  ExecuteSqlStatement(AH, "SET synchronize_seqscans TO off");
1062 
1063  /*
1064  * Disable timeouts if supported.
1065  */
1066  ExecuteSqlStatement(AH, "SET statement_timeout = 0");
1067  if (AH->remoteVersion >= 90300)
1068  ExecuteSqlStatement(AH, "SET lock_timeout = 0");
1069  if (AH->remoteVersion >= 90600)
1070  ExecuteSqlStatement(AH, "SET idle_in_transaction_session_timeout = 0");
1071 
1072  /*
1073  * Quote all identifiers, if requested.
1074  */
1075  if (quote_all_identifiers && AH->remoteVersion >= 90100)
1076  ExecuteSqlStatement(AH, "SET quote_all_identifiers = true");
1077 
1078  /*
1079  * Adjust row-security mode, if supported.
1080  */
1081  if (AH->remoteVersion >= 90500)
1082  {
1083  if (dopt->enable_row_security)
1084  ExecuteSqlStatement(AH, "SET row_security = on");
1085  else
1086  ExecuteSqlStatement(AH, "SET row_security = off");
1087  }
1088 
1089  /*
1090  * Start transaction-snapshot mode transaction to dump consistent data.
1091  */
1092  ExecuteSqlStatement(AH, "BEGIN");
1093  if (AH->remoteVersion >= 90100)
1094  {
1095  /*
1096  * To support the combination of serializable_deferrable with the jobs
1097  * option we use REPEATABLE READ for the worker connections that are
1098  * passed a snapshot. As long as the snapshot is acquired in a
1099  * SERIALIZABLE, READ ONLY, DEFERRABLE transaction, its use within a
1100  * REPEATABLE READ transaction provides the appropriate integrity
1101  * guarantees. This is a kluge, but safe for back-patching.
1102  */
1103  if (dopt->serializable_deferrable && AH->sync_snapshot_id == NULL)
1104  ExecuteSqlStatement(AH,
1105  "SET TRANSACTION ISOLATION LEVEL "
1106  "SERIALIZABLE, READ ONLY, DEFERRABLE");
1107  else
1108  ExecuteSqlStatement(AH,
1109  "SET TRANSACTION ISOLATION LEVEL "
1110  "REPEATABLE READ, READ ONLY");
1111  }
1112  else
1113  {
1114  ExecuteSqlStatement(AH,
1115  "SET TRANSACTION ISOLATION LEVEL "
1116  "SERIALIZABLE, READ ONLY");
1117  }
1118 
1119  /*
1120  * If user specified a snapshot to use, select that. In a parallel dump
1121  * worker, we'll be passed dumpsnapshot == NULL, but AH->sync_snapshot_id
1122  * is already set (if the server can handle it) and we should use that.
1123  */
1124  if (dumpsnapshot)
1125  AH->sync_snapshot_id = pg_strdup(dumpsnapshot);
1126 
1127  if (AH->sync_snapshot_id)
1128  {
1129  PQExpBuffer query = createPQExpBuffer();
1130 
1131  appendPQExpBuffer(query, "SET TRANSACTION SNAPSHOT ");
1132  appendStringLiteralConn(query, AH->sync_snapshot_id, conn);
1133  ExecuteSqlStatement(AH, query->data);
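 /*
  * Illustrative SQL issued here by a worker (the snapshot identifier is
  * hypothetical; the real one comes from pg_export_snapshot() on the master):
  *   SET TRANSACTION SNAPSHOT '00000003-0000001B-1'
  */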
1134  destroyPQExpBuffer(query);
1135  }
1136  else if (AH->numWorkers > 1 &&
1137  AH->remoteVersion >= 90200 &&
1138  !dopt->no_synchronized_snapshots)
1139  {
1140  if (AH->isStandby && AH->remoteVersion < 100000)
1141  exit_horribly(NULL,
1142  "Synchronized snapshots on standby servers are not supported by this server version.\n"
1143  "Run with --no-synchronized-snapshots instead if you do not need\n"
1144  "synchronized snapshots.\n");
1145 
1146 
1147  AH->sync_snapshot_id = get_synchronized_snapshot(AH);
1148  }
1149 }
1150 
1151 /* Set up connection for a parallel worker process */
1152 static void
1153 setupDumpWorker(Archive *AH)
1154 {
1155  /*
1156  * We want to re-select all the same values the master connection is
1157  * using. We'll have inherited directly-usable values in
1158  * AH->sync_snapshot_id and AH->use_role, but we need to translate the
1159  * inherited encoding value back to a string to pass to setup_connection.
1160  */
1161  setup_connection(AH,
1162  pg_encoding_to_char(AH->encoding),
1163  NULL,
1164  NULL);
1165 }
1166 
1167 static char *
1168 get_synchronized_snapshot(Archive *fout)
1169 {
1170  char *query = "SELECT pg_catalog.pg_export_snapshot()";
1171  char *result;
1172  PGresult *res;
1173 
1174  res = ExecuteSqlQueryForSingleRow(fout, query);
1175  result = pg_strdup(PQgetvalue(res, 0, 0));
1176  PQclear(res);
1177 
1178  return result;
1179 }
1180 
1181 static ArchiveFormat
1182 parseArchiveFormat(const char *format, ArchiveMode *mode)
1183 {
1184  ArchiveFormat archiveFormat;
1185 
1186  *mode = archModeWrite;
1187 
1188  if (pg_strcasecmp(format, "a") == 0 || pg_strcasecmp(format, "append") == 0)
1189  {
1190  /* This is used by pg_dumpall, and is not documented */
1191  archiveFormat = archNull;
1192  *mode = archModeAppend;
1193  }
1194  else if (pg_strcasecmp(format, "c") == 0)
1195  archiveFormat = archCustom;
1196  else if (pg_strcasecmp(format, "custom") == 0)
1197  archiveFormat = archCustom;
1198  else if (pg_strcasecmp(format, "d") == 0)
1199  archiveFormat = archDirectory;
1200  else if (pg_strcasecmp(format, "directory") == 0)
1201  archiveFormat = archDirectory;
1202  else if (pg_strcasecmp(format, "p") == 0)
1203  archiveFormat = archNull;
1204  else if (pg_strcasecmp(format, "plain") == 0)
1205  archiveFormat = archNull;
1206  else if (pg_strcasecmp(format, "t") == 0)
1207  archiveFormat = archTar;
1208  else if (pg_strcasecmp(format, "tar") == 0)
1209  archiveFormat = archTar;
1210  else
1211  exit_horribly(NULL, "invalid output format \"%s\" specified\n", format);
1212  return archiveFormat;
1213 }
1214 
1215 /*
1216  * Find the OIDs of all schemas matching the given list of patterns,
1217  * and append them to the given OID list.
1218  */
1219 static void
1220 expand_schema_name_patterns(Archive *fout,
1221  SimpleStringList *patterns,
1222  SimpleOidList *oids,
1223  bool strict_names)
1224 {
1225  PQExpBuffer query;
1226  PGresult *res;
1227  SimpleStringListCell *cell;
1228  int i;
1229 
1230  if (patterns->head == NULL)
1231  return; /* nothing to do */
1232 
1233  query = createPQExpBuffer();
1234 
1235  /*
1236  * The loop below runs multiple SELECTs, which might sometimes result in
1237  * duplicate entries in the OID list, but we don't care.
1238  */
1239 
1240  for (cell = patterns->head; cell; cell = cell->next)
1241  {
1242  appendPQExpBuffer(query,
1243  "SELECT oid FROM pg_catalog.pg_namespace n\n");
1244  processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1245  false, NULL, "n.nspname", NULL, NULL);
1246 
1247  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1248  if (strict_names && PQntuples(res) == 0)
1249  exit_horribly(NULL, "no matching schemas were found for pattern \"%s\"\n", cell->val);
1250 
1251  for (i = 0; i < PQntuples(res); i++)
1252  {
1253  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1254  }
1255 
1256  PQclear(res);
1257  resetPQExpBuffer(query);
1258  }
1259 
1260  destroyPQExpBuffer(query);
1261 }
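/*
 * Illustrative query produced by the loop above for a hypothetical
 * "-n 'foo*'" switch (processSQLNamePattern turns the shell-style pattern
 * into a regular expression), roughly:
 *
 *   SELECT oid FROM pg_catalog.pg_namespace n
 *   WHERE n.nspname ~ '^(foo.*)$'
 */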
1262 
1263 /*
1264  * Find the OIDs of all tables matching the given list of patterns,
1265  * and append them to the given OID list.
1266  */
1267 static void
1268 expand_table_name_patterns(Archive *fout,
1269  SimpleStringList *patterns, SimpleOidList *oids,
1270  bool strict_names)
1271 {
1272  PQExpBuffer query;
1273  PGresult *res;
1274  SimpleStringListCell *cell;
1275  int i;
1276 
1277  if (patterns->head == NULL)
1278  return; /* nothing to do */
1279 
1280  query = createPQExpBuffer();
1281 
1282  /*
1283  * this might sometimes result in duplicate entries in the OID list, but
1284  * we don't care.
1285  */
1286 
1287  for (cell = patterns->head; cell; cell = cell->next)
1288  {
1289  appendPQExpBuffer(query,
1290  "SELECT c.oid"
1291  "\nFROM pg_catalog.pg_class c"
1292  "\n LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace"
1293  "\nWHERE c.relkind in ('%c', '%c', '%c', '%c', '%c', '%c')\n",
1294  RELKIND_RELATION, RELKIND_SEQUENCE, RELKIND_VIEW,
1295  RELKIND_MATVIEW, RELKIND_FOREIGN_TABLE,
1296  RELKIND_PARTITIONED_TABLE);
1297  processSQLNamePattern(GetConnection(fout), query, cell->val, true,
1298  false, "n.nspname", "c.relname", NULL,
1299  "pg_catalog.pg_table_is_visible(c.oid)");
1300 
1301  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1302  if (strict_names && PQntuples(res) == 0)
1303  exit_horribly(NULL, "no matching tables were found for pattern \"%s\"\n", cell->val);
1304 
1305  for (i = 0; i < PQntuples(res); i++)
1306  {
1307  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1308  }
1309 
1310  PQclear(res);
1311  resetPQExpBuffer(query);
1312  }
1313 
1314  destroyPQExpBuffer(query);
1315 }
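/*
 * Illustrative query for a hypothetical "-t 'public.orders*'" switch: the
 * qualified pattern is split on the dot, so both the schema and the relation
 * name are constrained, roughly:
 *
 *   SELECT c.oid
 *   FROM pg_catalog.pg_class c
 *     LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
 *   WHERE c.relkind in ('r', 'S', 'v', 'm', 'f', 'p')
 *     AND n.nspname ~ '^(public)$'
 *     AND c.relname ~ '^(orders.*)$'
 */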
1316 
1317 /*
1318  * checkExtensionMembership
1319  * Determine whether object is an extension member, and if so,
1320  * record an appropriate dependency and set the object's dump flag.
1321  *
1322  * It's important to call this for each object that could be an extension
1323  * member. Generally, we integrate this with determining the object's
1324  * to-be-dumped-ness, since extension membership overrides other rules for that.
1325  *
1326  * Returns true if object is an extension member, else false.
1327  */
1328 static bool
1329 checkExtensionMembership(DumpableObject *dobj, Archive *fout)
1330 {
1331  ExtensionInfo *ext = findOwningExtension(dobj->catId);
1332 
1333  if (ext == NULL)
1334  return false;
1335 
1336  dobj->ext_member = true;
1337 
1338  /* Record dependency so that getDependencies needn't deal with that */
1339  addObjectDependency(dobj, ext->dobj.dumpId);
1340 
1341  /*
1342  * In 9.6 and above, mark the member object to have any non-initial ACL,
1343  * policies, and security labels dumped.
1344  *
1345  * Note that any initial ACLs (see pg_init_privs) will be removed when we
1346  * extract the information about the object. We don't provide support for
1347  * initial policies and security labels and it seems unlikely for those to
1348  * ever exist, but we may have to revisit this later.
1349  *
1350  * Prior to 9.6, we do not include any extension member components.
1351  *
1352  * In binary upgrades, we still dump all components of the members
1353  * individually, since the idea is to exactly reproduce the database
1354  * contents rather than replace the extension contents with something
1355  * different.
1356  */
1357  if (fout->dopt->binary_upgrade)
1358  dobj->dump = ext->dobj.dump;
1359  else
1360  {
1361  if (fout->remoteVersion < 90600)
1362  dobj->dump = DUMP_COMPONENT_NONE;
1363  else
1364  dobj->dump = ext->dobj.dump_contains & (DUMP_COMPONENT_ACL |
1365  DUMP_COMPONENT_SECLABEL |
1366  DUMP_COMPONENT_POLICY);
1367  }
1368 
1369  return true;
1370 }
1371 
1372 /*
1373  * selectDumpableNamespace: policy-setting subroutine
1374  * Mark a namespace as to be dumped or not
1375  */
1376 static void
1377 selectDumpableNamespace(NamespaceInfo *nsinfo, Archive *fout)
1378 {
1379  /*
1380  * If specific tables are being dumped, do not dump any complete
1381  * namespaces. If specific namespaces are being dumped, dump just those
1382  * namespaces. Otherwise, dump all non-system namespaces.
1383  */
1384  if (table_include_oids.head != NULL)
1385  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1386  else if (schema_include_oids.head != NULL)
1387  nsinfo->dobj.dump_contains = nsinfo->dobj.dump =
1388  simple_oid_list_member(&schema_include_oids,
1389  nsinfo->dobj.catId.oid) ?
1390  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1391  else if (fout->remoteVersion >= 90600 &&
1392  strcmp(nsinfo->dobj.name, "pg_catalog") == 0)
1393  {
1394  /*
1395  * In 9.6 and above, we dump out any ACLs defined in pg_catalog, if
1396  * they are interesting (and not the original ACLs which were set at
1397  * initdb time, see pg_init_privs).
1398  */
1399  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
1400  }
1401  else if (strncmp(nsinfo->dobj.name, "pg_", 3) == 0 ||
1402  strcmp(nsinfo->dobj.name, "information_schema") == 0)
1403  {
1404  /* Other system schemas don't get dumped */
1405  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1406  }
1407  else
1408  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ALL;
1409 
1410  /*
1411  * In any case, a namespace can be excluded by an exclusion switch
1412  */
1413  if (nsinfo->dobj.dump_contains &&
1414  simple_oid_list_member(&schema_exclude_oids,
1415  nsinfo->dobj.catId.oid))
1416  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1417 
1418  /*
1419  * If the schema belongs to an extension, allow extension membership to
1420  * override the dump decision for the schema itself. However, this does
1421  * not change dump_contains, so this won't change what we do with objects
1422  * within the schema. (If they belong to the extension, they'll get
1423  * suppressed by it, otherwise not.)
1424  */
1425  (void) checkExtensionMembership(&nsinfo->dobj, fout);
1426 }
1427 
1428 /*
1429  * selectDumpableTable: policy-setting subroutine
1430  * Mark a table as to be dumped or not
1431  */
1432 static void
1433 selectDumpableTable(TableInfo *tbinfo, Archive *fout)
1434 {
1435  if (checkExtensionMembership(&tbinfo->dobj, fout))
1436  return; /* extension membership overrides all else */
1437 
1438  /*
1439  * If specific tables are being dumped, dump just those tables; else, dump
1440  * according to the parent namespace's dump flag.
1441  */
1442  if (table_include_oids.head != NULL)
1443  tbinfo->dobj.dump = simple_oid_list_member(&table_include_oids,
1444  tbinfo->dobj.catId.oid) ?
1445  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1446  else
1447  tbinfo->dobj.dump = tbinfo->dobj.namespace->dobj.dump_contains;
1448 
1449  /*
1450  * In any case, a table can be excluded by an exclusion switch
1451  */
1452  if (tbinfo->dobj.dump &&
1453  simple_oid_list_member(&table_exclude_oids,
1454  tbinfo->dobj.catId.oid))
1455  tbinfo->dobj.dump = DUMP_COMPONENT_NONE;
1456 }
1457 
1458 /*
1459  * selectDumpableType: policy-setting subroutine
1460  * Mark a type as to be dumped or not
1461  *
1462  * If it's a table's rowtype or an autogenerated array type, we also apply a
1463  * special type code to facilitate sorting into the desired order. (We don't
1464  * want to consider those to be ordinary types because that would bring tables
1465  * up into the datatype part of the dump order.) We still set the object's
1466  * dump flag; that's not going to cause the dummy type to be dumped, but we
1467  * need it so that casts involving such types will be dumped correctly -- see
1468  * dumpCast. This means the flag should be set the same as for the underlying
1469  * object (the table or base type).
1470  */
1471 static void
1472 selectDumpableType(TypeInfo *tyinfo, Archive *fout)
1473 {
1474  /* skip complex types, except for standalone composite types */
1475  if (OidIsValid(tyinfo->typrelid) &&
1476  tyinfo->typrelkind != RELKIND_COMPOSITE_TYPE)
1477  {
1478  TableInfo *tytable = findTableByOid(tyinfo->typrelid);
1479 
1480  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1481  if (tytable != NULL)
1482  tyinfo->dobj.dump = tytable->dobj.dump;
1483  else
1484  tyinfo->dobj.dump = DUMP_COMPONENT_NONE;
1485  return;
1486  }
1487 
1488  /* skip auto-generated array types */
1489  if (tyinfo->isArray)
1490  {
1491  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1492 
1493  /*
1494  * Fall through to set the dump flag; we assume that the subsequent
1495  * rules will do the same thing as they would for the array's base
1496  * type. (We cannot reliably look up the base type here, since
1497  * getTypes may not have processed it yet.)
1498  */
1499  }
1500 
1501  if (checkExtensionMembership(&tyinfo->dobj, fout))
1502  return; /* extension membership overrides all else */
1503 
1504  /* Dump based on if the contents of the namespace are being dumped */
1505  tyinfo->dobj.dump = tyinfo->dobj.namespace->dobj.dump_contains;
1506 }
1507 
1508 /*
1509  * selectDumpableDefaultACL: policy-setting subroutine
1510  * Mark a default ACL as to be dumped or not
1511  *
1512  * For per-schema default ACLs, dump if the schema is to be dumped.
1513  * Otherwise dump if we are dumping "everything". Note that dataOnly
1514  * and aclsSkip are checked separately.
1515  */
1516 static void
1517 selectDumpableDefaultACL(DefaultACLInfo *dinfo, DumpOptions *dopt)
1518 {
1519  /* Default ACLs can't be extension members */
1520 
1521  if (dinfo->dobj.namespace)
1522  /* default ACLs are considered part of the namespace */
1523  dinfo->dobj.dump = dinfo->dobj.namespace->dobj.dump_contains;
1524  else
1525  dinfo->dobj.dump = dopt->include_everything ?
1526  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1527 }
1528 
1529 /*
1530  * selectDumpableCast: policy-setting subroutine
1531  * Mark a cast as to be dumped or not
1532  *
1533  * Casts do not belong to any particular namespace (since they haven't got
1534  * names), nor do they have identifiable owners. To distinguish user-defined
1535  * casts from built-in ones, we must resort to checking whether the cast's
1536  * OID is in the range reserved for initdb.
1537  */
1538 static void
1539 selectDumpableCast(CastInfo *cast, Archive *fout)
1540 {
1541  if (checkExtensionMembership(&cast->dobj, fout))
1542  return; /* extension membership overrides all else */
1543 
1544  /*
1545  * This would be DUMP_COMPONENT_ACL for from-initdb casts, but they do not
1546  * support ACLs currently.
1547  */
1548  if (cast->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1549  cast->dobj.dump = DUMP_COMPONENT_NONE;
1550  else
1551  cast->dobj.dump = fout->dopt->include_everything ?
1552  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1553 }
1554 
1555 /*
1556  * selectDumpableProcLang: policy-setting subroutine
1557  * Mark a procedural language as to be dumped or not
1558  *
1559  * Procedural languages do not belong to any particular namespace. To
1560  * identify built-in languages, we must resort to checking whether the
1561  * language's OID is in the range reserved for initdb.
1562  */
1563 static void
1564 selectDumpableProcLang(ProcLangInfo *plang, Archive *fout)
1565 {
1566  if (checkExtensionMembership(&plang->dobj, fout))
1567  return; /* extension membership overrides all else */
1568 
1569  /*
1570  * Only include procedural languages when we are dumping everything.
1571  *
1572  * For from-initdb procedural languages, only include ACLs, as we do for
1573  * the pg_catalog namespace. We need this because procedural languages do
1574  * not live in any namespace.
1575  */
1576  if (!fout->dopt->include_everything)
1577  plang->dobj.dump = DUMP_COMPONENT_NONE;
1578  else
1579  {
1580  if (plang->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1581  plang->dobj.dump = fout->remoteVersion < 90600 ?
1582  DUMP_COMPONENT_NONE : DUMP_COMPONENT_ACL;
1583  else
1584  plang->dobj.dump = DUMP_COMPONENT_ALL;
1585  }
1586 }
1587 
1588 /*
1589  * selectDumpableAccessMethod: policy-setting subroutine
1590  * Mark an access method as to be dumped or not
1591  *
1592  * Access methods do not belong to any particular namespace. To identify
1593  * built-in access methods, we must resort to checking whether the
1594  * method's OID is in the range reserved for initdb.
1595  */
1596 static void
1597 selectDumpableAccessMethod(AccessMethodInfo *method, Archive *fout)
1598 {
1599  if (checkExtensionMembership(&method->dobj, fout))
1600  return; /* extension membership overrides all else */
1601 
1602  /*
1603  * This would be DUMP_COMPONENT_ACL for from-initdb access methods, but
1604  * they do not support ACLs currently.
1605  */
1606  if (method->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1607  method->dobj.dump = DUMP_COMPONENT_NONE;
1608  else
1609  method->dobj.dump = fout->dopt->include_everything ?
1610  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1611 }
1612 
1613 /*
1614  * selectDumpableExtension: policy-setting subroutine
1615  * Mark an extension as to be dumped or not
1616  *
1617  * Normally, we dump all extensions, or none of them if include_everything
1618  * is false (i.e., a --schema or --table switch was given). However, in
1619  * binary-upgrade mode it's necessary to skip built-in extensions, since we
1620  * assume those will already be installed in the target database. We identify
1621  * such extensions by their having OIDs in the range reserved for initdb.
1622  */
1623 static void
1624 selectDumpableExtension(ExtensionInfo *extinfo, DumpOptions *dopt)
1625 {
1626  /*
1627  * Use DUMP_COMPONENT_ACL for from-initdb extensions, to allow users to
1628  * change permissions on those objects, if they wish to, and have those
1629  * changes preserved.
1630  */
1631  if (dopt->binary_upgrade && extinfo->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1632  extinfo->dobj.dump = extinfo->dobj.dump_contains = DUMP_COMPONENT_ACL;
1633  else
1634  extinfo->dobj.dump = extinfo->dobj.dump_contains =
1635  dopt->include_everything ? DUMP_COMPONENT_ALL :
1636  DUMP_COMPONENT_NONE;
1637 }
1638 
1639 /*
1640  * selectDumpablePublicationTable: policy-setting subroutine
1641  * Mark a publication table as to be dumped or not
1642  *
1643  * Publication tables have schemas, but those are ignored in decision making,
1644  * because publications are only dumped when we are dumping everything.
1645  */
1646 static void
1647 selectDumpablePublicationTable(DumpableObject *dobj, Archive *fout)
1648 {
1649  if (checkExtensionMembership(dobj, fout))
1650  return; /* extension membership overrides all else */
1651 
1652  dobj->dump = fout->dopt->include_everything ?
1653  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1654 }
1655 
1656 /*
1657  * selectDumpableObject: policy-setting subroutine
1658  * Mark a generic dumpable object as to be dumped or not
1659  *
1660  * Use this only for object types without a special-case routine above.
1661  */
1662 static void
1663 selectDumpableObject(DumpableObject *dobj, Archive *fout)
1664 {
1665  if (checkExtensionMembership(dobj, fout))
1666  return; /* extension membership overrides all else */
1667 
1668  /*
1669  * Default policy is to dump if parent namespace is dumpable, or for
1670  * non-namespace-associated items, dump if we're dumping "everything".
1671  */
1672  if (dobj->namespace)
1673  dobj->dump = dobj->namespace->dobj.dump_contains;
1674  else
1675  dobj->dump = fout->dopt->include_everything ?
1676  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1677 }
1678 
1679 /*
1680  * Dump a table's contents for loading using the COPY command
1681  * - this routine is called by the Archiver when it wants the table
1682  * to be dumped.
1683  */
1684 
1685 static int
1686 dumpTableData_copy(Archive *fout, void *dcontext)
1687 {
1688  TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
1689  TableInfo *tbinfo = tdinfo->tdtable;
1690  const char *classname = tbinfo->dobj.name;
1691  const bool hasoids = tbinfo->hasoids;
1692  const bool oids = tdinfo->oids;
1693  PQExpBuffer q = createPQExpBuffer();
1694 
1695  /*
1696  * Note: can't use getThreadLocalPQExpBuffer() here, we're calling fmtId
1697  * which uses it already.
1698  */
1699  PQExpBuffer clistBuf = createPQExpBuffer();
1700  PGconn *conn = GetConnection(fout);
1701  PGresult *res;
1702  int ret;
1703  char *copybuf;
1704  const char *column_list;
1705 
1706  if (g_verbose)
1707  write_msg(NULL, "dumping contents of table \"%s.%s\"\n",
1708  tbinfo->dobj.namespace->dobj.name, classname);
1709 
1710  /*
1711  * Make sure we are in proper schema. We will qualify the table name
1712  * below anyway (in case its name conflicts with a pg_catalog table); but
1713  * this ensures reproducible results in case the table contains regproc,
1714  * regclass, etc columns.
1715  */
1716  selectSourceSchema(fout, tbinfo->dobj.namespace->dobj.name);
1717 
1718  /*
1719  * Specify the column list explicitly so that we have no possibility of
1720  * retrieving data in the wrong column order. (The default column
1721  * ordering of COPY will not be what we want in certain corner cases
1722  * involving ADD COLUMN and inheritance.)
1723  */
1724  column_list = fmtCopyColumnList(tbinfo, clistBuf);
1725 
1726  if (oids && hasoids)
1727  {
1728  appendPQExpBuffer(q, "COPY %s %s WITH OIDS TO stdout;",
1729  fmtQualifiedId(fout->remoteVersion,
1730  tbinfo->dobj.namespace->dobj.name,
1731  classname),
1732  column_list);
1733  }
1734  else if (tdinfo->filtercond)
1735  {
1736  /* Note: this syntax is only supported in 8.2 and up */
1737  appendPQExpBufferStr(q, "COPY (SELECT ");
1738  /* klugery to get rid of parens in column list */
1739  if (strlen(column_list) > 2)
1740  {
1741  appendPQExpBufferStr(q, column_list + 1);
1742  q->data[q->len - 1] = ' ';
1743  }
1744  else
1745  appendPQExpBufferStr(q, "* ");
1746  appendPQExpBuffer(q, "FROM %s %s) TO stdout;",
1747  fmtQualifiedId(fout->remoteVersion,
1748  tbinfo->dobj.namespace->dobj.name,
1749  classname),
1750  tdinfo->filtercond);
1751  }
1752  else
1753  {
1754  appendPQExpBuffer(q, "COPY %s %s TO stdout;",
1755  fmtQualifiedId(fout->remoteVersion,
1756  tbinfo->dobj.namespace->dobj.name,
1757  classname),
1758  column_list);
1759  }
1760  res = ExecuteSqlQuery(fout, q->data, PGRES_COPY_OUT);
1761  PQclear(res);
1762  destroyPQExpBuffer(clistBuf);
1763 
1764  for (;;)
1765  {
1766  ret = PQgetCopyData(conn, &copybuf, 0);
1767 
1768  if (ret < 0)
1769  break; /* done or error */
1770 
1771  if (copybuf)
1772  {
1773  WriteData(fout, copybuf, ret);
1774  PQfreemem(copybuf);
1775  }
1776 
1777  /* ----------
1778  * THROTTLE:
1779  *
1780  * There was considerable discussion in late July, 2000 regarding
1781  * slowing down pg_dump when backing up large tables. Users with both
1782  * slow & fast (multi-processor) machines experienced performance
1783  * degradation when doing a backup.
1784  *
1785  * Initial attempts based on sleeping for a number of ms for each ms
1786  * of work were deemed too complex, then a simple 'sleep in each loop'
1787  * implementation was suggested. The latter failed because the loop
1788  * was too tight. Finally, the following was implemented:
1789  *
1790  * If throttle is non-zero, then
1791  * See how long since the last sleep.
1792  * Work out how long to sleep (based on ratio).
1793  * If sleep is more than 100ms, then
1794  * sleep
1795  * reset timer
1796  * EndIf
1797  * EndIf
1798  *
1799  * where the throttle value was the number of ms to sleep per ms of
1800  * work. The calculation was done in each loop.
1801  *
1802  * Most of the hard work is done in the backend, and this solution
1803  * still did not work particularly well: on slow machines, the ratio
1804  * was 50:1, and on medium paced machines, 1:1, and on fast
1805  * multi-processor machines, it had little or no effect, for reasons
1806  * that were unclear.
1807  *
1808  * Further discussion ensued, and the proposal was dropped.
1809  *
1810  * For those people who want this feature, it can be implemented using
1811  * gettimeofday in each loop, calculating the time since last sleep,
1812  * multiplying that by the sleep ratio, then if the result is more
1813  * than a preset 'minimum sleep time' (say 100ms), call the 'select'
1814  * function to sleep for a subsecond period ie.
1815  * function to sleep for a subsecond period, i.e.
1816  * select(0, NULL, NULL, NULL, &tvi);
1817  *
1818  * This will return after the interval specified in the structure tvi.
1819  * Finally, call gettimeofday again to save the 'last sleep time'.
1820  * ----------
1821  */
1822  }
1823  archprintf(fout, "\\.\n\n\n");
1824 
1825  if (ret == -2)
1826  {
1827  /* copy data transfer failed */
1828  write_msg(NULL, "Dumping the contents of table \"%s\" failed: PQgetCopyData() failed.\n", classname);
1829  write_msg(NULL, "Error message from server: %s", PQerrorMessage(conn));
1830  write_msg(NULL, "The command was: %s\n", q->data);
1831  exit_nicely(1);
1832  }
1833 
1834  /* Check command status and return to normal libpq state */
1835  res = PQgetResult(conn);
1836  if (PQresultStatus(res) != PGRES_COMMAND_OK)
1837  {
1838  write_msg(NULL, "Dumping the contents of table \"%s\" failed: PQgetResult() failed.\n", classname);
1839  write_msg(NULL, "Error message from server: %s", PQerrorMessage(conn));
1840  write_msg(NULL, "The command was: %s\n", q->data);
1841  exit_nicely(1);
1842  }
1843  PQclear(res);
1844 
1845  /* Do this to ensure we've pumped libpq back to idle state */
1846  if (PQgetResult(conn) != NULL)
1847  write_msg(NULL, "WARNING: unexpected extra results during COPY of table \"%s\"\n",
1848  classname);
1849 
1850  destroyPQExpBuffer(q);
1851  return 1;
1852 }
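/*
 * The comment above describes the dropped throttling proposal only in prose;
 * the following is a minimal, self-contained sketch of that scheme.  It is
 * not part of pg_dump; the function and parameter names are illustrative,
 * and a POSIX environment (gettimeofday(), select()) is assumed.
 */
#if 0
#include <sys/select.h>
#include <sys/time.h>

/*
 * sleep_ratio is the number of ms to sleep per ms of work; *last_sleep
 * holds the time of the previous sleep and is updated in place.
 */
static void
maybe_throttle(double sleep_ratio, struct timeval *last_sleep)
{
	struct timeval now;
	long		worked_ms;
	long		sleep_ms;

	gettimeofday(&now, NULL);
	worked_ms = (now.tv_sec - last_sleep->tv_sec) * 1000L +
		(now.tv_usec - last_sleep->tv_usec) / 1000L;
	sleep_ms = (long) (worked_ms * sleep_ratio);

	if (sleep_ms > 100)			/* preset "minimum sleep time" */
	{
		struct timeval tvi;

		tvi.tv_sec = sleep_ms / 1000L;
		tvi.tv_usec = (sleep_ms % 1000L) * 1000L;
		/* returns after the interval specified in tvi */
		select(0, NULL, NULL, NULL, &tvi);
		/* save the "last sleep time" */
		gettimeofday(last_sleep, NULL);
	}
}
#endif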
1853 
1854 /*
1855  * Dump table data using INSERT commands.
1856  *
1857  * Caution: when we restore from an archive file direct to database, the
1858  * INSERT commands emitted by this function have to be parsed by
1859  * pg_backup_db.c's ExecuteSimpleCommands(), which will not handle comments,
1860  * E'' strings, or dollar-quoted strings. So don't emit anything like that.
1861  */
1862 static int
1863 dumpTableData_insert(Archive *fout, void *dcontext)
1864 {
1865  TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
1866  TableInfo *tbinfo = tdinfo->tdtable;
1867  const char *classname = tbinfo->dobj.name;
1868  DumpOptions *dopt = fout->dopt;
1869  PQExpBuffer q = createPQExpBuffer();
1870  PQExpBuffer insertStmt = NULL;
1871  PGresult *res;
1872  int tuple;
1873  int nfields;
1874  int field;
1875 
1876  /*
1877  * Make sure we are in proper schema. We will qualify the table name
1878  * below anyway (in case its name conflicts with a pg_catalog table); but
1879  * this ensures reproducible results in case the table contains regproc,
1880  * regclass, etc columns.
1881  */
1882  selectSourceSchema(fout, tbinfo->dobj.namespace->dobj.name);
1883 
1884  appendPQExpBuffer(q, "DECLARE _pg_dump_cursor CURSOR FOR "
1885  "SELECT * FROM ONLY %s",
1886  fmtQualifiedId(fout->remoteVersion,
1887  tbinfo->dobj.namespace->dobj.name,
1888  classname));
1889  if (tdinfo->filtercond)
1890  appendPQExpBuffer(q, " %s", tdinfo->filtercond);
1891 
1892  ExecuteSqlStatement(fout, q->data);
1893 
1894  while (1)
1895  {
1896  res = ExecuteSqlQuery(fout, "FETCH 100 FROM _pg_dump_cursor",
1897  PGRES_TUPLES_OK);
1898  nfields = PQnfields(res);
1899  for (tuple = 0; tuple < PQntuples(res); tuple++)
1900  {
1901  /*
1902  * First time through, we build as much of the INSERT statement as
1903  * possible in "insertStmt", which we can then just print for each
1904  * line. If the table happens to have zero columns then this will
1905  * be a complete statement, otherwise it will end in "VALUES(" and
1906  * be ready to have the row's column values appended.
1907  */
1908  if (insertStmt == NULL)
1909  {
1910  insertStmt = createPQExpBuffer();
1911 
1912  /*
1913  * When load-via-partition-root is set, get the root table
1914  * name for the partition table, so that we can reload data
1915  * through the root table.
1916  */
1917  if (dopt->load_via_partition_root && tbinfo->ispartition)
1918  {
1919  TableInfo *parentTbinfo;
1920 
1921  parentTbinfo = getRootTableInfo(tbinfo);
1922 
1923  /*
1924  * When we load data through the root, we will qualify
1925  * the table name. This is needed because the search_path
1926  * will have been set for the partition table earlier.
1927  */
1928  classname = (char *) fmtQualifiedId(fout->remoteVersion,
1929  parentTbinfo->dobj.namespace->dobj.name,
1930  parentTbinfo->dobj.name);
1931  }
1932  else
1933  classname = fmtId(tbinfo->dobj.name);
1934 
1935  appendPQExpBuffer(insertStmt, "INSERT INTO %s ",
1936  classname);
1937 
1938  /* corner case for zero-column table */
1939  if (nfields == 0)
1940  {
1941  appendPQExpBufferStr(insertStmt, "DEFAULT VALUES;\n");
1942  }
1943  else
1944  {
1945  /* append the list of column names if required */
1946  if (dopt->column_inserts)
1947  {
1948  appendPQExpBufferChar(insertStmt, '(');
1949  for (field = 0; field < nfields; field++)
1950  {
1951  if (field > 0)
1952  appendPQExpBufferStr(insertStmt, ", ");
1953  appendPQExpBufferStr(insertStmt,
1954  fmtId(PQfname(res, field)));
1955  }
1956  appendPQExpBufferStr(insertStmt, ") ");
1957  }
1958 
1959  if (tbinfo->needs_override)
1960  appendPQExpBufferStr(insertStmt, "OVERRIDING SYSTEM VALUE ");
1961 
1962  appendPQExpBufferStr(insertStmt, "VALUES (");
1963  }
1964  }
1965 
1966  archputs(insertStmt->data, fout);
1967 
1968  /* if it is zero-column table then we're done */
1969  if (nfields == 0)
1970  continue;
1971 
1972  for (field = 0; field < nfields; field++)
1973  {
1974  if (field > 0)
1975  archputs(", ", fout);
1976  if (PQgetisnull(res, tuple, field))
1977  {
1978  archputs("NULL", fout);
1979  continue;
1980  }
1981 
1982  /* XXX This code is partially duplicated in ruleutils.c */
1983  switch (PQftype(res, field))
1984  {
1985  case INT2OID:
1986  case INT4OID:
1987  case INT8OID:
1988  case OIDOID:
1989  case FLOAT4OID:
1990  case FLOAT8OID:
1991  case NUMERICOID:
1992  {
1993  /*
1994  * These types are printed without quotes unless
1995  * they contain values that aren't accepted by the
1996  * scanner unquoted (e.g., 'NaN'). Note that
1997  * strtod() and friends might accept NaN, so we
1998  * can't use that to test.
1999  *
2000  * In reality we only need to defend against
2001  * infinity and NaN, so we need not get too crazy
2002  * about pattern matching here.
2003  */
2004  const char *s = PQgetvalue(res, tuple, field);
2005 
2006  if (strspn(s, "0123456789 +-eE.") == strlen(s))
2007  archputs(s, fout);
2008  else
2009  archprintf(fout, "'%s'", s);
2010  }
2011  break;
2012 
2013  case BITOID:
2014  case VARBITOID:
2015  archprintf(fout, "B'%s'",
2016  PQgetvalue(res, tuple, field));
2017  break;
2018 
2019  case BOOLOID:
2020  if (strcmp(PQgetvalue(res, tuple, field), "t") == 0)
2021  archputs("true", fout);
2022  else
2023  archputs("false", fout);
2024  break;
2025 
2026  default:
2027  /* All other types are printed as string literals. */
2028  resetPQExpBuffer(q);
2029  appendStringLiteralAH(q,
2030  PQgetvalue(res, tuple, field),
2031  fout);
2032  archputs(q->data, fout);
2033  break;
2034  }
2035  }
2036  archputs(");\n", fout);
2037  }
2038 
2039  if (PQntuples(res) <= 0)
2040  {
2041  PQclear(res);
2042  break;
2043  }
2044  PQclear(res);
2045  }
2046 
2047  archputs("\n\n", fout);
2048 
2049  ExecuteSqlStatement(fout, "CLOSE _pg_dump_cursor");
2050 
2051  destroyPQExpBuffer(q);
2052  if (insertStmt != NULL)
2053  destroyPQExpBuffer(insertStmt);
2054 
2055  return 1;
2056 }
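/*
 * A self-contained illustration (not part of pg_dump, helper name
 * illustrative) of the quoting rule used above for numeric-family columns:
 * a value is emitted unquoted only if every character could appear in a
 * numeric literal; anything else, e.g. "NaN" or "Infinity", gets single
 * quotes.
 */
#if 0
#include <stdio.h>
#include <string.h>

static void
print_numeric_literal(const char *s)
{
	if (strspn(s, "0123456789 +-eE.") == strlen(s))
		printf("%s\n", s);		/* e.g. 42, -1.5e10 */
	else
		printf("'%s'\n", s);	/* e.g. 'NaN', 'Infinity' */
}
#endif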
2057 
2058 /*
2059  * getRootTableInfo:
2060  * get the root TableInfo for the given partition table.
2061  */
2062 static TableInfo *
2063 getRootTableInfo(TableInfo *tbinfo)
2064 {
2065  TableInfo *parentTbinfo;
2066 
2067  Assert(tbinfo->ispartition);
2068  Assert(tbinfo->numParents == 1);
2069 
2070  parentTbinfo = tbinfo->parents[0];
2071  while (parentTbinfo->ispartition)
2072  {
2073  Assert(parentTbinfo->numParents == 1);
2074  parentTbinfo = parentTbinfo->parents[0];
2075  }
2076 
2077  return parentTbinfo;
2078 }
2079 
2080 /*
2081  * dumpTableData -
2082  * dump the contents of a single table
2083  *
2084  * Actually, this just makes an ArchiveEntry for the table contents.
2085  */
2086 static void
2087 dumpTableData(Archive *fout, TableDataInfo *tdinfo)
2088 {
2089  DumpOptions *dopt = fout->dopt;
2090  TableInfo *tbinfo = tdinfo->tdtable;
2091  PQExpBuffer copyBuf = createPQExpBuffer();
2092  PQExpBuffer clistBuf = createPQExpBuffer();
2093  DataDumperPtr dumpFn;
2094  char *copyStmt;
2095  const char *copyFrom;
2096 
2097  if (!dopt->dump_inserts)
2098  {
2099  /* Dump/restore using COPY */
2100  dumpFn = dumpTableData_copy;
2101 
2102  /*
2103  * When load-via-partition-root is set, get the root table name for
2104  * the partition table, so that we can reload data through the root
2105  * table.
2106  */
2107  if (dopt->load_via_partition_root && tbinfo->ispartition)
2108  {
2109  TableInfo *parentTbinfo;
2110 
2111  parentTbinfo = getRootTableInfo(tbinfo);
2112 
2113  /*
2114  * When we load data through the root, we will qualify the table
2115  * name, because search_path is set for the partition.
2116  */
2117  copyFrom = fmtQualifiedId(fout->remoteVersion,
2118  parentTbinfo->dobj.namespace->dobj.name,
2119  parentTbinfo->dobj.name);
2120  }
2121  else
2122  copyFrom = fmtId(tbinfo->dobj.name);
2123 
2124  /* must use 2 steps here 'cause fmtId is nonreentrant */
2125  appendPQExpBuffer(copyBuf, "COPY %s ",
2126  copyFrom);
2127  appendPQExpBuffer(copyBuf, "%s %sFROM stdin;\n",
2128  fmtCopyColumnList(tbinfo, clistBuf),
2129  (tdinfo->oids && tbinfo->hasoids) ? "WITH OIDS " : "");
2130  copyStmt = copyBuf->data;
2131  }
2132  else
2133  {
2134  /* Restore using INSERT */
2135  dumpFn = dumpTableData_insert;
2136  copyStmt = NULL;
2137  }
2138 
2139  /*
2140  * Note: although the TableDataInfo is a full DumpableObject, we treat its
2141  * dependency on its table as "special" and pass it to ArchiveEntry now.
2142  * See comments for BuildArchiveDependencies.
2143  */
2144  if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2145  ArchiveEntry(fout, tdinfo->dobj.catId, tdinfo->dobj.dumpId,
2146  tbinfo->dobj.name, tbinfo->dobj.namespace->dobj.name,
2147  NULL, tbinfo->rolname,
2148  false, "TABLE DATA", SECTION_DATA,
2149  "", "", copyStmt,
2150  &(tbinfo->dobj.dumpId), 1,
2151  dumpFn, tdinfo);
2152 
2153  destroyPQExpBuffer(copyBuf);
2154  destroyPQExpBuffer(clistBuf);
2155 }
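/*
 * For example, with --load-via-partition-root a leaf partition
 * measurement_y2018m01 whose root is public.measurement is archived with
 * "COPY public.measurement (...) FROM stdin;" rather than a COPY targeting
 * the leaf itself (the table names here are illustrative).
 */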
2156 
2157 /*
2158  * refreshMatViewData -
2159  * load or refresh the contents of a single materialized view
2160  *
2161  * Actually, this just makes an ArchiveEntry for the REFRESH MATERIALIZED VIEW
2162  * statement.
2163  */
2164 static void
2165 refreshMatViewData(Archive *fout, TableDataInfo *tdinfo)
2166 {
2167  TableInfo *tbinfo = tdinfo->tdtable;
2168  PQExpBuffer q;
2169 
2170  /* If the materialized view is not flagged as populated, skip this. */
2171  if (!tbinfo->relispopulated)
2172  return;
2173 
2174  q = createPQExpBuffer();
2175 
2176  appendPQExpBuffer(q, "REFRESH MATERIALIZED VIEW %s;\n",
2177  fmtId(tbinfo->dobj.name));
2178 
2179  if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2180  ArchiveEntry(fout,
2181  tdinfo->dobj.catId, /* catalog ID */
2182  tdinfo->dobj.dumpId, /* dump ID */
2183  tbinfo->dobj.name, /* Name */
2184  tbinfo->dobj.namespace->dobj.name, /* Namespace */
2185  NULL, /* Tablespace */
2186  tbinfo->rolname, /* Owner */
2187  false, /* with oids */
2188  "MATERIALIZED VIEW DATA", /* Desc */
2189  SECTION_POST_DATA, /* Section */
2190  q->data, /* Create */
2191  "", /* Del */
2192  NULL, /* Copy */
2193  tdinfo->dobj.dependencies, /* Deps */
2194  tdinfo->dobj.nDeps, /* # Deps */
2195  NULL, /* Dumper */
2196  NULL); /* Dumper Arg */
2197 
2198  destroyPQExpBuffer(q);
2199 }
2200 
2201 /*
2202  * getTableData -
2203  * set up dumpable objects representing the contents of tables
2204  */
2205 static void
2206 getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, bool oids, char relkind)
2207 {
2208  int i;
2209 
2210  for (i = 0; i < numTables; i++)
2211  {
2212  if (tblinfo[i].dobj.dump & DUMP_COMPONENT_DATA &&
2213  (!relkind || tblinfo[i].relkind == relkind))
2214  makeTableDataInfo(dopt, &(tblinfo[i]), oids);
2215  }
2216 }
2217 
2218 /*
2219  * Make a dumpable object for the data of this specific table
2220  *
2221  * Note: we make a TableDataInfo if and only if we are going to dump the
2222  * table data; the "dump" flag in such objects isn't used.
2223  */
2224 static void
2225 makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo, bool oids)
2226 {
2227  TableDataInfo *tdinfo;
2228 
2229  /*
2230  * Nothing to do if we already decided to dump the table. This will
2231  * happen for "config" tables.
2232  */
2233  if (tbinfo->dataObj != NULL)
2234  return;
2235 
2236  /* Skip VIEWs (no data to dump) */
2237  if (tbinfo->relkind == RELKIND_VIEW)
2238  return;
2239  /* Skip FOREIGN TABLEs (no data to dump) */
2240  if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2241  return;
2242  /* Skip partitioned tables (data in partitions) */
2243  if (tbinfo->relkind == RELKIND_PARTITIONED_TABLE)
2244  return;
2245 
2246  /* Don't dump data in unlogged tables, if so requested */
2247  if (tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED &&
2248  dopt->no_unlogged_table_data)
2249  return;
2250 
2251  /* Check that the data is not explicitly excluded */
2252  if (simple_oid_list_member(&tabledata_exclude_oids,
2253  tbinfo->dobj.catId.oid))
2254  return;
2255 
2256  /* OK, let's dump it */
2257  tdinfo = (TableDataInfo *) pg_malloc(sizeof(TableDataInfo));
2258 
2259  if (tbinfo->relkind == RELKIND_MATVIEW)
2260  tdinfo->dobj.objType = DO_REFRESH_MATVIEW;
2261  else if (tbinfo->relkind == RELKIND_SEQUENCE)
2262  tdinfo->dobj.objType = DO_SEQUENCE_SET;
2263  else
2264  tdinfo->dobj.objType = DO_TABLE_DATA;
2265 
2266  /*
2267  * Note: use tableoid 0 so that this object won't be mistaken for
2268  * something that pg_depend entries apply to.
2269  */
2270  tdinfo->dobj.catId.tableoid = 0;
2271  tdinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
2272  AssignDumpId(&tdinfo->dobj);
2273  tdinfo->dobj.name = tbinfo->dobj.name;
2274  tdinfo->dobj.namespace = tbinfo->dobj.namespace;
2275  tdinfo->tdtable = tbinfo;
2276  tdinfo->oids = oids;
2277  tdinfo->filtercond = NULL; /* might get set later */
2278  addObjectDependency(&tdinfo->dobj, tbinfo->dobj.dumpId);
2279 
2280  tbinfo->dataObj = tdinfo;
2281 }
2282 
2283 /*
2284  * The refresh for a materialized view must be dependent on the refresh for
2285  * any materialized view that this one is dependent on.
2286  *
2287  * This must be called after all the objects are created, but before they are
2288  * sorted.
2289  */
2290 static void
2291 buildMatViewRefreshDependencies(Archive *fout)
2292 {
2293  PQExpBuffer query;
2294  PGresult *res;
2295  int ntups,
2296  i;
2297  int i_classid,
2298  i_objid,
2299  i_refobjid;
2300 
2301  /* No Mat Views before 9.3. */
2302  if (fout->remoteVersion < 90300)
2303  return;
2304 
2305  /* Make sure we are in proper schema */
2306  selectSourceSchema(fout, "pg_catalog");
2307 
2308  query = createPQExpBuffer();
2309 
2310  appendPQExpBufferStr(query, "WITH RECURSIVE w AS "
2311  "( "
2312  "SELECT d1.objid, d2.refobjid, c2.relkind AS refrelkind "
2313  "FROM pg_depend d1 "
2314  "JOIN pg_class c1 ON c1.oid = d1.objid "
2315  "AND c1.relkind = " CppAsString2(RELKIND_MATVIEW)
2316  " JOIN pg_rewrite r1 ON r1.ev_class = d1.objid "
2317  "JOIN pg_depend d2 ON d2.classid = 'pg_rewrite'::regclass "
2318  "AND d2.objid = r1.oid "
2319  "AND d2.refobjid <> d1.objid "
2320  "JOIN pg_class c2 ON c2.oid = d2.refobjid "
2321  "AND c2.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
2322  CppAsString2(RELKIND_VIEW) ") "
2323  "WHERE d1.classid = 'pg_class'::regclass "
2324  "UNION "
2325  "SELECT w.objid, d3.refobjid, c3.relkind "
2326  "FROM w "
2327  "JOIN pg_rewrite r3 ON r3.ev_class = w.refobjid "
2328  "JOIN pg_depend d3 ON d3.classid = 'pg_rewrite'::regclass "
2329  "AND d3.objid = r3.oid "
2330  "AND d3.refobjid <> w.refobjid "
2331  "JOIN pg_class c3 ON c3.oid = d3.refobjid "
2332  "AND c3.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
2333  CppAsString2(RELKIND_VIEW) ") "
2334  ") "
2335  "SELECT 'pg_class'::regclass::oid AS classid, objid, refobjid "
2336  "FROM w "
2337  "WHERE refrelkind = " CppAsString2(RELKIND_MATVIEW));
2338 
2339  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
2340 
2341  ntups = PQntuples(res);
2342 
2343  i_classid = PQfnumber(res, "classid");
2344  i_objid = PQfnumber(res, "objid");
2345  i_refobjid = PQfnumber(res, "refobjid");
2346 
2347  for (i = 0; i < ntups; i++)
2348  {
2349  CatalogId objId;
2350  CatalogId refobjId;
2351  DumpableObject *dobj;
2352  DumpableObject *refdobj;
2353  TableInfo *tbinfo;
2354  TableInfo *reftbinfo;
2355 
2356  objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
2357  objId.oid = atooid(PQgetvalue(res, i, i_objid));
2358  refobjId.tableoid = objId.tableoid;
2359  refobjId.oid = atooid(PQgetvalue(res, i, i_refobjid));
2360 
2361  dobj = findObjectByCatalogId(objId);
2362  if (dobj == NULL)
2363  continue;
2364 
2365  Assert(dobj->objType == DO_TABLE);
2366  tbinfo = (TableInfo *) dobj;
2367  Assert(tbinfo->relkind == RELKIND_MATVIEW);
2368  dobj = (DumpableObject *) tbinfo->dataObj;
2369  if (dobj == NULL)
2370  continue;
2371  Assert(dobj->objType == DO_REFRESH_MATVIEW);
2372 
2373  refdobj = findObjectByCatalogId(refobjId);
2374  if (refdobj == NULL)
2375  continue;
2376 
2377  Assert(refdobj->objType == DO_TABLE);
2378  reftbinfo = (TableInfo *) refdobj;
2379  Assert(reftbinfo->relkind == RELKIND_MATVIEW);
2380  refdobj = (DumpableObject *) reftbinfo->dataObj;
2381  if (refdobj == NULL)
2382  continue;
2383  Assert(refdobj->objType == DO_REFRESH_MATVIEW);
2384 
2385  addObjectDependency(dobj, refdobj->dumpId);
2386 
2387  if (!reftbinfo->relispopulated)
2388  tbinfo->relispopulated = false;
2389  }
2390 
2391  PQclear(res);
2392 
2393  destroyPQExpBuffer(query);
2394 }
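/*
 * For example, if materialized view mv_summary selects from materialized
 * view mv_detail (directly or through intervening regular views), the query
 * above yields an (mv_summary, mv_detail) pair, and the dependency added
 * here forces mv_detail to be refreshed before mv_summary at restore time
 * (view names illustrative).
 */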
2395 
2396 /*
2397  * getTableDataFKConstraints -
2398  * add dump-order dependencies reflecting foreign key constraints
2399  *
2400  * This code is executed only in a data-only dump --- in schema+data dumps
2401  * we handle foreign key issues by not creating the FK constraints until
2402  * after the data is loaded. In a data-only dump, however, we want to
2403  * order the table data objects in such a way that a table's referenced
2404  * tables are restored first. (In the presence of circular references or
2405  * self-references this may be impossible; we'll detect and complain about
2406  * that during the dependency sorting step.)
2407  */
2408 static void
2409 getTableDataFKConstraints(void)
2410 {
2411  DumpableObject **dobjs;
2412  int numObjs;
2413  int i;
2414 
2415  /* Search through all the dumpable objects for FK constraints */
2416  getDumpableObjects(&dobjs, &numObjs);
2417  for (i = 0; i < numObjs; i++)
2418  {
2419  if (dobjs[i]->objType == DO_FK_CONSTRAINT)
2420  {
2421  ConstraintInfo *cinfo = (ConstraintInfo *) dobjs[i];
2422  TableInfo *ftable;
2423 
2424  /* Not interesting unless both tables are to be dumped */
2425  if (cinfo->contable == NULL ||
2426  cinfo->contable->dataObj == NULL)
2427  continue;
2428  ftable = findTableByOid(cinfo->confrelid);
2429  if (ftable == NULL ||
2430  ftable->dataObj == NULL)
2431  continue;
2432 
2433  /*
2434  * Okay, make referencing table's TABLE_DATA object depend on the
2435  * referenced table's TABLE_DATA object.
2436  */
2437  addObjectDependency(&cinfo->contable->dataObj->dobj,
2438  ftable->dataObj->dobj.dumpId);
2439  }
2440  }
2441  free(dobjs);
2442 }
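/*
 * For example, in a data-only dump where orders has a foreign key
 * referencing customers, the dependency added above makes the orders
 * TABLE DATA entry depend on the customers TABLE DATA entry, so customers
 * rows are restored first (table names illustrative).
 */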
2443 
2444 
2445 /*
2446  * guessConstraintInheritance:
2447  * In pre-8.4 databases, we can't tell for certain which constraints
2448  * are inherited. We assume a CHECK constraint is inherited if its name
2449  * matches the name of any constraint in the parent. Originally this code
2450  * tried to compare the expression texts, but that can fail for various
2451  * reasons --- for example, if the parent and child tables are in different
2452  * schemas, reverse-listing of function calls may produce different text
2453  * (schema-qualified or not) depending on search path.
2454  *
2455  * In 8.4 and up we can rely on the conislocal field to decide which
2456  * constraints must be dumped; much safer.
2457  *
2458  * This function assumes all conislocal flags were initialized to true.
2459  * It clears the flag on anything that seems to be inherited.
2460  */
2461 static void
2462 guessConstraintInheritance(TableInfo *tblinfo, int numTables)
2463 {
2464  int i,
2465  j,
2466  k;
2467 
2468  for (i = 0; i < numTables; i++)
2469  {
2470  TableInfo *tbinfo = &(tblinfo[i]);
2471  int numParents;
2472  TableInfo **parents;
2473  TableInfo *parent;
2474 
2475  /* Sequences and views never have parents */
2476  if (tbinfo->relkind == RELKIND_SEQUENCE ||
2477  tbinfo->relkind == RELKIND_VIEW)
2478  continue;
2479 
2480  /* Don't bother computing anything for non-target tables, either */
2481  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
2482  continue;
2483 
2484  numParents = tbinfo->numParents;
2485  parents = tbinfo->parents;
2486 
2487  if (numParents == 0)
2488  continue; /* nothing to see here, move along */
2489 
2490  /* scan for inherited CHECK constraints */
2491  for (j = 0; j < tbinfo->ncheck; j++)
2492  {
2493  ConstraintInfo *constr;
2494 
2495  constr = &(tbinfo->checkexprs[j]);
2496 
2497  for (k = 0; k < numParents; k++)
2498  {
2499  int l;
2500 
2501  parent = parents[k];
2502  for (l = 0; l < parent->ncheck; l++)
2503  {
2504  ConstraintInfo *pconstr = &(parent->checkexprs[l]);
2505 
2506  if (strcmp(pconstr->dobj.name, constr->dobj.name) == 0)
2507  {
2508  constr->conislocal = false;
2509  break;
2510  }
2511  }
2512  if (!constr->conislocal)
2513  break;
2514  }
2515  }
2516  }
2517 }
2518 
2519 
2520 /*
2521  * dumpDatabase:
2522  * dump the database definition
2523  */
2524 static void
2525 dumpDatabase(Archive *fout)
2526 {
2527  DumpOptions *dopt = fout->dopt;
2528  PQExpBuffer dbQry = createPQExpBuffer();
2529  PQExpBuffer delQry = createPQExpBuffer();
2530  PQExpBuffer creaQry = createPQExpBuffer();
2531  PGconn *conn = GetConnection(fout);
2532  PGresult *res;
2533  int i_tableoid,
2534  i_oid,
2535  i_dba,
2536  i_encoding,
2537  i_collate,
2538  i_ctype,
2539  i_frozenxid,
2540  i_minmxid,
2541  i_tablespace;
2542  CatalogId dbCatId;
2543  DumpId dbDumpId;
2544  const char *datname,
2545  *dba,
2546  *encoding,
2547  *collate,
2548  *ctype,
2549  *tablespace;
2550  uint32 frozenxid,
2551  minmxid;
2552 
2553  datname = PQdb(conn);
2554 
2555  if (g_verbose)
2556  write_msg(NULL, "saving database definition\n");
2557 
2558  /* Make sure we are in proper schema */
2559  selectSourceSchema(fout, "pg_catalog");
2560 
2561  /* Get the database owner and parameters from pg_database */
2562  if (fout->remoteVersion >= 90300)
2563  {
2564  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
2565  "(%s datdba) AS dba, "
2566  "pg_encoding_to_char(encoding) AS encoding, "
2567  "datcollate, datctype, datfrozenxid, datminmxid, "
2568  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2569  "shobj_description(oid, 'pg_database') AS description "
2570 
2571  "FROM pg_database "
2572  "WHERE datname = ",
2573  username_subquery);
2574  appendStringLiteralAH(dbQry, datname, fout);
2575  }
2576  else if (fout->remoteVersion >= 80400)
2577  {
2578  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
2579  "(%s datdba) AS dba, "
2580  "pg_encoding_to_char(encoding) AS encoding, "
2581  "datcollate, datctype, datfrozenxid, 0 AS datminmxid, "
2582  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2583  "shobj_description(oid, 'pg_database') AS description "
2584 
2585  "FROM pg_database "
2586  "WHERE datname = ",
2587  username_subquery);
2588  appendStringLiteralAH(dbQry, datname, fout);
2589  }
2590  else if (fout->remoteVersion >= 80200)
2591  {
2592  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
2593  "(%s datdba) AS dba, "
2594  "pg_encoding_to_char(encoding) AS encoding, "
2595  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2596  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2597  "shobj_description(oid, 'pg_database') AS description "
2598 
2599  "FROM pg_database "
2600  "WHERE datname = ",
2601  username_subquery);
2602  appendStringLiteralAH(dbQry, datname, fout);
2603  }
2604  else
2605  {
2606  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
2607  "(%s datdba) AS dba, "
2608  "pg_encoding_to_char(encoding) AS encoding, "
2609  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2610  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace "
2611  "FROM pg_database "
2612  "WHERE datname = ",
2613  username_subquery);
2614  appendStringLiteralAH(dbQry, datname, fout);
2615  }
2616 
2617  res = ExecuteSqlQueryForSingleRow(fout, dbQry->data);
2618 
2619  i_tableoid = PQfnumber(res, "tableoid");
2620  i_oid = PQfnumber(res, "oid");
2621  i_dba = PQfnumber(res, "dba");
2622  i_encoding = PQfnumber(res, "encoding");
2623  i_collate = PQfnumber(res, "datcollate");
2624  i_ctype = PQfnumber(res, "datctype");
2625  i_frozenxid = PQfnumber(res, "datfrozenxid");
2626  i_minmxid = PQfnumber(res, "datminmxid");
2627  i_tablespace = PQfnumber(res, "tablespace");
2628 
2629  dbCatId.tableoid = atooid(PQgetvalue(res, 0, i_tableoid));
2630  dbCatId.oid = atooid(PQgetvalue(res, 0, i_oid));
2631  dba = PQgetvalue(res, 0, i_dba);
2632  encoding = PQgetvalue(res, 0, i_encoding);
2633  collate = PQgetvalue(res, 0, i_collate);
2634  ctype = PQgetvalue(res, 0, i_ctype);
2635  frozenxid = atooid(PQgetvalue(res, 0, i_frozenxid));
2636  minmxid = atooid(PQgetvalue(res, 0, i_minmxid));
2637  tablespace = PQgetvalue(res, 0, i_tablespace);
2638 
2639  appendPQExpBuffer(creaQry, "CREATE DATABASE %s WITH TEMPLATE = template0",
2640  fmtId(datname));
2641  if (strlen(encoding) > 0)
2642  {
2643  appendPQExpBufferStr(creaQry, " ENCODING = ");
2644  appendStringLiteralAH(creaQry, encoding, fout);
2645  }
2646  if (strlen(collate) > 0)
2647  {
2648  appendPQExpBufferStr(creaQry, " LC_COLLATE = ");
2649  appendStringLiteralAH(creaQry, collate, fout);
2650  }
2651  if (strlen(ctype) > 0)
2652  {
2653  appendPQExpBufferStr(creaQry, " LC_CTYPE = ");
2654  appendStringLiteralAH(creaQry, ctype, fout);
2655  }
2656  if (strlen(tablespace) > 0 && strcmp(tablespace, "pg_default") != 0 &&
2657  !dopt->outputNoTablespaces)
2658  appendPQExpBuffer(creaQry, " TABLESPACE = %s",
2659  fmtId(tablespace));
2660  appendPQExpBufferStr(creaQry, ";\n");
2661 
2662  if (dopt->binary_upgrade)
2663  {
2664  appendPQExpBufferStr(creaQry, "\n-- For binary upgrade, set datfrozenxid and datminmxid.\n");
2665  appendPQExpBuffer(creaQry, "UPDATE pg_catalog.pg_database\n"
2666  "SET datfrozenxid = '%u', datminmxid = '%u'\n"
2667  "WHERE datname = ",
2668  frozenxid, minmxid);
2669  appendStringLiteralAH(creaQry, datname, fout);
2670  appendPQExpBufferStr(creaQry, ";\n");
2671 
2672  }
2673 
2674  appendPQExpBuffer(delQry, "DROP DATABASE %s;\n",
2675  fmtId(datname));
2676 
2677  dbDumpId = createDumpId();
2678 
2679  ArchiveEntry(fout,
2680  dbCatId, /* catalog ID */
2681  dbDumpId, /* dump ID */
2682  datname, /* Name */
2683  NULL, /* Namespace */
2684  NULL, /* Tablespace */
2685  dba, /* Owner */
2686  false, /* with oids */
2687  "DATABASE", /* Desc */
2688  SECTION_PRE_DATA, /* Section */
2689  creaQry->data, /* Create */
2690  delQry->data, /* Del */
2691  NULL, /* Copy */
2692  NULL, /* Deps */
2693  0, /* # Deps */
2694  NULL, /* Dumper */
2695  NULL); /* Dumper Arg */
2696 
2697  /*
2698  * pg_largeobject and pg_largeobject_metadata come from the old system
2699  * intact, so set their relfrozenxids and relminmxids.
2700  */
2701  if (dopt->binary_upgrade)
2702  {
2703  PGresult *lo_res;
2704  PQExpBuffer loFrozenQry = createPQExpBuffer();
2705  PQExpBuffer loOutQry = createPQExpBuffer();
2706  int i_relfrozenxid,
2707  i_relminmxid;
2708 
2709  /*
2710  * pg_largeobject
2711  */
2712  if (fout->remoteVersion >= 90300)
2713  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid\n"
2714  "FROM pg_catalog.pg_class\n"
2715  "WHERE oid = %u;\n",
2716  LargeObjectRelationId);
2717  else
2718  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid\n"
2719  "FROM pg_catalog.pg_class\n"
2720  "WHERE oid = %u;\n",
2721  LargeObjectRelationId);
2722 
2723  lo_res = ExecuteSqlQueryForSingleRow(fout, loFrozenQry->data);
2724 
2725  i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
2726  i_relminmxid = PQfnumber(lo_res, "relminmxid");
2727 
2728  appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, set pg_largeobject relfrozenxid and relminmxid\n");
2729  appendPQExpBuffer(loOutQry, "UPDATE pg_catalog.pg_class\n"
2730  "SET relfrozenxid = '%u', relminmxid = '%u'\n"
2731  "WHERE oid = %u;\n",
2732  atoi(PQgetvalue(lo_res, 0, i_relfrozenxid)),
2733  atoi(PQgetvalue(lo_res, 0, i_relminmxid)),
2734  LargeObjectRelationId);
2735  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2736  "pg_largeobject", NULL, NULL, "",
2737  false, "pg_largeobject", SECTION_PRE_DATA,
2738  loOutQry->data, "", NULL,
2739  NULL, 0,
2740  NULL, NULL);
2741 
2742  PQclear(lo_res);
2743 
2744  /*
2745  * pg_largeobject_metadata
2746  */
2747  if (fout->remoteVersion >= 90000)
2748  {
2749  resetPQExpBuffer(loFrozenQry);
2750  resetPQExpBuffer(loOutQry);
2751 
2752  if (fout->remoteVersion >= 90300)
2753  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid\n"
2754  "FROM pg_catalog.pg_class\n"
2755  "WHERE oid = %u;\n",
2756  LargeObjectMetadataRelationId);
2757  else
2758  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid\n"
2759  "FROM pg_catalog.pg_class\n"
2760  "WHERE oid = %u;\n",
2761  LargeObjectMetadataRelationId);
2762 
2763  lo_res = ExecuteSqlQueryForSingleRow(fout, loFrozenQry->data);
2764 
2765  i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
2766  i_relminmxid = PQfnumber(lo_res, "relminmxid");
2767 
2768  appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, set pg_largeobject_metadata relfrozenxid and relminmxid\n");
2769  appendPQExpBuffer(loOutQry, "UPDATE pg_catalog.pg_class\n"
2770  "SET relfrozenxid = '%u', relminmxid = '%u'\n"
2771  "WHERE oid = %u;\n",
2772  atoi(PQgetvalue(lo_res, 0, i_relfrozenxid)),
2773  atoi(PQgetvalue(lo_res, 0, i_relminmxid)),
2774  LargeObjectMetadataRelationId);
2775  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2776  "pg_largeobject_metadata", NULL, NULL, "",
2777  false, "pg_largeobject_metadata", SECTION_PRE_DATA,
2778  loOutQry->data, "", NULL,
2779  NULL, 0,
2780  NULL, NULL);
2781 
2782  PQclear(lo_res);
2783  }
2784 
2785  destroyPQExpBuffer(loFrozenQry);
2786  destroyPQExpBuffer(loOutQry);
2787  }
2788 
2789  /* Dump DB comment if any */
2790  if (fout->remoteVersion >= 80200)
2791  {
2792  /*
2793  * 8.2 and up keep comments on shared objects in a shared table, so we
2794  * cannot use the dumpComment used for other database objects.
2795  */
2796  char *comment = PQgetvalue(res, 0, PQfnumber(res, "description"));
2797 
2798  if (comment && strlen(comment))
2799  {
2800  resetPQExpBuffer(dbQry);
2801 
2802  /*
2803  * Generates warning when loaded into a differently-named
2804  * database.
2805  */
2806  appendPQExpBuffer(dbQry, "COMMENT ON DATABASE %s IS ", fmtId(datname));
2807  appendStringLiteralAH(dbQry, comment, fout);
2808  appendPQExpBufferStr(dbQry, ";\n");
2809 
2810  ArchiveEntry(fout, dbCatId, createDumpId(), datname, NULL, NULL,
2811  dba, false, "COMMENT", SECTION_NONE,
2812  dbQry->data, "", NULL,
2813  &dbDumpId, 1, NULL, NULL);
2814  }
2815  }
2816  else
2817  {
2818  resetPQExpBuffer(dbQry);
2819  appendPQExpBuffer(dbQry, "DATABASE %s", fmtId(datname));
2820  dumpComment(fout, dbQry->data, NULL, "",
2821  dbCatId, 0, dbDumpId);
2822  }
2823 
2824  /* Dump shared security label. */
2825  if (!dopt->no_security_labels && fout->remoteVersion >= 90200)
2826  {
2827  PGresult *shres;
2828  PQExpBuffer seclabelQry;
2829 
2830  seclabelQry = createPQExpBuffer();
2831 
2832  buildShSecLabelQuery(conn, "pg_database", dbCatId.oid, seclabelQry);
2833  shres = ExecuteSqlQuery(fout, seclabelQry->data, PGRES_TUPLES_OK);
2834  resetPQExpBuffer(seclabelQry);
2835  emitShSecLabels(conn, shres, seclabelQry, "DATABASE", datname);
2836  if (strlen(seclabelQry->data))
2837  ArchiveEntry(fout, dbCatId, createDumpId(), datname, NULL, NULL,
2838  dba, false, "SECURITY LABEL", SECTION_NONE,
2839  seclabelQry->data, "", NULL,
2840  &dbDumpId, 1, NULL, NULL);
2841  destroyPQExpBuffer(seclabelQry);
2842  PQclear(shres);
2843  }
2844 
2845  PQclear(res);
2846 
2847  destroyPQExpBuffer(dbQry);
2848  destroyPQExpBuffer(delQry);
2849  destroyPQExpBuffer(creaQry);
2850 }
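/*
 * A representative command assembled above (all values illustrative):
 *
 *	CREATE DATABASE mydb WITH TEMPLATE = template0 ENCODING = 'UTF8'
 *		LC_COLLATE = 'en_US.UTF-8' LC_CTYPE = 'en_US.UTF-8'
 *		TABLESPACE = fastspace;
 */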
2851 
2852 /*
2853  * dumpEncoding: put the correct encoding into the archive
2854  */
2855 static void
2856 dumpEncoding(Archive *AH)
2857 {
2858  const char *encname = pg_encoding_to_char(AH->encoding);
2859  PQExpBuffer qry = createPQExpBuffer();
2860 
2861  if (g_verbose)
2862  write_msg(NULL, "saving encoding = %s\n", encname);
2863 
2864  appendPQExpBufferStr(qry, "SET client_encoding = ");
2865  appendStringLiteralAH(qry, encname, AH);
2866  appendPQExpBufferStr(qry, ";\n");
2867 
2868  ArchiveEntry(AH, nilCatalogId, createDumpId(),
2869  "ENCODING", NULL, NULL, "",
2870  false, "ENCODING", SECTION_PRE_DATA,
2871  qry->data, "", NULL,
2872  NULL, 0,
2873  NULL, NULL);
2874 
2875  destroyPQExpBuffer(qry);
2876 }
2877 
2878 
2879 /*
2880  * dumpStdStrings: put the correct escape string behavior into the archive
2881  */
2882 static void
2883 dumpStdStrings(Archive *AH)
2884 {
2885  const char *stdstrings = AH->std_strings ? "on" : "off";
2886  PQExpBuffer qry = createPQExpBuffer();
2887 
2888  if (g_verbose)
2889  write_msg(NULL, "saving standard_conforming_strings = %s\n",
2890  stdstrings);
2891 
2892  appendPQExpBuffer(qry, "SET standard_conforming_strings = '%s';\n",
2893  stdstrings);
2894 
2895  ArchiveEntry(AH, nilCatalogId, createDumpId(),
2896  "STDSTRINGS", NULL, NULL, "",
2897  false, "STDSTRINGS", SECTION_PRE_DATA,
2898  qry->data, "", NULL,
2899  NULL, 0,
2900  NULL, NULL);
2901 
2902  destroyPQExpBuffer(qry);
2903 }
2904 
2905 
2906 /*
2907  * getBlobs:
2908  * Collect schema-level data about large objects
2909  */
2910 static void
2911 getBlobs(Archive *fout)
2912 {
2913  DumpOptions *dopt = fout->dopt;
2914  PQExpBuffer blobQry = createPQExpBuffer();
2915  BlobInfo *binfo;
2916  DumpableObject *bdata;
2917  PGresult *res;
2918  int ntups;
2919  int i;
2920  int i_oid;
2921  int i_lomowner;
2922  int i_lomacl;
2923  int i_rlomacl;
2924  int i_initlomacl;
2925  int i_initrlomacl;
2926 
2927  /* Verbose message */
2928  if (g_verbose)
2929  write_msg(NULL, "reading large objects\n");
2930 
2931  /* Make sure we are in proper schema */
2932  selectSourceSchema(fout, "pg_catalog");
2933 
2934  /* Fetch BLOB OIDs, and owner/ACL data if >= 9.0 */
2935  if (fout->remoteVersion >= 90600)
2936  {
2937  PQExpBuffer acl_subquery = createPQExpBuffer();
2938  PQExpBuffer racl_subquery = createPQExpBuffer();
2939  PQExpBuffer init_acl_subquery = createPQExpBuffer();
2940  PQExpBuffer init_racl_subquery = createPQExpBuffer();
2941 
2942  buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
2943  init_racl_subquery, "l.lomacl", "l.lomowner", "'L'",
2944  dopt->binary_upgrade);
2945 
2946  appendPQExpBuffer(blobQry,
2947  "SELECT l.oid, (%s l.lomowner) AS rolname, "
2948  "%s AS lomacl, "
2949  "%s AS rlomacl, "
2950  "%s AS initlomacl, "
2951  "%s AS initrlomacl "
2952  "FROM pg_largeobject_metadata l "
2953  "LEFT JOIN pg_init_privs pip ON "
2954  "(l.oid = pip.objoid "
2955  "AND pip.classoid = 'pg_largeobject'::regclass "
2956  "AND pip.objsubid = 0) ",
2957  username_subquery,
2958  acl_subquery->data,
2959  racl_subquery->data,
2960  init_acl_subquery->data,
2961  init_racl_subquery->data);
2962 
2963  destroyPQExpBuffer(acl_subquery);
2964  destroyPQExpBuffer(racl_subquery);
2965  destroyPQExpBuffer(init_acl_subquery);
2966  destroyPQExpBuffer(init_racl_subquery);
2967  }
2968  else if (fout->remoteVersion >= 90000)
2969  appendPQExpBuffer(blobQry,
2970  "SELECT oid, (%s lomowner) AS rolname, lomacl, "
2971  "NULL AS rlomacl, NULL AS initlomacl, "
2972  "NULL AS initrlomacl "
2973  " FROM pg_largeobject_metadata",
2974  username_subquery);
2975  else
2976  appendPQExpBufferStr(blobQry,
2977  "SELECT DISTINCT loid AS oid, "
2978  "NULL::name AS rolname, NULL::oid AS lomacl, "
2979  "NULL::oid AS rlomacl, NULL::oid AS initlomacl, "
2980  "NULL::oid AS initrlomacl "
2981  " FROM pg_largeobject");
2982 
2983  res = ExecuteSqlQuery(fout, blobQry->data, PGRES_TUPLES_OK);
2984 
2985  i_oid = PQfnumber(res, "oid");
2986  i_lomowner = PQfnumber(res, "rolname");
2987  i_lomacl = PQfnumber(res, "lomacl");
2988  i_rlomacl = PQfnumber(res, "rlomacl");
2989  i_initlomacl = PQfnumber(res, "initlomacl");
2990  i_initrlomacl = PQfnumber(res, "initrlomacl");
2991 
2992  ntups = PQntuples(res);
2993 
2994  /*
2995  * Each large object has its own BLOB archive entry.
2996  */
2997  binfo = (BlobInfo *) pg_malloc(ntups * sizeof(BlobInfo));
2998 
2999  for (i = 0; i < ntups; i++)
3000  {
3001  binfo[i].dobj.objType = DO_BLOB;
3002  binfo[i].dobj.catId.tableoid = LargeObjectRelationId;
3003  binfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
3004  AssignDumpId(&binfo[i].dobj);
3005 
3006  binfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oid));
3007  binfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_lomowner));
3008  binfo[i].blobacl = pg_strdup(PQgetvalue(res, i, i_lomacl));
3009  binfo[i].rblobacl = pg_strdup(PQgetvalue(res, i, i_rlomacl));
3010  binfo[i].initblobacl = pg_strdup(PQgetvalue(res, i, i_initlomacl));
3011  binfo[i].initrblobacl = pg_strdup(PQgetvalue(res, i, i_initrlomacl));
3012 
3013  if (PQgetisnull(res, i, i_lomacl) &&
3014  PQgetisnull(res, i, i_rlomacl) &&
3015  PQgetisnull(res, i, i_initlomacl) &&
3016  PQgetisnull(res, i, i_initrlomacl))
3017  binfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
3018 
3019  /*
3020  * In binary-upgrade mode for blobs, we do *not* dump out the data or
3021  * the ACLs, should any exist. The data and ACL (if any) will be
3022  * copied by pg_upgrade, which simply copies the pg_largeobject and
3023  * pg_largeobject_metadata tables.
3024  *
3025  * We *do* dump out the definition of the blob because we need that to
3026  * make the restoration of the comments, and anything else, work since
3027  * pg_upgrade copies the files behind pg_largeobject and
3028  * pg_largeobject_metadata after the dump is restored.
3029  */
3030  if (dopt->binary_upgrade)
3031  binfo[i].dobj.dump &= ~(DUMP_COMPONENT_DATA | DUMP_COMPONENT_ACL);
3032  }
3033 
3034  /*
3035  * If we have any large objects, a "BLOBS" archive entry is needed. This
3036  * is just a placeholder for sorting; it carries no data now.
3037  */
3038  if (ntups > 0)
3039  {
3040  bdata = (DumpableObject *) pg_malloc(sizeof(DumpableObject));
3041  bdata->objType = DO_BLOB_DATA;
3042  bdata->catId = nilCatalogId;
3043  AssignDumpId(bdata);
3044  bdata->name = pg_strdup("BLOBS");
3045  }
3046 
3047  PQclear(res);
3048  destroyPQExpBuffer(blobQry);
3049 }
3050 
3051 /*
3052  * dumpBlob
3053  *
3054  * dump the definition (metadata) of the given large object
3055  */
3056 static void
3057 dumpBlob(Archive *fout, BlobInfo *binfo)
3058 {
3059  PQExpBuffer cquery = createPQExpBuffer();
3060  PQExpBuffer dquery = createPQExpBuffer();
3061 
3062  appendPQExpBuffer(cquery,
3063  "SELECT pg_catalog.lo_create('%s');\n",
3064  binfo->dobj.name);
3065 
3066  appendPQExpBuffer(dquery,
3067  "SELECT pg_catalog.lo_unlink('%s');\n",
3068  binfo->dobj.name);
3069 
3070  if (binfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
3071  ArchiveEntry(fout, binfo->dobj.catId, binfo->dobj.dumpId,
3072  binfo->dobj.name,
3073  NULL, NULL,
3074  binfo->rolname, false,
3075  "BLOB", SECTION_PRE_DATA,
3076  cquery->data, dquery->data, NULL,
3077  NULL, 0,
3078  NULL, NULL);
3079 
3080  /* set up tag for comment and/or ACL */
3081  resetPQExpBuffer(cquery);
3082  appendPQExpBuffer(cquery, "LARGE OBJECT %s", binfo->dobj.name);
3083 
3084  /* Dump comment if any */
3085  if (binfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3086  dumpComment(fout, cquery->data,
3087  NULL, binfo->rolname,
3088  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3089 
3090  /* Dump security label if any */
3091  if (binfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3092  dumpSecLabel(fout, cquery->data,
3093  NULL, binfo->rolname,
3094  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3095 
3096  /* Dump ACL if any */
3097  if (binfo->blobacl && (binfo->dobj.dump & DUMP_COMPONENT_ACL))
3098  dumpACL(fout, binfo->dobj.catId, binfo->dobj.dumpId, "LARGE OBJECT",
3099  binfo->dobj.name, NULL, cquery->data,
3100  NULL, binfo->rolname, binfo->blobacl, binfo->rblobacl,
3101  binfo->initblobacl, binfo->initrblobacl);
3102 
3103  destroyPQExpBuffer(cquery);
3104  destroyPQExpBuffer(dquery);
3105 }
3106 
3107 /*
3108  * dumpBlobs:
3109  * dump the data contents of all large objects
3110  */
3111 static int
3112 dumpBlobs(Archive *fout, void *arg)
3113 {
3114  const char *blobQry;
3115  const char *blobFetchQry;
3116  PGconn *conn = GetConnection(fout);
3117  PGresult *res;
3118  char buf[LOBBUFSIZE];
3119  int ntups;
3120  int i;
3121  int cnt;
3122 
3123  if (g_verbose)
3124  write_msg(NULL, "saving large objects\n");
3125 
3126  /* Make sure we are in proper schema */
3127  selectSourceSchema(fout, "pg_catalog");
3128 
3129  /*
3130  * Currently, we re-fetch all BLOB OIDs using a cursor. Consider scanning
3131  * the already-in-memory dumpable objects instead...
3132  */
3133  if (fout->remoteVersion >= 90000)
3134  blobQry = "DECLARE bloboid CURSOR FOR SELECT oid FROM pg_largeobject_metadata";
3135  else
3136  blobQry = "DECLARE bloboid CURSOR FOR SELECT DISTINCT loid FROM pg_largeobject";
3137 
3138  ExecuteSqlStatement(fout, blobQry);
3139 
3140  /* Command to fetch from cursor */
3141  blobFetchQry = "FETCH 1000 IN bloboid";
3142 
3143  do
3144  {
3145  /* Do a fetch */
3146  res = ExecuteSqlQuery(fout, blobFetchQry, PGRES_TUPLES_OK);
3147 
3148  /* Process the tuples, if any */
3149  ntups = PQntuples(res);
3150  for (i = 0; i < ntups; i++)
3151  {
3152  Oid blobOid;
3153  int loFd;
3154 
3155  blobOid = atooid(PQgetvalue(res, i, 0));
3156  /* Open the BLOB */
3157  loFd = lo_open(conn, blobOid, INV_READ);
3158  if (loFd == -1)
3159  exit_horribly(NULL, "could not open large object %u: %s",
3160  blobOid, PQerrorMessage(conn));
3161 
3162  StartBlob(fout, blobOid);
3163 
3164  /* Now read it in chunks, sending data to archive */
3165  do
3166  {
3167  cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
3168  if (cnt < 0)
3169  exit_horribly(NULL, "error reading large object %u: %s",
3170  blobOid, PQerrorMessage(conn));
3171 
3172  WriteData(fout, buf, cnt);
3173  } while (cnt > 0);
3174 
3175  lo_close(conn, loFd);
3176 
3177  EndBlob(fout, blobOid);
3178  }
3179 
3180  PQclear(res);
3181  } while (ntups > 0);
3182 
3183  return 1;
3184 }
3185 
3186 /*
3187  * getPolicies
3188  * get information about policies on a dumpable table.
3189  */
3190 void
3191 getPolicies(Archive *fout, TableInfo tblinfo[], int numTables)
3192 {
3193  PQExpBuffer query;
3194  PGresult *res;
3195  PolicyInfo *polinfo;
3196  int i_oid;
3197  int i_tableoid;
3198  int i_polname;
3199  int i_polcmd;
3200  int i_polpermissive;
3201  int i_polroles;
3202  int i_polqual;
3203  int i_polwithcheck;
3204  int i,
3205  j,
3206  ntups;
3207 
3208  if (fout->remoteVersion < 90500)
3209  return;
3210 
3211  query = createPQExpBuffer();
3212 
3213  for (i = 0; i < numTables; i++)
3214  {
3215  TableInfo *tbinfo = &tblinfo[i];
3216 
3217  /* Ignore row security on tables not to be dumped */
3218  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_POLICY))
3219  continue;
3220 
3221  if (g_verbose)
3222  write_msg(NULL, "reading row security enabled for table \"%s.%s\"\n",
3223  tbinfo->dobj.namespace->dobj.name,
3224  tbinfo->dobj.name);
3225 
3226  /*
3227  * Get row security enabled information for the table. We represent
3228  * RLS being enabled on a table by creating a PolicyInfo object with an empty
3229  * policy.
3230  */
3231  if (tbinfo->rowsec)
3232  {
3233  /*
3234  * Note: use tableoid 0 so that this object won't be mistaken for
3235  * something that pg_depend entries apply to.
3236  */
3237  polinfo = pg_malloc(sizeof(PolicyInfo));
3238  polinfo->dobj.objType = DO_POLICY;
3239  polinfo->dobj.catId.tableoid = 0;
3240  polinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
3241  AssignDumpId(&polinfo->dobj);
3242  polinfo->dobj.namespace = tbinfo->dobj.namespace;
3243  polinfo->dobj.name = pg_strdup(tbinfo->dobj.name);
3244  polinfo->poltable = tbinfo;
3245  polinfo->polname = NULL;
3246  polinfo->polcmd = '\0';
3247  polinfo->polpermissive = 0;
3248  polinfo->polroles = NULL;
3249  polinfo->polqual = NULL;
3250  polinfo->polwithcheck = NULL;
3251  }
3252 
3253  if (g_verbose)
3254  write_msg(NULL, "reading policies for table \"%s.%s\"\n",
3255  tbinfo->dobj.namespace->dobj.name,
3256  tbinfo->dobj.name);
3257 
3258  /*
3259  * select table schema to ensure regproc name is qualified if needed
3260  */
3261  selectSourceSchema(fout, tbinfo->dobj.namespace->dobj.name);
3262 
3263  resetPQExpBuffer(query);
3264 
3265  /* Get the policies for the table. */
3266  if (fout->remoteVersion >= 100000)
3267  appendPQExpBuffer(query,
3268  "SELECT oid, tableoid, pol.polname, pol.polcmd, pol.polpermissive, "
3269  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
3270  " pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
3271  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
3272  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
3273  "FROM pg_catalog.pg_policy pol "
3274  "WHERE polrelid = '%u'",
3275  tbinfo->dobj.catId.oid);
3276  else
3277  appendPQExpBuffer(query,
3278  "SELECT oid, tableoid, pol.polname, pol.polcmd, 't' as polpermissive, "
3279  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
3280  " pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
3281  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
3282  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
3283  "FROM pg_catalog.pg_policy pol "
3284  "WHERE polrelid = '%u'",
3285  tbinfo->dobj.catId.oid);
3286  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3287 
3288  ntups = PQntuples(res);
3289 
3290  if (ntups == 0)
3291  {
3292  /*
3293  * No explicit policies to handle (only the default-deny policy,
3294  * which is handled as part of the table definition). Clean up
3295  * and return.
3296  */
3297  PQclear(res);
3298  continue;
3299  }
3300 
3301  i_oid = PQfnumber(res, "oid");
3302  i_tableoid = PQfnumber(res, "tableoid");
3303  i_polname = PQfnumber(res, "polname");
3304  i_polcmd = PQfnumber(res, "polcmd");
3305  i_polpermissive = PQfnumber(res, "polpermissive");
3306  i_polroles = PQfnumber(res, "polroles");
3307  i_polqual = PQfnumber(res, "polqual");
3308  i_polwithcheck = PQfnumber(res, "polwithcheck");
3309 
3310  polinfo = pg_malloc(ntups * sizeof(PolicyInfo));
3311 
3312  for (j = 0; j < ntups; j++)
3313  {
3314  polinfo[j].dobj.objType = DO_POLICY;
3315  polinfo[j].dobj.catId.tableoid =
3316  atooid(PQgetvalue(res, j, i_tableoid));
3317  polinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
3318  AssignDumpId(&polinfo[j].dobj);
3319  polinfo[j].dobj.namespace = tbinfo->dobj.namespace;
3320  polinfo[j].poltable = tbinfo;
3321  polinfo[j].polname = pg_strdup(PQgetvalue(res, j, i_polname));
3322  polinfo[j].dobj.name = pg_strdup(polinfo[j].polname);
3323 
3324  polinfo[j].polcmd = *(PQgetvalue(res, j, i_polcmd));
3325  polinfo[j].polpermissive = *(PQgetvalue(res, j, i_polpermissive)) == 't';
3326 
3327  if (PQgetisnull(res, j, i_polroles))
3328  polinfo[j].polroles = NULL;
3329  else
3330  polinfo[j].polroles = pg_strdup(PQgetvalue(res, j, i_polroles));
3331 
3332  if (PQgetisnull(res, j, i_polqual))
3333  polinfo[j].polqual = NULL;
3334  else
3335  polinfo[j].polqual = pg_strdup(PQgetvalue(res, j, i_polqual));
3336 
3337  if (PQgetisnull(res, j, i_polwithcheck))
3338  polinfo[j].polwithcheck = NULL;
3339  else
3340  polinfo[j].polwithcheck
3341  = pg_strdup(PQgetvalue(res, j, i_polwithcheck));
3342  }
3343  PQclear(res);
3344  }
3345  destroyPQExpBuffer(query);
3346 }
3347 
3348 /*
3349  * dumpPolicy
3350  * dump the definition of the given policy
3351  */
3352 static void
3353 dumpPolicy(Archive *fout, PolicyInfo *polinfo)
3354 {
3355  DumpOptions *dopt = fout->dopt;
3356  TableInfo *tbinfo = polinfo->poltable;
3357  PQExpBuffer query;
3358  PQExpBuffer delqry;
3359  const char *cmd;
3360  char *tag;
3361 
3362  if (dopt->dataOnly)
3363  return;
3364 
3365  /*
3366  * If polname is NULL, then this record is just indicating that ROW LEVEL
3367  * SECURITY is enabled for the table. Dump as ALTER TABLE <table> ENABLE
3368  * ROW LEVEL SECURITY.
3369  */
3370  if (polinfo->polname == NULL)
3371  {
3372  query = createPQExpBuffer();
3373 
3374  appendPQExpBuffer(query, "ALTER TABLE %s ENABLE ROW LEVEL SECURITY;",
3375  fmtId(polinfo->dobj.name));
3376 
3377  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3378  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3379  polinfo->dobj.name,
3380  polinfo->dobj.namespace->dobj.name,
3381  NULL,
3382  tbinfo->rolname, false,
3383  "ROW SECURITY", SECTION_POST_DATA,
3384  query->data, "", NULL,
3385  NULL, 0,
3386  NULL, NULL);
3387 
3388  destroyPQExpBuffer(query);
3389  return;
3390  }
3391 
3392  if (polinfo->polcmd == '*')
3393  cmd = "";
3394  else if (polinfo->polcmd == 'r')
3395  cmd = " FOR SELECT";
3396  else if (polinfo->polcmd == 'a')
3397  cmd = " FOR INSERT";
3398  else if (polinfo->polcmd == 'w')
3399  cmd = " FOR UPDATE";
3400  else if (polinfo->polcmd == 'd')
3401  cmd = " FOR DELETE";
3402  else
3403  {
3404  write_msg(NULL, "unexpected policy command type: %c\n",
3405  polinfo->polcmd);
3406  exit_nicely(1);
3407  }
3408 
3409  query = createPQExpBuffer();
3410  delqry = createPQExpBuffer();
3411 
3412  appendPQExpBuffer(query, "CREATE POLICY %s", fmtId(polinfo->polname));
3413 
3414  appendPQExpBuffer(query, " ON %s%s%s", fmtId(tbinfo->dobj.name),
3415  !polinfo->polpermissive ? " AS RESTRICTIVE" : "", cmd);
3416 
3417  if (polinfo->polroles != NULL)
3418  appendPQExpBuffer(query, " TO %s", polinfo->polroles);
3419 
3420  if (polinfo->polqual != NULL)
3421  appendPQExpBuffer(query, " USING (%s)", polinfo->polqual);
3422 
3423  if (polinfo->polwithcheck != NULL)
3424  appendPQExpBuffer(query, " WITH CHECK (%s)", polinfo->polwithcheck);
3425 
3426  appendPQExpBuffer(query, ";\n");
3427 
3428  appendPQExpBuffer(delqry, "DROP POLICY %s", fmtId(polinfo->polname));
3429  appendPQExpBuffer(delqry, " ON %s;\n", fmtId(tbinfo->dobj.name));
3430 
3431  tag = psprintf("%s %s", tbinfo->dobj.name, polinfo->dobj.name);
3432 
3433  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3434  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3435  tag,
3436  polinfo->dobj.namespace->dobj.name,
3437  NULL,
3438  tbinfo->rolname, false,
3439  "POLICY", SECTION_POST_DATA,
3440  query->data, delqry->data, NULL,
3441  NULL, 0,
3442  NULL, NULL);
3443 
3444  free(tag);
3445  destroyPQExpBuffer(query);
3446  destroyPQExpBuffer(delqry);
3447 }
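/*
 * Illustrative example only (hypothetical policy, table, and role names):
 * for a permissive SELECT policy "p1" on table "accounts" granted to role
 * "staff", the creation and drop commands built by dumpPolicy above would
 * look roughly like:
 *
 *   CREATE POLICY p1 ON accounts FOR SELECT TO staff USING ((owner = CURRENT_USER));
 *   DROP POLICY p1 ON accounts;
 */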
3448 
3449 /*
3450  * getPublications
3451  * get information about publications
3452  */
3453 void
3454 getPublications(Archive *fout)
3455 {
3456  DumpOptions *dopt = fout->dopt;
3457  PQExpBuffer query;
3458  PGresult *res;
3459  PublicationInfo *pubinfo;
3460  int i_tableoid;
3461  int i_oid;
3462  int i_pubname;
3463  int i_rolname;
3464  int i_puballtables;
3465  int i_pubinsert;
3466  int i_pubupdate;
3467  int i_pubdelete;
3468  int i,
3469  ntups;
3470 
3471  if (dopt->no_publications || fout->remoteVersion < 100000)
3472  return;
3473 
3474  query = createPQExpBuffer();
3475 
3476  resetPQExpBuffer(query);
3477 
3478  /* Make sure we are in proper schema */
3479  selectSourceSchema(fout, "pg_catalog");
3480 
3481  /* Get the publications. */
3482  appendPQExpBuffer(query,
3483  "SELECT p.tableoid, p.oid, p.pubname, "
3484  "(%s p.pubowner) AS rolname, "
3485  "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete "
3486  "FROM pg_publication p",
3487  username_subquery);
3488 
3489  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3490 
3491  ntups = PQntuples(res);
3492 
3493  i_tableoid = PQfnumber(res, "tableoid");
3494  i_oid = PQfnumber(res, "oid");
3495  i_pubname = PQfnumber(res, "pubname");
3496  i_rolname = PQfnumber(res, "rolname");
3497  i_puballtables = PQfnumber(res, "puballtables");
3498  i_pubinsert = PQfnumber(res, "pubinsert");
3499  i_pubupdate = PQfnumber(res, "pubupdate");
3500  i_pubdelete = PQfnumber(res, "pubdelete");
3501 
3502  pubinfo = pg_malloc(ntups * sizeof(PublicationInfo));
3503 
3504  for (i = 0; i < ntups; i++)
3505  {
3506  pubinfo[i].dobj.objType = DO_PUBLICATION;
3507  pubinfo[i].dobj.catId.tableoid =
3508  atooid(PQgetvalue(res, i, i_tableoid));
3509  pubinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
3510  AssignDumpId(&pubinfo[i].dobj);
3511  pubinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_pubname));
3512  pubinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
3513  pubinfo[i].puballtables =
3514  (strcmp(PQgetvalue(res, i, i_puballtables), "t") == 0);
3515  pubinfo[i].pubinsert =
3516  (strcmp(PQgetvalue(res, i, i_pubinsert), "t") == 0);
3517  pubinfo[i].pubupdate =
3518  (strcmp(PQgetvalue(res, i, i_pubupdate), "t") == 0);
3519  pubinfo[i].pubdelete =
3520  (strcmp(PQgetvalue(res, i, i_pubdelete), "t") == 0);
3521 
3522  if (strlen(pubinfo[i].rolname) == 0)
3523  write_msg(NULL, "WARNING: owner of publication \"%s\" appears to be invalid\n",
3524  pubinfo[i].dobj.name);
3525 
3526  /* Decide whether we want to dump it */
3527  selectDumpableObject(&(pubinfo[i].dobj), fout);
3528  }
3529  PQclear(res);
3530 
3531  destroyPQExpBuffer(query);
3532 }
3533 
3534 /*
3535  * dumpPublication
3536  * dump the definition of the given publication
3537  */
3538 static void
3539 dumpPublication(Archive *fout, PublicationInfo *pubinfo)
3540 {
3541  PQExpBuffer delq;
3542  PQExpBuffer query;
3543  PQExpBuffer labelq;
3544  bool first = true;
3545 
3546  if (!(pubinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3547  return;
3548 
3549  delq = createPQExpBuffer();
3550  query = createPQExpBuffer();
3551  labelq = createPQExpBuffer();
3552 
3553  appendPQExpBuffer(delq, "DROP PUBLICATION %s;\n",
3554  fmtId(pubinfo->dobj.name));
3555 
3556  appendPQExpBuffer(query, "CREATE PUBLICATION %s",
3557  fmtId(pubinfo->dobj.name));
3558 
3559  appendPQExpBuffer(labelq, "PUBLICATION %s", fmtId(pubinfo->dobj.name));
3560 
3561  if (pubinfo->puballtables)
3562  appendPQExpBufferStr(query, " FOR ALL TABLES");
3563 
3564  appendPQExpBufferStr(query, " WITH (publish = '");
3565  if (pubinfo->pubinsert)
3566  {
3567  appendPQExpBufferStr(query, "insert");
3568  first = false;
3569  }
3570 
3571  if (pubinfo->pubupdate)
3572  {
3573  if (!first)
3574  appendPQExpBufferStr(query, ", ");
3575 
3576  appendPQExpBufferStr(query, "update");
3577  first = false;
3578  }
3579 
3580  if (pubinfo->pubdelete)
3581  {
3582  if (!first)
3583  appendPQExpBufferStr(query, ", ");
3584 
3585  appendPQExpBufferStr(query, "delete");
3586  first = false;
3587  }
3588 
3589  appendPQExpBufferStr(query, "');\n");
3590 
3591  ArchiveEntry(fout, pubinfo->dobj.catId, pubinfo->dobj.dumpId,
3592  pubinfo->dobj.name,
3593  NULL,
3594  NULL,
3595  pubinfo->rolname, false,
3596  "PUBLICATION", SECTION_POST_DATA,
3597  query->data, delq->data, NULL,
3598  NULL, 0,
3599  NULL, NULL);
3600 
3601  if (pubinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3602  dumpComment(fout, labelq->data,
3603  NULL, pubinfo->rolname,
3604  pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
3605 
3606  if (pubinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3607  dumpSecLabel(fout, labelq->data,
3608  NULL, pubinfo->rolname,
3609  pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
3610 
3611  destroyPQExpBuffer(delq);
3612  destroyPQExpBuffer(query);
3613 }
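/*
 * Illustrative example only (hypothetical publication name): for a
 * publication "pub1" that covers all tables and publishes only inserts and
 * updates, dumpPublication above would emit roughly:
 *
 *   CREATE PUBLICATION pub1 FOR ALL TABLES WITH (publish = 'insert, update');
 *   DROP PUBLICATION pub1;
 */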
3614 
3615 /*
3616  * getPublicationTables
3617  * get information about publication membership for dumpable tables.
3618  */
3619 void
3620 getPublicationTables(Archive *fout, TableInfo tblinfo[], int numTables)
3621 {
3622  PQExpBuffer query;
3623  PGresult *res;
3624  PublicationRelInfo *pubrinfo;
3625  int i_tableoid;
3626  int i_oid;
3627  int i_pubname;
3628  int i,
3629  j,
3630  ntups;
3631 
3632  if (fout->remoteVersion < 100000)
3633  return;
3634 
3635  query = createPQExpBuffer();
3636 
3637  /* Make sure we are in proper schema */
3638  selectSourceSchema(fout, "pg_catalog");
3639 
3640  for (i = 0; i < numTables; i++)
3641  {
3642  TableInfo *tbinfo = &tblinfo[i];
3643 
3644  /* Only plain tables can be added to publications. */
3645  if (tbinfo->relkind != RELKIND_RELATION)
3646  continue;
3647 
3648  /*
3649  * Ignore publication membership of tables whose definitions are not
3650  * to be dumped.
3651  */
3652  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3653  continue;
3654 
3655  if (g_verbose)
3656  write_msg(NULL, "reading publication membership for table \"%s.%s\"\n",
3657  tbinfo->dobj.namespace->dobj.name,
3658  tbinfo->dobj.name);
3659 
3660  resetPQExpBuffer(query);
3661 
3662  /* Get the publication membership for the table. */
3663  appendPQExpBuffer(query,
3664  "SELECT pr.tableoid, pr.oid, p.pubname "
3665  "FROM pg_publication_rel pr, pg_publication p "
3666  "WHERE pr.prrelid = '%u'"
3667  " AND p.oid = pr.prpubid",
3668  tbinfo->dobj.catId.oid);
3669  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3670 
3671  ntups = PQntuples(res);
3672 
3673  if (ntups == 0)
3674  {
3675  /*
3676  * Table is not a member of any publication. Clean up and continue.
3677  */
3678  PQclear(res);
3679  continue;
3680  }
3681 
3682  i_tableoid = PQfnumber(res, "tableoid");
3683  i_oid = PQfnumber(res, "oid");
3684  i_pubname = PQfnumber(res, "pubname");
3685 
3686  pubrinfo = pg_malloc(ntups * sizeof(PublicationRelInfo));
3687 
3688  for (j = 0; j < ntups; j++)
3689  {
3690  pubrinfo[j].dobj.objType = DO_PUBLICATION_REL;
3691  pubrinfo[j].dobj.catId.tableoid =
3692  atooid(PQgetvalue(res, j, i_tableoid));
3693  pubrinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
3694  AssignDumpId(&pubrinfo[j].dobj);
3695  pubrinfo[j].dobj.namespace = tbinfo->dobj.namespace;
3696  pubrinfo[j].dobj.name = tbinfo->dobj.name;
3697  pubrinfo[j].pubname = pg_strdup(PQgetvalue(res, j, i_pubname));
3698  pubrinfo[j].pubtable = tbinfo;
3699 
3700  /* Decide whether we want to dump it */
3701  selectDumpablePublicationTable(&(pubrinfo[j].dobj), fout);
3702  }
3703  PQclear(res);
3704  }
3705  destroyPQExpBuffer(query);
3706 }
3707 
3708 /*
3709  * dumpPublicationTable
3710  * dump the definition of the given publication table mapping
3711  */
3712 static void
3713 dumpPublicationTable(Archive *fout, PublicationRelInfo *pubrinfo)
3714 {
3715  TableInfo *tbinfo = pubrinfo->pubtable;
3716  PQExpBuffer query;
3717  char *tag;
3718 
3719  if (!(pubrinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3720  return;
3721 
3722  tag = psprintf("%s %s", pubrinfo->pubname, tbinfo->dobj.name);
3723 
3724  query = createPQExpBuffer();
3725 
3726  appendPQExpBuffer(query, "ALTER PUBLICATION %s ADD TABLE ONLY",
3727  fmtId(pubrinfo->pubname));
3728  appendPQExpBuffer(query, " %s;",
3729  fmtId(tbinfo->dobj.name));
3730 
3731  /*
3732  * There is no point in creating a separate drop query, as the drop is
3733  * handled by the table drop.
3734  */
3735  ArchiveEntry(fout, pubrinfo->dobj.catId, pubrinfo->dobj.dumpId,
3736  tag,
3737  tbinfo->dobj.namespace->dobj.name,
3738  NULL,
3739  "", false,
3740  "PUBLICATION TABLE", SECTION_POST_DATA,
3741  query->data, "", NULL,
3742  NULL, 0,
3743  NULL, NULL);
3744 
3745  free(tag);
3746  destroyPQExpBuffer(query);
3747 }
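/*
 * Illustrative example only (hypothetical names): for a table "t1" that is
 * a member of publication "pub1", dumpPublicationTable above emits an ALTER
 * rather than a CREATE/DROP pair:
 *
 *   ALTER PUBLICATION pub1 ADD TABLE ONLY t1;
 */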
3748 
3749 /*
3750  * Is the currently connected user a superuser?
3751  */
3752 static bool
3753 is_superuser(Archive *fout)
3754 {
3755  ArchiveHandle *AH = (ArchiveHandle *) fout;
3756  const char *val;
3757 
3758  val = PQparameterStatus(AH->connection, "is_superuser");
3759 
3760  if (val && strcmp(val, "on") == 0)
3761  return true;
3762 
3763  return false;
3764 }
3765 
3766 /*
3767  * getSubscriptions
3768  * get information about subscriptions
3769  */
3770 void
3771 getSubscriptions(Archive *fout)
3772 {
3773  DumpOptions *dopt = fout->dopt;
3774  PQExpBuffer query;
3775  PGresult *res;
3776  SubscriptionInfo *subinfo;
3777  int i_tableoid;
3778  int i_oid;
3779  int i_subname;
3780  int i_rolname;
3781  int i_subconninfo;
3782  int i_subslotname;
3783  int i_subsynccommit;
3784  int i_subpublications;
3785  int i,
3786  ntups;
3787 
3788  if (dopt->no_subscriptions || fout->remoteVersion < 100000)
3789  return;
3790 
3791  /* Make sure we are in proper schema */
3792  selectSourceSchema(fout, "pg_catalog");
3793 
3794  if (!is_superuser(fout))
3795  {
3796  int n;
3797 
3798  res = ExecuteSqlQuery(fout,
3799  "SELECT count(*) FROM pg_subscription "
3800  "WHERE subdbid = (SELECT oid FROM pg_database"
3801  " WHERE datname = current_database())",
3802  PGRES_TUPLES_OK);
3803  n = atoi(PQgetvalue(res, 0, 0));
3804  if (n > 0)
3805  write_msg(NULL, "WARNING: subscriptions not dumped because current user is not a superuser\n");
3806  PQclear(res);
3807  return;
3808  }
3809 
3810  query = createPQExpBuffer();
3811 
3812  resetPQExpBuffer(query);
3813 
3814  /* Get the subscriptions in current database. */
3815  appendPQExpBuffer(query,
3816  "SELECT s.tableoid, s.oid, s.subname,"
3817  "(%s s.subowner) AS rolname, "
3818  " s.subconninfo, s.subslotname, s.subsynccommit, "
3819  " s.subpublications "
3820  "FROM pg_subscription s "
3821  "WHERE s.subdbid = (SELECT oid FROM pg_database"
3822  " WHERE datname = current_database())",
3823  username_subquery);
3824  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3825 
3826  ntups = PQntuples(res);
3827 
3828  i_tableoid = PQfnumber(res, "tableoid");
3829  i_oid = PQfnumber(res, "oid");
3830  i_subname = PQfnumber(res, "subname");
3831  i_rolname = PQfnumber(res, "rolname");
3832  i_subconninfo = PQfnumber(res, "subconninfo");
3833  i_subslotname = PQfnumber(res, "subslotname");
3834  i_subsynccommit = PQfnumber(res, "subsynccommit");
3835  i_subpublications = PQfnumber(res, "subpublications");
3836 
3837  subinfo = pg_malloc(ntups * sizeof(SubscriptionInfo));
3838 
3839  for (i = 0; i < ntups; i++)
3840  {
3841  subinfo[i].dobj.objType = DO_SUBSCRIPTION;
3842  subinfo[i].dobj.catId.tableoid =
3843  atooid(PQgetvalue(res, i, i_tableoid));
3844  subinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
3845  AssignDumpId(&subinfo[i].dobj);
3846  subinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_subname));
3847  subinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
3848  subinfo[i].subconninfo = pg_strdup(PQgetvalue(res, i, i_subconninfo));
3849  if (PQgetisnull(res, i, i_subslotname))
3850  subinfo[i].subslotname = NULL;
3851  else
3852  subinfo[i].subslotname = pg_strdup(PQgetvalue(res, i, i_subslotname));
3853  subinfo[i].subsynccommit =
3854  pg_strdup(PQgetvalue(res, i, i_subsynccommit));
3855  subinfo[i].subpublications =
3856  pg_strdup(PQgetvalue(res, i, i_subpublications));
3857 
3858  if (strlen(subinfo[i].rolname) == 0)
3859  write_msg(NULL, "WARNING: owner of subscription \"%s\" appears to be invalid\n",
3860  subinfo[i].dobj.name);
3861 
3862  /* Decide whether we want to dump it */
3863  selectDumpableObject(&(subinfo[i].dobj), fout);
3864  }
3865  PQclear(res);
3866 
3867  destroyPQExpBuffer(query);
3868 }
3869 
3870 /*
3871  * dumpSubscription
3872  * dump the definition of the given subscription
3873  */
3874 static void
3875 dumpSubscription(Archive *fout, SubscriptionInfo *subinfo)
3876 {
3877  PQExpBuffer delq;
3878  PQExpBuffer query;
3879  PQExpBuffer labelq;
3880  PQExpBuffer publications;
3881  char **pubnames = NULL;
3882  int npubnames = 0;
3883  int i;
3884 
3885  if (!(subinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3886  return;
3887 
3888  delq = createPQExpBuffer();
3889  query = createPQExpBuffer();
3890  labelq = createPQExpBuffer();
3891 
3892  appendPQExpBuffer(delq, "DROP SUBSCRIPTION %s;\n",
3893  fmtId(subinfo->dobj.name));
3894 
3895  appendPQExpBuffer(query, "CREATE SUBSCRIPTION %s CONNECTION ",
3896  fmtId(subinfo->dobj.name));
3897  appendStringLiteralAH(query, subinfo->subconninfo, fout);
3898 
3899  /* Build list of quoted publications and append them to query. */
3900  if (!parsePGArray(subinfo->subpublications, &pubnames, &npubnames))
3901  {
3902  write_msg(NULL,
3903  "WARNING: could not parse subpublications array\n");
3904  if (pubnames)
3905  free(pubnames);
3906  pubnames = NULL;
3907  npubnames = 0;
3908  }
3909 
3910  publications = createPQExpBuffer();
3911  for (i = 0; i < npubnames; i++)
3912  {
3913  if (i > 0)
3914  appendPQExpBufferStr(publications, ", ");
3915 
3916  appendPQExpBufferStr(publications, fmtId(pubnames[i]));
3917  }
3918 
3919  appendPQExpBuffer(query, " PUBLICATION %s WITH (connect = false, slot_name = ", publications->data);
3920  if (subinfo->subslotname)
3921  appendStringLiteralAH(query, subinfo->subslotname, fout);
3922  else
3923  appendPQExpBufferStr(query, "NONE");
3924 
3925  if (strcmp(subinfo->subsynccommit, "off") != 0)
3926  appendPQExpBuffer(query, ", synchronous_commit = %s", fmtId(subinfo->subsynccommit));
3927 
3928  appendPQExpBufferStr(query, ");\n");
3929 
3930  appendPQExpBuffer(labelq, "SUBSCRIPTION %s", fmtId(subinfo->dobj.name));
3931 
3932  ArchiveEntry(fout, subinfo->dobj.catId, subinfo->dobj.dumpId,
3933  subinfo->dobj.name,
3934  NULL,
3935  NULL,
3936  subinfo->rolname, false,
3937  "SUBSCRIPTION", SECTION_POST_DATA,
3938  query->data, delq->data, NULL,
3939  NULL, 0,
3940  NULL, NULL);
3941 
3942  if (subinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3943  dumpComment(fout, labelq->data,
3944  NULL, subinfo->rolname,
3945  subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
3946 
3947  if (subinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3948  dumpSecLabel(fout, labelq->data,
3949  NULL, subinfo->rolname,
3950  subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
3951 
3952  destroyPQExpBuffer(publications);
3953  if (pubnames)
3954  free(pubnames);
3955 
3956  destroyPQExpBuffer(delq);
3957  destroyPQExpBuffer(query);
3958 }
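/*
 * Illustrative example only (hypothetical subscription name, publication
 * name, and connection string): for a subscription "sub1" to publication
 * "pub1" whose slot name matches the subscription and whose
 * synchronous_commit setting is "off" (the default, so it is omitted),
 * dumpSubscription above would emit roughly:
 *
 *   CREATE SUBSCRIPTION sub1 CONNECTION 'host=primary dbname=app'
 *       PUBLICATION pub1 WITH (connect = false, slot_name = 'sub1');
 *   DROP SUBSCRIPTION sub1;
 */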
3959 
3960 static void
3961 binary_upgrade_set_type_oids_by_type_oid(Archive *fout,
3962  PQExpBuffer upgrade_buffer,
3963  Oid pg_type_oid,
3964  bool force_array_type)
3965 {
3966  PQExpBuffer upgrade_query = createPQExpBuffer();
3967  PGresult *res;
3968  Oid pg_type_array_oid;
3969 
3970  appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type oid\n");
3971  appendPQExpBuffer(upgrade_buffer,
3972  "SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('%u'::pg_catalog.oid);\n\n",
3973  pg_type_oid);
3974 
3975  /* we only support old >= 8.3 for binary upgrades */
3976  appendPQExpBuffer(upgrade_query,
3977  "SELECT typarray "
3978  "FROM pg_catalog.pg_type "
3979  "WHERE oid = '%u'::pg_catalog.oid;",
3980  pg_type_oid);
3981 
3982  res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
3983 
3984  pg_type_array_oid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typarray")));
3985 
3986  PQclear(res);
3987 
3988  if (!OidIsValid(pg_type_array_oid) && force_array_type)
3989  {
3990  /*
3991  * If the old version didn't assign an array type, but the new version
3992  * does, we must select an unused type OID to assign. This currently
3993  * only happens for domains, when upgrading pre-v11 to v11 and up.
3994  *
3995  * Note: local state here is kind of ugly, but we must have some,
3996  * since we mustn't choose the same unused OID more than once.
3997  */
3998  static Oid next_possible_free_oid = FirstNormalObjectId;
3999  bool is_dup;
4000 
4001  do
4002  {
4003  ++next_possible_free_oid;
4004  printfPQExpBuffer(upgrade_query,
4005  "SELECT EXISTS(SELECT 1 "
4006  "FROM pg_catalog.pg_type "
4007  "WHERE oid = '%u'::pg_catalog.oid);",
4008  next_possible_free_oid);
4009  res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4010  is_dup = (PQgetvalue(res, 0, 0)[0] == 't');
4011  PQclear(res);
4012  } while (is_dup);
4013 
4014  pg_type_array_oid = next_possible_free_oid;
4015  }
4016 
4017  if (OidIsValid(pg_type_array_oid))
4018  {
4019  appendPQExpBufferStr(upgrade_buffer,
4020  "\n-- For binary upgrade, must preserve pg_type array oid\n");
4021  appendPQExpBuffer(upgrade_buffer,
4022  "SELECT pg_catalog.binary_upgrade_set_next_array_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4023  pg_type_array_oid);
4024  }
4025 
4026  destroyPQExpBuffer(upgrade_query);
4027 }
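/*
 * Illustrative example only (hypothetical OIDs): for a user-defined type
 * whose pg_type OID is 16385 and whose array type has OID 16386, the
 * commands appended to upgrade_buffer above would look like:
 *
 *   -- For binary upgrade, must preserve pg_type oid
 *   SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('16385'::pg_catalog.oid);
 *
 *   -- For binary upgrade, must preserve pg_type array oid
 *   SELECT pg_catalog.binary_upgrade_set_next_array_pg_type_oid('16386'::pg_catalog.oid);
 */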
4028 
4029 static bool
4030 binary_upgrade_set_type_oids_by_rel_oid(Archive *fout,
4031  PQExpBuffer upgrade_buffer,
4032  Oid pg_rel_oid)
4033 {
4034  PQExpBuffer upgrade_query = createPQExpBuffer();
4035  PGresult *upgrade_res;
4036  Oid pg_type_oid;
4037  bool toast_set = false;
4038 
4039  /* we only support old >= 8.3 for binary upgrades */
4040  appendPQExpBuffer(upgrade_query,
4041  "SELECT c.reltype AS crel, t.reltype AS trel "
4042  "FROM pg_catalog.pg_class c "
4043  "LEFT JOIN pg_catalog.pg_class t ON "
4044  " (c.reltoastrelid = t.oid) "
4045  "WHERE c.oid = '%u'::pg_catalog.oid;",
4046  pg_rel_oid);
4047 
4048  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4049 
4050  pg_type_oid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "crel")));
4051 
4052  binary_upgrade_set_type_oids_by_type_oid(fout, upgrade_buffer,
4053  pg_type_oid, false);
4054 
4055  if (!PQgetisnull(upgrade_res, 0, PQfnumber(upgrade_res, "trel")))
4056  {
4057  /* Toast tables do not have pg_type array rows */
4058  Oid pg_type_toast_oid = atooid(PQgetvalue(upgrade_res, 0,
4059  PQfnumber(upgrade_res, "trel")));
4060 
4061  appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type toast oid\n");
4062  appendPQExpBuffer(upgrade_buffer,
4063  "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4064  pg_type_toast_oid);
4065 
4066  toast_set = true;
4067  }
4068 
4069  PQclear(upgrade_res);
4070  destroyPQExpBuffer(upgrade_query);
4071 
4072  return toast_set;
4073 }
4074 
4075 static void
4076 binary_upgrade_set_pg_class_oids(Archive *fout,
4077  PQExpBuffer upgrade_buffer, Oid pg_class_oid,
4078  bool is_index)
4079 {
4080  PQExpBuffer upgrade_query = createPQExpBuffer();
4081  PGresult *upgrade_res;
4082  Oid pg_class_reltoastrelid;
4083  Oid pg_index_indexrelid;
4084 
4085  appendPQExpBuffer(upgrade_query,
4086  "SELECT c.reltoastrelid, i.indexrelid "
4087  "FROM pg_catalog.pg_class c LEFT JOIN "
4088  "pg_catalog.pg_index i ON (c.reltoastrelid = i.indrelid AND i.indisvalid) "
4089  "WHERE c.oid = '%u'::pg_catalog.oid;",
4090  pg_class_oid);
4091 
4092  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4093 
4094  pg_class_reltoastrelid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "reltoastrelid")));
4095  pg_index_indexrelid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "indexrelid")));
4096 
4097  appendPQExpBufferStr(upgrade_buffer,
4098  "\n-- For binary upgrade, must preserve pg_class oids\n");
4099 
4100  if (!is_index)
4101  {
4102  appendPQExpBuffer(upgrade_buffer,
4103  "SELECT pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('%u'::pg_catalog.oid);\n",
4104  pg_class_oid);
4105  /* only tables have toast tables, not indexes */
4106  if (OidIsValid(pg_class_reltoastrelid))
4107  {
4108  /*
4109  * One complexity is that the table definition might not require
4110  * the creation of a TOAST table, and the TOAST table might have
4111  * been created long after table creation, when the table was
4112  * loaded with wide data. By setting the TOAST oid we force
4113  * creation of the TOAST heap and TOAST index by the backend so we
4114  * can cleanly copy the files during binary upgrade.
4115  */
4116 
4117  appendPQExpBuffer(upgrade_buffer,
4118  "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('%u'::pg_catalog.oid);\n",
4119  pg_class_reltoastrelid);
4120 
4121  /* every toast table has an index */
4122  appendPQExpBuffer(upgrade_buffer,
4123  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
4124  pg_index_indexrelid);
4125  }
4126  }
4127  else
4128  appendPQExpBuffer(upgrade_buffer,
4129  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
4130  pg_class_oid);
4131 
4132  appendPQExpBufferChar(upgrade_buffer, '\n');
4133 
4134  PQclear(upgrade_res);
4135  destroyPQExpBuffer(upgrade_query);
4136 }
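/*
 * Illustrative example only (hypothetical OIDs): for a table with pg_class
 * OID 16387 whose TOAST table and TOAST index have OIDs 16390 and 16392,
 * the non-index branch above appends:
 *
 *   -- For binary upgrade, must preserve pg_class oids
 *   SELECT pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('16387'::pg_catalog.oid);
 *   SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('16390'::pg_catalog.oid);
 *   SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('16392'::pg_catalog.oid);
 */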
4137 
4138 /*
4139  * If the DumpableObject is a member of an extension, add a suitable
4140  * ALTER EXTENSION ADD command to the creation commands in upgrade_buffer.
4141  */
4142 static void
4143 binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
4144  DumpableObject *dobj,
4145  const char *objlabel)
4146 {
4147  DumpableObject *extobj = NULL;
4148  int i;
4149 
4150  if (!dobj->ext_member)
4151  return;
4152 
4153  /*
4154  * Find the parent extension. We could avoid this search if we wanted to
4155  * add a link field to DumpableObject, but the space costs of that would
4156  * be considerable. We assume that member objects could only have a
4157  * direct dependency on their own extension, not any others.
4158  */
4159  for (i = 0; i < dobj->nDeps; i++)
4160  {
4161  extobj = findObjectByDumpId(dobj->dependencies[i]);
4162  if (extobj && extobj->objType == DO_EXTENSION)
4163  break;
4164  extobj = NULL;
4165  }
4166  if (extobj == NULL)
4167  exit_horribly(NULL, "could not find parent extension for %s\n", objlabel);
4168 
4169  appendPQExpBufferStr(upgrade_buffer,
4170  "\n-- For binary upgrade, handle extension membership the hard way\n");
4171  appendPQExpBuffer(upgrade_buffer, "ALTER EXTENSION %s ADD %s;\n",
4172  fmtId(extobj->name),
4173  objlabel);
4174 }
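/*
 * Illustrative example only (hypothetical extension and object label): if
 * the object being dumped is a function belonging to extension "myext", the
 * command added above would look roughly like:
 *
 *   -- For binary upgrade, handle extension membership the hard way
 *   ALTER EXTENSION myext ADD FUNCTION public.myfunc(integer);
 */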
4175 
4176 /*
4177  * getNamespaces:
4178  * read all namespaces in the system catalogs and return them in the
4179  * NamespaceInfo* structure
4180  *
4181  * numNamespaces is set to the number of namespaces read in
4182  */
4183 NamespaceInfo *
4184 getNamespaces(Archive *fout, int *numNamespaces)
4185 {
4186  DumpOptions *dopt = fout->dopt;
4187  PGresult *res;
4188  int ntups;
4189  int i;
4190  PQExpBuffer query;
4191  NamespaceInfo *nsinfo;
4192  int i_tableoid;
4193  int i_oid;
4194  int i_nspname;
4195  int i_rolname;
4196  int i_nspacl;
4197  int i_rnspacl;
4198  int i_initnspacl;
4199  int i_initrnspacl;
4200 
4201  query = createPQExpBuffer();
4202 
4203  /* Make sure we are in proper schema */
4204  selectSourceSchema(fout, "pg_catalog");
4205 
4206  /*
4207  * we fetch all namespaces including system ones, so that every object we
4208  * read in can be linked to a containing namespace.
4209  */
4210  if (fout->remoteVersion >= 90600)
4211  {
4212  PQExpBuffer acl_subquery = createPQExpBuffer();
4213  PQExpBuffer racl_subquery = createPQExpBuffer();
4214  PQExpBuffer init_acl_subquery = createPQExpBuffer();
4215  PQExpBuffer init_racl_subquery = createPQExpBuffer();
4216 
4217  buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
4218  init_racl_subquery, "n.nspacl", "n.nspowner", "'n'",
4219  dopt->binary_upgrade);
4220 
4221  appendPQExpBuffer(query, "SELECT n.tableoid, n.oid, n.nspname, "
4222  "(%s nspowner) AS rolname, "
4223  "%s as nspacl, "
4224  "%s as rnspacl, "
4225  "%s as initnspacl, "
4226  "%s as initrnspacl "
4227  "FROM pg_namespace n "
4228  "LEFT JOIN pg_init_privs pip "
4229  "ON (n.oid = pip.objoid "
4230  "AND pip.classoid = 'pg_namespace'::regclass "
4231  "AND pip.objsubid = 0",
4232  username_subquery,
4233  acl_subquery->data,
4234  racl_subquery->data,
4235  init_acl_subquery->data,
4236  init_racl_subquery->data);
4237 
4238  /*
4239  * When we are doing a 'clean' run, we will be dropping and recreating
4240  * the 'public' schema (the only object which has that kind of
4241  * treatment in the backend and which has an entry in pg_init_privs)
4242  * and therefore we should not consider any initial privileges in
4243  * pg_init_privs in that case.
4244  *
4245  * See pg_backup_archiver.c:_printTocEntry() for the details on why
4246  * the public schema is special in this regard.
4247  *
4248  * Note that if the public schema is dropped and re-created, this is
4249  * essentially a no-op because the new public schema won't have an
4250  * entry in pg_init_privs anyway, as the entry will be removed when
4251  * the public schema is dropped.
4252  *
4253  * Further, we have to handle the case where the public schema does
4254  * not exist at all.
4255  */
4256  if (dopt->outputClean)
4257  appendPQExpBuffer(query, " AND pip.objoid <> "
4258  "coalesce((select oid from pg_namespace "
4259  "where nspname = 'public'),0)");
4260 
4261  appendPQExpBuffer(query, ") ");
4262 
4263  destroyPQExpBuffer(acl_subquery);
4264  destroyPQExpBuffer(racl_subquery);
4265  destroyPQExpBuffer(init_acl_subquery);
4266  destroyPQExpBuffer(init_racl_subquery);
4267  }
4268  else
4269  appendPQExpBuffer(query, "SELECT tableoid, oid, nspname, "
4270  "(%s nspowner) AS rolname, "
4271  "nspacl, NULL as rnspacl, "
4272  "NULL AS initnspacl, NULL as initrnspacl "
4273  "FROM pg_namespace",
4274  username_subquery);
4275 
4276  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4277 
4278  ntups = PQntuples(res);
4279 
4280  nsinfo = (NamespaceInfo *) pg_malloc(ntups * sizeof(NamespaceInfo));
4281 
4282  i_tableoid = PQfnumber(res, "tableoid");
4283  i_oid = PQfnumber(res, "oid");
4284  i_nspname = PQfnumber(res, "nspname");
4285  i_rolname = PQfnumber(res, "rolname");
4286  i_nspacl = PQfnumber(res, "nspacl");
4287  i_rnspacl = PQfnumber(res, "rnspacl");
4288  i_initnspacl = PQfnumber(res, "initnspacl");
4289  i_initrnspacl = PQfnumber(res, "initrnspacl");
4290 
4291  for (i = 0; i < ntups; i++)
4292  {
4293  nsinfo[i].dobj.objType = DO_NAMESPACE;
4294  nsinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4295  nsinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4296  AssignDumpId(&nsinfo[i].dobj);
4297  nsinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_nspname));
4298  nsinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4299  nsinfo[i].nspacl = pg_strdup(PQgetvalue(res, i, i_nspacl));
4300  nsinfo[i].rnspacl = pg_strdup(PQgetvalue(res, i, i_rnspacl));
4301  nsinfo[i].initnspacl = pg_strdup(PQgetvalue(res, i, i_initnspacl));
4302  nsinfo[i].initrnspacl = pg_strdup(PQgetvalue(res, i, i_initrnspacl));
4303 
4304  /* Decide whether to dump this namespace */
4305  selectDumpableNamespace(&nsinfo[i], fout);
4306 
4307  /*
4308  * Do not try to dump ACL if the ACL is empty or the default.
4309  *
4310  * This is useful because, for some schemas/objects, the only
4311  * component we are going to try and dump is the ACL and if we can
4312  * remove that then 'dump' goes to zero/false and we don't consider
4313  * this object for dumping at all later on.
4314  */
4315  if (PQgetisnull(res, i, i_nspacl) && PQgetisnull(res, i, i_rnspacl) &&
4316  PQgetisnull(res, i, i_initnspacl) &&
4317  PQgetisnull(res, i, i_initrnspacl))
4318  nsinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4319 
4320  if (strlen(nsinfo[i].rolname) == 0)
4321  write_msg(NULL, "WARNING: owner of schema \"%s\" appears to be invalid\n",
4322  nsinfo[i].dobj.name);
4323  }
4324 
4325  PQclear(res);
4326  destroyPQExpBuffer(query);
4327 
4328  *numNamespaces = ntups;
4329 
4330  return nsinfo;
4331 }
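/*
 * Illustrative example only (assumes an 8.1-or-later server, where
 * username_subquery typically resolves to something like
 * "SELECT rolname FROM pg_catalog.pg_roles WHERE oid ="): the
 * "(%s nspowner) AS rolname" fragment in getNamespaces above then expands
 * to roughly:
 *
 *   (SELECT rolname FROM pg_catalog.pg_roles WHERE oid = nspowner) AS rolname
 */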
4332 
4333 /*
4334  * findNamespace:
4335  * given a namespace OID, look up the info read by getNamespaces
4336  */
4337 static NamespaceInfo *
4338 findNamespace(Archive *fout, Oid nsoid)
4339 {
4340  NamespaceInfo *nsinfo;
4341 
4342  nsinfo = findNamespaceByOid(nsoid);
4343  if (nsinfo == NULL)
4344  exit_horribly(NULL, "schema with OID %u does not exist\n", nsoid);
4345  return nsinfo;
4346 }
4347 
4348 /*
4349  * getExtensions:
4350  * read all extensions in the system catalogs and return them in the
4351  * ExtensionInfo* structure
4352  *
4353  * numExtensions is set to the number of extensions read in
4354  */
4355 ExtensionInfo *
4356 getExtensions(Archive *fout, int *numExtensions)
4357 {
4358  DumpOptions *dopt = fout->dopt;
4359  PGresult *res;
4360  int ntups;
4361  int i;
4362  PQExpBuffer query;
4363  ExtensionInfo *extinfo;
4364  int i_tableoid;
4365  int i_oid;
4366  int i_extname;
4367  int i_nspname;
4368  int i_extrelocatable;
4369  int i_extversion;
4370  int i_extconfig;
4371  int i_extcondition;
4372 
4373  /*
4374  * Before 9.1, there are no extensions.
4375  */
4376  if (fout->remoteVersion < 90100)
4377  {
4378  *numExtensions = 0;
4379  return NULL;
4380  }
4381 
4382  query = createPQExpBuffer();
4383 
4384  /* Make sure we are in proper schema */
4385  selectSourceSchema(fout, "pg_catalog");
4386 
4387  appendPQExpBufferStr(query, "SELECT x.tableoid, x.oid, "
4388  "x.extname, n.nspname, x.extrelocatable, x.extversion, x.extconfig, x.extcondition "
4389  "FROM pg_extension x "
4390  "JOIN pg_namespace n ON n.oid = x.extnamespace");
4391 
4392  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4393 
4394  ntups = PQntuples(res);
4395 
4396  extinfo = (ExtensionInfo *) pg_malloc(ntups * sizeof(ExtensionInfo));
4397 
4398  i_tableoid = PQfnumber(res, "tableoid");
4399  i_oid = PQfnumber(res, "oid");
4400  i_extname = PQfnumber(res, "extname");
4401  i_nspname = PQfnumber(res, "nspname");
4402  i_extrelocatable = PQfnumber(res, "extrelocatable");
4403  i_extversion = PQfnumber(res, "extversion");
4404  i_extconfig = PQfnumber(res, "extconfig");
4405  i_extcondition = PQfnumber(res, "extcondition");
4406 
4407  for (i = 0; i < ntups; i++)
4408  {
4409  extinfo[i].dobj.objType = DO_EXTENSION;
4410  extinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4411  extinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4412  AssignDumpId(&extinfo[i].dobj);
4413  extinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_extname));
4414  extinfo[i].namespace = pg_strdup(PQgetvalue(res, i, i_nspname));
4415  extinfo[i].relocatable = *(PQgetvalue(res, i, i_extrelocatable)) == 't';
4416  extinfo[i].extversion = pg_strdup(PQgetvalue(res, i, i_extversion));
4417  extinfo[i].extconfig = pg_strdup(PQgetvalue(res, i, i_extconfig));
4418  extinfo[i].extcondition = pg_strdup(PQgetvalue(res, i, i_extcondition));
4419 
4420  /* Decide whether we want to dump it */
4421  selectDumpableExtension(&(extinfo[i]), dopt);
4422  }
4423 
4424  PQclear(res);
4425  destroyPQExpBuffer(query);
4426 
4427  *numExtensions = ntups;
4428 
4429  return extinfo;
4430 }
4431 
4432 /*
4433  * getTypes:
4434  * read all types in the system catalogs and return them in the
4435  * TypeInfo* structure
4436  *
4437  * numTypes is set to the number of types read in
4438  *
4439  * NB: this must run after getFuncs() because we assume we can do
4440  * findFuncByOid().
4441  */
4442 TypeInfo *
4443 getTypes(Archive *fout, int *numTypes)
4444 {
4445  DumpOptions *dopt = fout->dopt;
4446  PGresult *res;
4447  int ntups;
4448  int i;
4449  PQExpBuffer query = createPQExpBuffer();
4450  TypeInfo *tyinfo;
4451  ShellTypeInfo *stinfo;
4452  int i_tableoid;
4453  int i_oid;
4454  int i_typname;
4455  int i_typnamespace;
4456  int i_typacl;
4457  int i_rtypacl;
4458  int i_inittypacl;
4459  int i_initrtypacl;
4460  int i_rolname;
4461  int i_typelem;
4462  int i_typrelid;
4463  int i_typrelkind;
4464  int i_typtype;
4465  int i_typisdefined;
4466  int i_isarray;
4467 
4468  /*
4469  * we include even the built-in types because those may be used as array
4470  * elements by user-defined types
4471  *
4472  * we filter out the built-in types when we dump out the types
4473  *
4474  * same approach for undefined (shell) types and array types
4475  *
4476  * Note: as of 8.3 we can reliably detect whether a type is an
4477  * auto-generated array type by checking the element type's typarray.
4478  * (Before that the test is capable of generating false positives.) We
4479  * still check for name beginning with '_', though, so as to avoid the
4480  * cost of the subselect probe for all standard types. This would have to
4481  * be revisited if the backend ever allows renaming of array types.
4482  */
4483 
4484  /* Make sure we are in proper schema */
4485  selectSourceSchema(fout, "pg_catalog");
4486 
4487  if (fout->remoteVersion >= 90600)
4488  {
4489  PQExpBuffer acl_subquery = createPQExpBuffer();
4490  PQExpBuffer racl_subquery = createPQExpBuffer();
4491  PQExpBuffer initacl_subquery = createPQExpBuffer();
4492  PQExpBuffer initracl_subquery = createPQExpBuffer();
4493 
4494  buildACLQueries(acl_subquery, racl_subquery, initacl_subquery,
4495  initracl_subquery, "t.typacl", "t.typowner", "'T'",
4496  dopt->binary_upgrade);
4497 
4498  appendPQExpBuffer(query, "SELECT t.tableoid, t.oid, t.typname, "
4499  "t.typnamespace, "
4500  "%s AS typacl, "
4501  "%s AS rtypacl, "
4502  "%s AS inittypacl, "
4503  "%s AS initrtypacl, "
4504  "(%s t.typowner) AS rolname, "
4505  "t.typelem, t.typrelid, "
4506  "CASE WHEN t.typrelid = 0 THEN ' '::\"char\" "
4507  "ELSE (SELECT relkind FROM pg_class WHERE oid = t.typrelid) END AS typrelkind, "
4508  "t.typtype, t.typisdefined, "
4509  "t.typname[0] = '_' AND t.typelem != 0 AND "
4510  "(SELECT typarray FROM pg_type te WHERE oid = t.typelem) = t.oid AS isarray "
4511  "FROM pg_type t "
4512  "LEFT JOIN pg_init_privs pip ON "
4513  "(t.oid = pip.objoid "
4514  "AND pip.classoid = 'pg_type'::regclass "
4515  "AND pip.objsubid = 0) ",
4516  acl_subquery->data,
4517  racl_subquery->data,
4518  initacl_subquery->data,
4519  initracl_subquery->data,
4520  username_subquery);
4521 
4522  destroyPQExpBuffer(acl_subquery);
4523  destroyPQExpBuffer(racl_subquery);
4524  destroyPQExpBuffer(initacl_subquery);
4525  destroyPQExpBuffer(initracl_subquery);
4526  }
4527  else if (fout->remoteVersion >= 90200)
4528  {
4529  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4530  "typnamespace, typacl, NULL as rtypacl, "
4531  "NULL AS inittypacl, NULL AS initrtypacl, "
4532  "(%s typowner) AS rolname, "
4533  "typelem, typrelid, "
4534  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4535  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4536  "typtype, typisdefined, "
4537  "typname[0] = '_' AND typelem != 0 AND "
4538  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
4539  "FROM pg_type",
4540  username_subquery);
4541  }
4542  else if (fout->remoteVersion >= 80300)
4543  {
4544  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4545  "typnamespace, NULL AS typacl, NULL as rtypacl, "
4546  "NULL AS inittypacl, NULL AS initrtypacl, "
4547  "(%s typowner) AS rolname, "
4548  "typelem, typrelid, "
4549  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4550  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4551  "typtype, typisdefined, "
4552  "typname[0] = '_' AND typelem != 0 AND "
4553  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
4554  "FROM pg_type",
4555  username_subquery);
4556  }
4557  else
4558  {
4559  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4560  "typnamespace, NULL AS typacl, NULL as rtypacl, "
4561  "NULL AS inittypacl, NULL AS initrtypacl, "
4562  "(%s typowner) AS rolname, "
4563  "typelem, typrelid, "
4564  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4565  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4566  "typtype, typisdefined, "
4567  "typname[0] = '_' AND typelem != 0 AS isarray "
4568  "FROM pg_type",
4569  username_subquery);
4570  }
4571 
4572  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4573 
4574  ntups = PQntuples(res);
4575 
4576  tyinfo = (TypeInfo *) pg_malloc(ntups * sizeof(TypeInfo));
4577 
4578  i_tableoid = PQfnumber(res, "tableoid");
4579  i_oid = PQfnumber(res, "oid");
4580  i_typname = PQfnumber(res, "typname");
4581  i_typnamespace = PQfnumber(res, "typnamespace");
4582  i_typacl = PQfnumber(res, "typacl");
4583  i_rtypacl = PQfnumber(res, "rtypacl");
4584  i_inittypacl = PQfnumber(res, "inittypacl");
4585  i_initrtypacl = PQfnumber(res, "initrtypacl");
4586  i_rolname = PQfnumber(res, "rolname");
4587  i_typelem = PQfnumber(res, "typelem");
4588  i_typrelid = PQfnumber(res, "typrelid");
4589  i_typrelkind = PQfnumber(res, "typrelkind");
4590  i_typtype = PQfnumber(res, "typtype");
4591  i_typisdefined = PQfnumber(res, "typisdefined");
4592  i_isarray = PQfnumber(res, "isarray");
4593 
4594  for (i = 0; i < ntups; i++)
4595  {
4596  tyinfo[i].dobj.objType = DO_TYPE;
4597  tyinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4598  tyinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4599  AssignDumpId(&tyinfo[i].dobj);
4600  tyinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_typname));
4601  tyinfo[i].dobj.namespace =
4602  findNamespace(fout,
4603  atooid(PQgetvalue(res, i, i_typnamespace)));
4604  tyinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4605  tyinfo[i].typacl = pg_strdup(PQgetvalue(res, i, i_typacl));
4606  tyinfo[i].rtypacl = pg_strdup(PQgetvalue(res, i, i_rtypacl));
4607  tyinfo[i].inittypacl = pg_strdup(PQgetvalue(res, i, i_inittypacl));
4608  tyinfo[i].initrtypacl = pg_strdup(PQgetvalue(res, i, i_initrtypacl));
4609  tyinfo[i].typelem = atooid(PQgetvalue(res, i, i_typelem));
4610  tyinfo[i].typrelid = atooid(PQgetvalue(res, i, i_typrelid));
4611  tyinfo[i].typrelkind = *PQgetvalue(res, i, i_typrelkind);
4612  tyinfo[i].typtype = *PQgetvalue(res, i, i_typtype);
4613  tyinfo[i].shellType = NULL;
4614 
4615  if (strcmp(PQgetvalue(res, i, i_typisdefined), "t") == 0)
4616  tyinfo[i].isDefined = true;
4617  else
4618  tyinfo[i].isDefined = false;
4619 
4620  if (strcmp(PQgetvalue(res, i, i_isarray), "t") == 0)
4621  tyinfo[i].isArray = true;
4622  else
4623  tyinfo[i].isArray = false;
4624 
4625  /* Decide whether we want to dump it */
4626  selectDumpableType(&tyinfo[i], fout);
4627 
4628  /* Do not try to dump ACL if no ACL exists. */
4629  if (PQgetisnull(res, i, i_typacl) && PQgetisnull(res, i, i_rtypacl) &&
4630  PQgetisnull(res, i, i_inittypacl) &&
4631  PQgetisnull(res, i, i_initrtypacl))
4632  tyinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4633 
4634  /*
4635  * If it's a domain, fetch info about its constraints, if any
4636  */
4637  tyinfo[i].nDomChecks = 0;
4638  tyinfo[i].domChecks = NULL;
4639  if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
4640  tyinfo[i].typtype == TYPTYPE_DOMAIN)
4641  getDomainConstraints(fout, &(tyinfo[i]));
4642 
4643  /*
4644  * If it's a base type, make a DumpableObject representing a shell
4645  * definition of the type. We will need to dump that ahead of the I/O
4646  * functions for the type. Similarly, range types need a shell
4647  * definition in case they have a canonicalize function.
4648  *
4649  * Note: the shell type doesn't have a catId. You might think it
4650  * should copy the base type's catId, but then it might capture the
4651  * pg_depend entries for the type, which we don't want.
4652  */
4653  if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
4654  (tyinfo[i].typtype == TYPTYPE_BASE ||
4655  tyinfo[i].typtype == TYPTYPE_RANGE))
4656  {
4657  stinfo = (ShellTypeInfo *) pg_malloc(sizeof(ShellTypeInfo));
4658  stinfo->dobj.objType = DO_SHELL_TYPE;
4659  stinfo->dobj.catId = nilCatalogId;
4660  AssignDumpId(&stinfo->dobj);
4661  stinfo->dobj.name = pg_strdup(tyinfo[i].dobj.name);
4662  stinfo->dobj.namespace = tyinfo[i].dobj.namespace;
4663  stinfo->baseType = &(tyinfo[i]);
4664  tyinfo[i].shellType = stinfo;
4665 
4666  /*
4667  * Initially mark the shell type as not to be dumped. We'll only
4668  * dump it if the I/O or canonicalize functions need to be dumped;
4669  * this is taken care of while sorting dependencies.
4670  */
4671  stinfo->dobj.dump = DUMP_COMPONENT_NONE;
4672  }
4673 
4674  if (strlen(tyinfo[i].rolname) == 0)
4675  write_msg(NULL, "WARNING: owner of data type \"%s\" appears to be invalid\n",
4676  tyinfo[i].dobj.name);
4677  }
4678 
4679  *numTypes = ntups;
4680 
4681  PQclear(res);
4682 
4683  destroyPQExpBuffer(query);
4684 
4685  return tyinfo;
4686 }
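/*
 * Illustrative example only (uses the well-known built-in OIDs 23 for int4
 * and 1007 for _int4, purely to clarify the isarray test above): _int4's
 * name starts with '_', its typelem is 23, and int4's typarray points back
 * to _int4, so a hand-run probe such as
 *
 *   SELECT t.typname,
 *          t.typname[0] = '_' AND t.typelem != 0 AND
 *          (SELECT typarray FROM pg_type te WHERE te.oid = t.typelem) = t.oid AS isarray
 *   FROM pg_type t
 *   WHERE t.typname IN ('int4', '_int4');
 *
 * reports isarray = true only for _int4.
 */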
4687 
4688 /*
4689  * getOperators:
4690  * read all operators in the system catalogs and return them in the
4691  * OprInfo* structure
4692  *
4693  * numOprs is set to the number of operators read in
4694  */
4695 OprInfo *
4696 getOperators(Archive *fout, int *numOprs)
4697 {
4698  PGresult *res;
4699  int ntups;
4700  int i;
4701  PQExpBuffer query = createPQExpBuffer();
4702  OprInfo *oprinfo;
4703  int i_tableoid;
4704  int i_oid;
4705  int i_oprname;
4706  int i_oprnamespace;
4707  int i_rolname;
4708  int i_oprkind;
4709  int i_oprcode;
4710 
4711  /*
4712  * find all operators, including builtin operators; we filter out
4713  * system-defined operators at dump-out time.
4714  */
4715 
4716  /* Make sure we are in proper schema */
4717  selectSourceSchema(fout, "pg_catalog");
4718 
4719  appendPQExpBuffer(query, "SELECT tableoid, oid, oprname, "
4720  "oprnamespace, "
4721  "(%s oprowner) AS rolname, "
4722  "oprkind, "
4723  "oprcode::oid AS oprcode "
4724  "FROM pg_operator",
4725  username_subquery);
4726 
4727  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4728 
4729  ntups = PQntuples(res);
4730  *numOprs = ntups;
4731 
4732  oprinfo = (OprInfo *) pg_malloc(ntups * sizeof(OprInfo));
4733 
4734  i_tableoid = PQfnumber(res, "tableoid");
4735  i_oid = PQfnumber(res, "oid");
4736  i_oprname = PQfnumber(res, "oprname");
4737  i_oprnamespace = PQfnumber(res, "oprnamespace");
4738  i_rolname = PQfnumber(res, "rolname");
4739  i_oprkind = PQfnumber(res, "oprkind");
4740  i_oprcode = PQfnumber(res, "oprcode");
4741 
4742  for (i = 0; i < ntups; i++)
4743  {
4744  oprinfo[i].dobj.objType = DO_OPERATOR;
4745  oprinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4746  oprinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4747  AssignDumpId(&oprinfo[i].dobj);
4748  oprinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oprname));
4749  oprinfo[i].dobj.namespace =
4750  findNamespace(fout,
4751  atooid(PQgetvalue(res, i, i_oprnamespace)));
4752  oprinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4753  oprinfo[i].oprkind = (PQgetvalue(res, i, i_oprkind))[0];
4754  oprinfo[i].oprcode = atooid(PQgetvalue(res, i, i_oprcode));
4755 
4756  /* Decide whether we want to dump it */
4757  selectDumpableObject(&(oprinfo[i].dobj), fout);
4758 
4759  /* Operators do not currently have ACLs. */
4760  oprinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4761 
4762  if (strlen(oprinfo[i].rolname) == 0)
4763  write_msg(NULL, "WARNING: owner of operator \"%s\" appears to be invalid\n",
4764  oprinfo[i].dobj.name);
4765  }
4766 
4767  PQclear(res);
4768 
4769  destroyPQExpBuffer(query);
4770 
4771  return oprinfo;
4772 }
4773 
4774 /*
4775  * getCollations:
4776  * read all collations in the system catalogs and return them in the
4777  * CollInfo* structure
4778  *
4779  * numCollations is set to the number of collations read in
4780  */
4781 CollInfo *
4782 getCollations(Archive *fout, int *numCollations)
4783 {
4784  PGresult *res;
4785  int ntups;
4786  int i;
4787  PQExpBuffer query;
4788  CollInfo *collinfo;
4789  int i_tableoid;
4790  int i_oid;
4791  int i_collname;
4792  int i_collnamespace;
4793  int i_rolname;
4794 
4795  /* Collations didn't exist pre-9.1 */
4796  if (fout->remoteVersion < 90100)
4797  {
4798  *numCollations = 0;
4799  return NULL;
4800  }
4801 
4802  query = createPQExpBuffer();
4803 
4804  /*
4805  * find all collations, including builtin collations; we filter out
4806  * system-defined collations at dump-out time.
4807  */
4808 
4809  /* Make sure we are in proper schema */
4810  selectSourceSchema(fout, "pg_catalog");
4811 
4812  appendPQExpBuffer(query, "SELECT tableoid, oid, collname, "
4813  "collnamespace, "
4814  "(%s collowner) AS rolname "
4815  "FROM pg_collation",
4816  username_subquery);
4817 
4818  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4819 
4820  ntups = PQntuples(res);
4821  *numCollations = ntups;
4822 
4823  collinfo = (CollInfo *) pg_malloc(ntups * sizeof(CollInfo));
4824 
4825  i_tableoid = PQfnumber(res, "tableoid");
4826  i_oid = PQfnumber(res, "oid");
4827  i_collname = PQfnumber(res, "collname");
4828  i_collnamespace = PQfnumber(res, "collnamespace");
4829  i_rolname = PQfnumber(res, "rolname");
4830 
4831  for (i = 0; i < ntups; i++)
4832  {
4833  collinfo[i].dobj.objType = DO_COLLATION;
4834  collinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4835  collinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4836  AssignDumpId(&collinfo[i].dobj);
4837  collinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_collname));
4838  collinfo[i].dobj.namespace =
4839  findNamespace(fout,
4840  atooid(PQgetvalue(res, i, i_collnamespace)));
4841  collinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4842 
4843  /* Decide whether we want to dump it */
4844  selectDumpableObject(&(collinfo[i].dobj), fout);
4845 
4846  /* Collations do not currently have ACLs. */
4847  collinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4848  }
4849 
4850  PQclear(res);
4851 
4852  destroyPQExpBuffer(query);
4853 
4854  return collinfo;
4855 }
4856 
4857 /*
4858  * getConversions:
4859  * read all conversions in the system catalogs and return them in the
4860  * ConvInfo* structure
4861  *
4862  * numConversions is set to the number of conversions read in
4863  */
4864 ConvInfo *
4865 getConversions(Archive *fout, int *numConversions)
4866 {
4867  PGresult *res;
4868  int ntups;
4869  int i;
4870  PQExpBuffer query;
4871  ConvInfo *convinfo;
4872  int i_tableoid;
4873  int i_oid;
4874  int i_conname;
4875  int i_connamespace;
4876  int i_rolname;
4877 
4878  query = createPQExpBuffer();
4879 
4880  /*
4881  * find all conversions, including builtin conversions; we filter out
4882  * system-defined conversions at dump-out time.
4883  */
4884 
4885  /* Make sure we are in proper schema */
4886  selectSourceSchema(fout, "pg_catalog");
4887 
4888  appendPQExpBuffer(query, "SELECT tableoid, oid, conname, "
4889  "connamespace, "
4890  "(%s conowner) AS rolname "
4891  "FROM pg_conversion",
4892  username_subquery);
4893 
4894  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4895 
4896  ntups = PQntuples(res);
4897  *numConversions = ntups;
4898 
4899  convinfo = (ConvInfo *) pg_malloc(ntups * sizeof(ConvInfo));
4900 
4901  i_tableoid = PQfnumber(res, "tableoid");
4902  i_oid = PQfnumber(res, "oid");
4903  i_conname = PQfnumber(res, "conname");
4904  i_connamespace = PQfnumber(res, "connamespace");
4905  i_rolname = PQfnumber(res, "rolname");
4906 
4907  for (i = 0; i < ntups; i++)
4908  {
4909  convinfo[i].dobj.objType = DO_CONVERSION;
4910  convinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4911  convinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4912  AssignDumpId(&convinfo[i].dobj);
4913  convinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_conname));
4914  convinfo[i].dobj.namespace =
4915  findNamespace(fout,
4916  atooid(PQgetvalue(res, i, i_connamespace)));
4917  convinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4918 
4919  /* Decide whether we want to dump it */
4920  selectDumpableObject(&(convinfo[i].dobj), fout);
4921 
4922  /* Conversions do not currently have ACLs. */
4923  convinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4924  }
4925 
4926  PQclear(res);
4927 
4928  destroyPQExpBuffer(query);
4929 
4930  return convinfo;
4931 }
4932 
4933 /*
4934  * getAccessMethods:
4935  * read all user-defined access methods in the system catalogs and return
4936  * them in the AccessMethodInfo* structure
4937  *
4938  * numAccessMethods is set to the number of access methods read in
4939  */
4940 AccessMethodInfo *
4941 getAccessMethods(Archive *fout, int *numAccessMethods)
4942 {
4943  PGresult *res;
4944  int ntups;
4945  int i;
4946  PQExpBuffer query;
4947  AccessMethodInfo *aminfo;
4948  int i_tableoid;
4949  int i_oid;
4950  int i_amname;
4951  int i_amhandler;
4952  int i_amtype;
4953 
4954  /* Before 9.6, there are no user-defined access methods */
4955  if (fout->remoteVersion < 90600)
4956  {
4957  *numAccessMethods = 0;
4958  return NULL;
4959  }
4960 
4961  query = createPQExpBuffer();
4962 
4963  /* Make sure we are in proper schema */
4964  selectSourceSchema(fout, "pg_catalog");
4965 
4966  /* Select all access methods from pg_am table */
4967  appendPQExpBuffer(query, "SELECT tableoid, oid, amname, amtype, "
4968  "amhandler::pg_catalog.regproc AS amhandler "
4969  "FROM pg_am");
4970 
4971  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4972 
4973  ntups = PQntuples(res);
4974  *numAccessMethods = ntups;
4975 
4976  aminfo = (AccessMethodInfo *) pg_malloc(ntups * sizeof(AccessMethodInfo));
4977 
4978  i_tableoid = PQfnumber(res, "tableoid");
4979  i_oid = PQfnumber(res, "oid");
4980  i_amname = PQfnumber(res, "amname");
4981  i_amhandler = PQfnumber(res, "amhandler");
4982  i_amtype = PQfnumber(res, "amtype");
4983 
4984  for (i = 0; i < ntups; i++)
4985  {
4986  aminfo[i].dobj.objType = DO_ACCESS_METHOD;
4987  aminfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4988  aminfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4989  AssignDumpId(&aminfo[i].dobj);
4990  aminfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_amname));
4991  aminfo[i].dobj.namespace = NULL;
4992  aminfo[i].amhandler = pg_strdup(PQgetvalue(res, i, i_amhandler));
4993  aminfo[i].amtype = *(PQgetvalue(res, i, i_amtype));
4994 
4995  /* Decide whether we want to dump it */
4996  selectDumpableAccessMethod(&(aminfo[i]), fout);
4997 
4998  /* Access methods do not currently have ACLs. */
4999  aminfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5000  }
5001 
5002  PQclear(res);
5003 
5004  destroyPQExpBuffer(query);
5005 
5006  return aminfo;
5007 }
5008 
5009 
5010 /*
5011  * getOpclasses:
5012  * read all opclasses in the system catalogs and return them in the
5013  * OpclassInfo* structure
5014  *
5015  * numOpclasses is set to the number of opclasses read in
5016  */
5017 OpclassInfo *
5018 getOpclasses(Archive *fout, int *numOpclasses)
5019 {
5020  PGresult *res;
5021  int ntups;
5022  int i;
5023  PQExpBuffer query = createPQExpBuffer();
5024  OpclassInfo *opcinfo;
5025  int i_tableoid;
5026  int i_oid;
5027  int i_opcname;
5028  int i_opcnamespace;
5029  int i_rolname;
5030 
5031  /*
5032  * find all opclasses, including builtin opclasses; we filter out
5033  * system-defined opclasses at dump-out time.
5034  */
5035 
5036  /* Make sure we are in proper schema */
5037  selectSourceSchema(fout, "pg_catalog");
5038 
5039  appendPQExpBuffer(query, "SELECT tableoid, oid, opcname, "
5040  "opcnamespace, "
5041  "(%s opcowner) AS rolname "
5042  "FROM pg_opclass",
5043  username_subquery);
5044 
5045  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5046 
5047  ntups = PQntuples(res);
5048  *numOpclasses = ntups;
5049 
5050  opcinfo = (OpclassInfo *) pg_malloc(ntups * sizeof(OpclassInfo));
5051 
5052  i_tableoid = PQfnumber(res, "tableoid");
5053  i_oid = PQfnumber(res, "oid");
5054  i_opcname = PQfnumber(res, "opcname");
5055  i_opcnamespace = PQfnumber(res, "opcnamespace");
5056  i_rolname = PQfnumber(res, "rolname");
5057 
5058  for (i = 0; i < ntups; i++)
5059  {
5060  opcinfo[i].dobj.objType = DO_OPCLASS;
5061  opcinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5062  opcinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5063  AssignDumpId(&opcinfo[i].dobj);
5064  opcinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opcname));
5065  opcinfo[i].dobj.namespace =
5066  findNamespace(fout,
5067  atooid(PQgetvalue(res, i, i_opcnamespace)));
5068  opcinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5069 
5070  /* Decide whether we want to dump it */
5071  selectDumpableObject(&(opcinfo[i].dobj), fout);
5072 
5073  /* Op Classes do not currently have ACLs. */
5074  opcinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5075 
5076  if (strlen(opcinfo[i].rolname) == 0)
5077  write_msg(NULL, "WARNING: owner of operator class \"%s\" appears to be invalid\n",
5078  opcinfo[i].dobj.name);
5079  }
5080 
5081  PQclear(res);
5082 
5083  destroyPQExpBuffer(query);
5084 
5085  return opcinfo;
5086 }
5087 
5088 /*
5089  * getOpfamilies:
5090  * read all opfamilies in the system catalogs and return them in the
5091  * OpfamilyInfo* structure
5092  *
5093  * numOpfamilies is set to the number of opfamilies read in
5094  */
5095 OpfamilyInfo *
5096 getOpfamilies(Archive *fout, int *numOpfamilies)
5097 {
5098  PGresult *res;
5099  int ntups;
5100  int i;
5101  PQExpBuffer query;
5102  OpfamilyInfo *opfinfo;
5103  int i_tableoid;
5104  int i_oid;
5105  int i_opfname;
5106  int i_opfnamespace;
5107  int i_rolname;
5108 
5109  /* Before 8.3, there is no separate concept of opfamilies */
5110  if (fout->remoteVersion < 80300)
5111  {
5112  *numOpfamilies = 0;
5113  return NULL;
5114  }
5115 
5116  query = createPQExpBuffer();
5117 
5118  /*
5119  * find all opfamilies, including builtin opfamilies; we filter out
5120  * system-defined opfamilies at dump-out time.
5121  */
5122 
5123  /* Make sure we are in proper schema */
5124  selectSourceSchema(fout, "pg_catalog");
5125 
5126  appendPQExpBuffer(query, "SELECT tableoid, oid, opfname, "
5127  "opfnamespace, "
5128  "(%s opfowner) AS rolname "
5129  "FROM pg_opfamily",
5130  username_subquery);
5131 
5132  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5133 
5134  ntups = PQntuples(res);
5135  *numOpfamilies = ntups;
5136 
5137  opfinfo = (OpfamilyInfo *) pg_malloc(ntups * sizeof(OpfamilyInfo));
5138 
5139  i_tableoid = PQfnumber(res, "tableoid");
5140  i_oid = PQfnumber(res, "oid");
5141  i_opfname = PQfnumber(res, "opfname");
5142  i_opfnamespace = PQfnumber(res, "opfnamespace");
5143  i_rolname = PQfnumber(res, "rolname");
5144 
5145  for (i = 0; i < ntups; i++)
5146  {
5147  opfinfo[i].dobj.objType = DO_OPFAMILY;
5148  opfinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5149  opfinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5150  AssignDumpId(&opfinfo[i].dobj);
5151  opfinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opfname));
5152  opfinfo[i].dobj.namespace =
5153  findNamespace(fout,
5154  atooid(PQgetvalue(res, i, i_opfnamespace)));
5155  opfinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5156 
5157  /* Decide whether we want to dump it */
5158  selectDumpableObject(&(opfinfo[i].dobj), fout);
5159 
5160  /* Operator families do not currently have ACLs. */
5161  opfinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5162 
5163  if (strlen(opfinfo[i].rolname) == 0)
5164  write_msg(NULL, "WARNING: owner of operator family \"%s\" appears to be invalid\n",
5165  opfinfo[i].dobj.name);
5166  }
5167 
5168  PQclear(res);
5169 
5170  destroyPQExpBuffer(query);
5171 
5172  return opfinfo;
5173 }
5174 
5175 /*
5176  * getAggregates:
5177  * read all the user-defined aggregates in the system catalogs and
5178  * return them in the AggInfo* structure
5179  *
5180  * numAggs is set to the number of aggregates read in
5181  */
5182 AggInfo *
5183 getAggregates(Archive *fout, int *numAggs)
5184 {
5185  DumpOptions *dopt = fout->dopt;
5186  PGresult *res;
5187  int ntups;
5188  int i;
5189  PQExpBuffer query = createPQExpBuffer();
5190  AggInfo *agginfo;
5191  int i_tableoid;
5192  int i_oid;
5193  int i_aggname;
5194  int i_aggnamespace;
5195  int i_pronargs;
5196  int i_proargtypes;
5197  int i_rolname;
5198  int i_aggacl;
5199  int i_raggacl;
5200  int i_initaggacl;
5201  int i_initraggacl;
5202 
5203  /* Make sure we are in proper schema */
5204  selectSourceSchema(fout, "pg_catalog");
5205 
5206  /*
5207  * Find all interesting aggregates. See comment in getFuncs() for the
5208  * rationale behind the filtering logic.
5209  */
5210  if (fout->remoteVersion >= 90600)
5211  {
5212  PQExpBuffer acl_subquery = createPQExpBuffer();
5213  PQExpBuffer racl_subquery = createPQExpBuffer();
5214  PQExpBuffer initacl_subquery = createPQExpBuffer();
5215  PQExpBuffer initracl_subquery = createPQExpBuffer();
5216 
5217  buildACLQueries(acl_subquery, racl_subquery, initacl_subquery,
5218  initracl_subquery, "p.proacl", "p.proowner", "'f'",
5219  dopt->binary_upgrade);
5220 
5221  appendPQExpBuffer(query, "SELECT p.tableoid, p.oid, "
5222  "p.proname AS aggname, "
5223  "p.pronamespace AS aggnamespace, "
5224  "p.pronargs, p.proargtypes, "
5225  "(%s p.proowner) AS rolname, "
5226  "%s AS aggacl, "
5227  "%s AS raggacl, "
5228  "%s AS initaggacl, "
5229  "%s AS initraggacl "
5230  "FROM pg_proc p "
5231  "LEFT JOIN pg_init_privs pip ON "
5232  "(p.oid = pip.objoid "
5233  "AND pip.classoid = 'pg_proc'::regclass "
5234  "AND pip.objsubid = 0) "
5235  "WHERE p.proisagg AND ("
5236  "p.pronamespace != "
5237  "(SELECT oid FROM pg_namespace "
5238  "WHERE nspname = 'pg_catalog') OR "
5239  "p.proacl IS DISTINCT FROM pip.initprivs",
5240  username_subquery,
5241  acl_subquery->data,
5242  racl_subquery->data,
5243  initacl_subquery->data,
5244  initracl_subquery->data);
5245  if (dopt->binary_upgrade)
5246  appendPQExpBufferStr(query,
5247  " OR EXISTS(SELECT 1 FROM pg_depend WHERE "
5248  "classid = 'pg_proc'::regclass AND "
5249  "objid = p.oid AND "
5250  "refclassid = 'pg_extension'::regclass AND "
5251  "deptype = 'e')");
5252  appendPQExpBufferChar(query, ')');
5253 
5254  destroyPQExpBuffer(acl_subquery);
5255  destroyPQExpBuffer(racl_subquery);
5256  destroyPQExpBuffer(initacl_subquery);
5257  destroyPQExpBuffer(initracl_subquery);
5258  }
5259  else if (fout->remoteVersion >= 80200)
5260  {
5261  appendPQExpBuffer(query, "SELECT tableoid, oid, proname AS aggname, "
5262  "pronamespace AS aggnamespace, "
5263  "pronargs, proargtypes, "
5264  "(%s proowner) AS rolname, "
5265  "proacl AS aggacl, "
5266  "NULL AS raggacl, "
5267  "NULL AS initaggacl, NULL AS initraggacl "
5268  "FROM pg_proc p "
5269  "WHERE proisagg AND ("
5270  "pronamespace != "
5271  "(SELECT oid FROM pg_namespace "
5272  "WHERE nspname = 'pg_catalog')",
5273  username_subquery);
5274  if (dopt->binary_upgrade && fout->remoteVersion >= 90100)
5275  appendPQExpBufferStr(query,
5276  " OR EXISTS(SELECT 1 FROM pg_depend WHERE "
5277  "classid = 'pg_proc'::regclass AND "
5278  "objid = p.oid AND "
5279  "refclassid = 'pg_extension'::regclass AND "
5280  "deptype = 'e')");
5281  appendPQExpBufferChar(query, ')');
5282  }
5283  else
5284  {
5285  appendPQExpBuffer(query, "SELECT tableoid, oid, proname AS aggname, "
5286  "pronamespace AS aggnamespace, "
5287  "CASE WHEN proargtypes[0] = 'pg_catalog.\"any\"'::pg_catalog.regtype THEN 0 ELSE 1 END AS pronargs, "
5288  "proargtypes, "
5289  "(%s proowner) AS rolname, "