pg_dump.c
1 /*-------------------------------------------------------------------------
2  *
3  * pg_dump.c
4  * pg_dump is a utility for dumping out a postgres database
5  * into a script file.
6  *
7  * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994, Regents of the University of California
9  *
10  * pg_dump will read the system catalogs in a database and dump out a
11  * script that reproduces the schema in terms of SQL that is understood
12  * by PostgreSQL
13  *
14  * Note that pg_dump runs in a transaction-snapshot mode transaction,
15  * so it sees a consistent snapshot of the database including system
16  * catalogs. However, it relies in part on various specialized backend
17  * functions like pg_get_indexdef(), and those things tend to look at
18  * the currently committed state. So it is possible to get a 'cache
19  * lookup failed' error if someone performs DDL changes while a dump is
20  * happening. The window for this sort of thing is from the acquisition
21  * of the transaction snapshot to getSchemaData() (when pg_dump acquires
22  * AccessShareLock on every table it intends to dump). It isn't very large,
23  * but it can happen.
24  *
25  * http://archives.postgresql.org/pgsql-bugs/2010-02/msg00187.php
26  *
27  * IDENTIFICATION
28  * src/bin/pg_dump/pg_dump.c
29  *
30  *-------------------------------------------------------------------------
31  */
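/*
 * Illustrative sketch (not part of the original source): the snapshot setup
 * described above corresponds roughly to the SQL that setup_connection()
 * below issues on a modern server:
 *
 *     BEGIN;
 *     SET TRANSACTION ISOLATION LEVEL REPEATABLE READ, READ ONLY;
 *     -- and, in parallel workers, the exported snapshot of the first session:
 *     SET TRANSACTION SNAPSHOT '<snapshot-id>';
 *
 * All catalog reads after that point see this one snapshot, which is why the
 * remaining race window is only between snapshot acquisition and the
 * AccessShareLock acquisition in getSchemaData().
 */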
32 #include "postgres_fe.h"
33 
34 #include <unistd.h>
35 #include <ctype.h>
36 #ifdef HAVE_TERMIOS_H
37 #include <termios.h>
38 #endif
39 
40 #include "getopt_long.h"
41 
42 #include "access/attnum.h"
43 #include "access/sysattr.h"
44 #include "access/transam.h"
45 #include "catalog/pg_aggregate_d.h"
46 #include "catalog/pg_am_d.h"
47 #include "catalog/pg_attribute_d.h"
48 #include "catalog/pg_cast_d.h"
49 #include "catalog/pg_class_d.h"
50 #include "catalog/pg_default_acl_d.h"
51 #include "catalog/pg_largeobject_d.h"
52 #include "catalog/pg_largeobject_metadata_d.h"
53 #include "catalog/pg_proc_d.h"
54 #include "catalog/pg_trigger_d.h"
55 #include "catalog/pg_type_d.h"
56 #include "libpq/libpq-fs.h"
57 #include "storage/block.h"
58 
59 #include "dumputils.h"
60 #include "parallel.h"
61 #include "pg_backup_db.h"
62 #include "pg_backup_utils.h"
63 #include "pg_dump.h"
64 #include "fe_utils/connect.h"
65 #include "fe_utils/string_utils.h"
66 
67 
68 typedef struct
69 {
70  const char *descr; /* comment for an object */
71  Oid classoid; /* object class (catalog OID) */
72  Oid objoid; /* object OID */
73  int objsubid; /* subobject (table column #) */
74 } CommentItem;
75 
76 typedef struct
77 {
78  const char *provider; /* label provider of this security label */
79  const char *label; /* security label for an object */
80  Oid classoid; /* object class (catalog OID) */
81  Oid objoid; /* object OID */
82  int objsubid; /* subobject (table column #) */
83 } SecLabelItem;
84 
85 typedef enum OidOptions
86 {
87  zeroAsOpaque = 1,
88  zeroAsAny = 2,
89  zeroAsStar = 4,
90  zeroAsNone = 8
91 } OidOptions;
92 
93 /* global decls */
94 bool g_verbose; /* User wants verbose narration of our
95  * activities. */
96 static bool dosync = true; /* Issue fsync() to make dump durable on disk. */
97 
98 /* subquery used to convert user ID (eg, datdba) to user name */
99 static const char *username_subquery;
100 
101 /*
102  * For 8.0 and earlier servers, this is pulled from pg_database; for 8.1+
103  * we use FirstNormalObjectId - 1.
104  */
105 static Oid g_last_builtin_oid; /* value of the last builtin oid */
106 
107 /* The specified names/patterns should match at least one entity */
108 static int strict_names = 0;
109 
110 /*
111  * Object inclusion/exclusion lists
112  *
113  * The string lists record the patterns given by command-line switches,
114  * which we then convert to lists of OIDs of matching objects.
115  */
116 static SimpleStringList schema_include_patterns = {NULL, NULL};
117 static SimpleOidList schema_include_oids = {NULL, NULL};
118 static SimpleStringList schema_exclude_patterns = {NULL, NULL};
119 static SimpleOidList schema_exclude_oids = {NULL, NULL};
120 
121 static SimpleStringList table_include_patterns = {NULL, NULL};
122 static SimpleOidList table_include_oids = {NULL, NULL};
123 static SimpleStringList table_exclude_patterns = {NULL, NULL};
124 static SimpleOidList table_exclude_oids = {NULL, NULL};
125 static SimpleStringList tabledata_exclude_patterns = {NULL, NULL};
126 static SimpleOidList tabledata_exclude_oids = {NULL, NULL};
127 
128 
129 char g_opaque_type[10]; /* name for the opaque type */
130 
131 /* placeholders for the delimiters for comments */
132 char g_comment_start[10];
133 char g_comment_end[10];
134 
135 static const CatalogId nilCatalogId = {0, 0};
136 
137 /*
138  * Macro for producing quoted, schema-qualified name of a dumpable object.
139  */
140 #define fmtQualifiedDumpable(obj) \
141  fmtQualifiedId((obj)->dobj.namespace->dobj.name, \
142  (obj)->dobj.name)
143 
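/*
 * Illustrative example (an assumption, not taken from the original source):
 * for a table named "Order Lines" in schema "sales", fmtQualifiedDumpable()
 * yields  sales."Order Lines"  -- fmtQualifiedId()/fmtId() add double quotes
 * only when an identifier actually needs them.
 */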
144 static void help(const char *progname);
145 static void setup_connection(Archive *AH,
146  const char *dumpencoding, const char *dumpsnapshot,
147  char *use_role);
148 static ArchiveFormat parseArchiveFormat(const char *format, ArchiveMode *mode);
149 static void expand_schema_name_patterns(Archive *fout,
150  SimpleStringList *patterns,
151  SimpleOidList *oids,
152  bool strict_names);
153 static void expand_table_name_patterns(Archive *fout,
154  SimpleStringList *patterns,
155  SimpleOidList *oids,
156  bool strict_names);
157 static NamespaceInfo *findNamespace(Archive *fout, Oid nsoid);
158 static void dumpTableData(Archive *fout, TableDataInfo *tdinfo);
159 static void refreshMatViewData(Archive *fout, TableDataInfo *tdinfo);
160 static void guessConstraintInheritance(TableInfo *tblinfo, int numTables);
161 static void dumpComment(Archive *fout, const char *type, const char *name,
162  const char *namespace, const char *owner,
163  CatalogId catalogId, int subid, DumpId dumpId);
164 static int findComments(Archive *fout, Oid classoid, Oid objoid,
165  CommentItem **items);
166 static int collectComments(Archive *fout, CommentItem **items);
167 static void dumpSecLabel(Archive *fout, const char *type, const char *name,
168  const char *namespace, const char *owner,
169  CatalogId catalogId, int subid, DumpId dumpId);
170 static int findSecLabels(Archive *fout, Oid classoid, Oid objoid,
171  SecLabelItem **items);
172 static int collectSecLabels(Archive *fout, SecLabelItem **items);
173 static void dumpDumpableObject(Archive *fout, DumpableObject *dobj);
174 static void dumpNamespace(Archive *fout, NamespaceInfo *nspinfo);
175 static void dumpExtension(Archive *fout, ExtensionInfo *extinfo);
176 static void dumpType(Archive *fout, TypeInfo *tyinfo);
177 static void dumpBaseType(Archive *fout, TypeInfo *tyinfo);
178 static void dumpEnumType(Archive *fout, TypeInfo *tyinfo);
179 static void dumpRangeType(Archive *fout, TypeInfo *tyinfo);
180 static void dumpUndefinedType(Archive *fout, TypeInfo *tyinfo);
181 static void dumpDomain(Archive *fout, TypeInfo *tyinfo);
182 static void dumpCompositeType(Archive *fout, TypeInfo *tyinfo);
183 static void dumpCompositeTypeColComments(Archive *fout, TypeInfo *tyinfo);
184 static void dumpShellType(Archive *fout, ShellTypeInfo *stinfo);
185 static void dumpProcLang(Archive *fout, ProcLangInfo *plang);
186 static void dumpFunc(Archive *fout, FuncInfo *finfo);
187 static void dumpCast(Archive *fout, CastInfo *cast);
188 static void dumpTransform(Archive *fout, TransformInfo *transform);
189 static void dumpOpr(Archive *fout, OprInfo *oprinfo);
190 static void dumpAccessMethod(Archive *fout, AccessMethodInfo *oprinfo);
191 static void dumpOpclass(Archive *fout, OpclassInfo *opcinfo);
192 static void dumpOpfamily(Archive *fout, OpfamilyInfo *opfinfo);
193 static void dumpCollation(Archive *fout, CollInfo *collinfo);
194 static void dumpConversion(Archive *fout, ConvInfo *convinfo);
195 static void dumpRule(Archive *fout, RuleInfo *rinfo);
196 static void dumpAgg(Archive *fout, AggInfo *agginfo);
197 static void dumpTrigger(Archive *fout, TriggerInfo *tginfo);
198 static void dumpEventTrigger(Archive *fout, EventTriggerInfo *evtinfo);
199 static void dumpTable(Archive *fout, TableInfo *tbinfo);
200 static void dumpTableSchema(Archive *fout, TableInfo *tbinfo);
201 static void dumpAttrDef(Archive *fout, AttrDefInfo *adinfo);
202 static void dumpSequence(Archive *fout, TableInfo *tbinfo);
203 static void dumpSequenceData(Archive *fout, TableDataInfo *tdinfo);
204 static void dumpIndex(Archive *fout, IndxInfo *indxinfo);
205 static void dumpIndexAttach(Archive *fout, IndexAttachInfo *attachinfo);
206 static void dumpStatisticsExt(Archive *fout, StatsExtInfo *statsextinfo);
207 static void dumpConstraint(Archive *fout, ConstraintInfo *coninfo);
208 static void dumpTableConstraintComment(Archive *fout, ConstraintInfo *coninfo);
209 static void dumpTSParser(Archive *fout, TSParserInfo *prsinfo);
210 static void dumpTSDictionary(Archive *fout, TSDictInfo *dictinfo);
211 static void dumpTSTemplate(Archive *fout, TSTemplateInfo *tmplinfo);
212 static void dumpTSConfig(Archive *fout, TSConfigInfo *cfginfo);
213 static void dumpForeignDataWrapper(Archive *fout, FdwInfo *fdwinfo);
214 static void dumpForeignServer(Archive *fout, ForeignServerInfo *srvinfo);
215 static void dumpUserMappings(Archive *fout,
216  const char *servername, const char *namespace,
217  const char *owner, CatalogId catalogId, DumpId dumpId);
218 static void dumpDefaultACL(Archive *fout, DefaultACLInfo *daclinfo);
219 
220 static void dumpACL(Archive *fout, CatalogId objCatId, DumpId objDumpId,
221  const char *type, const char *name, const char *subname,
222  const char *nspname, const char *owner,
223  const char *acls, const char *racls,
224  const char *initacls, const char *initracls);
225 
226 static void getDependencies(Archive *fout);
227 static void BuildArchiveDependencies(Archive *fout);
228 static void findDumpableDependencies(ArchiveHandle *AH, DumpableObject *dobj,
229  DumpId **dependencies, int *nDeps, int *allocDeps);
230 
231 static DumpableObject *createBoundaryObjects(void);
232 static void addBoundaryDependencies(DumpableObject **dobjs, int numObjs,
233  DumpableObject *boundaryObjs);
234 
235 static void getDomainConstraints(Archive *fout, TypeInfo *tyinfo);
236 static void getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, char relkind);
237 static void makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo);
238 static void buildMatViewRefreshDependencies(Archive *fout);
239 static void getTableDataFKConstraints(void);
240 static char *format_function_arguments(FuncInfo *finfo, char *funcargs,
241  bool is_agg);
242 static char *format_function_arguments_old(Archive *fout,
243  FuncInfo *finfo, int nallargs,
244  char **allargtypes,
245  char **argmodes,
246  char **argnames);
247 static char *format_function_signature(Archive *fout,
248  FuncInfo *finfo, bool honor_quotes);
249 static char *convertRegProcReference(Archive *fout,
250  const char *proc);
251 static char *getFormattedOperatorName(Archive *fout, const char *oproid);
252 static char *convertTSFunction(Archive *fout, Oid funcOid);
253 static Oid findLastBuiltinOid_V71(Archive *fout);
254 static char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts);
255 static void getBlobs(Archive *fout);
256 static void dumpBlob(Archive *fout, BlobInfo *binfo);
257 static int dumpBlobs(Archive *fout, void *arg);
258 static void dumpPolicy(Archive *fout, PolicyInfo *polinfo);
259 static void dumpPublication(Archive *fout, PublicationInfo *pubinfo);
260 static void dumpPublicationTable(Archive *fout, PublicationRelInfo *pubrinfo);
261 static void dumpSubscription(Archive *fout, SubscriptionInfo *subinfo);
262 static void dumpDatabase(Archive *AH);
263 static void dumpDatabaseConfig(Archive *AH, PQExpBuffer outbuf,
264  const char *dbname, Oid dboid);
265 static void dumpEncoding(Archive *AH);
266 static void dumpStdStrings(Archive *AH);
267 static void dumpSearchPath(Archive *AH);
268 static void binary_upgrade_set_type_oids_by_type_oid(Archive *fout,
269  PQExpBuffer upgrade_buffer,
270  Oid pg_type_oid,
271  bool force_array_type);
272 static void binary_upgrade_set_type_oids_by_rel_oid(Archive *fout,
273  PQExpBuffer upgrade_buffer, Oid pg_rel_oid);
274 static void binary_upgrade_set_pg_class_oids(Archive *fout,
275  PQExpBuffer upgrade_buffer,
276  Oid pg_class_oid, bool is_index);
277 static void binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
278  DumpableObject *dobj,
279  const char *objtype,
280  const char *objname,
281  const char *objnamespace);
282 static const char *getAttrName(int attrnum, TableInfo *tblInfo);
283 static const char *fmtCopyColumnList(const TableInfo *ti, PQExpBuffer buffer);
284 static bool nonemptyReloptions(const char *reloptions);
285 static void appendReloptionsArrayAH(PQExpBuffer buffer, const char *reloptions,
286  const char *prefix, Archive *fout);
287 static char *get_synchronized_snapshot(Archive *fout);
288 static void setupDumpWorker(Archive *AHX);
289 static TableInfo *getRootTableInfo(TableInfo *tbinfo);
290 
291 
292 int
293 main(int argc, char **argv)
294 {
295  int c;
296  const char *filename = NULL;
297  const char *format = "p";
298  TableInfo *tblinfo;
299  int numTables;
300  DumpableObject **dobjs;
301  int numObjs;
302  DumpableObject *boundaryObjs;
303  int i;
304  int optindex;
305  RestoreOptions *ropt;
306  Archive *fout; /* the script file */
307  const char *dumpencoding = NULL;
308  const char *dumpsnapshot = NULL;
309  char *use_role = NULL;
310  int numWorkers = 1;
311  trivalue prompt_password = TRI_DEFAULT;
312  int compressLevel = -1;
313  int plainText = 0;
314  ArchiveFormat archiveFormat = archUnknown;
315  ArchiveMode archiveMode;
316 
317  static DumpOptions dopt;
318 
319  static struct option long_options[] = {
320  {"data-only", no_argument, NULL, 'a'},
321  {"blobs", no_argument, NULL, 'b'},
322  {"no-blobs", no_argument, NULL, 'B'},
323  {"clean", no_argument, NULL, 'c'},
324  {"create", no_argument, NULL, 'C'},
325  {"dbname", required_argument, NULL, 'd'},
326  {"file", required_argument, NULL, 'f'},
327  {"format", required_argument, NULL, 'F'},
328  {"host", required_argument, NULL, 'h'},
329  {"jobs", 1, NULL, 'j'},
330  {"no-reconnect", no_argument, NULL, 'R'},
331  {"no-owner", no_argument, NULL, 'O'},
332  {"port", required_argument, NULL, 'p'},
333  {"schema", required_argument, NULL, 'n'},
334  {"exclude-schema", required_argument, NULL, 'N'},
335  {"schema-only", no_argument, NULL, 's'},
336  {"superuser", required_argument, NULL, 'S'},
337  {"table", required_argument, NULL, 't'},
338  {"exclude-table", required_argument, NULL, 'T'},
339  {"no-password", no_argument, NULL, 'w'},
340  {"password", no_argument, NULL, 'W'},
341  {"username", required_argument, NULL, 'U'},
342  {"verbose", no_argument, NULL, 'v'},
343  {"no-privileges", no_argument, NULL, 'x'},
344  {"no-acl", no_argument, NULL, 'x'},
345  {"compress", required_argument, NULL, 'Z'},
346  {"encoding", required_argument, NULL, 'E'},
347  {"help", no_argument, NULL, '?'},
348  {"version", no_argument, NULL, 'V'},
349 
350  /*
351  * the following options don't have an equivalent short option letter
352  */
353  {"attribute-inserts", no_argument, &dopt.column_inserts, 1},
354  {"binary-upgrade", no_argument, &dopt.binary_upgrade, 1},
355  {"column-inserts", no_argument, &dopt.column_inserts, 1},
356  {"disable-dollar-quoting", no_argument, &dopt.disable_dollar_quoting, 1},
357  {"disable-triggers", no_argument, &dopt.disable_triggers, 1},
358  {"enable-row-security", no_argument, &dopt.enable_row_security, 1},
359  {"exclude-table-data", required_argument, NULL, 4},
360  {"if-exists", no_argument, &dopt.if_exists, 1},
361  {"inserts", no_argument, &dopt.dump_inserts, 1},
362  {"lock-wait-timeout", required_argument, NULL, 2},
363  {"no-tablespaces", no_argument, &dopt.outputNoTablespaces, 1},
364  {"quote-all-identifiers", no_argument, &quote_all_identifiers, 1},
365  {"load-via-partition-root", no_argument, &dopt.load_via_partition_root, 1},
366  {"role", required_argument, NULL, 3},
367  {"section", required_argument, NULL, 5},
368  {"serializable-deferrable", no_argument, &dopt.serializable_deferrable, 1},
369  {"snapshot", required_argument, NULL, 6},
370  {"strict-names", no_argument, &strict_names, 1},
371  {"use-set-session-authorization", no_argument, &dopt.use_setsessauth, 1},
372  {"no-comments", no_argument, &dopt.no_comments, 1},
373  {"no-publications", no_argument, &dopt.no_publications, 1},
374  {"no-security-labels", no_argument, &dopt.no_security_labels, 1},
375  {"no-synchronized-snapshots", no_argument, &dopt.no_synchronized_snapshots, 1},
376  {"no-unlogged-table-data", no_argument, &dopt.no_unlogged_table_data, 1},
377  {"no-subscriptions", no_argument, &dopt.no_subscriptions, 1},
378  {"no-sync", no_argument, NULL, 7},
379  {"on-conflict-do-nothing", no_argument, &dopt.do_nothing, 1},
380 
381  {NULL, 0, NULL, 0}
382  };
383 
384  set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_dump"));
385 
386  /*
387  * Initialize what we need for parallel execution, especially for thread
388  * support on Windows.
389  */
390  init_parallel_dump_utils();
391 
392  g_verbose = false;
393 
394  strcpy(g_comment_start, "-- ");
395  g_comment_end[0] = '\0';
396  strcpy(g_opaque_type, "opaque");
397 
398  progname = get_progname(argv[0]);
399 
400  if (argc > 1)
401  {
402  if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
403  {
404  help(progname);
405  exit_nicely(0);
406  }
407  if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
408  {
409  puts("pg_dump (PostgreSQL) " PG_VERSION);
410  exit_nicely(0);
411  }
412  }
413 
414  InitDumpOptions(&dopt);
415 
416  while ((c = getopt_long(argc, argv, "abBcCd:E:f:F:h:j:n:N:oOp:RsS:t:T:U:vwWxZ:",
417  long_options, &optindex)) != -1)
418  {
419  switch (c)
420  {
421  case 'a': /* Dump data only */
422  dopt.dataOnly = true;
423  break;
424 
425  case 'b': /* Dump blobs */
426  dopt.outputBlobs = true;
427  break;
428 
429  case 'B': /* Don't dump blobs */
430  dopt.dontOutputBlobs = true;
431  break;
432 
433  case 'c': /* clean (i.e., drop) schema prior to create */
434  dopt.outputClean = 1;
435  break;
436 
437  case 'C': /* Create DB */
438  dopt.outputCreateDB = 1;
439  break;
440 
441  case 'd': /* database name */
442  dopt.dbname = pg_strdup(optarg);
443  break;
444 
445  case 'E': /* Dump encoding */
446  dumpencoding = pg_strdup(optarg);
447  break;
448 
449  case 'f':
450  filename = pg_strdup(optarg);
451  break;
452 
453  case 'F':
454  format = pg_strdup(optarg);
455  break;
456 
457  case 'h': /* server host */
458  dopt.pghost = pg_strdup(optarg);
459  break;
460 
461  case 'j': /* number of dump jobs */
462  numWorkers = atoi(optarg);
463  break;
464 
465  case 'n': /* include schema(s) */
466  simple_string_list_append(&schema_include_patterns, optarg);
467  dopt.include_everything = false;
468  break;
469 
470  case 'N': /* exclude schema(s) */
471  simple_string_list_append(&schema_exclude_patterns, optarg);
472  break;
473 
474  case 'O': /* Don't reconnect to match owner */
475  dopt.outputNoOwner = 1;
476  break;
477 
478  case 'p': /* server port */
479  dopt.pgport = pg_strdup(optarg);
480  break;
481 
482  case 'R':
483  /* no-op, still accepted for backwards compatibility */
484  break;
485 
486  case 's': /* dump schema only */
487  dopt.schemaOnly = true;
488  break;
489 
490  case 'S': /* Username for superuser in plain text output */
491  dopt.outputSuperuser = pg_strdup(optarg);
492  break;
493 
494  case 't': /* include table(s) */
495  simple_string_list_append(&table_include_patterns, optarg);
496  dopt.include_everything = false;
497  break;
498 
499  case 'T': /* exclude table(s) */
500  simple_string_list_append(&table_exclude_patterns, optarg);
501  break;
502 
503  case 'U':
504  dopt.username = pg_strdup(optarg);
505  break;
506 
507  case 'v': /* verbose */
508  g_verbose = true;
509  break;
510 
511  case 'w':
512  prompt_password = TRI_NO;
513  break;
514 
515  case 'W':
516  prompt_password = TRI_YES;
517  break;
518 
519  case 'x': /* skip ACL dump */
520  dopt.aclsSkip = true;
521  break;
522 
523  case 'Z': /* Compression Level */
524  compressLevel = atoi(optarg);
525  if (compressLevel < 0 || compressLevel > 9)
526  {
527  write_msg(NULL, "compression level must be in range 0..9\n");
528  exit_nicely(1);
529  }
530  break;
531 
532  case 0:
533  /* This covers the long options. */
534  break;
535 
536  case 2: /* lock-wait-timeout */
537  dopt.lockWaitTimeout = pg_strdup(optarg);
538  break;
539 
540  case 3: /* SET ROLE */
541  use_role = pg_strdup(optarg);
542  break;
543 
544  case 4: /* exclude table(s) data */
545  simple_string_list_append(&tabledata_exclude_patterns, optarg);
546  break;
547 
548  case 5: /* section */
549  set_dump_section(optarg, &dopt.dumpSections);
550  break;
551 
552  case 6: /* snapshot */
553  dumpsnapshot = pg_strdup(optarg);
554  break;
555 
556  case 7: /* no-sync */
557  dosync = false;
558  break;
559 
560  default:
561  fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
562  exit_nicely(1);
563  }
564  }
565 
566  /*
567  * Non-option argument specifies database name as long as it wasn't
568  * already specified with -d / --dbname
569  */
570  if (optind < argc && dopt.dbname == NULL)
571  dopt.dbname = argv[optind++];
572 
573  /* Complain if any arguments remain */
574  if (optind < argc)
575  {
576  fprintf(stderr, _("%s: too many command-line arguments (first is \"%s\")\n"),
577  progname, argv[optind]);
578  fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
579  progname);
580  exit_nicely(1);
581  }
582 
583  /* --column-inserts implies --inserts */
584  if (dopt.column_inserts)
585  dopt.dump_inserts = 1;
586 
587  /*
588  * Binary upgrade mode implies dumping sequence data even in schema-only
589  * mode. This is not exposed as a separate option, but kept separate
590  * internally for clarity.
591  */
592  if (dopt.binary_upgrade)
593  dopt.sequence_data = 1;
594 
595  if (dopt.dataOnly && dopt.schemaOnly)
596  {
597  write_msg(NULL, "options -s/--schema-only and -a/--data-only cannot be used together\n");
598  exit_nicely(1);
599  }
600 
601  if (dopt.dataOnly && dopt.outputClean)
602  {
603  write_msg(NULL, "options -c/--clean and -a/--data-only cannot be used together\n");
604  exit_nicely(1);
605  }
606 
607  if (dopt.if_exists && !dopt.outputClean)
608  exit_horribly(NULL, "option --if-exists requires option -c/--clean\n");
609 
610  if (dopt.do_nothing && !(dopt.dump_inserts || dopt.column_inserts))
611  exit_horribly(NULL, "option --on-conflict-do-nothing requires option --inserts or --column-inserts\n");
612 
613  /* Identify archive format to emit */
614  archiveFormat = parseArchiveFormat(format, &archiveMode);
615 
616  /* archiveFormat specific setup */
617  if (archiveFormat == archNull)
618  plainText = 1;
619 
620  /* Custom and directory formats are compressed by default, others not */
621  if (compressLevel == -1)
622  {
623 #ifdef HAVE_LIBZ
624  if (archiveFormat == archCustom || archiveFormat == archDirectory)
625  compressLevel = Z_DEFAULT_COMPRESSION;
626  else
627 #endif
628  compressLevel = 0;
629  }
630 
631 #ifndef HAVE_LIBZ
632  if (compressLevel != 0)
633  write_msg(NULL, "WARNING: requested compression not available in this "
634  "installation -- archive will be uncompressed\n");
635  compressLevel = 0;
636 #endif
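/*
 * Example invocations (illustrative, not part of the original source):
 *
 *     pg_dump -Fc -f mydb.dump mydb      # custom format, zlib default level
 *     pg_dump -Fd -Z 0 -f dumpdir mydb   # directory format, compression off
 *     pg_dump -Fp -f mydb.sql mydb       # plain text, uncompressed by default
 *
 * Without zlib support, any nonzero -Z request falls back to 0 with the
 * warning above.
 */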
637 
638  /*
639  * If emitting an archive format, we always want to emit a DATABASE item,
640  * in case --create is specified at pg_restore time.
641  */
642  if (!plainText)
643  dopt.outputCreateDB = 1;
644 
645  /*
646  * On Windows we can only have at most MAXIMUM_WAIT_OBJECTS (= 64 usually)
647  * parallel jobs because that's the maximum limit for the
648  * WaitForMultipleObjects() call.
649  */
650  if (numWorkers <= 0
651 #ifdef WIN32
652  || numWorkers > MAXIMUM_WAIT_OBJECTS
653 #endif
654  )
655  exit_horribly(NULL, "invalid number of parallel jobs\n");
656 
657  /* Parallel backup only in the directory archive format so far */
658  if (archiveFormat != archDirectory && numWorkers > 1)
659  exit_horribly(NULL, "parallel backup only supported by the directory format\n");
660 
661  /* Open the output file */
662  fout = CreateArchive(filename, archiveFormat, compressLevel, dosync,
663  archiveMode, setupDumpWorker);
664 
665  /* Make dump options accessible right away */
666  SetArchiveOptions(fout, &dopt, NULL);
667 
668  /* Register the cleanup hook */
669  on_exit_close_archive(fout);
670 
671  /* Let the archiver know how noisy to be */
672  fout->verbose = g_verbose;
673 
674  /*
675  * We allow the server to be back to 8.0, and up to any minor release of
676  * our own major version. (See also version check in pg_dumpall.c.)
677  */
678  fout->minRemoteVersion = 80000;
679  fout->maxRemoteVersion = (PG_VERSION_NUM / 100) * 100 + 99;
680 
681  fout->numWorkers = numWorkers;
682 
683  /*
684  * Open the database using the Archiver, so it knows about it. Errors mean
685  * death.
686  */
687  ConnectDatabase(fout, dopt.dbname, dopt.pghost, dopt.pgport, dopt.username, prompt_password);
688  setup_connection(fout, dumpencoding, dumpsnapshot, use_role);
689 
690  /*
691  * Disable security label support if server version < v9.1.x (prevents
692  * access to nonexistent pg_seclabel catalog)
693  */
694  if (fout->remoteVersion < 90100)
695  dopt.no_security_labels = 1;
696 
697  /*
698  * On hot standbys, never try to dump unlogged table data, since it will
699  * just throw an error.
700  */
701  if (fout->isStandby)
702  dopt.no_unlogged_table_data = true;
703 
704  /* Select the appropriate subquery to convert user IDs to names */
705  if (fout->remoteVersion >= 80100)
706  username_subquery = "SELECT rolname FROM pg_catalog.pg_roles WHERE oid =";
707  else
708  username_subquery = "SELECT usename FROM pg_catalog.pg_user WHERE usesysid =";
709 
710  /* check the version for the synchronized snapshots feature */
711  if (numWorkers > 1 && fout->remoteVersion < 90200
712  && !dopt.no_synchronized_snapshots)
713  exit_horribly(NULL,
714  "Synchronized snapshots are not supported by this server version.\n"
715  "Run with --no-synchronized-snapshots instead if you do not need\n"
716  "synchronized snapshots.\n");
717 
718  /* check the version when a snapshot is explicitly specified by user */
719  if (dumpsnapshot && fout->remoteVersion < 90200)
720  exit_horribly(NULL,
721  "Exported snapshots are not supported by this server version.\n");
722 
723  /*
724  * Find the last built-in OID, if needed (prior to 8.1)
725  *
726  * With 8.1 and above, we can just use FirstNormalObjectId - 1.
727  */
728  if (fout->remoteVersion < 80100)
729  g_last_builtin_oid = findLastBuiltinOid_V71(fout);
730  else
731  g_last_builtin_oid = FirstNormalObjectId - 1;
732 
733  if (g_verbose)
734  write_msg(NULL, "last built-in OID is %u\n", g_last_builtin_oid);
735 
736  /* Expand schema selection patterns into OID lists */
737  if (schema_include_patterns.head != NULL)
738  {
739  expand_schema_name_patterns(fout, &schema_include_patterns,
740  &schema_include_oids,
741  strict_names);
742  if (schema_include_oids.head == NULL)
743  exit_horribly(NULL, "no matching schemas were found\n");
744  }
745  expand_schema_name_patterns(fout, &schema_exclude_patterns,
746  &schema_exclude_oids,
747  false);
748  /* non-matching exclusion patterns aren't an error */
749 
750  /* Expand table selection patterns into OID lists */
751  if (table_include_patterns.head != NULL)
752  {
753  expand_table_name_patterns(fout, &table_include_patterns,
754  &table_include_oids,
755  strict_names);
756  if (table_include_oids.head == NULL)
757  exit_horribly(NULL, "no matching tables were found\n");
758  }
759  expand_table_name_patterns(fout, &table_exclude_patterns,
760  &table_exclude_oids,
761  false);
762 
763  expand_table_name_patterns(fout, &tabledata_exclude_patterns,
764  &tabledata_exclude_oids,
765  false);
766 
767  /* non-matching exclusion patterns aren't an error */
768 
769  /*
770  * Dumping blobs is the default for dumps where an inclusion switch is not
771  * used (an "include everything" dump). -B can be used to exclude blobs
772  * from those dumps. -b can be used to include blobs even when an
773  * inclusion switch is used.
774  *
775  * -s means "schema only" and blobs are data, not schema, so we never
776  * include blobs when -s is used.
777  */
778  if (dopt.include_everything && !dopt.schemaOnly && !dopt.dontOutputBlobs)
779  dopt.outputBlobs = true;
780 
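/*
 * Illustrative examples of the rules above (not part of the original source):
 *
 *     pg_dump mydb                  # "include everything" dump: blobs included
 *     pg_dump -B mydb               # blobs explicitly excluded
 *     pg_dump -t orders mydb        # table-selective dump: blobs omitted ...
 *     pg_dump -t orders -b mydb     # ... unless -b forces them back in
 *     pg_dump -s mydb               # schema only: blob data never dumped
 */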
781  /*
782  * Now scan the database and create DumpableObject structs for all the
783  * objects we intend to dump.
784  */
785  tblinfo = getSchemaData(fout, &numTables);
786 
787  if (fout->remoteVersion < 80400)
788  guessConstraintInheritance(tblinfo, numTables);
789 
790  if (!dopt.schemaOnly)
791  {
792  getTableData(&dopt, tblinfo, numTables, 0);
793  buildMatViewRefreshDependencies(fout);
794  if (dopt.dataOnly)
795  getTableDataFKConstraints();
796  }
797 
798  if (dopt.schemaOnly && dopt.sequence_data)
799  getTableData(&dopt, tblinfo, numTables, RELKIND_SEQUENCE);
800 
801  /*
802  * In binary-upgrade mode, we do not have to worry about the actual blob
803  * data or the associated metadata that resides in the pg_largeobject and
804  * pg_largeobject_metadata tables, respectively.
805  *
806  * However, we do need to collect blob information as there may be
807  * comments or other information on blobs that we do need to dump out.
808  */
809  if (dopt.outputBlobs || dopt.binary_upgrade)
810  getBlobs(fout);
811 
812  /*
813  * Collect dependency data to assist in ordering the objects.
814  */
815  getDependencies(fout);
816 
817  /* Lastly, create dummy objects to represent the section boundaries */
818  boundaryObjs = createBoundaryObjects();
819 
820  /* Get pointers to all the known DumpableObjects */
821  getDumpableObjects(&dobjs, &numObjs);
822 
823  /*
824  * Add dummy dependencies to enforce the dump section ordering.
825  */
826  addBoundaryDependencies(dobjs, numObjs, boundaryObjs);
827 
828  /*
829  * Sort the objects into a safe dump order (no forward references).
830  *
831  * We rely on dependency information to help us determine a safe order, so
832  * the initial sort is mostly for cosmetic purposes: we sort by name to
833  * ensure that logically identical schemas will dump identically.
834  */
835  sortDumpableObjectsByTypeName(dobjs, numObjs);
836 
837  sortDumpableObjects(dobjs, numObjs,
838  boundaryObjs[0].dumpId, boundaryObjs[1].dumpId);
839 
840  /*
841  * Create archive TOC entries for all the objects to be dumped, in a safe
842  * order.
843  */
844 
845  /* First the special ENCODING, STDSTRINGS, and SEARCHPATH entries. */
846  dumpEncoding(fout);
847  dumpStdStrings(fout);
848  dumpSearchPath(fout);
849 
850  /* The database items are always next, unless we don't want them at all */
851  if (dopt.outputCreateDB)
852  dumpDatabase(fout);
853 
854  /* Now the rearrangeable objects. */
855  for (i = 0; i < numObjs; i++)
856  dumpDumpableObject(fout, dobjs[i]);
857 
858  /*
859  * Set up options info to ensure we dump what we want.
860  */
861  ropt = NewRestoreOptions();
862  ropt->filename = filename;
863 
864  /* if you change this list, see dumpOptionsFromRestoreOptions */
865  ropt->dropSchema = dopt.outputClean;
866  ropt->dataOnly = dopt.dataOnly;
867  ropt->schemaOnly = dopt.schemaOnly;
868  ropt->if_exists = dopt.if_exists;
869  ropt->column_inserts = dopt.column_inserts;
870  ropt->dumpSections = dopt.dumpSections;
871  ropt->aclsSkip = dopt.aclsSkip;
872  ropt->superuser = dopt.outputSuperuser;
873  ropt->createDB = dopt.outputCreateDB;
874  ropt->noOwner = dopt.outputNoOwner;
875  ropt->noTablespace = dopt.outputNoTablespaces;
876  ropt->disable_triggers = dopt.disable_triggers;
877  ropt->use_setsessauth = dopt.use_setsessauth;
878  ropt->disable_dollar_quoting = dopt.disable_dollar_quoting;
879  ropt->dump_inserts = dopt.dump_inserts;
880  ropt->no_comments = dopt.no_comments;
881  ropt->no_publications = dopt.no_publications;
882  ropt->no_security_labels = dopt.no_security_labels;
883  ropt->no_subscriptions = dopt.no_subscriptions;
884  ropt->lockWaitTimeout = dopt.lockWaitTimeout;
885  ropt->include_everything = dopt.include_everything;
886  ropt->enable_row_security = dopt.enable_row_security;
887  ropt->sequence_data = dopt.sequence_data;
888  ropt->binary_upgrade = dopt.binary_upgrade;
889 
890  if (compressLevel == -1)
891  ropt->compression = 0;
892  else
893  ropt->compression = compressLevel;
894 
895  ropt->suppressDumpWarnings = true; /* We've already shown them */
896 
897  SetArchiveOptions(fout, &dopt, ropt);
898 
899  /* Mark which entries should be output */
900  ProcessArchiveRestoreOptions(fout);
901 
902  /*
903  * The archive's TOC entries are now marked as to which ones will actually
904  * be output, so we can set up their dependency lists properly. This isn't
905  * necessary for plain-text output, though.
906  */
907  if (!plainText)
908  BuildArchiveDependencies(fout);
909 
910  /*
911  * And finally we can do the actual output.
912  *
913  * Note: for non-plain-text output formats, the output file is written
914  * inside CloseArchive(). This is, um, bizarre; but not worth changing
915  * right now.
916  */
917  if (plainText)
918  RestoreArchive(fout);
919 
920  CloseArchive(fout);
921 
922  exit_nicely(0);
923 }
924 
925 
926 static void
927 help(const char *progname)
928 {
929  printf(_("%s dumps a database as a text file or to other formats.\n\n"), progname);
930  printf(_("Usage:\n"));
931  printf(_(" %s [OPTION]... [DBNAME]\n"), progname);
932 
933  printf(_("\nGeneral options:\n"));
934  printf(_(" -f, --file=FILENAME output file or directory name\n"));
935  printf(_(" -F, --format=c|d|t|p output file format (custom, directory, tar,\n"
936  " plain text (default))\n"));
937  printf(_(" -j, --jobs=NUM use this many parallel jobs to dump\n"));
938  printf(_(" -v, --verbose verbose mode\n"));
939  printf(_(" -V, --version output version information, then exit\n"));
940  printf(_(" -Z, --compress=0-9 compression level for compressed formats\n"));
941  printf(_(" --lock-wait-timeout=TIMEOUT fail after waiting TIMEOUT for a table lock\n"));
942  printf(_(" --no-sync do not wait for changes to be written safely to disk\n"));
943  printf(_(" -?, --help show this help, then exit\n"));
944 
945  printf(_("\nOptions controlling the output content:\n"));
946  printf(_(" -a, --data-only dump only the data, not the schema\n"));
947  printf(_(" -b, --blobs include large objects in dump\n"));
948  printf(_(" -B, --no-blobs exclude large objects in dump\n"));
949  printf(_(" -c, --clean clean (drop) database objects before recreating\n"));
950  printf(_(" -C, --create include commands to create database in dump\n"));
951  printf(_(" -E, --encoding=ENCODING dump the data in encoding ENCODING\n"));
952  printf(_(" -n, --schema=SCHEMA dump the named schema(s) only\n"));
953  printf(_(" -N, --exclude-schema=SCHEMA do NOT dump the named schema(s)\n"));
954  printf(_(" -O, --no-owner skip restoration of object ownership in\n"
955  " plain-text format\n"));
956  printf(_(" -s, --schema-only dump only the schema, no data\n"));
957  printf(_(" -S, --superuser=NAME superuser user name to use in plain-text format\n"));
958  printf(_(" -t, --table=TABLE dump the named table(s) only\n"));
959  printf(_(" -T, --exclude-table=TABLE do NOT dump the named table(s)\n"));
960  printf(_(" -x, --no-privileges do not dump privileges (grant/revoke)\n"));
961  printf(_(" --binary-upgrade for use by upgrade utilities only\n"));
962  printf(_(" --column-inserts dump data as INSERT commands with column names\n"));
963  printf(_(" --disable-dollar-quoting disable dollar quoting, use SQL standard quoting\n"));
964  printf(_(" --disable-triggers disable triggers during data-only restore\n"));
965  printf(_(" --enable-row-security enable row security (dump only content user has\n"
966  " access to)\n"));
967  printf(_(" --exclude-table-data=TABLE do NOT dump data for the named table(s)\n"));
968  printf(_(" --if-exists use IF EXISTS when dropping objects\n"));
969  printf(_(" --inserts dump data as INSERT commands, rather than COPY\n"));
970  printf(_(" --load-via-partition-root load partitions via the root table\n"));
971  printf(_(" --no-comments do not dump comments\n"));
972  printf(_(" --no-publications do not dump publications\n"));
973  printf(_(" --no-security-labels do not dump security label assignments\n"));
974  printf(_(" --no-subscriptions do not dump subscriptions\n"));
975  printf(_(" --no-synchronized-snapshots do not use synchronized snapshots in parallel jobs\n"));
976  printf(_(" --no-tablespaces do not dump tablespace assignments\n"));
977  printf(_(" --no-unlogged-table-data do not dump unlogged table data\n"));
978  printf(_(" --on-conflict-do-nothing add ON CONFLICT DO NOTHING to INSERT commands\n"));
979  printf(_(" --quote-all-identifiers quote all identifiers, even if not key words\n"));
980  printf(_(" --section=SECTION dump named section (pre-data, data, or post-data)\n"));
981  printf(_(" --serializable-deferrable wait until the dump can run without anomalies\n"));
982  printf(_(" --snapshot=SNAPSHOT use given snapshot for the dump\n"));
983  printf(_(" --strict-names require table and/or schema include patterns to\n"
984  " match at least one entity each\n"));
985  printf(_(" --use-set-session-authorization\n"
986  " use SET SESSION AUTHORIZATION commands instead of\n"
987  " ALTER OWNER commands to set ownership\n"));
988 
989  printf(_("\nConnection options:\n"));
990  printf(_(" -d, --dbname=DBNAME database to dump\n"));
991  printf(_(" -h, --host=HOSTNAME database server host or socket directory\n"));
992  printf(_(" -p, --port=PORT database server port number\n"));
993  printf(_(" -U, --username=NAME connect as specified database user\n"));
994  printf(_(" -w, --no-password never prompt for password\n"));
995  printf(_(" -W, --password force password prompt (should happen automatically)\n"));
996  printf(_(" --role=ROLENAME do SET ROLE before dump\n"));
997 
998  printf(_("\nIf no database name is supplied, then the PGDATABASE environment\n"
999  "variable value is used.\n\n"));
1000  printf(_("Report bugs to <pgsql-bugs@postgresql.org>.\n"));
1001 }
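/*
 * Typical invocations of the options listed above (illustrative, not part of
 * the original source; host and database names are made up):
 *
 *     pg_dump -h db.example.com -U admin -Fc -f app.dump appdb
 *     pg_dump -Fd -j 4 -f appdb.dir appdb       # parallel dump, directory format
 *     pg_dump -n app -T 'app.audit_*' appdb > app.sql
 */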
1002 
1003 static void
1004 setup_connection(Archive *AH, const char *dumpencoding,
1005  const char *dumpsnapshot, char *use_role)
1006 {
1007  DumpOptions *dopt = AH->dopt;
1008  PGconn *conn = GetConnection(AH);
1009  const char *std_strings;
1010 
1011  PQclear(ExecuteSqlQueryForSingleRow(AH, ALWAYS_SECURE_SEARCH_PATH_SQL));
1012 
1013  /*
1014  * Set the client encoding if requested.
1015  */
1016  if (dumpencoding)
1017  {
1018  if (PQsetClientEncoding(conn, dumpencoding) < 0)
1019  exit_horribly(NULL, "invalid client encoding \"%s\" specified\n",
1020  dumpencoding);
1021  }
1022 
1023  /*
1024  * Get the active encoding and the standard_conforming_strings setting, so
1025  * we know how to escape strings.
1026  */
1027  AH->encoding = PQclientEncoding(conn);
1028 
1029  std_strings = PQparameterStatus(conn, "standard_conforming_strings");
1030  AH->std_strings = (std_strings && strcmp(std_strings, "on") == 0);
1031 
1032  /*
1033  * Set the role if requested. In a parallel dump worker, we'll be passed
1034  * use_role == NULL, but AH->use_role is already set (if user specified it
1035  * originally) and we should use that.
1036  */
1037  if (!use_role && AH->use_role)
1038  use_role = AH->use_role;
1039 
1040  /* Set the role if requested */
1041  if (use_role && AH->remoteVersion >= 80100)
1042  {
1043  PQExpBuffer query = createPQExpBuffer();
1044 
1045  appendPQExpBuffer(query, "SET ROLE %s", fmtId(use_role));
1046  ExecuteSqlStatement(AH, query->data);
1047  destroyPQExpBuffer(query);
1048 
1049  /* save it for possible later use by parallel workers */
1050  if (!AH->use_role)
1051  AH->use_role = pg_strdup(use_role);
1052  }
1053 
1054  /* Set the datestyle to ISO to ensure the dump's portability */
1055  ExecuteSqlStatement(AH, "SET DATESTYLE = ISO");
1056 
1057  /* Likewise, avoid using sql_standard intervalstyle */
1058  if (AH->remoteVersion >= 80400)
1059  ExecuteSqlStatement(AH, "SET INTERVALSTYLE = POSTGRES");
1060 
1061  /*
1062  * Set extra_float_digits so that we can dump float data exactly (given
1063  * correctly implemented float I/O code, anyway)
1064  */
1065  if (AH->remoteVersion >= 90000)
1066  ExecuteSqlStatement(AH, "SET extra_float_digits TO 3");
1067  else
1068  ExecuteSqlStatement(AH, "SET extra_float_digits TO 2");
1069 
1070  /*
1071  * If synchronized scanning is supported, disable it, to prevent
1072  * unpredictable changes in row ordering across a dump and reload.
1073  */
1074  if (AH->remoteVersion >= 80300)
1075  ExecuteSqlStatement(AH, "SET synchronize_seqscans TO off");
1076 
1077  /*
1078  * Disable timeouts if supported.
1079  */
1080  ExecuteSqlStatement(AH, "SET statement_timeout = 0");
1081  if (AH->remoteVersion >= 90300)
1082  ExecuteSqlStatement(AH, "SET lock_timeout = 0");
1083  if (AH->remoteVersion >= 90600)
1084  ExecuteSqlStatement(AH, "SET idle_in_transaction_session_timeout = 0");
1085 
1086  /*
1087  * Quote all identifiers, if requested.
1088  */
1089  if (quote_all_identifiers && AH->remoteVersion >= 90100)
1090  ExecuteSqlStatement(AH, "SET quote_all_identifiers = true");
1091 
1092  /*
1093  * Adjust row-security mode, if supported.
1094  */
1095  if (AH->remoteVersion >= 90500)
1096  {
1097  if (dopt->enable_row_security)
1098  ExecuteSqlStatement(AH, "SET row_security = on");
1099  else
1100  ExecuteSqlStatement(AH, "SET row_security = off");
1101  }
1102 
1103  /*
1104  * Start transaction-snapshot mode transaction to dump consistent data.
1105  */
1106  ExecuteSqlStatement(AH, "BEGIN");
1107  if (AH->remoteVersion >= 90100)
1108  {
1109  /*
1110  * To support the combination of serializable_deferrable with the jobs
1111  * option we use REPEATABLE READ for the worker connections that are
1112  * passed a snapshot. As long as the snapshot is acquired in a
1113  * SERIALIZABLE, READ ONLY, DEFERRABLE transaction, its use within a
1114  * REPEATABLE READ transaction provides the appropriate integrity
1115  * guarantees. This is a kluge, but safe for back-patching.
1116  */
1117  if (dopt->serializable_deferrable && AH->sync_snapshot_id == NULL)
1118  ExecuteSqlStatement(AH,
1119  "SET TRANSACTION ISOLATION LEVEL "
1120  "SERIALIZABLE, READ ONLY, DEFERRABLE");
1121  else
1122  ExecuteSqlStatement(AH,
1123  "SET TRANSACTION ISOLATION LEVEL "
1124  "REPEATABLE READ, READ ONLY");
1125  }
1126  else
1127  {
1128  ExecuteSqlStatement(AH,
1129  "SET TRANSACTION ISOLATION LEVEL "
1130  "SERIALIZABLE, READ ONLY");
1131  }
1132 
1133  /*
1134  * If user specified a snapshot to use, select that. In a parallel dump
1135  * worker, we'll be passed dumpsnapshot == NULL, but AH->sync_snapshot_id
1136  * is already set (if the server can handle it) and we should use that.
1137  */
1138  if (dumpsnapshot)
1139  AH->sync_snapshot_id = pg_strdup(dumpsnapshot);
1140 
1141  if (AH->sync_snapshot_id)
1142  {
1143  PQExpBuffer query = createPQExpBuffer();
1144 
1145  appendPQExpBuffer(query, "SET TRANSACTION SNAPSHOT ");
1146  appendStringLiteralConn(query, AH->sync_snapshot_id, conn);
1147  ExecuteSqlStatement(AH, query->data);
1148  destroyPQExpBuffer(query);
1149  }
1150  else if (AH->numWorkers > 1 &&
1151  AH->remoteVersion >= 90200 &&
1152  !dopt->no_synchronized_snapshots)
1153  {
1154  if (AH->isStandby && AH->remoteVersion < 100000)
1155  exit_horribly(NULL,
1156  "Synchronized snapshots on standby servers are not supported by this server version.\n"
1157  "Run with --no-synchronized-snapshots instead if you do not need\n"
1158  "synchronized snapshots.\n");
1159 
1160 
1161  AH->sync_snapshot_id = get_synchronized_snapshot(AH);
1162  }
1163 }
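/*
 * Summary sketch (not part of the original source): against a current server,
 * the function above amounts to roughly this session setup, all of it taken
 * from the statements issued in the code:
 *
 *     SET DATESTYLE = ISO;
 *     SET INTERVALSTYLE = POSTGRES;
 *     SET extra_float_digits TO 3;
 *     SET synchronize_seqscans TO off;
 *     SET statement_timeout = 0;
 *     SET lock_timeout = 0;
 *     SET idle_in_transaction_session_timeout = 0;
 *     SET row_security = off;            -- on, with --enable-row-security
 *     BEGIN;
 *     SET TRANSACTION ISOLATION LEVEL REPEATABLE READ, READ ONLY;
 *
 * plus SET ROLE and/or SET TRANSACTION SNAPSHOT when --role or --snapshot is
 * in effect.
 */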
1164 
1165 /* Set up connection for a parallel worker process */
1166 static void
1167 setupDumpWorker(Archive *AH)
1168 {
1169  /*
1170  * We want to re-select all the same values the master connection is
1171  * using. We'll have inherited directly-usable values in
1172  * AH->sync_snapshot_id and AH->use_role, but we need to translate the
1173  * inherited encoding value back to a string to pass to setup_connection.
1174  */
1175  setup_connection(AH,
1176  pg_encoding_to_char(AH->encoding),
1177  NULL,
1178  NULL);
1179 }
1180 
1181 static char *
1182 get_synchronized_snapshot(Archive *fout)
1183 {
1184  char *query = "SELECT pg_catalog.pg_export_snapshot()";
1185  char *result;
1186  PGresult *res;
1187 
1188  res = ExecuteSqlQueryForSingleRow(fout, query);
1189  result = pg_strdup(PQgetvalue(res, 0, 0));
1190  PQclear(res);
1191 
1192  return result;
1193 }
1194 
1195 static ArchiveFormat
1196 parseArchiveFormat(const char *format, ArchiveMode *mode)
1197 {
1198  ArchiveFormat archiveFormat;
1199 
1200  *mode = archModeWrite;
1201 
1202  if (pg_strcasecmp(format, "a") == 0 || pg_strcasecmp(format, "append") == 0)
1203  {
1204  /* This is used by pg_dumpall, and is not documented */
1205  archiveFormat = archNull;
1206  *mode = archModeAppend;
1207  }
1208  else if (pg_strcasecmp(format, "c") == 0)
1209  archiveFormat = archCustom;
1210  else if (pg_strcasecmp(format, "custom") == 0)
1211  archiveFormat = archCustom;
1212  else if (pg_strcasecmp(format, "d") == 0)
1213  archiveFormat = archDirectory;
1214  else if (pg_strcasecmp(format, "directory") == 0)
1215  archiveFormat = archDirectory;
1216  else if (pg_strcasecmp(format, "p") == 0)
1217  archiveFormat = archNull;
1218  else if (pg_strcasecmp(format, "plain") == 0)
1219  archiveFormat = archNull;
1220  else if (pg_strcasecmp(format, "t") == 0)
1221  archiveFormat = archTar;
1222  else if (pg_strcasecmp(format, "tar") == 0)
1223  archiveFormat = archTar;
1224  else
1225  exit_horribly(NULL, "invalid output format \"%s\" specified\n", format);
1226  return archiveFormat;
1227 }
1228 
1229 /*
1230  * Find the OIDs of all schemas matching the given list of patterns,
1231  * and append them to the given OID list.
1232  */
1233 static void
1234 expand_schema_name_patterns(Archive *fout,
1235  SimpleStringList *patterns,
1236  SimpleOidList *oids,
1237  bool strict_names)
1238 {
1239  PQExpBuffer query;
1240  PGresult *res;
1241  SimpleStringListCell *cell;
1242  int i;
1243 
1244  if (patterns->head == NULL)
1245  return; /* nothing to do */
1246 
1247  query = createPQExpBuffer();
1248 
1249  /*
1250  * The loop below runs multiple SELECTs, which might sometimes result in
1251  * duplicate entries in the OID list, but we don't care.
1252  */
1253 
1254  for (cell = patterns->head; cell; cell = cell->next)
1255  {
1256  appendPQExpBuffer(query,
1257  "SELECT oid FROM pg_catalog.pg_namespace n\n");
1258  processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1259  false, NULL, "n.nspname", NULL, NULL);
1260 
1261  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1262  if (strict_names && PQntuples(res) == 0)
1263  exit_horribly(NULL, "no matching schemas were found for pattern \"%s\"\n", cell->val);
1264 
1265  for (i = 0; i < PQntuples(res); i++)
1266  {
1267  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1268  }
1269 
1270  PQclear(res);
1271  resetPQExpBuffer(query);
1272  }
1273 
1274  destroyPQExpBuffer(query);
1275 }
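/*
 * Illustrative sketch (an assumption about processSQLNamePattern's output,
 * which is not shown in this file): a switch such as  -n 'app*'  is turned
 * into an anchored regular expression, so the generated query looks roughly
 * like
 *
 *     SELECT oid FROM pg_catalog.pg_namespace n
 *     WHERE n.nspname ~ '^(app.*)$'
 *
 * and every matching schema OID is appended to the include or exclude list.
 */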
1276 
1277 /*
1278  * Find the OIDs of all tables matching the given list of patterns,
1279  * and append them to the given OID list.
1280  */
1281 static void
1282 expand_table_name_patterns(Archive *fout,
1283  SimpleStringList *patterns, SimpleOidList *oids,
1284  bool strict_names)
1285 {
1286  PQExpBuffer query;
1287  PGresult *res;
1288  SimpleStringListCell *cell;
1289  int i;
1290 
1291  if (patterns->head == NULL)
1292  return; /* nothing to do */
1293 
1294  query = createPQExpBuffer();
1295 
1296  /*
1297  * this might sometimes result in duplicate entries in the OID list, but
1298  * we don't care.
1299  */
1300 
1301  for (cell = patterns->head; cell; cell = cell->next)
1302  {
1303  /*
1304  * Query must remain ABSOLUTELY devoid of unqualified names. This
1305  * would be unnecessary given a pg_table_is_visible() variant taking a
1306  * search_path argument.
1307  */
1308  appendPQExpBuffer(query,
1309  "SELECT c.oid"
1310  "\nFROM pg_catalog.pg_class c"
1311  "\n LEFT JOIN pg_catalog.pg_namespace n"
1312  "\n ON n.oid OPERATOR(pg_catalog.=) c.relnamespace"
1313  "\nWHERE c.relkind OPERATOR(pg_catalog.=) ANY"
1314  "\n (array['%c', '%c', '%c', '%c', '%c', '%c'])\n",
1315  RELKIND_RELATION, RELKIND_SEQUENCE, RELKIND_VIEW,
1316  RELKIND_MATVIEW, RELKIND_FOREIGN_TABLE,
1317  RELKIND_PARTITIONED_TABLE);
1318  processSQLNamePattern(GetConnection(fout), query, cell->val, true,
1319  false, "n.nspname", "c.relname", NULL,
1320  "pg_catalog.pg_table_is_visible(c.oid)");
1321 
1322  ExecuteSqlStatement(fout, "RESET search_path");
1323  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1324  PQclear(ExecuteSqlQueryForSingleRow(fout,
1325  ALWAYS_SECURE_SEARCH_PATH_SQL));
1326  if (strict_names && PQntuples(res) == 0)
1327  exit_horribly(NULL, "no matching tables were found for pattern \"%s\"\n", cell->val);
1328 
1329  for (i = 0; i < PQntuples(res); i++)
1330  {
1331  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1332  }
1333 
1334  PQclear(res);
1335  resetPQExpBuffer(query);
1336  }
1337 
1338  destroyPQExpBuffer(query);
1339 }
1340 
1341 /*
1342  * checkExtensionMembership
1343  * Determine whether object is an extension member, and if so,
1344  * record an appropriate dependency and set the object's dump flag.
1345  *
1346  * It's important to call this for each object that could be an extension
1347  * member. Generally, we integrate this with determining the object's
1348  * to-be-dumped-ness, since extension membership overrides other rules for that.
1349  *
1350  * Returns true if object is an extension member, else false.
1351  */
1352 static bool
1353 checkExtensionMembership(DumpableObject *dobj, Archive *fout)
1354 {
1355  ExtensionInfo *ext = findOwningExtension(dobj->catId);
1356 
1357  if (ext == NULL)
1358  return false;
1359 
1360  dobj->ext_member = true;
1361 
1362  /* Record dependency so that getDependencies needn't deal with that */
1363  addObjectDependency(dobj, ext->dobj.dumpId);
1364 
1365  /*
1366  * In 9.6 and above, mark the member object to have any non-initial ACL,
1367  * policies, and security labels dumped.
1368  *
1369  * Note that any initial ACLs (see pg_init_privs) will be removed when we
1370  * extract the information about the object. We don't provide support for
1371  * initial policies and security labels and it seems unlikely for those to
1372  * ever exist, but we may have to revisit this later.
1373  *
1374  * Prior to 9.6, we do not include any extension member components.
1375  *
1376  * In binary upgrades, we still dump all components of the members
1377  * individually, since the idea is to exactly reproduce the database
1378  * contents rather than replace the extension contents with something
1379  * different.
1380  */
1381  if (fout->dopt->binary_upgrade)
1382  dobj->dump = ext->dobj.dump;
1383  else
1384  {
1385  if (fout->remoteVersion < 90600)
1386  dobj->dump = DUMP_COMPONENT_NONE;
1387  else
1388  dobj->dump = ext->dobj.dump_contains & (DUMP_COMPONENT_ACL |
1389  DUMP_COMPONENT_SECLABEL |
1390  DUMP_COMPONENT_POLICY);
1391  }
1392 
1393  return true;
1394 }
1395 
1396 /*
1397  * selectDumpableNamespace: policy-setting subroutine
1398  * Mark a namespace as to be dumped or not
1399  */
1400 static void
1401 selectDumpableNamespace(NamespaceInfo *nsinfo, Archive *fout)
1402 {
1403  /*
1404  * If specific tables are being dumped, do not dump any complete
1405  * namespaces. If specific namespaces are being dumped, dump just those
1406  * namespaces. Otherwise, dump all non-system namespaces.
1407  */
1408  if (table_include_oids.head != NULL)
1409  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1410  else if (schema_include_oids.head != NULL)
1411  nsinfo->dobj.dump_contains = nsinfo->dobj.dump =
1412  simple_oid_list_member(&schema_include_oids,
1413  nsinfo->dobj.catId.oid) ?
1414  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1415  else if (fout->remoteVersion >= 90600 &&
1416  strcmp(nsinfo->dobj.name, "pg_catalog") == 0)
1417  {
1418  /*
1419  * In 9.6 and above, we dump out any ACLs defined in pg_catalog, if
1420  * they are interesting (and not the original ACLs which were set at
1421  * initdb time, see pg_init_privs).
1422  */
1423  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
1424  }
1425  else if (strncmp(nsinfo->dobj.name, "pg_", 3) == 0 ||
1426  strcmp(nsinfo->dobj.name, "information_schema") == 0)
1427  {
1428  /* Other system schemas don't get dumped */
1429  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1430  }
1431  else if (strcmp(nsinfo->dobj.name, "public") == 0)
1432  {
1433  /*
1434  * The public schema is a strange beast that sits in a sort of
1435  * no-mans-land between being a system object and a user object. We
1436  * don't want to dump creation or comment commands for it, because
1437  * that complicates matters for non-superuser use of pg_dump. But we
1438  * should dump any ACL changes that have occurred for it, and of
1439  * course we should dump contained objects.
1440  */
1441  nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
1442  nsinfo->dobj.dump_contains = DUMP_COMPONENT_ALL;
1443  }
1444  else
1445  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ALL;
1446 
1447  /*
1448  * In any case, a namespace can be excluded by an exclusion switch
1449  */
1450  if (nsinfo->dobj.dump_contains &&
1451  simple_oid_list_member(&schema_exclude_oids,
1452  nsinfo->dobj.catId.oid))
1453  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1454 
1455  /*
1456  * If the schema belongs to an extension, allow extension membership to
1457  * override the dump decision for the schema itself. However, this does
1458  * not change dump_contains, so this won't change what we do with objects
1459  * within the schema. (If they belong to the extension, they'll get
1460  * suppressed by it, otherwise not.)
1461  */
1462  (void) checkExtensionMembership(&nsinfo->dobj, fout);
1463 }
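/*
 * Illustrative behaviour of the policy above (not part of the original
 * source):
 *
 *     pg_dump -t app.orders mydb          # no complete schemas are dumped
 *     pg_dump -n 'app*' -N app_old mydb   # schemas matching app* except app_old
 *     pg_dump mydb                        # all non-system schemas; "public"
 *                                         # gets its ACL changes, but no
 *                                         # CREATE SCHEMA or COMMENT
 */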
1464 
1465 /*
1466  * selectDumpableTable: policy-setting subroutine
1467  * Mark a table as to be dumped or not
1468  */
1469 static void
1470 selectDumpableTable(TableInfo *tbinfo, Archive *fout)
1471 {
1472  if (checkExtensionMembership(&tbinfo->dobj, fout))
1473  return; /* extension membership overrides all else */
1474 
1475  /*
1476  * If specific tables are being dumped, dump just those tables; else, dump
1477  * according to the parent namespace's dump flag.
1478  */
1479  if (table_include_oids.head != NULL)
1480  tbinfo->dobj.dump = simple_oid_list_member(&table_include_oids,
1481  tbinfo->dobj.catId.oid) ?
1482  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1483  else
1484  tbinfo->dobj.dump = tbinfo->dobj.namespace->dobj.dump_contains;
1485 
1486  /*
1487  * In any case, a table can be excluded by an exclusion switch
1488  */
1489  if (tbinfo->dobj.dump &&
1490  simple_oid_list_member(&table_exclude_oids,
1491  tbinfo->dobj.catId.oid))
1492  tbinfo->dobj.dump = DUMP_COMPONENT_NONE;
1493 }
1494 
1495 /*
1496  * selectDumpableType: policy-setting subroutine
1497  * Mark a type as to be dumped or not
1498  *
1499  * If it's a table's rowtype or an autogenerated array type, we also apply a
1500  * special type code to facilitate sorting into the desired order. (We don't
1501  * want to consider those to be ordinary types because that would bring tables
1502  * up into the datatype part of the dump order.) We still set the object's
1503  * dump flag; that's not going to cause the dummy type to be dumped, but we
1504  * need it so that casts involving such types will be dumped correctly -- see
1505  * dumpCast. This means the flag should be set the same as for the underlying
1506  * object (the table or base type).
1507  */
1508 static void
1509 selectDumpableType(TypeInfo *tyinfo, Archive *fout)
1510 {
1511  /* skip complex types, except for standalone composite types */
1512  if (OidIsValid(tyinfo->typrelid) &&
1513  tyinfo->typrelkind != RELKIND_COMPOSITE_TYPE)
1514  {
1515  TableInfo *tytable = findTableByOid(tyinfo->typrelid);
1516 
1517  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1518  if (tytable != NULL)
1519  tyinfo->dobj.dump = tytable->dobj.dump;
1520  else
1521  tyinfo->dobj.dump = DUMP_COMPONENT_NONE;
1522  return;
1523  }
1524 
1525  /* skip auto-generated array types */
1526  if (tyinfo->isArray)
1527  {
1528  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1529 
1530  /*
1531  * Fall through to set the dump flag; we assume that the subsequent
1532  * rules will do the same thing as they would for the array's base
1533  * type. (We cannot reliably look up the base type here, since
1534  * getTypes may not have processed it yet.)
1535  */
1536  }
1537 
1538  if (checkExtensionMembership(&tyinfo->dobj, fout))
1539  return; /* extension membership overrides all else */
1540 
1541  /* Dump based on if the contents of the namespace are being dumped */
1542  tyinfo->dobj.dump = tyinfo->dobj.namespace->dobj.dump_contains;
1543 }
1544 
1545 /*
1546  * selectDumpableDefaultACL: policy-setting subroutine
1547  * Mark a default ACL as to be dumped or not
1548  *
1549  * For per-schema default ACLs, dump if the schema is to be dumped.
1550  * Otherwise dump if we are dumping "everything". Note that dataOnly
1551  * and aclsSkip are checked separately.
1552  */
1553 static void
1554 selectDumpableDefaultACL(DefaultACLInfo *dinfo, DumpOptions *dopt)
1555 {
1556  /* Default ACLs can't be extension members */
1557 
1558  if (dinfo->dobj.namespace)
1559  /* default ACLs are considered part of the namespace */
1560  dinfo->dobj.dump = dinfo->dobj.namespace->dobj.dump_contains;
1561  else
1562  dinfo->dobj.dump = dopt->include_everything ?
1563  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1564 }
1565 
1566 /*
1567  * selectDumpableCast: policy-setting subroutine
1568  * Mark a cast as to be dumped or not
1569  *
1570  * Casts do not belong to any particular namespace (since they haven't got
1571  * names), nor do they have identifiable owners. To distinguish user-defined
1572  * casts from built-in ones, we must resort to checking whether the cast's
1573  * OID is in the range reserved for initdb.
1574  */
1575 static void
1576 selectDumpableCast(CastInfo *cast, Archive *fout)
1577 {
1578  if (checkExtensionMembership(&cast->dobj, fout))
1579  return; /* extension membership overrides all else */
1580 
1581  /*
1582  * This would be DUMP_COMPONENT_ACL for from-initdb casts, but they do not
1583  * support ACLs currently.
1584  */
1585  if (cast->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1586  cast->dobj.dump = DUMP_COMPONENT_NONE;
1587  else
1588  cast->dobj.dump = fout->dopt->include_everything ?
1589  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1590 }
1591 
1592 /*
1593  * selectDumpableProcLang: policy-setting subroutine
1594  * Mark a procedural language as to be dumped or not
1595  *
1596  * Procedural languages do not belong to any particular namespace. To
1597  * identify built-in languages, we must resort to checking whether the
1598  * language's OID is in the range reserved for initdb.
1599  */
1600 static void
1601 selectDumpableProcLang(ProcLangInfo *plang, Archive *fout)
1602 {
1603  if (checkExtensionMembership(&plang->dobj, fout))
1604  return; /* extension membership overrides all else */
1605 
1606  /*
1607  * Only include procedural languages when we are dumping everything.
1608  *
1609  * For from-initdb procedural languages, only include ACLs, as we do for
1610  * the pg_catalog namespace. We need this because procedural languages do
1611  * not live in any namespace.
1612  */
1613  if (!fout->dopt->include_everything)
1614  plang->dobj.dump = DUMP_COMPONENT_NONE;
1615  else
1616  {
1617  if (plang->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1618  plang->dobj.dump = fout->remoteVersion < 90600 ?
1619  DUMP_COMPONENT_NONE : DUMP_COMPONENT_ACL;
1620  else
1621  plang->dobj.dump = DUMP_COMPONENT_ALL;
1622  }
1623 }
1624 
1625 /*
1626  * selectDumpableAccessMethod: policy-setting subroutine
1627  * Mark an access method as to be dumped or not
1628  *
1629  * Access methods do not belong to any particular namespace. To identify
1630  * built-in access methods, we must resort to checking whether the
1631  * method's OID is in the range reserved for initdb.
1632  */
1633 static void
1634 selectDumpableAccessMethod(AccessMethodInfo *method, Archive *fout)
1635 {
1636  if (checkExtensionMembership(&method->dobj, fout))
1637  return; /* extension membership overrides all else */
1638 
1639  /*
1640  * This would be DUMP_COMPONENT_ACL for from-initdb access methods, but
1641  * they do not support ACLs currently.
1642  */
1643  if (method->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1644  method->dobj.dump = DUMP_COMPONENT_NONE;
1645  else
1646  method->dobj.dump = fout->dopt->include_everything ?
1647  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1648 }
1649 
1650 /*
1651  * selectDumpableExtension: policy-setting subroutine
1652  * Mark an extension as to be dumped or not
1653  *
1654  * Built-in extensions should be skipped except for checking ACLs, since we
1655  * assume those will already be installed in the target database. We identify
1656  * such extensions by their having OIDs in the range reserved for initdb.
1657  * We dump all user-added extensions by default, or none of them if
1658  * include_everything is false (i.e., a --schema or --table switch was given).
1659  */
1660 static void
1661 selectDumpableExtension(ExtensionInfo *extinfo, DumpOptions *dopt)
1662 {
1663  /*
1664  * Use DUMP_COMPONENT_ACL for built-in extensions, to allow users to
1665  * change permissions on their member objects, if they wish to, and have
1666  * those changes preserved.
1667  */
1668  if (extinfo->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1669  extinfo->dobj.dump = extinfo->dobj.dump_contains = DUMP_COMPONENT_ACL;
1670  else
1671  extinfo->dobj.dump = extinfo->dobj.dump_contains =
1672  dopt->include_everything ? DUMP_COMPONENT_ALL :
1673  DUMP_COMPONENT_NONE;
1674 }
1675 
1676 /*
1677  * selectDumpablePublicationTable: policy-setting subroutine
1678  * Mark a publication table as to be dumped or not
1679  *
1680  * Publication tables have schemas, but those are ignored in decision making,
1681  * because publications are only dumped when we are dumping everything.
1682  */
1683 static void
1684 selectDumpablePublicationTable(DumpableObject *dobj, Archive *fout)
1685 {
1686  if (checkExtensionMembership(dobj, fout))
1687  return; /* extension membership overrides all else */
1688 
1689  dobj->dump = fout->dopt->include_everything ?
1690  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1691 }
1692 
1693 /*
1694  * selectDumpableObject: policy-setting subroutine
1695  * Mark a generic dumpable object as to be dumped or not
1696  *
1697  * Use this only for object types without a special-case routine above.
1698  */
1699 static void
1700 selectDumpableObject(DumpableObject *dobj, Archive *fout)
1701 {
1702  if (checkExtensionMembership(dobj, fout))
1703  return; /* extension membership overrides all else */
1704 
1705  /*
1706  * Default policy is to dump if parent namespace is dumpable, or for
1707  * non-namespace-associated items, dump if we're dumping "everything".
1708  */
1709  if (dobj->namespace)
1710  dobj->dump = dobj->namespace->dobj.dump_contains;
1711  else
1712  dobj->dump = fout->dopt->include_everything ?
1713  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1714 }
1715 
1716 /*
1717  * Dump a table's contents for loading using the COPY command
1718  * - this routine is called by the Archiver when it wants the table
1719  * to be dumped.
1720  */
1721 
1722 static int
1723 dumpTableData_copy(Archive *fout, void *dcontext)
1724 {
1725  TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
1726  TableInfo *tbinfo = tdinfo->tdtable;
1727  const char *classname = tbinfo->dobj.name;
1728  PQExpBuffer q = createPQExpBuffer();
1729 
1730  /*
1731  * Note: can't use getThreadLocalPQExpBuffer() here, we're calling fmtId
1732  * which uses it already.
1733  */
1734  PQExpBuffer clistBuf = createPQExpBuffer();
1735  PGconn *conn = GetConnection(fout);
1736  PGresult *res;
1737  int ret;
1738  char *copybuf;
1739  const char *column_list;
1740 
1741  if (g_verbose)
1742  write_msg(NULL, "dumping contents of table \"%s.%s\"\n",
1743  tbinfo->dobj.namespace->dobj.name, classname);
1744 
1745  /*
1746  * Specify the column list explicitly so that we have no possibility of
1747  * retrieving data in the wrong column order. (The default column
1748  * ordering of COPY will not be what we want in certain corner cases
1749  * involving ADD COLUMN and inheritance.)
1750  */
1751  column_list = fmtCopyColumnList(tbinfo, clistBuf);
1752 
1753  if (tdinfo->filtercond)
1754  {
1755  /* Note: this syntax is only supported in 8.2 and up */
1756  appendPQExpBufferStr(q, "COPY (SELECT ");
1757  /* klugery to get rid of parens in column list */
1758  if (strlen(column_list) > 2)
1759  {
1760  appendPQExpBufferStr(q, column_list + 1);
1761  q->data[q->len - 1] = ' ';
1762  }
1763  else
1764  appendPQExpBufferStr(q, "* ");
1765  appendPQExpBuffer(q, "FROM %s %s) TO stdout;",
1766  fmtQualifiedDumpable(tbinfo),
1767  tdinfo->filtercond);
1768  }
1769  else
1770  {
1771  appendPQExpBuffer(q, "COPY %s %s TO stdout;",
1772  fmtQualifiedDumpable(tbinfo),
1773  column_list);
1774  }
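 /*
  * Illustrative note (not part of pg_dump.c): for a hypothetical table
  * public.t(a, b), the two branches above produce commands roughly like
  *     COPY (SELECT "a", "b" FROM public.t WHERE a > 0) TO stdout;
  *     COPY public.t ("a", "b") TO stdout;
  * i.e. the filtered form strips the parentheses from the column list so
  * it can be spliced into a SELECT list.
  */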
1775  res = ExecuteSqlQuery(fout, q->data, PGRES_COPY_OUT);
1776  PQclear(res);
1777  destroyPQExpBuffer(clistBuf);
1778 
1779  for (;;)
1780  {
1781  ret = PQgetCopyData(conn, &copybuf, 0);
1782 
1783  if (ret < 0)
1784  break; /* done or error */
1785 
1786  if (copybuf)
1787  {
1788  WriteData(fout, copybuf, ret);
1789  PQfreemem(copybuf);
1790  }
1791 
1792  /* ----------
1793  * THROTTLE:
1794  *
1795  * There was considerable discussion in late July, 2000 regarding
1796  * slowing down pg_dump when backing up large tables. Users with both
1797  * slow & fast (multi-processor) machines experienced performance
1798  * degradation when doing a backup.
1799  *
1800  * Initial attempts based on sleeping for a number of ms for each ms
1801  * of work were deemed too complex, then a simple 'sleep in each loop'
1802  * implementation was suggested. The latter failed because the loop
1803  * was too tight. Finally, the following was implemented:
1804  *
1805  * If throttle is non-zero, then
1806  * See how long since the last sleep.
1807  * Work out how long to sleep (based on ratio).
1808  * If sleep is more than 100ms, then
1809  * sleep
1810  * reset timer
1811  * EndIf
1812  * EndIf
1813  *
1814  * where the throttle value was the number of ms to sleep per ms of
1815  * work. The calculation was done in each loop.
1816  *
1817  * Most of the hard work is done in the backend, and this solution
1818  * still did not work particularly well: on slow machines the ratio
1819  * was 50:1, on medium-paced machines it was 1:1, and on fast
1820  * multi-processor machines it had little or no effect, for reasons
1821  * that were unclear.
1822  *
1823  * Further discussion ensued, and the proposal was dropped.
1824  *
1825  * For those people who want this feature, it can be implemented using
1826  * gettimeofday in each loop, calculating the time since last sleep,
1827  * multiplying that by the sleep ratio, then if the result is more
1828  * than a preset 'minimum sleep time' (say 100ms), call the 'select'
1829  * function to sleep for a subsecond period, i.e.:
1830  *
1831  * select(0, NULL, NULL, NULL, &tvi);
1832  *
1833  * This will return after the interval specified in the structure tvi.
1834  * Finally, call gettimeofday again to save the 'last sleep time'.
1835  * ----------
1836  */
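 /*
  * A minimal sketch of the gettimeofday()/select() approach described
  * above, kept disabled; it is not part of pg_dump.c. "throttle_ratio"
  * and the static "last_sleep" timestamp are hypothetical and would need
  * to be defined (and <sys/time.h> included) before enabling this.
  */
#ifdef NOT_USED
 if (throttle_ratio > 0)
 {
  struct timeval now;
  long  work_ms;
  long  sleep_ms;

  gettimeofday(&now, NULL);
  work_ms = (now.tv_sec - last_sleep.tv_sec) * 1000 +
   (now.tv_usec - last_sleep.tv_usec) / 1000;
  sleep_ms = work_ms * throttle_ratio;
  if (sleep_ms >= 100)   /* minimum sleep time */
  {
   struct timeval tvi;

   tvi.tv_sec = sleep_ms / 1000;
   tvi.tv_usec = (sleep_ms % 1000) * 1000;
   select(0, NULL, NULL, NULL, &tvi); /* sleep for the interval */
   gettimeofday(&last_sleep, NULL);  /* reset timer */
  }
 }
#endif       /* NOT_USED */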
1837  }
1838  archprintf(fout, "\\.\n\n\n");
1839 
1840  if (ret == -2)
1841  {
1842  /* copy data transfer failed */
1843  write_msg(NULL, "Dumping the contents of table \"%s\" failed: PQgetCopyData() failed.\n", classname);
1844  write_msg(NULL, "Error message from server: %s", PQerrorMessage(conn));
1845  write_msg(NULL, "The command was: %s\n", q->data);
1846  exit_nicely(1);
1847  }
1848 
1849  /* Check command status and return to normal libpq state */
1850  res = PQgetResult(conn);
1851  if (PQresultStatus(res) != PGRES_COMMAND_OK)
1852  {
1853  write_msg(NULL, "Dumping the contents of table \"%s\" failed: PQgetResult() failed.\n", classname);
1854  write_msg(NULL, "Error message from server: %s", PQerrorMessage(conn));
1855  write_msg(NULL, "The command was: %s\n", q->data);
1856  exit_nicely(1);
1857  }
1858  PQclear(res);
1859 
1860  /* Do this to ensure we've pumped libpq back to idle state */
1861  if (PQgetResult(conn) != NULL)
1862  write_msg(NULL, "WARNING: unexpected extra results during COPY of table \"%s\"\n",
1863  classname);
1864 
1865  destroyPQExpBuffer(q);
1866  return 1;
1867 }
1868 
1869 /*
1870  * Dump table data using INSERT commands.
1871  *
1872  * Caution: when we restore from an archive file direct to database, the
1873  * INSERT commands emitted by this function have to be parsed by
1874  * pg_backup_db.c's ExecuteSimpleCommands(), which will not handle comments,
1875  * E'' strings, or dollar-quoted strings. So don't emit anything like that.
1876  */
1877 static int
1878 dumpTableData_insert(Archive *fout, void *dcontext)
1879 {
1880  TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
1881  TableInfo *tbinfo = tdinfo->tdtable;
1882  DumpOptions *dopt = fout->dopt;
1883  PQExpBuffer q = createPQExpBuffer();
1884  PQExpBuffer insertStmt = NULL;
1885  PGresult *res;
1886  int tuple;
1887  int nfields;
1888  int field;
1889 
1890  appendPQExpBuffer(q, "DECLARE _pg_dump_cursor CURSOR FOR "
1891  "SELECT * FROM ONLY %s",
1892  fmtQualifiedDumpable(tbinfo));
1893  if (tdinfo->filtercond)
1894  appendPQExpBuffer(q, " %s", tdinfo->filtercond);
1895 
1896  ExecuteSqlStatement(fout, q->data);
1897 
1898  while (1)
1899  {
1900  res = ExecuteSqlQuery(fout, "FETCH 100 FROM _pg_dump_cursor",
1901  PGRES_TUPLES_OK);
1902  nfields = PQnfields(res);
1903  for (tuple = 0; tuple < PQntuples(res); tuple++)
1904  {
1905  /*
1906  * First time through, we build as much of the INSERT statement as
1907  * possible in "insertStmt", which we can then just print for each
1908  * line. If the table happens to have zero columns then this will
1909  * be a complete statement, otherwise it will end in "VALUES(" and
1910  * be ready to have the row's column values appended.
1911  */
1912  if (insertStmt == NULL)
1913  {
1914  TableInfo *targettab;
1915 
1916  insertStmt = createPQExpBuffer();
1917 
1918  /*
1919  * When load-via-partition-root is set, get the root table
1920  * name for the partition table, so that we can reload data
1921  * through the root table.
1922  */
1923  if (dopt->load_via_partition_root && tbinfo->ispartition)
1924  targettab = getRootTableInfo(tbinfo);
1925  else
1926  targettab = tbinfo;
1927 
1928  appendPQExpBuffer(insertStmt, "INSERT INTO %s ",
1929  fmtQualifiedDumpable(targettab));
1930 
1931  /* corner case for zero-column table */
1932  if (nfields == 0)
1933  {
1934  appendPQExpBufferStr(insertStmt, "DEFAULT VALUES;\n");
1935  }
1936  else
1937  {
1938  /* append the list of column names if required */
1939  if (dopt->column_inserts)
1940  {
1941  appendPQExpBufferChar(insertStmt, '(');
1942  for (field = 0; field < nfields; field++)
1943  {
1944  if (field > 0)
1945  appendPQExpBufferStr(insertStmt, ", ");
1946  appendPQExpBufferStr(insertStmt,
1947  fmtId(PQfname(res, field)));
1948  }
1949  appendPQExpBufferStr(insertStmt, ") ");
1950  }
1951 
1952  if (tbinfo->needs_override)
1953  appendPQExpBufferStr(insertStmt, "OVERRIDING SYSTEM VALUE ");
1954 
1955  appendPQExpBufferStr(insertStmt, "VALUES (");
1956  }
1957  }
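 /*
  * Illustrative note (not part of pg_dump.c): for a hypothetical table
  * public.t(id, val), the prefix built above would look like one of
  *     INSERT INTO public.t VALUES (
  *     INSERT INTO public.t (id, val) VALUES (
  *     INSERT INTO public.t (id, val) OVERRIDING SYSTEM VALUE VALUES (
  *     INSERT INTO public.t DEFAULT VALUES;
  * depending on --column-inserts, identity columns, and the zero-column
  * corner case.
  */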
1958 
1959  archputs(insertStmt->data, fout);
1960 
1961  /* if it is zero-column table then we're done */
1962  if (nfields == 0)
1963  continue;
1964 
1965  for (field = 0; field < nfields; field++)
1966  {
1967  if (field > 0)
1968  archputs(", ", fout);
1969  if (PQgetisnull(res, tuple, field))
1970  {
1971  archputs("NULL", fout);
1972  continue;
1973  }
1974 
1975  /* XXX This code is partially duplicated in ruleutils.c */
1976  switch (PQftype(res, field))
1977  {
1978  case INT2OID:
1979  case INT4OID:
1980  case INT8OID:
1981  case OIDOID:
1982  case FLOAT4OID:
1983  case FLOAT8OID:
1984  case NUMERICOID:
1985  {
1986  /*
1987  * These types are printed without quotes unless
1988  * they contain values that aren't accepted by the
1989  * scanner unquoted (e.g., 'NaN'). Note that
1990  * strtod() and friends might accept NaN, so we
1991  * can't use that to test.
1992  *
1993  * In reality we only need to defend against
1994  * infinity and NaN, so we need not get too crazy
1995  * about pattern matching here.
1996  */
1997  const char *s = PQgetvalue(res, tuple, field);
1998 
1999  if (strspn(s, "0123456789 +-eE.") == strlen(s))
2000  archputs(s, fout);
2001  else
2002  archprintf(fout, "'%s'", s);
2003  }
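 /*
  * Example (illustrative, not part of pg_dump.c): values such as
  * 42, -1.5e-3 or 100000 pass the strspn() test above and are
  * emitted bare, while NaN, Infinity and -Infinity contain
  * characters outside that set and come out quoted, e.g. 'NaN'.
  */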
2004  break;
2005 
2006  case BITOID:
2007  case VARBITOID:
2008  archprintf(fout, "B'%s'",
2009  PQgetvalue(res, tuple, field));
2010  break;
2011 
2012  case BOOLOID:
2013  if (strcmp(PQgetvalue(res, tuple, field), "t") == 0)
2014  archputs("true", fout);
2015  else
2016  archputs("false", fout);
2017  break;
2018 
2019  default:
2020  /* All other types are printed as string literals. */
2021  resetPQExpBuffer(q);
2022  appendStringLiteralAH(q,
2023  PQgetvalue(res, tuple, field),
2024  fout);
2025  archputs(q->data, fout);
2026  break;
2027  }
2028  }
2029 
2030  if (!dopt->do_nothing)
2031  archputs(");\n", fout);
2032  else
2033  archputs(") ON CONFLICT DO NOTHING;\n", fout);
2034  }
2035 
2036  if (PQntuples(res) <= 0)
2037  {
2038  PQclear(res);
2039  break;
2040  }
2041  PQclear(res);
2042  }
2043 
2044  archputs("\n\n", fout);
2045 
2046  ExecuteSqlStatement(fout, "CLOSE _pg_dump_cursor");
2047 
2048  destroyPQExpBuffer(q);
2049  if (insertStmt != NULL)
2050  destroyPQExpBuffer(insertStmt);
2051 
2052  return 1;
2053 }
2054 
2055 /*
2056  * getRootTableInfo:
2057  * get the root TableInfo for the given partition table.
2058  */
2059 static TableInfo *
2060 getRootTableInfo(TableInfo *tbinfo)
2061 {
2062  TableInfo *parentTbinfo;
2063 
2064  Assert(tbinfo->ispartition);
2065  Assert(tbinfo->numParents == 1);
2066 
2067  parentTbinfo = tbinfo->parents[0];
2068  while (parentTbinfo->ispartition)
2069  {
2070  Assert(parentTbinfo->numParents == 1);
2071  parentTbinfo = parentTbinfo->parents[0];
2072  }
2073 
2074  return parentTbinfo;
2075 }
2076 
2077 /*
2078  * dumpTableData -
2079  * dump the contents of a single table
2080  *
2081  * Actually, this just makes an ArchiveEntry for the table contents.
2082  */
2083 static void
2084 dumpTableData(Archive *fout, TableDataInfo *tdinfo)
2085 {
2086  DumpOptions *dopt = fout->dopt;
2087  TableInfo *tbinfo = tdinfo->tdtable;
2088  PQExpBuffer copyBuf = createPQExpBuffer();
2089  PQExpBuffer clistBuf = createPQExpBuffer();
2090  DataDumperPtr dumpFn;
2091  char *copyStmt;
2092  const char *copyFrom;
2093 
2094  if (!dopt->dump_inserts)
2095  {
2096  /* Dump/restore using COPY */
2097  dumpFn = dumpTableData_copy;
2098 
2099  /*
2100  * When load-via-partition-root is set, get the root table name for
2101  * the partition table, so that we can reload data through the root
2102  * table.
2103  */
2104  if (dopt->load_via_partition_root && tbinfo->ispartition)
2105  {
2106  TableInfo *parentTbinfo;
2107 
2108  parentTbinfo = getRootTableInfo(tbinfo);
2109  copyFrom = fmtQualifiedDumpable(parentTbinfo);
2110  }
2111  else
2112  copyFrom = fmtQualifiedDumpable(tbinfo);
2113 
2114  /* must use two steps here because fmtId is nonreentrant */
2115  appendPQExpBuffer(copyBuf, "COPY %s ",
2116  copyFrom);
2117  appendPQExpBuffer(copyBuf, "%s FROM stdin;\n",
2118  fmtCopyColumnList(tbinfo, clistBuf));
2119  copyStmt = copyBuf->data;
2120  }
2121  else
2122  {
2123  /* Restore using INSERT */
2124  dumpFn = dumpTableData_insert;
2125  copyStmt = NULL;
2126  }
2127 
2128  /*
2129  * Note: although the TableDataInfo is a full DumpableObject, we treat its
2130  * dependency on its table as "special" and pass it to ArchiveEntry now.
2131  * See comments for BuildArchiveDependencies.
2132  */
2133  if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2134  {
2135  TocEntry *te;
2136 
2137  te = ArchiveEntry(fout, tdinfo->dobj.catId, tdinfo->dobj.dumpId,
2138  tbinfo->dobj.name, tbinfo->dobj.namespace->dobj.name,
2139  NULL, tbinfo->rolname,
2140  "TABLE DATA", SECTION_DATA,
2141  "", "", copyStmt,
2142  &(tbinfo->dobj.dumpId), 1,
2143  dumpFn, tdinfo);
2144 
2145  /*
2146  * Set the TocEntry's dataLength in case we are doing a parallel dump
2147  * and want to order dump jobs by table size. We choose to measure
2148  * dataLength in table pages during dump, so no scaling is needed.
2149  * However, relpages is declared as "integer" in pg_class, and hence
2150  * also in TableInfo, but it's really BlockNumber a/k/a unsigned int.
2151  * Cast so that we get the right interpretation of table sizes
2152  * exceeding INT_MAX pages.
2153  */
2154  te->dataLength = (BlockNumber) tbinfo->relpages;
2155  }
2156 
2157  destroyPQExpBuffer(copyBuf);
2158  destroyPQExpBuffer(clistBuf);
2159 }
2160 
2161 /*
2162  * refreshMatViewData -
2163  * load or refresh the contents of a single materialized view
2164  *
2165  * Actually, this just makes an ArchiveEntry for the REFRESH MATERIALIZED VIEW
2166  * statement.
2167  */
2168 static void
2169 refreshMatViewData(Archive *fout, TableDataInfo *tdinfo)
2170 {
2171  TableInfo *tbinfo = tdinfo->tdtable;
2172  PQExpBuffer q;
2173 
2174  /* If the materialized view is not flagged as populated, skip this. */
2175  if (!tbinfo->relispopulated)
2176  return;
2177 
2178  q = createPQExpBuffer();
2179 
2180  appendPQExpBuffer(q, "REFRESH MATERIALIZED VIEW %s;\n",
2181  fmtQualifiedDumpable(tbinfo));
2182 
2183  if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2184  ArchiveEntry(fout,
2185  tdinfo->dobj.catId, /* catalog ID */
2186  tdinfo->dobj.dumpId, /* dump ID */
2187  tbinfo->dobj.name, /* Name */
2188  tbinfo->dobj.namespace->dobj.name, /* Namespace */
2189  NULL, /* Tablespace */
2190  tbinfo->rolname, /* Owner */
2191  "MATERIALIZED VIEW DATA", /* Desc */
2192  SECTION_POST_DATA, /* Section */
2193  q->data, /* Create */
2194  "", /* Del */
2195  NULL, /* Copy */
2196  tdinfo->dobj.dependencies, /* Deps */
2197  tdinfo->dobj.nDeps, /* # Deps */
2198  NULL, /* Dumper */
2199  NULL); /* Dumper Arg */
2200 
2201  destroyPQExpBuffer(q);
2202 }
2203 
2204 /*
2205  * getTableData -
2206  * set up dumpable objects representing the contents of tables
2207  */
2208 static void
2209 getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, char relkind)
2210 {
2211  int i;
2212 
2213  for (i = 0; i < numTables; i++)
2214  {
2215  if (tblinfo[i].dobj.dump & DUMP_COMPONENT_DATA &&
2216  (!relkind || tblinfo[i].relkind == relkind))
2217  makeTableDataInfo(dopt, &(tblinfo[i]));
2218  }
2219 }
2220 
2221 /*
2222  * Make a dumpable object for the data of this specific table
2223  *
2224  * Note: we make a TableDataInfo if and only if we are going to dump the
2225  * table data; the "dump" flag in such objects isn't used.
2226  */
2227 static void
2228 makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo)
2229 {
2230  TableDataInfo *tdinfo;
2231 
2232  /*
2233  * Nothing to do if we already decided to dump the table's data. This
2234  * will happen for "config" tables.
2235  */
2236  if (tbinfo->dataObj != NULL)
2237  return;
2238 
2239  /* Skip VIEWs (no data to dump) */
2240  if (tbinfo->relkind == RELKIND_VIEW)
2241  return;
2242  /* Skip FOREIGN TABLEs (no data to dump) */
2243  if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2244  return;
2245  /* Skip partitioned tables (data in partitions) */
2246  if (tbinfo->relkind == RELKIND_PARTITIONED_TABLE)
2247  return;
2248 
2249  /* Don't dump data in unlogged tables, if so requested */
2250  if (tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED &&
2251  dopt->no_unlogged_table_data)
2252  return;
2253 
2254  /* Check that the data is not explicitly excluded */
2255  if (simple_oid_list_member(&tabledata_exclude_oids,
2256  tbinfo->dobj.catId.oid))
2257  return;
2258 
2259  /* OK, let's dump it */
2260  tdinfo = (TableDataInfo *) pg_malloc(sizeof(TableDataInfo));
2261 
2262  if (tbinfo->relkind == RELKIND_MATVIEW)
2263  tdinfo->dobj.objType = DO_REFRESH_MATVIEW;
2264  else if (tbinfo->relkind == RELKIND_SEQUENCE)
2265  tdinfo->dobj.objType = DO_SEQUENCE_SET;
2266  else
2267  tdinfo->dobj.objType = DO_TABLE_DATA;
2268 
2269  /*
2270  * Note: use tableoid 0 so that this object won't be mistaken for
2271  * something that pg_depend entries apply to.
2272  */
2273  tdinfo->dobj.catId.tableoid = 0;
2274  tdinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
2275  AssignDumpId(&tdinfo->dobj);
2276  tdinfo->dobj.name = tbinfo->dobj.name;
2277  tdinfo->dobj.namespace = tbinfo->dobj.namespace;
2278  tdinfo->tdtable = tbinfo;
2279  tdinfo->filtercond = NULL; /* might get set later */
2280  addObjectDependency(&tdinfo->dobj, tbinfo->dobj.dumpId);
2281 
2282  tbinfo->dataObj = tdinfo;
2283 }
2284 
2285 /*
2286  * The refresh for a materialized view must be dependent on the refresh for
2287  * any materialized view that this one is dependent on.
2288  *
2289  * This must be called after all the objects are created, but before they are
2290  * sorted.
2291  */
2292 static void
2293 buildMatViewRefreshDependencies(Archive *fout)
2294 {
2295  PQExpBuffer query;
2296  PGresult *res;
2297  int ntups,
2298  i;
2299  int i_classid,
2300  i_objid,
2301  i_refobjid;
2302 
2303  /* No Mat Views before 9.3. */
2304  if (fout->remoteVersion < 90300)
2305  return;
2306 
2307  query = createPQExpBuffer();
2308 
2309  appendPQExpBufferStr(query, "WITH RECURSIVE w AS "
2310  "( "
2311  "SELECT d1.objid, d2.refobjid, c2.relkind AS refrelkind "
2312  "FROM pg_depend d1 "
2313  "JOIN pg_class c1 ON c1.oid = d1.objid "
2314  "AND c1.relkind = " CppAsString2(RELKIND_MATVIEW)
2315  " JOIN pg_rewrite r1 ON r1.ev_class = d1.objid "
2316  "JOIN pg_depend d2 ON d2.classid = 'pg_rewrite'::regclass "
2317  "AND d2.objid = r1.oid "
2318  "AND d2.refobjid <> d1.objid "
2319  "JOIN pg_class c2 ON c2.oid = d2.refobjid "
2320  "AND c2.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
2321  CppAsString2(RELKIND_VIEW) ") "
2322  "WHERE d1.classid = 'pg_class'::regclass "
2323  "UNION "
2324  "SELECT w.objid, d3.refobjid, c3.relkind "
2325  "FROM w "
2326  "JOIN pg_rewrite r3 ON r3.ev_class = w.refobjid "
2327  "JOIN pg_depend d3 ON d3.classid = 'pg_rewrite'::regclass "
2328  "AND d3.objid = r3.oid "
2329  "AND d3.refobjid <> w.refobjid "
2330  "JOIN pg_class c3 ON c3.oid = d3.refobjid "
2331  "AND c3.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
2332  CppAsString2(RELKIND_VIEW) ") "
2333  ") "
2334  "SELECT 'pg_class'::regclass::oid AS classid, objid, refobjid "
2335  "FROM w "
2336  "WHERE refrelkind = " CppAsString2(RELKIND_MATVIEW));
2337 
2338  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
2339 
2340  ntups = PQntuples(res);
2341 
2342  i_classid = PQfnumber(res, "classid");
2343  i_objid = PQfnumber(res, "objid");
2344  i_refobjid = PQfnumber(res, "refobjid");
2345 
2346  for (i = 0; i < ntups; i++)
2347  {
2348  CatalogId objId;
2349  CatalogId refobjId;
2350  DumpableObject *dobj;
2351  DumpableObject *refdobj;
2352  TableInfo *tbinfo;
2353  TableInfo *reftbinfo;
2354 
2355  objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
2356  objId.oid = atooid(PQgetvalue(res, i, i_objid));
2357  refobjId.tableoid = objId.tableoid;
2358  refobjId.oid = atooid(PQgetvalue(res, i, i_refobjid));
2359 
2360  dobj = findObjectByCatalogId(objId);
2361  if (dobj == NULL)
2362  continue;
2363 
2364  Assert(dobj->objType == DO_TABLE);
2365  tbinfo = (TableInfo *) dobj;
2366  Assert(tbinfo->relkind == RELKIND_MATVIEW);
2367  dobj = (DumpableObject *) tbinfo->dataObj;
2368  if (dobj == NULL)
2369  continue;
2370  Assert(dobj->objType == DO_REFRESH_MATVIEW);
2371 
2372  refdobj = findObjectByCatalogId(refobjId);
2373  if (refdobj == NULL)
2374  continue;
2375 
2376  Assert(refdobj->objType == DO_TABLE);
2377  reftbinfo = (TableInfo *) refdobj;
2378  Assert(reftbinfo->relkind == RELKIND_MATVIEW);
2379  refdobj = (DumpableObject *) reftbinfo->dataObj;
2380  if (refdobj == NULL)
2381  continue;
2382  Assert(refdobj->objType == DO_REFRESH_MATVIEW);
2383 
2384  addObjectDependency(dobj, refdobj->dumpId);
2385 
2386  if (!reftbinfo->relispopulated)
2387  tbinfo->relispopulated = false;
2388  }
2389 
2390  PQclear(res);
2391 
2392  destroyPQExpBuffer(query);
2393 }
2394 
2395 /*
2396  * getTableDataFKConstraints -
2397  * add dump-order dependencies reflecting foreign key constraints
2398  *
2399  * This code is executed only in a data-only dump --- in schema+data dumps
2400  * we handle foreign key issues by not creating the FK constraints until
2401  * after the data is loaded. In a data-only dump, however, we want to
2402  * order the table data objects in such a way that a table's referenced
2403  * tables are restored first. (In the presence of circular references or
2404  * self-references this may be impossible; we'll detect and complain about
2405  * that during the dependency sorting step.)
2406  */
2407 static void
2408 getTableDataFKConstraints(void)
2409 {
2410  DumpableObject **dobjs;
2411  int numObjs;
2412  int i;
2413 
2414  /* Search through all the dumpable objects for FK constraints */
2415  getDumpableObjects(&dobjs, &numObjs);
2416  for (i = 0; i < numObjs; i++)
2417  {
2418  if (dobjs[i]->objType == DO_FK_CONSTRAINT)
2419  {
2420  ConstraintInfo *cinfo = (ConstraintInfo *) dobjs[i];
2421  TableInfo *ftable;
2422 
2423  /* Not interesting unless both tables are to be dumped */
2424  if (cinfo->contable == NULL ||
2425  cinfo->contable->dataObj == NULL)
2426  continue;
2427  ftable = findTableByOid(cinfo->confrelid);
2428  if (ftable == NULL ||
2429  ftable->dataObj == NULL)
2430  continue;
2431 
2432  /*
2433  * Okay, make referencing table's TABLE_DATA object depend on the
2434  * referenced table's TABLE_DATA object.
2435  */
2436  addObjectDependency(&cinfo->contable->dataObj->dobj,
2437  ftable->dataObj->dobj.dumpId);
2438  }
2439  }
2440  free(dobjs);
2441 }
2442 
2443 
2444 /*
2445  * guessConstraintInheritance:
2446  * In pre-8.4 databases, we can't tell for certain which constraints
2447  * are inherited. We assume a CHECK constraint is inherited if its name
2448  * matches the name of any constraint in the parent. Originally this code
2449  * tried to compare the expression texts, but that can fail for various
2450  * reasons --- for example, if the parent and child tables are in different
2451  * schemas, reverse-listing of function calls may produce different text
2452  * (schema-qualified or not) depending on search path.
2453  *
2454  * In 8.4 and up we can rely on the conislocal field to decide which
2455  * constraints must be dumped; much safer.
2456  *
2457  * This function assumes all conislocal flags were initialized to true.
2458  * It clears the flag on anything that seems to be inherited.
2459  */
2460 static void
2461 guessConstraintInheritance(TableInfo *tblinfo, int numTables)
2462 {
2463  int i,
2464  j,
2465  k;
2466 
2467  for (i = 0; i < numTables; i++)
2468  {
2469  TableInfo *tbinfo = &(tblinfo[i]);
2470  int numParents;
2471  TableInfo **parents;
2472  TableInfo *parent;
2473 
2474  /* Sequences and views never have parents */
2475  if (tbinfo->relkind == RELKIND_SEQUENCE ||
2476  tbinfo->relkind == RELKIND_VIEW)
2477  continue;
2478 
2479  /* Don't bother computing anything for non-target tables, either */
2480  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
2481  continue;
2482 
2483  numParents = tbinfo->numParents;
2484  parents = tbinfo->parents;
2485 
2486  if (numParents == 0)
2487  continue; /* nothing to see here, move along */
2488 
2489  /* scan for inherited CHECK constraints */
2490  for (j = 0; j < tbinfo->ncheck; j++)
2491  {
2492  ConstraintInfo *constr;
2493 
2494  constr = &(tbinfo->checkexprs[j]);
2495 
2496  for (k = 0; k < numParents; k++)
2497  {
2498  int l;
2499 
2500  parent = parents[k];
2501  for (l = 0; l < parent->ncheck; l++)
2502  {
2503  ConstraintInfo *pconstr = &(parent->checkexprs[l]);
2504 
2505  if (strcmp(pconstr->dobj.name, constr->dobj.name) == 0)
2506  {
2507  constr->conislocal = false;
2508  break;
2509  }
2510  }
2511  if (!constr->conislocal)
2512  break;
2513  }
2514  }
2515  }
2516 }
2517 
2518 
2519 /*
2520  * dumpDatabase:
2521  * dump the database definition
2522  */
2523 static void
2524 dumpDatabase(Archive *fout)
2525 {
2526  DumpOptions *dopt = fout->dopt;
2527  PQExpBuffer dbQry = createPQExpBuffer();
2528  PQExpBuffer delQry = createPQExpBuffer();
2529  PQExpBuffer creaQry = createPQExpBuffer();
2530  PQExpBuffer labelq = createPQExpBuffer();
2531  PGconn *conn = GetConnection(fout);
2532  PGresult *res;
2533  int i_tableoid,
2534  i_oid,
2535  i_datname,
2536  i_dba,
2537  i_encoding,
2538  i_collate,
2539  i_ctype,
2540  i_frozenxid,
2541  i_minmxid,
2542  i_datacl,
2543  i_rdatacl,
2544  i_datistemplate,
2545  i_datconnlimit,
2546  i_tablespace;
2547  CatalogId dbCatId;
2548  DumpId dbDumpId;
2549  const char *datname,
2550  *dba,
2551  *encoding,
2552  *collate,
2553  *ctype,
2554  *datacl,
2555  *rdatacl,
2556  *datistemplate,
2557  *datconnlimit,
2558  *tablespace;
2559  uint32 frozenxid,
2560  minmxid;
2561  char *qdatname;
2562 
2563  if (g_verbose)
2564  write_msg(NULL, "saving database definition\n");
2565 
2566  /* Fetch the database-level properties for this database */
2567  if (fout->remoteVersion >= 90600)
2568  {
2569  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2570  "(%s datdba) AS dba, "
2571  "pg_encoding_to_char(encoding) AS encoding, "
2572  "datcollate, datctype, datfrozenxid, datminmxid, "
2573  "(SELECT array_agg(acl ORDER BY acl::text COLLATE \"C\") FROM ( "
2574  " SELECT unnest(coalesce(datacl,acldefault('d',datdba))) AS acl "
2575  " EXCEPT SELECT unnest(acldefault('d',datdba))) as datacls)"
2576  " AS datacl, "
2577  "(SELECT array_agg(acl ORDER BY acl::text COLLATE \"C\") FROM ( "
2578  " SELECT unnest(acldefault('d',datdba)) AS acl "
2579  " EXCEPT SELECT unnest(coalesce(datacl,acldefault('d',datdba)))) as rdatacls)"
2580  " AS rdatacl, "
2581  "datistemplate, datconnlimit, "
2582  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2583  "shobj_description(oid, 'pg_database') AS description "
2584 
2585  "FROM pg_database "
2586  "WHERE datname = current_database()",
2587  username_subquery);
2588  }
2589  else if (fout->remoteVersion >= 90300)
2590  {
2591  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2592  "(%s datdba) AS dba, "
2593  "pg_encoding_to_char(encoding) AS encoding, "
2594  "datcollate, datctype, datfrozenxid, datminmxid, "
2595  "datacl, '' as rdatacl, datistemplate, datconnlimit, "
2596  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2597  "shobj_description(oid, 'pg_database') AS description "
2598 
2599  "FROM pg_database "
2600  "WHERE datname = current_database()",
2601  username_subquery);
2602  }
2603  else if (fout->remoteVersion >= 80400)
2604  {
2605  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2606  "(%s datdba) AS dba, "
2607  "pg_encoding_to_char(encoding) AS encoding, "
2608  "datcollate, datctype, datfrozenxid, 0 AS datminmxid, "
2609  "datacl, '' as rdatacl, datistemplate, datconnlimit, "
2610  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2611  "shobj_description(oid, 'pg_database') AS description "
2612 
2613  "FROM pg_database "
2614  "WHERE datname = current_database()",
2615  username_subquery);
2616  }
2617  else if (fout->remoteVersion >= 80200)
2618  {
2619  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2620  "(%s datdba) AS dba, "
2621  "pg_encoding_to_char(encoding) AS encoding, "
2622  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2623  "datacl, '' as rdatacl, datistemplate, datconnlimit, "
2624  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2625  "shobj_description(oid, 'pg_database') AS description "
2626 
2627  "FROM pg_database "
2628  "WHERE datname = current_database()",
2629  username_subquery);
2630  }
2631  else
2632  {
2633  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
2634  "(%s datdba) AS dba, "
2635  "pg_encoding_to_char(encoding) AS encoding, "
2636  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2637  "datacl, '' as rdatacl, datistemplate, "
2638  "-1 as datconnlimit, "
2639  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace "
2640  "FROM pg_database "
2641  "WHERE datname = current_database()",
2642  username_subquery);
2643  }
2644 
2645  res = ExecuteSqlQueryForSingleRow(fout, dbQry->data);
2646 
2647  i_tableoid = PQfnumber(res, "tableoid");
2648  i_oid = PQfnumber(res, "oid");
2649  i_datname = PQfnumber(res, "datname");
2650  i_dba = PQfnumber(res, "dba");
2651  i_encoding = PQfnumber(res, "encoding");
2652  i_collate = PQfnumber(res, "datcollate");
2653  i_ctype = PQfnumber(res, "datctype");
2654  i_frozenxid = PQfnumber(res, "datfrozenxid");
2655  i_minmxid = PQfnumber(res, "datminmxid");
2656  i_datacl = PQfnumber(res, "datacl");
2657  i_rdatacl = PQfnumber(res, "rdatacl");
2658  i_datistemplate = PQfnumber(res, "datistemplate");
2659  i_datconnlimit = PQfnumber(res, "datconnlimit");
2660  i_tablespace = PQfnumber(res, "tablespace");
2661 
2662  dbCatId.tableoid = atooid(PQgetvalue(res, 0, i_tableoid));
2663  dbCatId.oid = atooid(PQgetvalue(res, 0, i_oid));
2664  datname = PQgetvalue(res, 0, i_datname);
2665  dba = PQgetvalue(res, 0, i_dba);
2666  encoding = PQgetvalue(res, 0, i_encoding);
2667  collate = PQgetvalue(res, 0, i_collate);
2668  ctype = PQgetvalue(res, 0, i_ctype);
2669  frozenxid = atooid(PQgetvalue(res, 0, i_frozenxid));
2670  minmxid = atooid(PQgetvalue(res, 0, i_minmxid));
2671  datacl = PQgetvalue(res, 0, i_datacl);
2672  rdatacl = PQgetvalue(res, 0, i_rdatacl);
2673  datistemplate = PQgetvalue(res, 0, i_datistemplate);
2674  datconnlimit = PQgetvalue(res, 0, i_datconnlimit);
2675  tablespace = PQgetvalue(res, 0, i_tablespace);
2676 
2677  qdatname = pg_strdup(fmtId(datname));
2678 
2679  /*
2680  * Prepare the CREATE DATABASE command. We must specify encoding, locale,
2681  * and tablespace since those can't be altered later. Other DB properties
2682  * are left to the DATABASE PROPERTIES entry, so that they can be applied
2683  * after reconnecting to the target DB.
2684  */
2685  appendPQExpBuffer(creaQry, "CREATE DATABASE %s WITH TEMPLATE = template0",
2686  qdatname);
2687  if (strlen(encoding) > 0)
2688  {
2689  appendPQExpBufferStr(creaQry, " ENCODING = ");
2690  appendStringLiteralAH(creaQry, encoding, fout);
2691  }
2692  if (strlen(collate) > 0)
2693  {
2694  appendPQExpBufferStr(creaQry, " LC_COLLATE = ");
2695  appendStringLiteralAH(creaQry, collate, fout);
2696  }
2697  if (strlen(ctype) > 0)
2698  {
2699  appendPQExpBufferStr(creaQry, " LC_CTYPE = ");
2700  appendStringLiteralAH(creaQry, ctype, fout);
2701  }
2702 
2703  /*
2704  * Note: looking at dopt->outputNoTablespaces here is completely the wrong
2705  * thing; the decision whether to specify a tablespace should be left till
2706  * pg_restore, so that pg_restore --no-tablespaces applies. Ideally we'd
2707  * label the DATABASE entry with the tablespace and let the normal
2708  * tablespace selection logic work ... but CREATE DATABASE doesn't pay
2709  * attention to default_tablespace, so that won't work.
2710  */
2711  if (strlen(tablespace) > 0 && strcmp(tablespace, "pg_default") != 0 &&
2712  !dopt->outputNoTablespaces)
2713  appendPQExpBuffer(creaQry, " TABLESPACE = %s",
2714  fmtId(tablespace));
2715  appendPQExpBufferStr(creaQry, ";\n");
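 /*
  * Illustrative note (not part of pg_dump.c): for a hypothetical database
  * the assembled command might read
  *     CREATE DATABASE mydb WITH TEMPLATE = template0 ENCODING = 'UTF8'
  *         LC_COLLATE = 'en_US.UTF-8' LC_CTYPE = 'en_US.UTF-8';
  * with TABLESPACE appended only for a non-default tablespace.
  */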
2716 
2717  appendPQExpBuffer(delQry, "DROP DATABASE %s;\n",
2718  qdatname);
2719 
2720  dbDumpId = createDumpId();
2721 
2722  ArchiveEntry(fout,
2723  dbCatId, /* catalog ID */
2724  dbDumpId, /* dump ID */
2725  datname, /* Name */
2726  NULL, /* Namespace */
2727  NULL, /* Tablespace */
2728  dba, /* Owner */
2729  "DATABASE", /* Desc */
2730  SECTION_PRE_DATA, /* Section */
2731  creaQry->data, /* Create */
2732  delQry->data, /* Del */
2733  NULL, /* Copy */
2734  NULL, /* Deps */
2735  0, /* # Deps */
2736  NULL, /* Dumper */
2737  NULL); /* Dumper Arg */
2738 
2739  /* Compute correct tag for archive entry */
2740  appendPQExpBuffer(labelq, "DATABASE %s", qdatname);
2741 
2742  /* Dump DB comment if any */
2743  if (fout->remoteVersion >= 80200)
2744  {
2745  /*
2746  * 8.2 and up keep comments on shared objects in a shared table, so we
2747  * cannot use the dumpComment() code used for other database objects.
2748  * Be careful that the ArchiveEntry parameters match that function.
2749  */
2750  char *comment = PQgetvalue(res, 0, PQfnumber(res, "description"));
2751 
2752  if (comment && *comment && !dopt->no_comments)
2753  {
2754  resetPQExpBuffer(dbQry);
2755 
2756  /*
2757  * Note: this COMMENT ON DATABASE command will generate a warning
2758  * when it is loaded into a differently-named database.
2759  */
2760  appendPQExpBuffer(dbQry, "COMMENT ON DATABASE %s IS ", qdatname);
2761  appendStringLiteralAH(dbQry, comment, fout);
2762  appendPQExpBufferStr(dbQry, ";\n");
2763 
2764  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2765  labelq->data, NULL, NULL, dba,
2766  "COMMENT", SECTION_NONE,
2767  dbQry->data, "", NULL,
2768  &(dbDumpId), 1,
2769  NULL, NULL);
2770  }
2771  }
2772  else
2773  {
2774  dumpComment(fout, "DATABASE", qdatname, NULL, dba,
2775  dbCatId, 0, dbDumpId);
2776  }
2777 
2778  /* Dump DB security label, if enabled */
2779  if (!dopt->no_security_labels && fout->remoteVersion >= 90200)
2780  {
2781  PGresult *shres;
2782  PQExpBuffer seclabelQry;
2783 
2784  seclabelQry = createPQExpBuffer();
2785 
2786  buildShSecLabelQuery(conn, "pg_database", dbCatId.oid, seclabelQry);
2787  shres = ExecuteSqlQuery(fout, seclabelQry->data, PGRES_TUPLES_OK);
2788  resetPQExpBuffer(seclabelQry);
2789  emitShSecLabels(conn, shres, seclabelQry, "DATABASE", datname);
2790  if (seclabelQry->len > 0)
2791  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2792  labelq->data, NULL, NULL, dba,
2793  "SECURITY LABEL", SECTION_NONE,
2794  seclabelQry->data, "", NULL,
2795  &(dbDumpId), 1,
2796  NULL, NULL);
2797  destroyPQExpBuffer(seclabelQry);
2798  PQclear(shres);
2799  }
2800 
2801  /*
2802  * Dump ACL if any. Note that we do not support initial privileges
2803  * (pg_init_privs) on databases.
2804  */
2805  dumpACL(fout, dbCatId, dbDumpId, "DATABASE",
2806  qdatname, NULL, NULL,
2807  dba, datacl, rdatacl, "", "");
2808 
2809  /*
2810  * Now construct a DATABASE PROPERTIES archive entry to restore any
2811  * non-default database-level properties. (The reason this must be
2812  * separate is that we cannot put any additional commands into the TOC
2813  * entry that has CREATE DATABASE. pg_restore would execute such a group
2814  * in an implicit transaction block, and the backend won't allow CREATE
2815  * DATABASE in that context.)
2816  */
2817  resetPQExpBuffer(creaQry);
2818  resetPQExpBuffer(delQry);
2819 
2820  if (strlen(datconnlimit) > 0 && strcmp(datconnlimit, "-1") != 0)
2821  appendPQExpBuffer(creaQry, "ALTER DATABASE %s CONNECTION LIMIT = %s;\n",
2822  qdatname, datconnlimit);
2823 
2824  if (strcmp(datistemplate, "t") == 0)
2825  {
2826  appendPQExpBuffer(creaQry, "ALTER DATABASE %s IS_TEMPLATE = true;\n",
2827  qdatname);
2828 
2829  /*
2830  * The backend won't accept DROP DATABASE on a template database. We
2831  * can deal with that by removing the template marking before the DROP
2832  * gets issued. We'd prefer to use ALTER DATABASE IF EXISTS here, but
2833  * since no such command is currently supported, fake it with a direct
2834  * UPDATE on pg_database.
2835  */
2836  appendPQExpBufferStr(delQry, "UPDATE pg_catalog.pg_database "
2837  "SET datistemplate = false WHERE datname = ");
2838  appendStringLiteralAH(delQry, datname, fout);
2839  appendPQExpBufferStr(delQry, ";\n");
2840  }
2841 
2842  /* Add database-specific SET options */
2843  dumpDatabaseConfig(fout, creaQry, datname, dbCatId.oid);
2844 
2845  /*
2846  * We stick this binary-upgrade query into the DATABASE PROPERTIES archive
2847  * entry, too, for lack of a better place.
2848  */
2849  if (dopt->binary_upgrade)
2850  {
2851  appendPQExpBufferStr(creaQry, "\n-- For binary upgrade, set datfrozenxid and datminmxid.\n");
2852  appendPQExpBuffer(creaQry, "UPDATE pg_catalog.pg_database\n"
2853  "SET datfrozenxid = '%u', datminmxid = '%u'\n"
2854  "WHERE datname = ",
2855  frozenxid, minmxid);
2856  appendStringLiteralAH(creaQry, datname, fout);
2857  appendPQExpBufferStr(creaQry, ";\n");
2858  }
2859 
2860  if (creaQry->len > 0)
2861  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2862  datname, NULL, NULL, dba,
2863  "DATABASE PROPERTIES", SECTION_PRE_DATA,
2864  creaQry->data, delQry->data, NULL,
2865  &(dbDumpId), 1,
2866  NULL, NULL);
2867 
2868  /*
2869  * pg_largeobject comes from the old system intact, so set its
2870  * relfrozenxids and relminmxids.
2871  */
2872  if (dopt->binary_upgrade)
2873  {
2874  PGresult *lo_res;
2875  PQExpBuffer loFrozenQry = createPQExpBuffer();
2876  PQExpBuffer loOutQry = createPQExpBuffer();
2877  int i_relfrozenxid,
2878  i_relminmxid;
2879 
2880  /*
2881  * pg_largeobject
2882  */
2883  if (fout->remoteVersion >= 90300)
2884  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid\n"
2885  "FROM pg_catalog.pg_class\n"
2886  "WHERE oid = %u;\n",
2887  LargeObjectRelationId);
2888  else
2889  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid\n"
2890  "FROM pg_catalog.pg_class\n"
2891  "WHERE oid = %u;\n",
2892  LargeObjectRelationId);
2893 
2894  lo_res = ExecuteSqlQueryForSingleRow(fout, loFrozenQry->data);
2895 
2896  i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
2897  i_relminmxid = PQfnumber(lo_res, "relminmxid");
2898 
2899  appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, set pg_largeobject relfrozenxid and relminmxid\n");
2900  appendPQExpBuffer(loOutQry, "UPDATE pg_catalog.pg_class\n"
2901  "SET relfrozenxid = '%u', relminmxid = '%u'\n"
2902  "WHERE oid = %u;\n",
2903  atooid(PQgetvalue(lo_res, 0, i_relfrozenxid)),
2904  atooid(PQgetvalue(lo_res, 0, i_relminmxid)),
2905  LargeObjectRelationId);
2906  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2907  "pg_largeobject", NULL, NULL, "",
2908  "pg_largeobject", SECTION_PRE_DATA,
2909  loOutQry->data, "", NULL,
2910  NULL, 0,
2911  NULL, NULL);
2912 
2913  PQclear(lo_res);
2914 
2915  destroyPQExpBuffer(loFrozenQry);
2916  destroyPQExpBuffer(loOutQry);
2917  }
2918 
2919  PQclear(res);
2920 
2921  free(qdatname);
2922  destroyPQExpBuffer(dbQry);
2923  destroyPQExpBuffer(delQry);
2924  destroyPQExpBuffer(creaQry);
2925  destroyPQExpBuffer(labelq);
2926 }
2927 
2928 /*
2929  * Collect any database-specific or role-and-database-specific SET options
2930  * for this database, and append them to outbuf.
2931  */
2932 static void
2933 dumpDatabaseConfig(Archive *AH, PQExpBuffer outbuf,
2934  const char *dbname, Oid dboid)
2935 {
2936  PGconn *conn = GetConnection(AH);
2937  PQExpBuffer buf = createPQExpBuffer();
2938  PGresult *res;
2939  int count = 1;
2940 
2941  /*
2942  * First collect database-specific options. Pre-8.4 server versions lack
2943  * unnest(), so we do this the hard way by querying once per subscript.
2944  */
2945  for (;;)
2946  {
2947  if (AH->remoteVersion >= 90000)
2948  printfPQExpBuffer(buf, "SELECT setconfig[%d] FROM pg_db_role_setting "
2949  "WHERE setrole = 0 AND setdatabase = '%u'::oid",
2950  count, dboid);
2951  else
2952  printfPQExpBuffer(buf, "SELECT datconfig[%d] FROM pg_database WHERE oid = '%u'::oid", count, dboid);
2953 
2954  res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);
2955 
2956  if (PQntuples(res) == 1 &&
2957  !PQgetisnull(res, 0, 0))
2958  {
2959  makeAlterConfigCommand(conn, PQgetvalue(res, 0, 0),
2960  "DATABASE", dbname, NULL, NULL,
2961  outbuf);
2962  PQclear(res);
2963  count++;
2964  }
2965  else
2966  {
2967  PQclear(res);
2968  break;
2969  }
2970  }
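 /*
  * Illustrative note (not part of pg_dump.c): each setconfig entry found
  * above is turned by makeAlterConfigCommand() into a command such as
  *     ALTER DATABASE mydb SET work_mem TO '64MB';
  * (database name and setting here are hypothetical).
  */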
2971 
2972  /* Now look for role-and-database-specific options */
2973  if (AH->remoteVersion >= 90000)
2974  {
2975  /* Here we can assume we have unnest() */
2976  printfPQExpBuffer(buf, "SELECT rolname, unnest(setconfig) "
2977  "FROM pg_db_role_setting s, pg_roles r "
2978  "WHERE setrole = r.oid AND setdatabase = '%u'::oid",
2979  dboid);
2980 
2981  res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);
2982 
2983  if (PQntuples(res) > 0)
2984  {
2985  int i;
2986 
2987  for (i = 0; i < PQntuples(res); i++)
2988  makeAlterConfigCommand(conn, PQgetvalue(res, i, 1),
2989  "ROLE", PQgetvalue(res, i, 0),
2990  "DATABASE", dbname,
2991  outbuf);
2992  }
2993 
2994  PQclear(res);
2995  }
2996 
2997  destroyPQExpBuffer(buf);
2998 }
2999 
3000 /*
3001  * dumpEncoding: put the correct encoding into the archive
3002  */
3003 static void
3004 dumpEncoding(Archive *AH)
3005 {
3006  const char *encname = pg_encoding_to_char(AH->encoding);
3007  PQExpBuffer qry = createPQExpBuffer();
3008 
3009  if (g_verbose)
3010  write_msg(NULL, "saving encoding = %s\n", encname);
3011 
3012  appendPQExpBufferStr(qry, "SET client_encoding = ");
3013  appendStringLiteralAH(qry, encname, AH);
3014  appendPQExpBufferStr(qry, ";\n");
3015 
3016  ArchiveEntry(AH, nilCatalogId, createDumpId(),
3017  "ENCODING", NULL, NULL, "",
3018  "ENCODING", SECTION_PRE_DATA,
3019  qry->data, "", NULL,
3020  NULL, 0,
3021  NULL, NULL);
3022 
3023  destroyPQExpBuffer(qry);
3024 }
3025 
3026 
3027 /*
3028  * dumpStdStrings: put the correct escape string behavior into the archive
3029  */
3030 static void
3031 dumpStdStrings(Archive *AH)
3032 {
3033  const char *stdstrings = AH->std_strings ? "on" : "off";
3034  PQExpBuffer qry = createPQExpBuffer();
3035 
3036  if (g_verbose)
3037  write_msg(NULL, "saving standard_conforming_strings = %s\n",
3038  stdstrings);
3039 
3040  appendPQExpBuffer(qry, "SET standard_conforming_strings = '%s';\n",
3041  stdstrings);
3042 
3043  ArchiveEntry(AH, nilCatalogId, createDumpId(),
3044  "STDSTRINGS", NULL, NULL, "",
3045  "STDSTRINGS", SECTION_PRE_DATA,
3046  qry->data, "", NULL,
3047  NULL, 0,
3048  NULL, NULL);
3049 
3050  destroyPQExpBuffer(qry);
3051 }
3052 
3053 /*
3054  * dumpSearchPath: record the active search_path in the archive
3055  */
3056 static void
3057 dumpSearchPath(Archive *AH)
3058 {
3059  PQExpBuffer qry = createPQExpBuffer();
3060  PQExpBuffer path = createPQExpBuffer();
3061  PGresult *res;
3062  char **schemanames = NULL;
3063  int nschemanames = 0;
3064  int i;
3065 
3066  /*
3067  * We use the result of current_schemas(), not the search_path GUC,
3068  * because that might contain wildcards such as "$user", which won't
3069  * necessarily have the same value during restore. Also, this way avoids
3070  * listing schemas that may appear in search_path but not actually exist,
3071  * which seems like a prudent exclusion.
3072  */
3073  res = ExecuteSqlQueryForSingleRow(AH,
3074  "SELECT pg_catalog.current_schemas(false)");
3075 
3076  if (!parsePGArray(PQgetvalue(res, 0, 0), &schemanames, &nschemanames))
3077  exit_horribly(NULL, "could not parse result of current_schemas()\n");
3078 
3079  /*
3080  * We use set_config(), not a simple "SET search_path" command, because
3081  * the latter has less-clean behavior if the search path is empty. While
3082  * that's likely to get fixed at some point, it seems like a good idea to
3083  * be as backwards-compatible as possible in what we put into archives.
3084  */
3085  for (i = 0; i < nschemanames; i++)
3086  {
3087  if (i > 0)
3088  appendPQExpBufferStr(path, ", ");
3089  appendPQExpBufferStr(path, fmtId(schemanames[i]));
3090  }
3091 
3092  appendPQExpBufferStr(qry, "SELECT pg_catalog.set_config('search_path', ");
3093  appendStringLiteralAH(qry, path->data, AH);
3094  appendPQExpBufferStr(qry, ", false);\n");
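 /*
  * Illustrative note (not part of pg_dump.c): with current_schemas()
  * returning {public}, the archived command would be
  *     SELECT pg_catalog.set_config('search_path', 'public', false);
  */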
3095 
3096  if (g_verbose)
3097  write_msg(NULL, "saving search_path = %s\n", path->data);
3098 
3099  ArchiveEntry(AH, nilCatalogId, createDumpId(),
3100  "SEARCHPATH", NULL, NULL, "",
3101  "SEARCHPATH", SECTION_PRE_DATA,
3102  qry->data, "", NULL,
3103  NULL, 0,
3104  NULL, NULL);
3105 
3106  /* Also save it in AH->searchpath, in case we're doing plain text dump */
3107  AH->searchpath = pg_strdup(qry->data);
3108 
3109  if (schemanames)
3110  free(schemanames);
3111  PQclear(res);
3112  destroyPQExpBuffer(qry);
3113  destroyPQExpBuffer(path);
3114 }
3115 
3116 
3117 /*
3118  * getBlobs:
3119  * Collect schema-level data about large objects
3120  */
3121 static void
3122 getBlobs(Archive *fout)
3123 {
3124  DumpOptions *dopt = fout->dopt;
3125  PQExpBuffer blobQry = createPQExpBuffer();
3126  BlobInfo *binfo;
3127  DumpableObject *bdata;
3128  PGresult *res;
3129  int ntups;
3130  int i;
3131  int i_oid;
3132  int i_lomowner;
3133  int i_lomacl;
3134  int i_rlomacl;
3135  int i_initlomacl;
3136  int i_initrlomacl;
3137 
3138  /* Verbose message */
3139  if (g_verbose)
3140  write_msg(NULL, "reading large objects\n");
3141 
3142  /* Fetch BLOB OIDs, and owner/ACL data if >= 9.0 */
3143  if (fout->remoteVersion >= 90600)
3144  {
3145  PQExpBuffer acl_subquery = createPQExpBuffer();
3146  PQExpBuffer racl_subquery = createPQExpBuffer();
3147  PQExpBuffer init_acl_subquery = createPQExpBuffer();
3148  PQExpBuffer init_racl_subquery = createPQExpBuffer();
3149 
3150  buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
3151  init_racl_subquery, "l.lomacl", "l.lomowner", "'L'",
3152  dopt->binary_upgrade);
3153 
3154  appendPQExpBuffer(blobQry,
3155  "SELECT l.oid, (%s l.lomowner) AS rolname, "
3156  "%s AS lomacl, "
3157  "%s AS rlomacl, "
3158  "%s AS initlomacl, "
3159  "%s AS initrlomacl "
3160  "FROM pg_largeobject_metadata l "
3161  "LEFT JOIN pg_init_privs pip ON "
3162  "(l.oid = pip.objoid "
3163  "AND pip.classoid = 'pg_largeobject'::regclass "
3164  "AND pip.objsubid = 0) ",
3165  username_subquery,
3166  acl_subquery->data,
3167  racl_subquery->data,
3168  init_acl_subquery->data,
3169  init_racl_subquery->data);
3170 
3171  destroyPQExpBuffer(acl_subquery);
3172  destroyPQExpBuffer(racl_subquery);
3173  destroyPQExpBuffer(init_acl_subquery);
3174  destroyPQExpBuffer(init_racl_subquery);
3175  }
3176  else if (fout->remoteVersion >= 90000)
3177  appendPQExpBuffer(blobQry,
3178  "SELECT oid, (%s lomowner) AS rolname, lomacl, "
3179  "NULL AS rlomacl, NULL AS initlomacl, "
3180  "NULL AS initrlomacl "
3181  " FROM pg_largeobject_metadata",
3182  username_subquery);
3183  else
3184  appendPQExpBufferStr(blobQry,
3185  "SELECT DISTINCT loid AS oid, "
3186  "NULL::name AS rolname, NULL::oid AS lomacl, "
3187  "NULL::oid AS rlomacl, NULL::oid AS initlomacl, "
3188  "NULL::oid AS initrlomacl "
3189  " FROM pg_largeobject");
3190 
3191  res = ExecuteSqlQuery(fout, blobQry->data, PGRES_TUPLES_OK);
3192 
3193  i_oid = PQfnumber(res, "oid");
3194  i_lomowner = PQfnumber(res, "rolname");
3195  i_lomacl = PQfnumber(res, "lomacl");
3196  i_rlomacl = PQfnumber(res, "rlomacl");
3197  i_initlomacl = PQfnumber(res, "initlomacl");
3198  i_initrlomacl = PQfnumber(res, "initrlomacl");
3199 
3200  ntups = PQntuples(res);
3201 
3202  /*
3203  * Each large object has its own BLOB archive entry.
3204  */
3205  binfo = (BlobInfo *) pg_malloc(ntups * sizeof(BlobInfo));
3206 
3207  for (i = 0; i < ntups; i++)
3208  {
3209  binfo[i].dobj.objType = DO_BLOB;
3210  binfo[i].dobj.catId.tableoid = LargeObjectRelationId;
3211  binfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
3212  AssignDumpId(&binfo[i].dobj);
3213 
3214  binfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oid));
3215  binfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_lomowner));
3216  binfo[i].blobacl = pg_strdup(PQgetvalue(res, i, i_lomacl));
3217  binfo[i].rblobacl = pg_strdup(PQgetvalue(res, i, i_rlomacl));
3218  binfo[i].initblobacl = pg_strdup(PQgetvalue(res, i, i_initlomacl));
3219  binfo[i].initrblobacl = pg_strdup(PQgetvalue(res, i, i_initrlomacl));
3220 
3221  if (PQgetisnull(res, i, i_lomacl) &&
3222  PQgetisnull(res, i, i_rlomacl) &&
3223  PQgetisnull(res, i, i_initlomacl) &&
3224  PQgetisnull(res, i, i_initrlomacl))
3225  binfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
3226 
3227  /*
3228  * In binary-upgrade mode for blobs, we do *not* dump out the blob
3229  * data, as it will be copied by pg_upgrade, which simply copies the
3230  * pg_largeobject table. We *do*, however, dump out everything but
3231  * the data, since pg_upgrade copies just pg_largeobject, not
3232  * pg_largeobject_metadata, after the dump is restored.
3233  */
3234  if (dopt->binary_upgrade)
3235  binfo[i].dobj.dump &= ~DUMP_COMPONENT_DATA;
3236  }
3237 
3238  /*
3239  * If we have any large objects, a "BLOBS" archive entry is needed. This
3240  * is just a placeholder for sorting; it carries no data now.
3241  */
3242  if (ntups > 0)
3243  {
3244  bdata = (DumpableObject *) pg_malloc(sizeof(DumpableObject));
3245  bdata->objType = DO_BLOB_DATA;
3246  bdata->catId = nilCatalogId;
3247  AssignDumpId(bdata);
3248  bdata->name = pg_strdup("BLOBS");
3249  }
3250 
3251  PQclear(res);
3252  destroyPQExpBuffer(blobQry);
3253 }
3254 
3255 /*
3256  * dumpBlob
3257  *
3258  * dump the definition (metadata) of the given large object
3259  */
3260 static void
3261 dumpBlob(Archive *fout, BlobInfo *binfo)
3262 {
3263  PQExpBuffer cquery = createPQExpBuffer();
3264  PQExpBuffer dquery = createPQExpBuffer();
3265 
3266  appendPQExpBuffer(cquery,
3267  "SELECT pg_catalog.lo_create('%s');\n",
3268  binfo->dobj.name);
3269 
3270  appendPQExpBuffer(dquery,
3271  "SELECT pg_catalog.lo_unlink('%s');\n",
3272  binfo->dobj.name);
3273 
3274  if (binfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
3275  ArchiveEntry(fout, binfo->dobj.catId, binfo->dobj.dumpId,
3276  binfo->dobj.name,
3277  NULL, NULL,
3278  binfo->rolname,
3279  "BLOB", SECTION_PRE_DATA,
3280  cquery->data, dquery->data, NULL,
3281  NULL, 0,
3282  NULL, NULL);
3283 
3284  /* Dump comment if any */
3285  if (binfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3286  dumpComment(fout, "LARGE OBJECT", binfo->dobj.name,
3287  NULL, binfo->rolname,
3288  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3289 
3290  /* Dump security label if any */
3291  if (binfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3292  dumpSecLabel(fout, "LARGE OBJECT", binfo->dobj.name,
3293  NULL, binfo->rolname,
3294  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3295 
3296  /* Dump ACL if any */
3297  if (binfo->blobacl && (binfo->dobj.dump & DUMP_COMPONENT_ACL))
3298  dumpACL(fout, binfo->dobj.catId, binfo->dobj.dumpId, "LARGE OBJECT",
3299  binfo->dobj.name, NULL,
3300  NULL, binfo->rolname, binfo->blobacl, binfo->rblobacl,
3301  binfo->initblobacl, binfo->initrblobacl);
3302 
3303  destroyPQExpBuffer(cquery);
3304  destroyPQExpBuffer(dquery);
3305 }
3306 
3307 /*
3308  * dumpBlobs:
3309  * dump the data contents of all large objects
3310  */
3311 static int
3312 dumpBlobs(Archive *fout, void *arg)
3313 {
3314  const char *blobQry;
3315  const char *blobFetchQry;
3316  PGconn *conn = GetConnection(fout);
3317  PGresult *res;
3318  char buf[LOBBUFSIZE];
3319  int ntups;
3320  int i;
3321  int cnt;
3322 
3323  if (g_verbose)
3324  write_msg(NULL, "saving large objects\n");
3325 
3326  /*
3327  * Currently, we re-fetch all BLOB OIDs using a cursor. Consider scanning
3328  * the already-in-memory dumpable objects instead...
3329  */
3330  if (fout->remoteVersion >= 90000)
3331  blobQry =
3332  "DECLARE bloboid CURSOR FOR "
3333  "SELECT oid FROM pg_largeobject_metadata ORDER BY 1";
3334  else
3335  blobQry =
3336  "DECLARE bloboid CURSOR FOR "
3337  "SELECT DISTINCT loid FROM pg_largeobject ORDER BY 1";
3338 
3339  ExecuteSqlStatement(fout, blobQry);
3340 
3341  /* Command to fetch from cursor */
3342  blobFetchQry = "FETCH 1000 IN bloboid";
3343 
3344  do
3345  {
3346  /* Do a fetch */
3347  res = ExecuteSqlQuery(fout, blobFetchQry, PGRES_TUPLES_OK);
3348 
3349  /* Process the tuples, if any */
3350  ntups = PQntuples(res);
3351  for (i = 0; i < ntups; i++)
3352  {
3353  Oid blobOid;
3354  int loFd;
3355 
3356  blobOid = atooid(PQgetvalue(res, i, 0));
3357  /* Open the BLOB */
3358  loFd = lo_open(conn, blobOid, INV_READ);
3359  if (loFd == -1)
3360  exit_horribly(NULL, "could not open large object %u: %s",
3361  blobOid, PQerrorMessage(conn));
3362 
3363  StartBlob(fout, blobOid);
3364 
3365  /* Now read it in chunks, sending data to archive */
3366  do
3367  {
3368  cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
3369  if (cnt < 0)
3370  exit_horribly(NULL, "error reading large object %u: %s",
3371  blobOid, PQerrorMessage(conn));
3372 
3373  WriteData(fout, buf, cnt);
3374  } while (cnt > 0);
3375 
3376  lo_close(conn, loFd);
3377 
3378  EndBlob(fout, blobOid);
3379  }
3380 
3381  PQclear(res);
3382  } while (ntups > 0);
3383 
3384  return 1;
3385 }
3386 
3387 /*
3388  * getPolicies
3389  * get information about policies on a dumpable table.
3390  */
3391 void
3392 getPolicies(Archive *fout, TableInfo tblinfo[], int numTables)
3393 {
3394  PQExpBuffer query;
3395  PGresult *res;
3396  PolicyInfo *polinfo;
3397  int i_oid;
3398  int i_tableoid;
3399  int i_polname;
3400  int i_polcmd;
3401  int i_polpermissive;
3402  int i_polroles;
3403  int i_polqual;
3404  int i_polwithcheck;
3405  int i,
3406  j,
3407  ntups;
3408 
3409  if (fout->remoteVersion < 90500)
3410  return;
3411 
3412  query = createPQExpBuffer();
3413 
3414  for (i = 0; i < numTables; i++)
3415  {
3416  TableInfo *tbinfo = &tblinfo[i];
3417 
3418  /* Ignore row security on tables not to be dumped */
3419  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_POLICY))
3420  continue;
3421 
3422  if (g_verbose)
3423  write_msg(NULL, "reading row security enabled for table \"%s.%s\"\n",
3424  tbinfo->dobj.namespace->dobj.name,
3425  tbinfo->dobj.name);
3426 
3427  /*
3428  * Get row security enabled information for the table. We represent
3429  * RLS being enabled on a table by creating a PolicyInfo object with
3430  * null polname.
3431  */
3432  if (tbinfo->rowsec)
3433  {
3434  /*
3435  * Note: use tableoid 0 so that this object won't be mistaken for
3436  * something that pg_depend entries apply to.
3437  */
3438  polinfo = pg_malloc(sizeof(PolicyInfo));
3439  polinfo->dobj.objType = DO_POLICY;
3440  polinfo->dobj.catId.tableoid = 0;
3441  polinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
3442  AssignDumpId(&polinfo->dobj);
3443  polinfo->dobj.namespace = tbinfo->dobj.namespace;
3444  polinfo->dobj.name = pg_strdup(tbinfo->dobj.name);
3445  polinfo->poltable = tbinfo;
3446  polinfo->polname = NULL;
3447  polinfo->polcmd = '\0';
3448  polinfo->polpermissive = 0;
3449  polinfo->polroles = NULL;
3450  polinfo->polqual = NULL;
3451  polinfo->polwithcheck = NULL;
3452  }
3453 
3454  if (g_verbose)
3455  write_msg(NULL, "reading policies for table \"%s.%s\"\n",
3456  tbinfo->dobj.namespace->dobj.name,
3457  tbinfo->dobj.name);
3458 
3459  resetPQExpBuffer(query);
3460 
3461  /* Get the policies for the table. */
3462  if (fout->remoteVersion >= 100000)
3463  appendPQExpBuffer(query,
3464  "SELECT oid, tableoid, pol.polname, pol.polcmd, pol.polpermissive, "
3465  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
3466  " pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
3467  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
3468  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
3469  "FROM pg_catalog.pg_policy pol "
3470  "WHERE polrelid = '%u'",
3471  tbinfo->dobj.catId.oid);
3472  else
3473  appendPQExpBuffer(query,
3474  "SELECT oid, tableoid, pol.polname, pol.polcmd, 't' as polpermissive, "
3475  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
3476  " pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
3477  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
3478  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
3479  "FROM pg_catalog.pg_policy pol "
3480  "WHERE polrelid = '%u'",
3481  tbinfo->dobj.catId.oid);
3482  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3483 
3484  ntups = PQntuples(res);
3485 
3486  if (ntups == 0)
3487  {
3488  /*
3489  * No explicit policies to handle (only the default-deny policy,
3490  * which is handled as part of the table definition). Clean up
3491  * and return.
3492  */
3493  PQclear(res);
3494  continue;
3495  }
3496 
3497  i_oid = PQfnumber(res, "oid");
3498  i_tableoid = PQfnumber(res, "tableoid");
3499  i_polname = PQfnumber(res, "polname");
3500  i_polcmd = PQfnumber(res, "polcmd");
3501  i_polpermissive = PQfnumber(res, "polpermissive");
3502  i_polroles = PQfnumber(res, "polroles");
3503  i_polqual = PQfnumber(res, "polqual");
3504  i_polwithcheck = PQfnumber(res, "polwithcheck");
3505 
3506  polinfo = pg_malloc(ntups * sizeof(PolicyInfo));
3507 
3508  for (j = 0; j < ntups; j++)
3509  {
3510  polinfo[j].dobj.objType = DO_POLICY;
3511  polinfo[j].dobj.catId.tableoid =
3512  atooid(PQgetvalue(res, j, i_tableoid));
3513  polinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
3514  AssignDumpId(&polinfo[j].dobj);
3515  polinfo[j].dobj.namespace = tbinfo->dobj.namespace;
3516  polinfo[j].poltable = tbinfo;
3517  polinfo[j].polname = pg_strdup(PQgetvalue(res, j, i_polname));
3518  polinfo[j].dobj.name = pg_strdup(polinfo[j].polname);
3519 
3520  polinfo[j].polcmd = *(PQgetvalue(res, j, i_polcmd));
3521  polinfo[j].polpermissive = *(PQgetvalue(res, j, i_polpermissive)) == 't';
3522 
3523  if (PQgetisnull(res, j, i_polroles))
3524  polinfo[j].polroles = NULL;
3525  else
3526  polinfo[j].polroles = pg_strdup(PQgetvalue(res, j, i_polroles));
3527 
3528  if (PQgetisnull(res, j, i_polqual))
3529  polinfo[j].polqual = NULL;
3530  else
3531  polinfo[j].polqual = pg_strdup(PQgetvalue(res, j, i_polqual));
3532 
3533  if (PQgetisnull(res, j, i_polwithcheck))
3534  polinfo[j].polwithcheck = NULL;
3535  else
3536  polinfo[j].polwithcheck
3537  = pg_strdup(PQgetvalue(res, j, i_polwithcheck));
3538  }
3539  PQclear(res);
3540  }
3541  destroyPQExpBuffer(query);
3542 }
3543 
3544 /*
3545  * dumpPolicy
3546  * dump the definition of the given policy
3547  */
3548 static void
3549 dumpPolicy(Archive *fout, PolicyInfo *polinfo)
3550 {
3551  DumpOptions *dopt = fout->dopt;
3552  TableInfo *tbinfo = polinfo->poltable;
3553  PQExpBuffer query;
3554  PQExpBuffer delqry;
3555  const char *cmd;
3556  char *tag;
3557 
3558  if (dopt->dataOnly)
3559  return;
3560 
3561  /*
3562  * If polname is NULL, then this record is just indicating that ROW LEVEL
3563  * SECURITY is enabled for the table. Dump as ALTER TABLE <table> ENABLE
3564  * ROW LEVEL SECURITY.
3565  */
3566  if (polinfo->polname == NULL)
3567  {
3568  query = createPQExpBuffer();
3569 
3570  appendPQExpBuffer(query, "ALTER TABLE %s ENABLE ROW LEVEL SECURITY;",
3571  fmtQualifiedDumpable(tbinfo));
3572 
3573  /*
3574  * We must emit the ROW SECURITY object's dependency on its table
3575  * explicitly, because it will not match anything in pg_depend (unlike
3576  * the case for other PolicyInfo objects).
3577  */
3578  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3579  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3580  polinfo->dobj.name,
3581  polinfo->dobj.namespace->dobj.name,
3582  NULL,
3583  tbinfo->rolname,
3584  "ROW SECURITY", SECTION_POST_DATA,
3585  query->data, "", NULL,
3586  &(tbinfo->dobj.dumpId), 1,
3587  NULL, NULL);
3588 
3589  destroyPQExpBuffer(query);
3590  return;
3591  }
3592 
3593  if (polinfo->polcmd == '*')
3594  cmd = "";
3595  else if (polinfo->polcmd == 'r')
3596  cmd = " FOR SELECT";
3597  else if (polinfo->polcmd == 'a')
3598  cmd = " FOR INSERT";
3599  else if (polinfo->polcmd == 'w')
3600  cmd = " FOR UPDATE";
3601  else if (polinfo->polcmd == 'd')
3602  cmd = " FOR DELETE";
3603  else
3604  {
3605  write_msg(NULL, "unexpected policy command type: %c\n",
3606  polinfo->polcmd);
3607  exit_nicely(1);
3608  }
3609 
3610  query = createPQExpBuffer();
3611  delqry = createPQExpBuffer();
3612 
3613  appendPQExpBuffer(query, "CREATE POLICY %s", fmtId(polinfo->polname));
3614 
3615  appendPQExpBuffer(query, " ON %s%s%s", fmtQualifiedDumpable(tbinfo),
3616  !polinfo->polpermissive ? " AS RESTRICTIVE" : "", cmd);
3617 
3618  if (polinfo->polroles != NULL)
3619  appendPQExpBuffer(query, " TO %s", polinfo->polroles);
3620 
3621  if (polinfo->polqual != NULL)
3622  appendPQExpBuffer(query, " USING (%s)", polinfo->polqual);
3623 
3624  if (polinfo->polwithcheck != NULL)
3625  appendPQExpBuffer(query, " WITH CHECK (%s)", polinfo->polwithcheck);
3626 
3627  appendPQExpBuffer(query, ";\n");
3628 
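 /*
  * Illustrative example (hypothetical policy, not taken from any catalog):
  * for a permissive SELECT policy "p1" on table public.t1 granted to role
  * alice with qual "(owner = CURRENT_USER)", the command assembled above
  * would read:
  *
  *   CREATE POLICY p1 ON public.t1 FOR SELECT TO alice USING ((owner = CURRENT_USER));
  */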
3629  appendPQExpBuffer(delqry, "DROP POLICY %s", fmtId(polinfo->polname));
3630  appendPQExpBuffer(delqry, " ON %s;\n", fmtQualifiedDumpable(tbinfo));
3631 
3632  tag = psprintf("%s %s", tbinfo->dobj.name, polinfo->dobj.name);
3633 
3634  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3635  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3636  tag,
3637  polinfo->dobj.namespace->dobj.name,
3638  NULL,
3639  tbinfo->rolname,
3640  "POLICY", SECTION_POST_DATA,
3641  query->data, delqry->data, NULL,
3642  NULL, 0,
3643  NULL, NULL);
3644 
3645  free(tag);
3646  destroyPQExpBuffer(query);
3647  destroyPQExpBuffer(delqry);
3648 }
3649 
3650 /*
3651  * getPublications
3652  * get information about publications
3653  */
3654 void
3655 getPublications(Archive *fout)
3656 {
3657  DumpOptions *dopt = fout->dopt;
3658  PQExpBuffer query;
3659  PGresult *res;
3660  PublicationInfo *pubinfo;
3661  int i_tableoid;
3662  int i_oid;
3663  int i_pubname;
3664  int i_rolname;
3665  int i_puballtables;
3666  int i_pubinsert;
3667  int i_pubupdate;
3668  int i_pubdelete;
3669  int i_pubtruncate;
3670  int i,
3671  ntups;
3672 
3673  if (dopt->no_publications || fout->remoteVersion < 100000)
3674  return;
3675 
3676  query = createPQExpBuffer();
3677 
3678  resetPQExpBuffer(query);
3679 
3680  /* Get the publications. */
3681  if (fout->remoteVersion >= 110000)
3682  appendPQExpBuffer(query,
3683  "SELECT p.tableoid, p.oid, p.pubname, "
3684  "(%s p.pubowner) AS rolname, "
3685  "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, p.pubtruncate "
3686  "FROM pg_publication p",
3687  username_subquery);
3688  else
3689  appendPQExpBuffer(query,
3690  "SELECT p.tableoid, p.oid, p.pubname, "
3691  "(%s p.pubowner) AS rolname, "
3692  "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, false AS pubtruncate "
3693  "FROM pg_publication p",
3694  username_subquery);
3695 
3696  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3697 
3698  ntups = PQntuples(res);
3699 
3700  i_tableoid = PQfnumber(res, "tableoid");
3701  i_oid = PQfnumber(res, "oid");
3702  i_pubname = PQfnumber(res, "pubname");
3703  i_rolname = PQfnumber(res, "rolname");
3704  i_puballtables = PQfnumber(res, "puballtables");
3705  i_pubinsert = PQfnumber(res, "pubinsert");
3706  i_pubupdate = PQfnumber(res, "pubupdate");
3707  i_pubdelete = PQfnumber(res, "pubdelete");
3708  i_pubtruncate = PQfnumber(res, "pubtruncate");
3709 
3710  pubinfo = pg_malloc(ntups * sizeof(PublicationInfo));
3711 
3712  for (i = 0; i < ntups; i++)
3713  {
3714  pubinfo[i].dobj.objType = DO_PUBLICATION;
3715  pubinfo[i].dobj.catId.tableoid =
3716  atooid(PQgetvalue(res, i, i_tableoid));
3717  pubinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
3718  AssignDumpId(&pubinfo[i].dobj);
3719  pubinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_pubname));
3720  pubinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
3721  pubinfo[i].puballtables =
3722  (strcmp(PQgetvalue(res, i, i_puballtables), "t") == 0);
3723  pubinfo[i].pubinsert =
3724  (strcmp(PQgetvalue(res, i, i_pubinsert), "t") == 0);
3725  pubinfo[i].pubupdate =
3726  (strcmp(PQgetvalue(res, i, i_pubupdate), "t") == 0);
3727  pubinfo[i].pubdelete =
3728  (strcmp(PQgetvalue(res, i, i_pubdelete), "t") == 0);
3729  pubinfo[i].pubtruncate =
3730  (strcmp(PQgetvalue(res, i, i_pubtruncate), "t") == 0);
3731 
3732  if (strlen(pubinfo[i].rolname) == 0)
3733  write_msg(NULL, "WARNING: owner of publication \"%s\" appears to be invalid\n",
3734  pubinfo[i].dobj.name);
3735 
3736  /* Decide whether we want to dump it */
3737  selectDumpableObject(&(pubinfo[i].dobj), fout);
3738  }
3739  PQclear(res);
3740 
3741  destroyPQExpBuffer(query);
3742 }
3743 
3744 /*
3745  * dumpPublication
3746  * dump the definition of the given publication
3747  */
3748 static void
3749 dumpPublication(Archive *fout, PublicationInfo *pubinfo)
3750 {
3751  PQExpBuffer delq;
3752  PQExpBuffer query;
3753  char *qpubname;
3754  bool first = true;
3755 
3756  if (!(pubinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3757  return;
3758 
3759  delq = createPQExpBuffer();
3760  query = createPQExpBuffer();
3761 
3762  qpubname = pg_strdup(fmtId(pubinfo->dobj.name));
3763 
3764  appendPQExpBuffer(delq, "DROP PUBLICATION %s;\n",
3765  qpubname);
3766 
3767  appendPQExpBuffer(query, "CREATE PUBLICATION %s",
3768  qpubname);
3769 
3770  if (pubinfo->puballtables)
3771  appendPQExpBufferStr(query, " FOR ALL TABLES");
3772 
3773  appendPQExpBufferStr(query, " WITH (publish = '");
3774  if (pubinfo->pubinsert)
3775  {
3776  appendPQExpBufferStr(query, "insert");
3777  first = false;
3778  }
3779 
3780  if (pubinfo->pubupdate)
3781  {
3782  if (!first)
3783  appendPQExpBufferStr(query, ", ");
3784 
3785  appendPQExpBufferStr(query, "update");
3786  first = false;
3787  }
3788 
3789  if (pubinfo->pubdelete)
3790  {
3791  if (!first)
3792  appendPQExpBufferStr(query, ", ");
3793 
3794  appendPQExpBufferStr(query, "delete");
3795  first = false;
3796  }
3797 
3798  if (pubinfo->pubtruncate)
3799  {
3800  if (!first)
3801  appendPQExpBufferStr(query, ", ");
3802 
3803  appendPQExpBufferStr(query, "truncate");
3804  first = false;
3805  }
3806 
3807  appendPQExpBufferStr(query, "');\n");
3808 
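 /*
  * Illustrative example (hypothetical publication): for a publication
  * "mypub" covering all tables and publishing only inserts and updates,
  * the command assembled above would read:
  *
  *   CREATE PUBLICATION mypub FOR ALL TABLES WITH (publish = 'insert, update');
  */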
3809  ArchiveEntry(fout, pubinfo->dobj.catId, pubinfo->dobj.dumpId,
3810  pubinfo->dobj.name,
3811  NULL,
3812  NULL,
3813  pubinfo->rolname,
3814  "PUBLICATION", SECTION_POST_DATA,
3815  query->data, delq->data, NULL,
3816  NULL, 0,
3817  NULL, NULL);
3818 
3819  if (pubinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3820  dumpComment(fout, "PUBLICATION", qpubname,
3821  NULL, pubinfo->rolname,
3822  pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
3823 
3824  if (pubinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3825  dumpSecLabel(fout, "PUBLICATION", qpubname,
3826  NULL, pubinfo->rolname,
3827  pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
3828 
3829  destroyPQExpBuffer(delq);
3830  destroyPQExpBuffer(query);
3831  free(qpubname);
3832 }
3833 
3834 /*
3835  * getPublicationTables
3836  * get information about publication membership for dumpable tables.
3837  */
3838 void
3839 getPublicationTables(Archive *fout, TableInfo tblinfo[], int numTables)
3840 {
3841  PQExpBuffer query;
3842  PGresult *res;
3843  PublicationRelInfo *pubrinfo;
3844  DumpOptions *dopt = fout->dopt;
3845  int i_tableoid;
3846  int i_oid;
3847  int i_pubname;
3848  int i,
3849  j,
3850  ntups;
3851 
3852  if (dopt->no_publications || fout->remoteVersion < 100000)
3853  return;
3854 
3855  query = createPQExpBuffer();
3856 
3857  for (i = 0; i < numTables; i++)
3858  {
3859  TableInfo *tbinfo = &tblinfo[i];
3860 
3861  /* Only plain tables can be added to publications. */
3862  if (tbinfo->relkind != RELKIND_RELATION)
3863  continue;
3864 
3865  /*
3866  * Ignore publication membership of tables whose definitions are not
3867  * to be dumped.
3868  */
3869  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3870  continue;
3871 
3872  if (g_verbose)
3873  write_msg(NULL, "reading publication membership for table \"%s.%s\"\n",
3874  tbinfo->dobj.namespace->dobj.name,
3875  tbinfo->dobj.name);
3876 
3877  resetPQExpBuffer(query);
3878 
3879  /* Get the publication membership for the table. */
3880  appendPQExpBuffer(query,
3881  "SELECT pr.tableoid, pr.oid, p.pubname "
3882  "FROM pg_publication_rel pr, pg_publication p "
3883  "WHERE pr.prrelid = '%u'"
3884  " AND p.oid = pr.prpubid",
3885  tbinfo->dobj.catId.oid);
3886  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3887 
3888  ntups = PQntuples(res);
3889 
3890  if (ntups == 0)
3891  {
3892  /*
3893  * Table is not a member of any publication. Clean up and return.
3894  */
3895  PQclear(res);
3896  continue;
3897  }
3898 
3899  i_tableoid = PQfnumber(res, "tableoid");
3900  i_oid = PQfnumber(res, "oid");
3901  i_pubname = PQfnumber(res, "pubname");
3902 
3903  pubrinfo = pg_malloc(ntups * sizeof(PublicationRelInfo));
3904 
3905  for (j = 0; j < ntups; j++)
3906  {
3907  pubrinfo[j].dobj.objType = DO_PUBLICATION_REL;
3908  pubrinfo[j].dobj.catId.tableoid =
3909  atooid(PQgetvalue(res, j, i_tableoid));
3910  pubrinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
3911  AssignDumpId(&pubrinfo[j].dobj);
3912  pubrinfo[j].dobj.namespace = tbinfo->dobj.namespace;
3913  pubrinfo[j].dobj.name = tbinfo->dobj.name;
3914  pubrinfo[j].pubname = pg_strdup(PQgetvalue(res, j, i_pubname));
3915  pubrinfo[j].pubtable = tbinfo;
3916 
3917  /* Decide whether we want to dump it */
3918  selectDumpablePublicationTable(&(pubrinfo[j].dobj), fout);
3919  }
3920  PQclear(res);
3921  }
3922  destroyPQExpBuffer(query);
3923 }
3924 
3925 /*
3926  * dumpPublicationTable
3927  * dump the definition of the given publication table mapping
3928  */
3929 static void
3930 dumpPublicationTable(Archive *fout, PublicationRelInfo *pubrinfo)
3931 {
3932  TableInfo *tbinfo = pubrinfo->pubtable;
3933  PQExpBuffer query;
3934  char *tag;
3935 
3936  if (!(pubrinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3937  return;
3938 
3939  tag = psprintf("%s %s", pubrinfo->pubname, tbinfo->dobj.name);
3940 
3941  query = createPQExpBuffer();
3942 
3943  appendPQExpBuffer(query, "ALTER PUBLICATION %s ADD TABLE ONLY",
3944  fmtId(pubrinfo->pubname));
3945  appendPQExpBuffer(query, " %s;\n",
3946  fmtQualifiedDumpable(tbinfo));
3947 
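 /*
  * Illustrative example (hypothetical names): for table public.t1 that is
  * a member of publication "mypub", the command assembled above would read:
  *
  *   ALTER PUBLICATION mypub ADD TABLE ONLY public.t1;
  */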
3948  /*
3949  * There is no point in creating a drop query, as the drop is
3950  * done by the table drop.
3951  */
3952  ArchiveEntry(fout, pubrinfo->dobj.catId, pubrinfo->dobj.dumpId,
3953  tag,
3954  tbinfo->dobj.namespace->dobj.name,
3955  NULL,
3956  "",
3957  "PUBLICATION TABLE", SECTION_POST_DATA,
3958  query->data, "", NULL,
3959  NULL, 0,
3960  NULL, NULL);
3961 
3962  free(tag);
3963  destroyPQExpBuffer(query);
3964 }
3965 
3966 /*
3967  * Is the currently connected user a superuser?
3968  */
3969 static bool
3970 is_superuser(Archive *fout)
3971 {
3972  ArchiveHandle *AH = (ArchiveHandle *) fout;
3973  const char *val;
3974 
3975  val = PQparameterStatus(AH->connection, "is_superuser");
3976 
3977  if (val && strcmp(val, "on") == 0)
3978  return true;
3979 
3980  return false;
3981 }
3982 
3983 /*
3984  * getSubscriptions
3985  * get information about subscriptions
3986  */
3987 void
3988 getSubscriptions(Archive *fout)
3989 {
3990  DumpOptions *dopt = fout->dopt;
3991  PQExpBuffer query;
3992  PGresult *res;
3993  SubscriptionInfo *subinfo;
3994  int i_tableoid;
3995  int i_oid;
3996  int i_subname;
3997  int i_rolname;
3998  int i_subconninfo;
3999  int i_subslotname;
4000  int i_subsynccommit;
4001  int i_subpublications;
4002  int i,
4003  ntups;
4004 
4005  if (dopt->no_subscriptions || fout->remoteVersion < 100000)
4006  return;
4007 
4008  if (!is_superuser(fout))
4009  {
4010  int n;
4011 
4012  res = ExecuteSqlQuery(fout,
4013  "SELECT count(*) FROM pg_subscription "
4014  "WHERE subdbid = (SELECT oid FROM pg_database"
4015  " WHERE datname = current_database())",
4016  PGRES_TUPLES_OK);
4017  n = atoi(PQgetvalue(res, 0, 0));
4018  if (n > 0)
4019  write_msg(NULL, "WARNING: subscriptions not dumped because current user is not a superuser\n");
4020  PQclear(res);
4021  return;
4022  }
4023 
4024  query = createPQExpBuffer();
4025 
4026  resetPQExpBuffer(query);
4027 
4028  /* Get the subscriptions in current database. */
4029  appendPQExpBuffer(query,
4030  "SELECT s.tableoid, s.oid, s.subname,"
4031  "(%s s.subowner) AS rolname, "
4032  " s.subconninfo, s.subslotname, s.subsynccommit, "
4033  " s.subpublications "
4034  "FROM pg_subscription s "
4035  "WHERE s.subdbid = (SELECT oid FROM pg_database"
4036  " WHERE datname = current_database())",
4037  username_subquery);
4038  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4039 
4040  ntups = PQntuples(res);
4041 
4042  i_tableoid = PQfnumber(res, "tableoid");
4043  i_oid = PQfnumber(res, "oid");
4044  i_subname = PQfnumber(res, "subname");
4045  i_rolname = PQfnumber(res, "rolname");
4046  i_subconninfo = PQfnumber(res, "subconninfo");
4047  i_subslotname = PQfnumber(res, "subslotname");
4048  i_subsynccommit = PQfnumber(res, "subsynccommit");
4049  i_subpublications = PQfnumber(res, "subpublications");
4050 
4051  subinfo = pg_malloc(ntups * sizeof(SubscriptionInfo));
4052 
4053  for (i = 0; i < ntups; i++)
4054  {
4055  subinfo[i].dobj.objType = DO_SUBSCRIPTION;
4056  subinfo[i].dobj.catId.tableoid =
4057  atooid(PQgetvalue(res, i, i_tableoid));
4058  subinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4059  AssignDumpId(&subinfo[i].dobj);
4060  subinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_subname));
4061  subinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4062  subinfo[i].subconninfo = pg_strdup(PQgetvalue(res, i, i_subconninfo));
4063  if (PQgetisnull(res, i, i_subslotname))
4064  subinfo[i].subslotname = NULL;
4065  else
4066  subinfo[i].subslotname = pg_strdup(PQgetvalue(res, i, i_subslotname));
4067  subinfo[i].subsynccommit =
4068  pg_strdup(PQgetvalue(res, i, i_subsynccommit));
4069  subinfo[i].subpublications =
4070  pg_strdup(PQgetvalue(res, i, i_subpublications));
4071 
4072  if (strlen(subinfo[i].rolname) == 0)
4073  write_msg(NULL, "WARNING: owner of subscription \"%s\" appears to be invalid\n",
4074  subinfo[i].dobj.name);
4075 
4076  /* Decide whether we want to dump it */
4077  selectDumpableObject(&(subinfo[i].dobj), fout);
4078  }
4079  PQclear(res);
4080 
4081  destroyPQExpBuffer(query);
4082 }
4083 
4084 /*
4085  * dumpSubscription
4086  * dump the definition of the given subscription
4087  */
4088 static void
4089 dumpSubscription(Archive *fout, SubscriptionInfo *subinfo)
4090 {
4091  PQExpBuffer delq;
4092  PQExpBuffer query;
4093  PQExpBuffer publications;
4094  char *qsubname;
4095  char **pubnames = NULL;
4096  int npubnames = 0;
4097  int i;
4098 
4099  if (!(subinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
4100  return;
4101 
4102  delq = createPQExpBuffer();
4103  query = createPQExpBuffer();
4104 
4105  qsubname = pg_strdup(fmtId(subinfo->dobj.name));
4106 
4107  appendPQExpBuffer(delq, "DROP SUBSCRIPTION %s;\n",
4108  qsubname);
4109 
4110  appendPQExpBuffer(query, "CREATE SUBSCRIPTION %s CONNECTION ",
4111  qsubname);
4112  appendStringLiteralAH(query, subinfo->subconninfo, fout);
4113 
4114  /* Build list of quoted publications and append them to query. */
4115  if (!parsePGArray(subinfo->subpublications, &pubnames, &npubnames))
4116  {
4117  write_msg(NULL,
4118  "WARNING: could not parse subpublications array\n");
4119  if (pubnames)
4120  free(pubnames);
4121  pubnames = NULL;
4122  npubnames = 0;
4123  }
4124 
4125  publications = createPQExpBuffer();
4126  for (i = 0; i < npubnames; i++)
4127  {
4128  if (i > 0)
4129  appendPQExpBufferStr(publications, ", ");
4130 
4131  appendPQExpBufferStr(publications, fmtId(pubnames[i]));
4132  }
4133 
4134  appendPQExpBuffer(query, " PUBLICATION %s WITH (connect = false, slot_name = ", publications->data);
4135  if (subinfo->subslotname)
4136  appendStringLiteralAH(query, subinfo->subslotname, fout);
4137  else
4138  appendPQExpBufferStr(query, "NONE");
4139 
4140  if (strcmp(subinfo->subsynccommit, "off") != 0)
4141  appendPQExpBuffer(query, ", synchronous_commit = %s", fmtId(subinfo->subsynccommit));
4142 
4143  appendPQExpBufferStr(query, ");\n");
4144 
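 /*
  * Illustrative example (hypothetical subscription): for a subscription
  * "mysub" to publication "mypub" that uses its default replication slot
  * and the default synchronous_commit setting ("off"), the command
  * assembled above would read:
  *
  *   CREATE SUBSCRIPTION mysub CONNECTION 'host=publisher dbname=src' PUBLICATION mypub WITH (connect = false, slot_name = 'mysub');
  */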
4145  ArchiveEntry(fout, subinfo->dobj.catId, subinfo->dobj.dumpId,
4146  subinfo->dobj.name,
4147  NULL,
4148  NULL,
4149  subinfo->rolname,
4150  "SUBSCRIPTION", SECTION_POST_DATA,
4151  query->data, delq->data, NULL,
4152  NULL, 0,
4153  NULL, NULL);
4154 
4155  if (subinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
4156  dumpComment(fout, "SUBSCRIPTION", qsubname,
4157  NULL, subinfo->rolname,
4158  subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
4159 
4160  if (subinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
4161  dumpSecLabel(fout, "SUBSCRIPTION", qsubname,
4162  NULL, subinfo->rolname,
4163  subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
4164 
4165  destroyPQExpBuffer(publications);
4166  if (pubnames)
4167  free(pubnames);
4168 
4169  destroyPQExpBuffer(delq);
4170  destroyPQExpBuffer(query);
4171  free(qsubname);
4172 }
4173 
4174 static void
4175 binary_upgrade_set_type_oids_by_type_oid(Archive *fout,
4176  PQExpBuffer upgrade_buffer,
4177  Oid pg_type_oid,
4178  bool force_array_type)
4179 {
4180  PQExpBuffer upgrade_query = createPQExpBuffer();
4181  PGresult *res;
4182  Oid pg_type_array_oid;
4183 
4184  appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type oid\n");
4185  appendPQExpBuffer(upgrade_buffer,
4186  "SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4187  pg_type_oid);
4188 
4189  /* we only support old >= 8.3 for binary upgrades */
4190  appendPQExpBuffer(upgrade_query,
4191  "SELECT typarray "
4192  "FROM pg_catalog.pg_type "
4193  "WHERE oid = '%u'::pg_catalog.oid;",
4194  pg_type_oid);
4195 
4196  res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4197 
4198  pg_type_array_oid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typarray")));
4199 
4200  PQclear(res);
4201 
4202  if (!OidIsValid(pg_type_array_oid) && force_array_type)
4203  {
4204  /*
4205  * If the old version didn't assign an array type, but the new version
4206  * does, we must select an unused type OID to assign. This currently
4207  * only happens for domains, when upgrading pre-v11 to v11 and up.
4208  *
4209  * Note: local state here is kind of ugly, but we must have some,
4210  * since we mustn't choose the same unused OID more than once.
4211  */
4212  static Oid next_possible_free_oid = FirstNormalObjectId;
4213  bool is_dup;
4214 
4215  do
4216  {
4217  ++next_possible_free_oid;
4218  printfPQExpBuffer(upgrade_query,
4219  "SELECT EXISTS(SELECT 1 "
4220  "FROM pg_catalog.pg_type "
4221  "WHERE oid = '%u'::pg_catalog.oid);",
4222  next_possible_free_oid);
4223  res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4224  is_dup = (PQgetvalue(res, 0, 0)[0] == 't');
4225  PQclear(res);
4226  } while (is_dup);
4227 
4228  pg_type_array_oid = next_possible_free_oid;
4229  }
4230 
4231  if (OidIsValid(pg_type_array_oid))
4232  {
4233  appendPQExpBufferStr(upgrade_buffer,
4234  "\n-- For binary upgrade, must preserve pg_type array oid\n");
4235  appendPQExpBuffer(upgrade_buffer,
4236  "SELECT pg_catalog.binary_upgrade_set_next_array_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4237  pg_type_array_oid);
4238  }
4239 
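 /*
  * Taken together, the commands appended to upgrade_buffer above look like
  * the following (OIDs are illustrative only):
  *
  *   SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('16385'::pg_catalog.oid);
  *   SELECT pg_catalog.binary_upgrade_set_next_array_pg_type_oid('16386'::pg_catalog.oid);
  */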
4240  destroyPQExpBuffer(upgrade_query);
4241 }
4242 
4243 static bool
4244 binary_upgrade_set_type_oids_by_rel_oid(Archive *fout,
4245  PQExpBuffer upgrade_buffer,
4246  Oid pg_rel_oid)
4247 {
4248  PQExpBuffer upgrade_query = createPQExpBuffer();
4249  PGresult *upgrade_res;
4250  Oid pg_type_oid;
4251  bool toast_set = false;
4252 
4253  /* we only support old >= 8.3 for binary upgrades */
4254  appendPQExpBuffer(upgrade_query,
4255  "SELECT c.reltype AS crel, t.reltype AS trel "
4256  "FROM pg_catalog.pg_class c "
4257  "LEFT JOIN pg_catalog.pg_class t ON "
4258  " (c.reltoastrelid = t.oid) "
4259  "WHERE c.oid = '%u'::pg_catalog.oid;",
4260  pg_rel_oid);
4261 
4262  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4263 
4264  pg_type_oid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "crel")));
4265 
4266  binary_upgrade_set_type_oids_by_type_oid(fout, upgrade_buffer,
4267  pg_type_oid, false);
4268 
4269  if (!PQgetisnull(upgrade_res, 0, PQfnumber(upgrade_res, "trel")))
4270  {
4271  /* Toast tables do not have pg_type array rows */
4272  Oid pg_type_toast_oid = atooid(PQgetvalue(upgrade_res, 0,
4273  PQfnumber(upgrade_res, "trel")));
4274 
4275  appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type toast oid\n");
4276  appendPQExpBuffer(upgrade_buffer,
4277  "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4278  pg_type_toast_oid);
4279 
4280  toast_set = true;
4281  }
4282 
4283  PQclear(upgrade_res);
4284  destroyPQExpBuffer(upgrade_query);
4285 
4286  return toast_set;
4287 }
4288 
4289 static void
4290 binary_upgrade_set_pg_class_oids(Archive *fout,
4291  PQExpBuffer upgrade_buffer, Oid pg_class_oid,
4292  bool is_index)
4293 {
4294  PQExpBuffer upgrade_query = createPQExpBuffer();
4295  PGresult *upgrade_res;
4296  Oid pg_class_reltoastrelid;
4297  Oid pg_index_indexrelid;
4298 
4299  appendPQExpBuffer(upgrade_query,
4300  "SELECT c.reltoastrelid, i.indexrelid "
4301  "FROM pg_catalog.pg_class c LEFT JOIN "
4302  "pg_catalog.pg_index i ON (c.reltoastrelid = i.indrelid AND i.indisvalid) "
4303  "WHERE c.oid = '%u'::pg_catalog.oid;",
4304  pg_class_oid);
4305 
4306  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4307 
4308  pg_class_reltoastrelid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "reltoastrelid")));
4309  pg_index_indexrelid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "indexrelid")));
4310 
4311  appendPQExpBufferStr(upgrade_buffer,
4312  "\n-- For binary upgrade, must preserve pg_class oids\n");
4313 
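 /*
  * For a typical table that has a TOAST table, the commands appended below
  * are of the form (OIDs illustrative only):
  *
  *   SELECT pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('16400'::pg_catalog.oid);
  *   SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('16403'::pg_catalog.oid);
  *   SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('16405'::pg_catalog.oid);
  */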
4314  if (!is_index)
4315  {
4316  appendPQExpBuffer(upgrade_buffer,
4317  "SELECT pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('%u'::pg_catalog.oid);\n",
4318  pg_class_oid);
4319  /* only tables have toast tables, not indexes */
4320  if (OidIsValid(pg_class_reltoastrelid))
4321  {
4322  /*
4323  * One complexity is that the table definition might not require
4324  * the creation of a TOAST table, and the TOAST table might have
4325  * been created long after table creation, when the table was
4326  * loaded with wide data. By setting the TOAST oid we force
4327  * creation of the TOAST heap and TOAST index by the backend so we
4328  * can cleanly copy the files during binary upgrade.
4329  */
4330 
4331  appendPQExpBuffer(upgrade_buffer,
4332  "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('%u'::pg_catalog.oid);\n",
4333  pg_class_reltoastrelid);
4334 
4335  /* every toast table has an index */
4336  appendPQExpBuffer(upgrade_buffer,
4337  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
4338  pg_index_indexrelid);
4339  }
4340  }
4341  else
4342  appendPQExpBuffer(upgrade_buffer,
4343  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
4344  pg_class_oid);
4345 
4346  appendPQExpBufferChar(upgrade_buffer, '\n');
4347 
4348  PQclear(upgrade_res);
4349  destroyPQExpBuffer(upgrade_query);
4350 }
4351 
4352 /*
4353  * If the DumpableObject is a member of an extension, add a suitable
4354  * ALTER EXTENSION ADD command to the creation commands in upgrade_buffer.
4355  *
4356  * For somewhat historical reasons, objname should already be quoted,
4357  * but not objnamespace (if any).
4358  */
4359 static void
4360 binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
4361  DumpableObject *dobj,
4362  const char *objtype,
4363  const char *objname,
4364  const char *objnamespace)
4365 {
4366  DumpableObject *extobj = NULL;
4367  int i;
4368 
4369  if (!dobj->ext_member)
4370  return;
4371 
4372  /*
4373  * Find the parent extension. We could avoid this search if we wanted to
4374  * add a link field to DumpableObject, but the space costs of that would
4375  * be considerable. We assume that member objects could only have a
4376  * direct dependency on their own extension, not any others.
4377  */
4378  for (i = 0; i < dobj->nDeps; i++)
4379  {
4380  extobj = findObjectByDumpId(dobj->dependencies[i]);
4381  if (extobj && extobj->objType == DO_EXTENSION)
4382  break;
4383  extobj = NULL;
4384  }
4385  if (extobj == NULL)
4386  exit_horribly(NULL, "could not find parent extension for %s %s\n",
4387  objtype, objname);
4388 
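 /*
  * The command emitted below looks like, for example (hypothetical
  * extension and member object):
  *
  *   ALTER EXTENSION myext ADD TABLE public.mytable;
  */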
4389  appendPQExpBufferStr(upgrade_buffer,
4390  "\n-- For binary upgrade, handle extension membership the hard way\n");
4391  appendPQExpBuffer(upgrade_buffer, "ALTER EXTENSION %s ADD %s ",
4392  fmtId(extobj->name),
4393  objtype);
4394  if (objnamespace && *objnamespace)
4395  appendPQExpBuffer(upgrade_buffer, "%s.", fmtId(objnamespace));
4396  appendPQExpBuffer(upgrade_buffer, "%s;\n", objname);
4397 }
4398 
4399 /*
4400  * getNamespaces:
4401  * read all namespaces in the system catalogs and return them in the
4402  * NamespaceInfo* structure
4403  *
4404  * numNamespaces is set to the number of namespaces read in
4405  */
4406 NamespaceInfo *
4407 getNamespaces(Archive *fout, int *numNamespaces)
4408 {
4409  DumpOptions *dopt = fout->dopt;
4410  PGresult *res;
4411  int ntups;
4412  int i;
4413  PQExpBuffer query;
4414  NamespaceInfo *nsinfo;
4415  int i_tableoid;
4416  int i_oid;
4417  int i_nspname;
4418  int i_rolname;
4419  int i_nspacl;
4420  int i_rnspacl;
4421  int i_initnspacl;
4422  int i_initrnspacl;
4423 
4424  query = createPQExpBuffer();
4425 
4426  /*
4427  * we fetch all namespaces including system ones, so that every object we
4428  * read in can be linked to a containing namespace.
4429  */
4430  if (fout->remoteVersion >= 90600)
4431  {
4432  PQExpBuffer acl_subquery = createPQExpBuffer();
4433  PQExpBuffer racl_subquery = createPQExpBuffer();
4434  PQExpBuffer init_acl_subquery = createPQExpBuffer();
4435  PQExpBuffer init_racl_subquery = createPQExpBuffer();
4436 
4437  buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
4438  init_racl_subquery, "n.nspacl", "n.nspowner", "'n'",
4439  dopt->binary_upgrade);
4440 
4441  appendPQExpBuffer(query, "SELECT n.tableoid, n.oid, n.nspname, "
4442  "(%s nspowner) AS rolname, "
4443  "%s as nspacl, "
4444  "%s as rnspacl, "
4445  "%s as initnspacl, "
4446  "%s as initrnspacl "
4447  "FROM pg_namespace n "
4448  "LEFT JOIN pg_init_privs pip "
4449  "ON (n.oid = pip.objoid "
4450  "AND pip.classoid = 'pg_namespace'::regclass "
4451  "AND pip.objsubid = 0",
4452  username_subquery,
4453  acl_subquery->data,
4454  racl_subquery->data,
4455  init_acl_subquery->data,
4456  init_racl_subquery->data);
4457 
4458  appendPQExpBuffer(query, ") ");
4459 
4460  destroyPQExpBuffer(acl_subquery);
4461  destroyPQExpBuffer(racl_subquery);
4462  destroyPQExpBuffer(init_acl_subquery);
4463  destroyPQExpBuffer(init_racl_subquery);
4464  }
4465  else
4466  appendPQExpBuffer(query, "SELECT tableoid, oid, nspname, "
4467  "(%s nspowner) AS rolname, "
4468  "nspacl, NULL as rnspacl, "
4469  "NULL AS initnspacl, NULL as initrnspacl "
4470  "FROM pg_namespace",
4471  username_subquery);
4472 
4473  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4474 
4475  ntups = PQntuples(res);
4476 
4477  nsinfo = (NamespaceInfo *) pg_malloc(ntups * sizeof(NamespaceInfo));
4478 
4479  i_tableoid = PQfnumber(res, "tableoid");
4480  i_oid = PQfnumber(res, "oid");
4481  i_nspname = PQfnumber(res, "nspname");
4482  i_rolname = PQfnumber(res, "rolname");
4483  i_nspacl = PQfnumber(res, "nspacl");
4484  i_rnspacl = PQfnumber(res, "rnspacl");
4485  i_initnspacl = PQfnumber(res, "initnspacl");
4486  i_initrnspacl = PQfnumber(res, "initrnspacl");
4487 
4488  for (i = 0; i < ntups; i++)
4489  {
4490  nsinfo[i].dobj.objType = DO_NAMESPACE;
4491  nsinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4492  nsinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4493  AssignDumpId(&nsinfo[i].dobj);
4494  nsinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_nspname));
4495  nsinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4496  nsinfo[i].nspacl = pg_strdup(PQgetvalue(res, i, i_nspacl));
4497  nsinfo[i].rnspacl = pg_strdup(PQgetvalue(res, i, i_rnspacl));
4498  nsinfo[i].initnspacl = pg_strdup(PQgetvalue(res, i, i_initnspacl));
4499  nsinfo[i].initrnspacl = pg_strdup(PQgetvalue(res, i, i_initrnspacl));
4500 
4501  /* Decide whether to dump this namespace */
4502  selectDumpableNamespace(&nsinfo[i], fout);
4503 
4504  /*
4505  * Do not try to dump ACL if the ACL is empty or the default.
4506  *
4507  * This is useful because, for some schemas/objects, the only
4508  * component we are going to try to dump is the ACL; if we can
4509  * remove that, then 'dump' goes to zero/false and we don't consider
4510  * this object for dumping at all later on.
4511  */
4512  if (PQgetisnull(res, i, i_nspacl) && PQgetisnull(res, i, i_rnspacl) &&
4513  PQgetisnull(res, i, i_initnspacl) &&
4514  PQgetisnull(res, i, i_initrnspacl))
4515  nsinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4516 
4517  if (strlen(nsinfo[i].rolname) == 0)
4518  write_msg(NULL, "WARNING: owner of schema \"%s\" appears to be invalid\n",
4519  nsinfo[i].dobj.name);
4520  }
4521 
4522  PQclear(res);
4523  destroyPQExpBuffer(query);
4524 
4525  *numNamespaces = ntups;
4526 
4527  return nsinfo;
4528 }
4529 
4530 /*
4531  * findNamespace:
4532  * given a namespace OID, look up the info read by getNamespaces
4533  */
4534 static NamespaceInfo *
4535 findNamespace(Archive *fout, Oid nsoid)
4536 {
4537  NamespaceInfo *nsinfo;
4538 
4539  nsinfo = findNamespaceByOid(nsoid);
4540  if (nsinfo == NULL)
4541  exit_horribly(NULL, "schema with OID %u does not exist\n", nsoid);
4542  return nsinfo;
4543 }
4544 
4545 /*
4546  * getExtensions:
4547  * read all extensions in the system catalogs and return them in the
4548  * ExtensionInfo* structure
4549  *
4550  * numExtensions is set to the number of extensions read in
4551  */
4552 ExtensionInfo *
4553 getExtensions(Archive *fout, int *numExtensions)
4554 {
4555  DumpOptions *dopt = fout->dopt;
4556  PGresult *res;
4557  int ntups;
4558  int i;
4559  PQExpBuffer query;
4560  ExtensionInfo *extinfo;
4561  int i_tableoid;
4562  int i_oid;
4563  int i_extname;
4564  int i_nspname;
4565  int i_extrelocatable;
4566  int i_extversion;
4567  int i_extconfig;
4568  int i_extcondition;
4569 
4570  /*
4571  * Before 9.1, there are no extensions.
4572  */
4573  if (fout->remoteVersion < 90100)
4574  {
4575  *numExtensions = 0;
4576  return NULL;
4577  }
4578 
4579  query = createPQExpBuffer();
4580 
4581  appendPQExpBufferStr(query, "SELECT x.tableoid, x.oid, "
4582  "x.extname, n.nspname, x.extrelocatable, x.extversion, x.extconfig, x.extcondition "
4583  "FROM pg_extension x "
4584  "JOIN pg_namespace n ON n.oid = x.extnamespace");
4585 
4586  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4587 
4588  ntups = PQntuples(res);
4589 
4590  extinfo = (ExtensionInfo *) pg_malloc(ntups * sizeof(ExtensionInfo));
4591 
4592  i_tableoid = PQfnumber(res, "tableoid");
4593  i_oid = PQfnumber(res, "oid");
4594  i_extname = PQfnumber(res, "extname");
4595  i_nspname = PQfnumber(res, "nspname");
4596  i_extrelocatable = PQfnumber(res, "extrelocatable");
4597  i_extversion = PQfnumber(res, "extversion");
4598  i_extconfig = PQfnumber(res, "extconfig");
4599  i_extcondition = PQfnumber(res, "extcondition");
4600 
4601  for (i = 0; i < ntups; i++)
4602  {
4603  extinfo[i].dobj.objType = DO_EXTENSION;
4604  extinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4605  extinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4606  AssignDumpId(&extinfo[i].dobj);
4607  extinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_extname));
4608  extinfo[i].namespace = pg_strdup(PQgetvalue(res, i, i_nspname));
4609  extinfo[i].relocatable = *(PQgetvalue(res, i, i_extrelocatable)) == 't';
4610  extinfo[i].extversion = pg_strdup(PQgetvalue(res, i, i_extversion));
4611  extinfo[i].extconfig = pg_strdup(PQgetvalue(res, i, i_extconfig));
4612  extinfo[i].extcondition = pg_strdup(PQgetvalue(res, i, i_extcondition));
4613 
4614  /* Decide whether we want to dump it */
4615  selectDumpableExtension(&(extinfo[i]), dopt);
4616  }
4617 
4618  PQclear(res);
4619  destroyPQExpBuffer(query);
4620 
4621  *numExtensions = ntups;
4622 
4623  return extinfo;
4624 }
4625 
4626 /*
4627  * getTypes:
4628  * read all types in the system catalogs and return them in the
4629  * TypeInfo* structure
4630  *
4631  * numTypes is set to the number of types read in
4632  *
4633  * NB: this must run after getFuncs() because we assume we can do
4634  * findFuncByOid().
4635  */
4636 TypeInfo *
4637 getTypes(Archive *fout, int *numTypes)
4638 {
4639  DumpOptions *dopt = fout->dopt;
4640  PGresult *res;
4641  int ntups;
4642  int i;
4643  PQExpBuffer query = createPQExpBuffer();
4644  TypeInfo *tyinfo;
4645  ShellTypeInfo *stinfo;
4646  int i_tableoid;
4647  int i_oid;
4648  int i_typname;
4649  int i_typnamespace;
4650  int i_typacl;
4651  int i_rtypacl;
4652  int i_inittypacl;
4653  int i_initrtypacl;
4654  int i_rolname;
4655  int i_typelem;
4656  int i_typrelid;
4657  int i_typrelkind;
4658  int i_typtype;
4659  int i_typisdefined;
4660  int i_isarray;
4661 
4662  /*
4663  * we include even the built-in types because those may be used as array
4664  * elements by user-defined types
4665  *
4666  * we filter out the built-in types when we dump out the types
4667  *
4668  * same approach for undefined (shell) types and array types
4669  *
4670  * Note: as of 8.3 we can reliably detect whether a type is an
4671  * auto-generated array type by checking the element type's typarray.
4672  * (Before that the test is capable of generating false positives.) We
4673  * still check for name beginning with '_', though, so as to avoid the
4674  * cost of the subselect probe for all standard types. This would have to
4675  * be revisited if the backend ever allows renaming of array types.
4676  */
4677 
4678  if (fout->remoteVersion >= 90600)
4679  {
4680  PQExpBuffer acl_subquery = createPQExpBuffer();
4681  PQExpBuffer racl_subquery = createPQExpBuffer();
4682  PQExpBuffer initacl_subquery = createPQExpBuffer();
4683  PQExpBuffer initracl_subquery = createPQExpBuffer();
4684 
4685  buildACLQueries(acl_subquery, racl_subquery, initacl_subquery,
4686  initracl_subquery, "t.typacl", "t.typowner", "'T'",
4687  dopt->binary_upgrade);
4688 
4689  appendPQExpBuffer(query, "SELECT t.tableoid, t.oid, t.typname, "
4690  "t.typnamespace, "
4691  "%s AS typacl, "
4692  "%s AS rtypacl, "
4693  "%s AS inittypacl, "
4694  "%s AS initrtypacl, "
4695  "(%s t.typowner) AS rolname, "
4696  "t.typelem, t.typrelid, "
4697  "CASE WHEN t.typrelid = 0 THEN ' '::\"char\" "
4698  "ELSE (SELECT relkind FROM pg_class WHERE oid = t.typrelid) END AS typrelkind, "
4699  "t.typtype, t.typisdefined, "
4700  "t.typname[0] = '_' AND t.typelem != 0 AND "
4701  "(SELECT typarray FROM pg_type te WHERE oid = t.typelem) = t.oid AS isarray "
4702  "FROM pg_type t "
4703  "LEFT JOIN pg_init_privs pip ON "
4704  "(t.oid = pip.objoid "
4705  "AND pip.classoid = 'pg_type'::regclass "
4706  "AND pip.objsubid = 0) ",
4707  acl_subquery->data,
4708  racl_subquery->data,
4709  initacl_subquery->data,
4710  initracl_subquery->data,
4711  username_subquery);
4712 
4713  destroyPQExpBuffer(acl_subquery);
4714  destroyPQExpBuffer(racl_subquery);
4715  destroyPQExpBuffer(initacl_subquery);
4716  destroyPQExpBuffer(initracl_subquery);
4717  }
4718  else if (fout->remoteVersion >= 90200)
4719  {
4720  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4721  "typnamespace, typacl, NULL as rtypacl, "
4722  "NULL AS inittypacl, NULL AS initrtypacl, "
4723  "(%s typowner) AS rolname, "
4724  "typelem, typrelid, "
4725  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4726  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4727  "typtype, typisdefined, "
4728  "typname[0] = '_' AND typelem != 0 AND "
4729  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
4730  "FROM pg_type",
4731  username_subquery);
4732  }
4733  else if (fout->remoteVersion >= 80300)
4734  {
4735  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4736  "typnamespace, NULL AS typacl, NULL as rtypacl, "
4737  "NULL AS inittypacl, NULL AS initrtypacl, "
4738  "(%s typowner) AS rolname, "
4739  "typelem, typrelid, "
4740  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4741  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4742  "typtype, typisdefined, "
4743  "typname[0] = '_' AND typelem != 0 AND "
4744  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
4745  "FROM pg_type",
4746  username_subquery);
4747  }
4748  else
4749  {
4750  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4751  "typnamespace, NULL AS typacl, NULL as rtypacl, "
4752  "NULL AS inittypacl, NULL AS initrtypacl, "
4753  "(%s typowner) AS rolname, "
4754  "typelem, typrelid, "
4755  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4756  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4757  "typtype, typisdefined, "
4758  "typname[0] = '_' AND typelem != 0 AS isarray "
4759  "FROM pg_type",
4760  username_subquery);
4761  }
4762 
4763  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4764 
4765  ntups = PQntuples(res);
4766 
4767  tyinfo = (TypeInfo *) pg_malloc(ntups * sizeof(TypeInfo));
4768 
4769  i_tableoid = PQfnumber(res, "tableoid");
4770  i_oid = PQfnumber(res, "oid");
4771  i_typname = PQfnumber(res, "typname");
4772  i_typnamespace = PQfnumber(res, "typnamespace");
4773  i_typacl = PQfnumber(res, "typacl");
4774  i_rtypacl = PQfnumber(res, "rtypacl");
4775  i_inittypacl = PQfnumber(res, "inittypacl");
4776  i_initrtypacl = PQfnumber(res, "initrtypacl");
4777  i_rolname = PQfnumber(res, "rolname");
4778  i_typelem = PQfnumber(res, "typelem");
4779  i_typrelid = PQfnumber(res, "typrelid");
4780  i_typrelkind = PQfnumber(res, "typrelkind");
4781  i_typtype = PQfnumber(res, "typtype");
4782  i_typisdefined = PQfnumber(res, "typisdefined");
4783  i_isarray = PQfnumber(res, "isarray");
4784 
4785  for (i = 0; i < ntups; i++)
4786  {
4787  tyinfo[i].dobj.objType = DO_TYPE;
4788  tyinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4789  tyinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4790  AssignDumpId(&tyinfo[i].dobj);
4791  tyinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_typname));
4792  tyinfo[i].dobj.namespace =
4793  findNamespace(fout,
4794  atooid(PQgetvalue(res, i, i_typnamespace)));
4795  tyinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4796  tyinfo[i].typacl = pg_strdup(PQgetvalue(res, i, i_typacl));
4797  tyinfo[i].rtypacl = pg_strdup(PQgetvalue(res, i, i_rtypacl));
4798  tyinfo[i].inittypacl = pg_strdup(PQgetvalue(res, i, i_inittypacl));
4799  tyinfo[i].initrtypacl = pg_strdup(PQgetvalue(res, i, i_initrtypacl));
4800  tyinfo[i].typelem = atooid(PQgetvalue(res, i, i_typelem));
4801  tyinfo[i].typrelid = atooid(PQgetvalue(res, i, i_typrelid));
4802  tyinfo[i].typrelkind = *PQgetvalue(res, i, i_typrelkind);
4803  tyinfo[i].typtype = *PQgetvalue(res, i, i_typtype);
4804  tyinfo[i].shellType = NULL;
4805 
4806  if (strcmp(PQgetvalue(res, i, i_typisdefined), "t") == 0)
4807  tyinfo[i].isDefined = true;
4808  else
4809  tyinfo[i].isDefined = false;
4810 
4811  if (strcmp(PQgetvalue(res, i, i_isarray), "t") == 0)
4812  tyinfo[i].isArray = true;
4813  else
4814  tyinfo[i].isArray = false;
4815 
4816  /* Decide whether we want to dump it */
4817  selectDumpableType(&tyinfo[i], fout);
4818 
4819  /* Do not try to dump ACL if no ACL exists. */
4820  if (PQgetisnull(res, i, i_typacl) && PQgetisnull(res, i, i_rtypacl) &&
4821  PQgetisnull(res, i, i_inittypacl) &&
4822  PQgetisnull(res, i, i_initrtypacl))
4823  tyinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4824 
4825  /*
4826  * If it's a domain, fetch info about its constraints, if any
4827  */
4828  tyinfo[i].nDomChecks = 0;
4829  tyinfo[i].domChecks = NULL;
4830  if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
4831  tyinfo[i].typtype == TYPTYPE_DOMAIN)
4832  getDomainConstraints(fout, &(tyinfo[i]));
4833 
4834  /*
4835  * If it's a base type, make a DumpableObject representing a shell
4836  * definition of the type. We will need to dump that ahead of the I/O
4837  * functions for the type. Similarly, range types need a shell
4838  * definition in case they have a canonicalize function.
4839  *
4840  * Note: the shell type doesn't have a catId. You might think it
4841  * should copy the base type's catId, but then it might capture the
4842  * pg_depend entries for the type, which we don't want.
4843  */
4844  if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
4845  (tyinfo[i].typtype == TYPTYPE_BASE ||
4846  tyinfo[i].typtype == TYPTYPE_RANGE))
4847  {
4848  stinfo = (ShellTypeInfo *) pg_malloc(sizeof(ShellTypeInfo));
4849  stinfo->dobj.objType = DO_SHELL_TYPE;
4850  stinfo->dobj.catId = nilCatalogId;
4851  AssignDumpId(&stinfo->dobj);
4852  stinfo->dobj.name = pg_strdup(tyinfo[i].dobj.name);
4853  stinfo->dobj.namespace = tyinfo[i].dobj.namespace;
4854  stinfo->baseType = &(tyinfo[i]);
4855  tyinfo[i].shellType = stinfo;
4856 
4857  /*
4858  * Initially mark the shell type as not to be dumped. We'll only
4859  * dump it if the I/O or canonicalize functions need to be dumped;
4860  * this is taken care of while sorting dependencies.
4861  */
4862  stinfo->dobj.dump = DUMP_COMPONENT_NONE;
4863  }
4864 
4865  if (strlen(tyinfo[i].rolname) == 0)
4866  write_msg(NULL, "WARNING: owner of data type \"%s\" appears to be invalid\n",
4867  tyinfo[i].dobj.name);
4868  }
4869 
4870  *numTypes = ntups;
4871 
4872  PQclear(res);
4873 
4874  destroyPQExpBuffer(query);
4875 
4876  return tyinfo;
4877 }
4878 
4879 /*
4880  * getOperators:
4881  * read all operators in the system catalogs and return them in the
4882  * OprInfo* structure
4883  *
4884  * numOprs is set to the number of operators read in
4885  */
4886 OprInfo *
4887 getOperators(Archive *fout, int *numOprs)
4888 {
4889  PGresult *res;
4890  int ntups;
4891  int i;
4892  PQExpBuffer query = createPQExpBuffer();
4893  OprInfo *oprinfo;
4894  int i_tableoid;
4895  int i_oid;
4896  int i_oprname;
4897  int i_oprnamespace;
4898  int i_rolname;
4899  int i_oprkind;
4900  int i_oprcode;
4901 
4902  /*
4903  * find all operators, including builtin operators; we filter out
4904  * system-defined operators at dump-out time.
4905  */
4906 
4907  appendPQExpBuffer(query, "SELECT tableoid, oid, oprname, "
4908  "oprnamespace, "
4909  "(%s oprowner) AS rolname, "
4910  "oprkind, "
4911  "oprcode::oid AS oprcode "
4912  "FROM pg_operator",
4913  username_subquery);
4914 
4915  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4916 
4917  ntups = PQntuples(res);
4918  *numOprs = ntups;
4919 
4920  oprinfo = (OprInfo *) pg_malloc(ntups * sizeof(OprInfo));
4921 
4922  i_tableoid = PQfnumber(res, "tableoid");
4923  i_oid = PQfnumber(res, "oid");
4924  i_oprname = PQfnumber(res, "oprname");
4925  i_oprnamespace = PQfnumber(res, "oprnamespace");
4926  i_rolname = PQfnumber(res, "rolname");
4927  i_oprkind = PQfnumber(res, "oprkind");
4928  i_oprcode = PQfnumber(res, "oprcode");
4929 
4930  for (i = 0; i < ntups; i++)
4931  {
4932  oprinfo[i].dobj.objType = DO_OPERATOR;
4933  oprinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4934  oprinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4935  AssignDumpId(&oprinfo[i].dobj);
4936  oprinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oprname));
4937  oprinfo[i].dobj.namespace =
4938  findNamespace(fout,
4939  atooid(PQgetvalue(res, i, i_oprnamespace)));
4940  oprinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4941  oprinfo[i].oprkind = (PQgetvalue(res, i, i_oprkind))[0];
4942  oprinfo[i].oprcode = atooid(PQgetvalue(res, i, i_oprcode));
4943 
4944  /* Decide whether we want to dump it */
4945  selectDumpableObject(&(oprinfo[i].dobj), fout);
4946 
4947  /* Operators do not currently have ACLs. */
4948  oprinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4949 
4950  if (strlen(oprinfo[i].rolname) == 0)
4951  write_msg(NULL, "WARNING: owner of operator \"%s\" appears to be invalid\n",
4952  oprinfo[i].dobj.name);
4953  }
4954 
4955  PQclear(res);
4956 
4957  destroyPQExpBuffer(query);
4958 
4959  return oprinfo;
4960 }
4961 
4962 /*
4963  * getCollations:
4964  * read all collations in the system catalogs and return them in the
4965  * CollInfo* structure
4966  *
4967  * numCollations is set to the number of collations read in
4968  */
4969 CollInfo *
4970 getCollations(Archive *fout, int *numCollations)
4971 {
4972  PGresult *res;
4973  int ntups;
4974  int i;
4975  PQExpBuffer query;
4976  CollInfo *collinfo;
4977  int i_tableoid;
4978  int i_oid;
4979  int i_collname;
4980  int i_collnamespace;
4981  int i_rolname;
4982 
4983  /* Collations didn't exist pre-9.1 */
4984  if (fout->remoteVersion < 90100)
4985  {
4986  *numCollations = 0;
4987  return NULL;
4988  }
4989 
4990  query = createPQExpBuffer();
4991 
4992  /*
4993  * find all collations, including builtin collations; we filter out
4994  * system-defined collations at dump-out time.
4995  */
4996 
4997  appendPQExpBuffer(query, "SELECT tableoid, oid, collname, "
4998  "collnamespace, "
4999  "(%s collowner) AS rolname "
5000  "FROM pg_collation",
5001  username_subquery);
5002 
5003  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5004 
5005  ntups = PQntuples(res);
5006  *numCollations = ntups;
5007 
5008  collinfo = (CollInfo *) pg_malloc(ntups * sizeof(CollInfo));
5009 
5010  i_tableoid = PQfnumber(res, "tableoid");
5011  i_oid = PQfnumber(res, "oid");
5012  i_collname = PQfnumber(res, "collname");
5013  i_collnamespace = PQfnumber(res, "collnamespace");
5014  i_rolname = PQfnumber(res, "rolname");
5015 
5016  for (i = 0; i < ntups; i++)
5017  {
5018  collinfo[i].dobj.objType = DO_COLLATION;
5019  collinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5020  collinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5021  AssignDumpId(&collinfo[i].dobj);
5022  collinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_collname));
5023  collinfo[i].dobj.namespace =
5024  findNamespace(fout,
5025  atooid(PQgetvalue(res, i, i_collnamespace)));
5026  collinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5027 
5028  /* Decide whether we want to dump it */
5029  selectDumpableObject(&(collinfo[i].dobj), fout);
5030 
5031  /* Collations do not currently have ACLs. */
5032  collinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5033  }
5034 
5035  PQclear(res);
5036 
5037  destroyPQExpBuffer(query);
5038 
5039  return collinfo;
5040 }
5041 
5042 /*
5043  * getConversions:
5044  * read all conversions in the system catalogs and return them in the
5045  * ConvInfo* structure
5046  *
5047  * numConversions is set to the number of conversions read in
5048  */
5049 ConvInfo *
5050 getConversions(Archive *fout, int *numConversions)
5051 {
5052  PGresult *res;
5053  int ntups;
5054  int i;
5055  PQExpBuffer query;
5056  ConvInfo *convinfo;
5057  int i_tableoid;
5058  int i_oid;
5059  int i_conname;
5060  int i_connamespace;
5061  int i_rolname;
5062 
5063  query = createPQExpBuffer();
5064 
5065  /*
5066  * find all conversions, including builtin conversions; we filter out
5067  * system-defined conversions at dump-out time.
5068  */
5069 
5070  appendPQExpBuffer(query, "SELECT tableoid, oid, conname, "
5071  "connamespace, "
5072  "(%s conowner) AS rolname "
5073  "FROM pg_conversion",
5074  username_subquery);
5075 
5076  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5077 
5078  ntups = PQntuples(res);
5079  *numConversions = ntups;
5080 
5081  convinfo = (ConvInfo *) pg_malloc(ntups * sizeof(ConvInfo));
5082 
5083  i_tableoid = PQfnumber(res, "tableoid");
5084  i_oid = PQfnumber(res, "oid");
5085  i_conname = PQfnumber(res, "conname");
5086  i_connamespace = PQfnumber(res, "connamespace");
5087  i_rolname = PQfnumber(res, "rolname");
5088 
5089  for (i = 0; i < ntups; i++)
5090  {
5091  convinfo[i].dobj.objType = DO_CONVERSION;
5092  convinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5093  convinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5094  AssignDumpId(&convinfo[i].dobj);
5095  convinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_conname));
5096  convinfo[i].dobj.namespace =
5097  findNamespace(fout,
5098  atooid(PQgetvalue(res, i, i_connamespace)));
5099  convinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5100 
5101  /* Decide whether we want to dump it */
5102  selectDumpableObject(&(convinfo[i].dobj), fout);
5103 
5104  /* Conversions do not currently have ACLs. */
5105  convinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5106  }
5107 
5108  PQclear(res);
5109 
5110  destroyPQExpBuffer(query);
5111 
5112  return convinfo;
5113 }
5114 
5115 /*
5116  * getAccessMethods:
5117  * read all user-defined access methods in the system catalogs and return
5118  * them in the AccessMethodInfo* structure
5119  *
5120  * numAccessMethods is set to the number of access methods read in
5121  */
5122 AccessMethodInfo *
5123 getAccessMethods(Archive *fout, int *numAccessMethods)
5124 {
5125  PGresult *res;
5126  int ntups;
5127  int i;
5128  PQExpBuffer query;
5129  AccessMethodInfo *aminfo;
5130  int i_tableoid;
5131  int i_oid;
5132  int i_amname;
5133  int i_amhandler;
5134  int i_amtype;
5135 
5136  /* Before 9.6, there are no user-defined access methods */
5137  if (fout->remoteVersion < 90600)
5138  {
5139  *numAccessMethods = 0;
5140  return NULL;
5141  }
5142 
5143  query = createPQExpBuffer();
5144 
5145  /* Select all access methods from pg_am table */
5146  appendPQExpBuffer(query, "SELECT tableoid, oid, amname, amtype, "
5147  "amhandler::pg_catalog.regproc AS amhandler "
5148  "FROM pg_am");
5149 
5150  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5151 
5152  ntups = PQntuples(res);
5153  *numAccessMethods = ntups;
5154 
5155  aminfo = (AccessMethodInfo *) pg_malloc(ntups * sizeof(AccessMethodInfo));
5156 
5157  i_tableoid = PQfnumber(res, "tableoid");
5158  i_oid = PQfnumber(res, "oid");
5159  i_amname = PQfnumber(res, "amname");
5160  i_amhandler = PQfnumber(res, "amhandler");
5161  i_amtype = PQfnumber(res, "amtype");
5162 
5163  for (i = 0; i < ntups; i++)
5164  {
5165  aminfo[i].dobj.objType = DO_ACCESS_METHOD;
5166  aminfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5167  aminfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5168  AssignDumpId(&aminfo[i].dobj);
5169  aminfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_amname));
5170  aminfo[i].dobj.namespace = NULL;
5171  aminfo[i].amhandler = pg_strdup(PQgetvalue(res, i, i_amhandler));
5172  aminfo[i].amtype = *(PQgetvalue(res, i, i_amtype));
5173 
5174  /* Decide whether we want to dump it */
5175  selectDumpableAccessMethod(&(aminfo[i]), fout);
5176 
5177  /* Access methods do not currently have ACLs. */
5178  aminfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5179  }
5180 
5181  PQclear(res);
5182 
5183  destroyPQExpBuffer(query);
5184 
5185  return aminfo;
5186 }
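/*
 * Editor's illustrative sketch -- not part of pg_dump.c.  It shows how a
 * hypothetical caller could rely on getAccessMethods()'s contract: on
 * pre-9.6 servers it returns NULL and sets *numAccessMethods to 0, so the
 * usual "loop over the returned array" code needs no version check of its
 * own.  The 'i' amtype letter follows the pg_am convention for index access
 * methods; countIndexAccessMethods() itself is invented for illustration.
 */
static int
countIndexAccessMethods(Archive *fout)
{
	AccessMethodInfo *aminfo;
	int			numAccessMethods;
	int			nindex = 0;
	int			i;

	aminfo = getAccessMethods(fout, &numAccessMethods);

	/* numAccessMethods is 0 when aminfo is NULL, so this loop is safe */
	for (i = 0; i < numAccessMethods; i++)
	{
		/* amtype 'i' marks an index access method in pg_am */
		if (aminfo[i].amtype == 'i')
			nindex++;
	}

	return nindex;
}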
5187 
5188 
5189 /*
5190  * getOpclasses:
5191  * read all opclasses in the system catalogs and return them in the
5192  * OpclassInfo* structure
5193  *
5194  * numOpclasses is set to the number of opclasses read in
5195  */
5196 OpclassInfo *
5197 getOpclasses(Archive *fout, int *numOpclasses)
5198 {
5199  PGresult *res;
5200  int ntups;
5201  int i;
5202  PQExpBuffer query = createPQExpBuffer();
5203  OpclassInfo *opcinfo;
5204  int i_tableoid;
5205  int i_oid;
5206  int i_opcname;
5207  int i_opcnamespace;
5208  int i_rolname;
5209 
5210  /*
5211  * find all opclasses, including builtin opclasses; we filter out
5212  * system-defined opclasses at dump-out time.
5213  */
5214 
5215  appendPQExpBuffer(query, "SELECT tableoid, oid, opcname, "
5216  "opcnamespace, "
5217  "(%s opcowner) AS rolname "
5218  "FROM pg_opclass",
5219  username_subquery);
5220 
5221  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5222 
5223  ntups = PQntuples(res);
5224  *numOpclasses = ntups;
5225 
5226  opcinfo = (OpclassInfo *) pg_malloc(ntups * sizeof(OpclassInfo));
5227 
5228  i_tableoid = PQfnumber(res, "tableoid");
5229  i_oid = PQfnumber(res, "oid");
5230  i_opcname = PQfnumber(res, "opcname");
5231  i_opcnamespace = PQfnumber(res, "opcnamespace");
5232  i_rolname = PQfnumber(res, "rolname");
5233 
5234  for (i = 0; i < ntups; i++)
5235  {
5236  opcinfo[i].dobj.objType = DO_OPCLASS;
5237  opcinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5238  opcinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5239  AssignDumpId(&opcinfo[i].dobj);
5240  opcinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opcname));
5241  opcinfo[i].dobj.namespace =
5242  findNamespace(fout,
5243  atooid(PQgetvalue(res, i, i_opcnamespace)));
5244  opcinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5245 
5246  /* Decide whether we want to dump it */
5247  selectDumpableObject(&(opcinfo[i].dobj), fout);
5248 
5249  /* Op Classes do not currently have ACLs. */
5250  opcinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5251 
5252  if (strlen(opcinfo[i].rolname) == 0)
5253  write_msg(NULL, "WARNING: owner of operator class \"%s\" appears to be invalid\n",
5254  opcinfo[i].dobj.name);
5255  }
5256 
5257  PQclear(res);
5258 
5259  destroyPQExpBuffer(query);
5260 
5261  return opcinfo;
5262 }
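/*
 * Editor's illustrative sketch -- not part of pg_dump.c.  The recurring
 * "dobj.dump &= ~DUMP_COMPONENT_ACL" lines in these getters rely on dump
 * being a bitmask of DUMP_COMPONENT_* flags (defined in pg_dump.h): the
 * object is still dumped, but the ACL component is masked off because these
 * catalogs carry no ACLs.  A hypothetical helper making that intent explicit:
 */
static inline void
markObjectWithoutACL(DumpableObject *dobj)
{
	/* keep whatever components selectDumpableObject() chose, minus ACLs */
	dobj->dump &= ~DUMP_COMPONENT_ACL;
}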
5263 
5264 /*
5265  * getOpfamilies:
5266  * read all opfamilies in the system catalogs and return them in the
5267  * OpfamilyInfo* structure
5268  *
5269  * numOpfamilies is set to the number of opfamilies read in
5270  */
5271 OpfamilyInfo *
5272 getOpfamilies(Archive *fout, int *numOpfamilies)
5273 {
5274  PGresult *res;
5275  int ntups;
5276  int i;
5277  PQExpBuffer query;
5278  OpfamilyInfo *opfinfo;
5279  int i_tableoid;
5280  int i_oid;
5281  int i_opfname;
5282  int i_opfnamespace;
5283  int i_rolname;
5284 
5285  /* Before 8.3, there is no separate concept of opfamilies */
5286  if (fout->remoteVersion < 80300)
5287  {
5288  *numOpfamilies = 0;
5289  return NULL;
5290  }
5291 
5292  query = createPQExpBuffer();
5293 
5294  /*
5295  * find all opfamilies, including builtin opfamilies; we filter out
5296  * system-defined opfamilies at dump-out time.
5297  */
5298 
5299  appendPQExpBuffer(query, "SELECT tableoid, oid, opfname, "
5300  "opfnamespace, "
5301  "(%s opfowner) AS rolname "
5302  "FROM pg_opfamily",
5303  username_subquery);
5304 
5305  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5306 
5307  ntups = PQntuples(res);
5308  *numOpfamilies = ntups;
5309 
5310  opfinfo = (OpfamilyInfo *) pg_malloc(ntups * sizeof(OpfamilyInfo));
5311 
5312  i_tableoid = PQfnumber(res, "tableoid");
5313  i_oid = PQfnumber(res, "oid");
5314  i_opfname = PQfnumber(res, "opfname");
5315  i_opfnamespace = PQfnumber(res, "opfnamespace");