pg_dump.c
1 /*-------------------------------------------------------------------------
2  *
3  * pg_dump.c
4  * pg_dump is a utility for dumping out a postgres database
5  * into a script file.
6  *
7  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994, Regents of the University of California
9  *
10  * pg_dump will read the system catalogs in a database and dump out a
11  * script that reproduces the schema in terms of SQL that is understood
12  * by PostgreSQL
13  *
14  * Note that pg_dump runs in a transaction-snapshot mode transaction,
15  * so it sees a consistent snapshot of the database including system
16  * catalogs. However, it relies in part on various specialized backend
17  * functions like pg_get_indexdef(), and those things tend to look at
18  * the currently committed state. So it is possible to get a 'cache
19  * lookup failed' error if someone performs DDL changes while a dump is
20  * happening. The window for this sort of thing is from the acquisition
21  * of the transaction snapshot to getSchemaData() (when pg_dump acquires
22  * AccessShareLock on every table it intends to dump). It isn't very large,
23  * but it can happen.
24  *
25  * http://archives.postgresql.org/pgsql-bugs/2010-02/msg00187.php
26  *
27  * IDENTIFICATION
28  * src/bin/pg_dump/pg_dump.c
29  *
30  *-------------------------------------------------------------------------
31  */
32 #include "postgres_fe.h"
33 
34 #include <unistd.h>
35 #include <ctype.h>
36 #ifdef HAVE_TERMIOS_H
37 #include <termios.h>
38 #endif
39 
40 #include "getopt_long.h"
41 
42 #include "access/attnum.h"
43 #include "access/sysattr.h"
44 #include "access/transam.h"
45 #include "catalog/pg_aggregate.h"
46 #include "catalog/pg_am.h"
47 #include "catalog/pg_attribute.h"
48 #include "catalog/pg_cast.h"
49 #include "catalog/pg_class.h"
50 #include "catalog/pg_default_acl.h"
51 #include "catalog/pg_largeobject.h"
52 #include "catalog/pg_largeobject_metadata.h"
53 #include "catalog/pg_proc.h"
54 #include "catalog/pg_trigger.h"
55 #include "catalog/pg_type.h"
56 #include "libpq/libpq-fs.h"
57 
58 #include "dumputils.h"
59 #include "parallel.h"
60 #include "pg_backup_db.h"
61 #include "pg_backup_utils.h"
62 #include "pg_dump.h"
63 #include "fe_utils/string_utils.h"
64 
65 
66 typedef struct
67 {
68  const char *descr; /* comment for an object */
69  Oid classoid; /* object class (catalog OID) */
70  Oid objoid; /* object OID */
71  int objsubid; /* subobject (table column #) */
72 } CommentItem;
73 
74 typedef struct
75 {
76  const char *provider; /* label provider of this security label */
77  const char *label; /* security label for an object */
78  Oid classoid; /* object class (catalog OID) */
79  Oid objoid; /* object OID */
80  int objsubid; /* subobject (table column #) */
81 } SecLabelItem;
82 
83 typedef enum OidOptions
84 {
85  zeroAsOpaque = 1,
86  zeroAsAny = 2,
87  zeroAsStar = 4,
88  zeroAsNone = 8
89 } OidOptions;
90 
91 /* global decls */
92 bool g_verbose; /* User wants verbose narration of our
93  * activities. */
94 static bool dosync = true; /* Issue fsync() to make dump durable on disk. */
95 
96 /* subquery used to convert user ID (eg, datdba) to user name */
97 static const char *username_subquery;
98 
99 /*
100  * For 8.0 and earlier servers, this is pulled from pg_database; for 8.1
101  * and later we use FirstNormalObjectId - 1.
102  */
103 static Oid g_last_builtin_oid; /* value of the last builtin oid */
104 
105 /* The specified names/patterns should match at least one entity */
106 static int strict_names = 0;
107 
108 /*
109  * Object inclusion/exclusion lists
110  *
111  * The string lists record the patterns given by command-line switches,
112  * which we then convert to lists of OIDs of matching objects.
113  */
114 static SimpleStringList schema_include_patterns = {NULL, NULL};
115 static SimpleOidList schema_include_oids = {NULL, NULL};
116 static SimpleStringList schema_exclude_patterns = {NULL, NULL};
117 static SimpleOidList schema_exclude_oids = {NULL, NULL};
118 
119 static SimpleStringList table_include_patterns = {NULL, NULL};
120 static SimpleOidList table_include_oids = {NULL, NULL};
121 static SimpleStringList table_exclude_patterns = {NULL, NULL};
122 static SimpleOidList table_exclude_oids = {NULL, NULL};
123 static SimpleStringList tabledata_exclude_patterns = {NULL, NULL};
124 static SimpleOidList tabledata_exclude_oids = {NULL, NULL};
125 
126 
127 char g_opaque_type[10]; /* name for the opaque type */
128 
129 /* placeholders for the delimiters for comments */
130 char g_comment_start[10];
131 char g_comment_end[10];
132 
133 static const CatalogId nilCatalogId = {0, 0};
134 
135 static void help(const char *progname);
136 static void setup_connection(Archive *AH,
137  const char *dumpencoding, const char *dumpsnapshot,
138  char *use_role);
139 static ArchiveFormat parseArchiveFormat(const char *format, ArchiveMode *mode);
140 static void expand_schema_name_patterns(Archive *fout,
141  SimpleStringList *patterns,
142  SimpleOidList *oids,
143  bool strict_names);
144 static void expand_table_name_patterns(Archive *fout,
145  SimpleStringList *patterns,
146  SimpleOidList *oids,
147  bool strict_names);
148 static NamespaceInfo *findNamespace(Archive *fout, Oid nsoid);
149 static void dumpTableData(Archive *fout, TableDataInfo *tdinfo);
150 static void refreshMatViewData(Archive *fout, TableDataInfo *tdinfo);
151 static void guessConstraintInheritance(TableInfo *tblinfo, int numTables);
152 static void dumpComment(Archive *fout, const char *target,
153  const char *namespace, const char *owner,
154  CatalogId catalogId, int subid, DumpId dumpId);
155 static int findComments(Archive *fout, Oid classoid, Oid objoid,
156  CommentItem **items);
157 static int collectComments(Archive *fout, CommentItem **items);
158 static void dumpSecLabel(Archive *fout, const char *target,
159  const char *namespace, const char *owner,
160  CatalogId catalogId, int subid, DumpId dumpId);
161 static int findSecLabels(Archive *fout, Oid classoid, Oid objoid,
162  SecLabelItem **items);
163 static int collectSecLabels(Archive *fout, SecLabelItem **items);
164 static void dumpDumpableObject(Archive *fout, DumpableObject *dobj);
165 static void dumpNamespace(Archive *fout, NamespaceInfo *nspinfo);
166 static void dumpExtension(Archive *fout, ExtensionInfo *extinfo);
167 static void dumpType(Archive *fout, TypeInfo *tyinfo);
168 static void dumpBaseType(Archive *fout, TypeInfo *tyinfo);
169 static void dumpEnumType(Archive *fout, TypeInfo *tyinfo);
170 static void dumpRangeType(Archive *fout, TypeInfo *tyinfo);
171 static void dumpUndefinedType(Archive *fout, TypeInfo *tyinfo);
172 static void dumpDomain(Archive *fout, TypeInfo *tyinfo);
173 static void dumpCompositeType(Archive *fout, TypeInfo *tyinfo);
174 static void dumpCompositeTypeColComments(Archive *fout, TypeInfo *tyinfo);
175 static void dumpShellType(Archive *fout, ShellTypeInfo *stinfo);
176 static void dumpProcLang(Archive *fout, ProcLangInfo *plang);
177 static void dumpFunc(Archive *fout, FuncInfo *finfo);
178 static void dumpCast(Archive *fout, CastInfo *cast);
179 static void dumpTransform(Archive *fout, TransformInfo *transform);
180 static void dumpOpr(Archive *fout, OprInfo *oprinfo);
181 static void dumpAccessMethod(Archive *fout, AccessMethodInfo *oprinfo);
182 static void dumpOpclass(Archive *fout, OpclassInfo *opcinfo);
183 static void dumpOpfamily(Archive *fout, OpfamilyInfo *opfinfo);
184 static void dumpCollation(Archive *fout, CollInfo *collinfo);
185 static void dumpConversion(Archive *fout, ConvInfo *convinfo);
186 static void dumpRule(Archive *fout, RuleInfo *rinfo);
187 static void dumpAgg(Archive *fout, AggInfo *agginfo);
188 static void dumpTrigger(Archive *fout, TriggerInfo *tginfo);
189 static void dumpEventTrigger(Archive *fout, EventTriggerInfo *evtinfo);
190 static void dumpTable(Archive *fout, TableInfo *tbinfo);
191 static void dumpTableSchema(Archive *fout, TableInfo *tbinfo);
192 static void dumpAttrDef(Archive *fout, AttrDefInfo *adinfo);
193 static void dumpSequence(Archive *fout, TableInfo *tbinfo);
194 static void dumpSequenceData(Archive *fout, TableDataInfo *tdinfo);
195 static void dumpIndex(Archive *fout, IndxInfo *indxinfo);
196 static void dumpStatisticsExt(Archive *fout, StatsExtInfo *statsextinfo);
197 static void dumpConstraint(Archive *fout, ConstraintInfo *coninfo);
198 static void dumpTableConstraintComment(Archive *fout, ConstraintInfo *coninfo);
199 static void dumpTSParser(Archive *fout, TSParserInfo *prsinfo);
200 static void dumpTSDictionary(Archive *fout, TSDictInfo *dictinfo);
201 static void dumpTSTemplate(Archive *fout, TSTemplateInfo *tmplinfo);
202 static void dumpTSConfig(Archive *fout, TSConfigInfo *cfginfo);
203 static void dumpForeignDataWrapper(Archive *fout, FdwInfo *fdwinfo);
204 static void dumpForeignServer(Archive *fout, ForeignServerInfo *srvinfo);
205 static void dumpUserMappings(Archive *fout,
206  const char *servername, const char *namespace,
207  const char *owner, CatalogId catalogId, DumpId dumpId);
208 static void dumpDefaultACL(Archive *fout, DefaultACLInfo *daclinfo);
209 
210 static void dumpACL(Archive *fout, CatalogId objCatId, DumpId objDumpId,
211  const char *type, const char *name, const char *subname,
212  const char *tag, const char *nspname, const char *owner,
213  const char *acls, const char *racls,
214  const char *initacls, const char *initracls);
215 
216 static void getDependencies(Archive *fout);
217 static void BuildArchiveDependencies(Archive *fout);
218 static void findDumpableDependencies(ArchiveHandle *AH, DumpableObject *dobj,
219  DumpId **dependencies, int *nDeps, int *allocDeps);
220 
221 static DumpableObject *createBoundaryObjects(void);
222 static void addBoundaryDependencies(DumpableObject **dobjs, int numObjs,
223  DumpableObject *boundaryObjs);
224 
225 static void getDomainConstraints(Archive *fout, TypeInfo *tyinfo);
226 static void getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, bool oids, char relkind);
227 static void makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo, bool oids);
228 static void buildMatViewRefreshDependencies(Archive *fout);
229 static void getTableDataFKConstraints(void);
230 static char *format_function_arguments(FuncInfo *finfo, char *funcargs,
231  bool is_agg);
232 static char *format_function_arguments_old(Archive *fout,
233  FuncInfo *finfo, int nallargs,
234  char **allargtypes,
235  char **argmodes,
236  char **argnames);
237 static char *format_function_signature(Archive *fout,
238  FuncInfo *finfo, bool honor_quotes);
239 static char *convertRegProcReference(Archive *fout,
240  const char *proc);
241 static char *convertOperatorReference(Archive *fout, const char *opr);
242 static char *convertTSFunction(Archive *fout, Oid funcOid);
243 static Oid findLastBuiltinOid_V71(Archive *fout, const char *);
244 static void selectSourceSchema(Archive *fout, const char *schemaName);
245 static char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts);
246 static void getBlobs(Archive *fout);
247 static void dumpBlob(Archive *fout, BlobInfo *binfo);
248 static int dumpBlobs(Archive *fout, void *arg);
249 static void dumpPolicy(Archive *fout, PolicyInfo *polinfo);
250 static void dumpPublication(Archive *fout, PublicationInfo *pubinfo);
251 static void dumpPublicationTable(Archive *fout, PublicationRelInfo *pubrinfo);
252 static void dumpSubscription(Archive *fout, SubscriptionInfo *subinfo);
253 static void dumpDatabase(Archive *AH);
254 static void dumpEncoding(Archive *AH);
255 static void dumpStdStrings(Archive *AH);
256 static void binary_upgrade_set_type_oids_by_type_oid(Archive *fout,
257  PQExpBuffer upgrade_buffer,
258  Oid pg_type_oid,
259  bool force_array_type);
260 static bool binary_upgrade_set_type_oids_by_rel_oid(Archive *fout,
261  PQExpBuffer upgrade_buffer, Oid pg_rel_oid);
262 static void binary_upgrade_set_pg_class_oids(Archive *fout,
263  PQExpBuffer upgrade_buffer,
264  Oid pg_class_oid, bool is_index);
265 static void binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
266  DumpableObject *dobj,
267  const char *objlabel);
268 static const char *getAttrName(int attrnum, TableInfo *tblInfo);
269 static const char *fmtCopyColumnList(const TableInfo *ti, PQExpBuffer buffer);
270 static bool nonemptyReloptions(const char *reloptions);
271 static void appendReloptionsArrayAH(PQExpBuffer buffer, const char *reloptions,
272  const char *prefix, Archive *fout);
273 static char *get_synchronized_snapshot(Archive *fout);
274 static void setupDumpWorker(Archive *AHX);
275 static TableInfo *getRootTableInfo(TableInfo *tbinfo);
276 
277 
278 int
279 main(int argc, char **argv)
280 {
281  int c;
282  const char *filename = NULL;
283  const char *format = "p";
284  TableInfo *tblinfo;
285  int numTables;
286  DumpableObject **dobjs;
287  int numObjs;
288  DumpableObject *boundaryObjs;
289  int i;
290  int optindex;
291  RestoreOptions *ropt;
292  Archive *fout; /* the script file */
293  const char *dumpencoding = NULL;
294  const char *dumpsnapshot = NULL;
295  char *use_role = NULL;
296  int numWorkers = 1;
297  trivalue prompt_password = TRI_DEFAULT;
298  int compressLevel = -1;
299  int plainText = 0;
300  ArchiveFormat archiveFormat = archUnknown;
301  ArchiveMode archiveMode;
302 
303  static DumpOptions dopt;
304 
305  static struct option long_options[] = {
306  {"data-only", no_argument, NULL, 'a'},
307  {"blobs", no_argument, NULL, 'b'},
308  {"no-blobs", no_argument, NULL, 'B'},
309  {"clean", no_argument, NULL, 'c'},
310  {"create", no_argument, NULL, 'C'},
311  {"dbname", required_argument, NULL, 'd'},
312  {"file", required_argument, NULL, 'f'},
313  {"format", required_argument, NULL, 'F'},
314  {"host", required_argument, NULL, 'h'},
315  {"jobs", 1, NULL, 'j'},
316  {"no-reconnect", no_argument, NULL, 'R'},
317  {"oids", no_argument, NULL, 'o'},
318  {"no-owner", no_argument, NULL, 'O'},
319  {"port", required_argument, NULL, 'p'},
320  {"schema", required_argument, NULL, 'n'},
321  {"exclude-schema", required_argument, NULL, 'N'},
322  {"schema-only", no_argument, NULL, 's'},
323  {"superuser", required_argument, NULL, 'S'},
324  {"table", required_argument, NULL, 't'},
325  {"exclude-table", required_argument, NULL, 'T'},
326  {"no-password", no_argument, NULL, 'w'},
327  {"password", no_argument, NULL, 'W'},
328  {"username", required_argument, NULL, 'U'},
329  {"verbose", no_argument, NULL, 'v'},
330  {"no-privileges", no_argument, NULL, 'x'},
331  {"no-acl", no_argument, NULL, 'x'},
332  {"compress", required_argument, NULL, 'Z'},
333  {"encoding", required_argument, NULL, 'E'},
334  {"help", no_argument, NULL, '?'},
335  {"version", no_argument, NULL, 'V'},
336 
337  /*
338  * the following options don't have an equivalent short option letter
339  */
340  {"attribute-inserts", no_argument, &dopt.column_inserts, 1},
341  {"binary-upgrade", no_argument, &dopt.binary_upgrade, 1},
342  {"column-inserts", no_argument, &dopt.column_inserts, 1},
343  {"disable-dollar-quoting", no_argument, &dopt.disable_dollar_quoting, 1},
344  {"disable-triggers", no_argument, &dopt.disable_triggers, 1},
345  {"enable-row-security", no_argument, &dopt.enable_row_security, 1},
346  {"exclude-table-data", required_argument, NULL, 4},
347  {"if-exists", no_argument, &dopt.if_exists, 1},
348  {"inserts", no_argument, &dopt.dump_inserts, 1},
349  {"lock-wait-timeout", required_argument, NULL, 2},
350  {"no-tablespaces", no_argument, &dopt.outputNoTablespaces, 1},
351  {"quote-all-identifiers", no_argument, &quote_all_identifiers, 1},
352  {"load-via-partition-root", no_argument, &dopt.load_via_partition_root, 1},
353  {"role", required_argument, NULL, 3},
354  {"section", required_argument, NULL, 5},
355  {"serializable-deferrable", no_argument, &dopt.serializable_deferrable, 1},
356  {"snapshot", required_argument, NULL, 6},
357  {"strict-names", no_argument, &strict_names, 1},
358  {"use-set-session-authorization", no_argument, &dopt.use_setsessauth, 1},
359  {"no-publications", no_argument, &dopt.no_publications, 1},
360  {"no-security-labels", no_argument, &dopt.no_security_labels, 1},
361  {"no-synchronized-snapshots", no_argument, &dopt.no_synchronized_snapshots, 1},
362  {"no-unlogged-table-data", no_argument, &dopt.no_unlogged_table_data, 1},
363  {"no-subscriptions", no_argument, &dopt.no_subscriptions, 1},
364  {"no-sync", no_argument, NULL, 7},
365 
366  {NULL, 0, NULL, 0}
367  };
368 
369  set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_dump"));
370 
371  /*
372  * Initialize what we need for parallel execution, especially for thread
373  * support on Windows.
374  */
375  init_parallel_dump_utils();
376 
377  g_verbose = false;
378 
379  strcpy(g_comment_start, "-- ");
380  g_comment_end[0] = '\0';
381  strcpy(g_opaque_type, "opaque");
382 
383  progname = get_progname(argv[0]);
384 
385  if (argc > 1)
386  {
387  if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
388  {
389  help(progname);
390  exit_nicely(0);
391  }
392  if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
393  {
394  puts("pg_dump (PostgreSQL) " PG_VERSION);
395  exit_nicely(0);
396  }
397  }
398 
399  InitDumpOptions(&dopt);
400 
401  while ((c = getopt_long(argc, argv, "abBcCd:E:f:F:h:j:n:N:oOp:RsS:t:T:U:vwWxZ:",
402  long_options, &optindex)) != -1)
403  {
404  switch (c)
405  {
406  case 'a': /* Dump data only */
407  dopt.dataOnly = true;
408  break;
409 
410  case 'b': /* Dump blobs */
411  dopt.outputBlobs = true;
412  break;
413 
414  case 'B': /* Don't dump blobs */
415  dopt.dontOutputBlobs = true;
416  break;
417 
418  case 'c': /* clean (i.e., drop) schema prior to create */
419  dopt.outputClean = 1;
420  break;
421 
422  case 'C': /* Create DB */
423  dopt.outputCreateDB = 1;
424  break;
425 
426  case 'd': /* database name */
427  dopt.dbname = pg_strdup(optarg);
428  break;
429 
430  case 'E': /* Dump encoding */
431  dumpencoding = pg_strdup(optarg);
432  break;
433 
434  case 'f':
435  filename = pg_strdup(optarg);
436  break;
437 
438  case 'F':
439  format = pg_strdup(optarg);
440  break;
441 
442  case 'h': /* server host */
443  dopt.pghost = pg_strdup(optarg);
444  break;
445 
446  case 'j': /* number of dump jobs */
447  numWorkers = atoi(optarg);
448  break;
449 
450  case 'n': /* include schema(s) */
451  simple_string_list_append(&schema_include_patterns, optarg);
452  dopt.include_everything = false;
453  break;
454 
455  case 'N': /* exclude schema(s) */
456  simple_string_list_append(&schema_exclude_patterns, optarg);
457  break;
458 
459  case 'o': /* Dump oids */
460  dopt.oids = true;
461  break;
462 
463  case 'O': /* Don't reconnect to match owner */
464  dopt.outputNoOwner = 1;
465  break;
466 
467  case 'p': /* server port */
468  dopt.pgport = pg_strdup(optarg);
469  break;
470 
471  case 'R':
472  /* no-op, still accepted for backwards compatibility */
473  break;
474 
475  case 's': /* dump schema only */
476  dopt.schemaOnly = true;
477  break;
478 
479  case 'S': /* Username for superuser in plain text output */
480  dopt.outputSuperuser = pg_strdup(optarg);
481  break;
482 
483  case 't': /* include table(s) */
484  simple_string_list_append(&table_include_patterns, optarg);
485  dopt.include_everything = false;
486  break;
487 
488  case 'T': /* exclude table(s) */
489  simple_string_list_append(&table_exclude_patterns, optarg);
490  break;
491 
492  case 'U':
493  dopt.username = pg_strdup(optarg);
494  break;
495 
496  case 'v': /* verbose */
497  g_verbose = true;
498  break;
499 
500  case 'w':
501  prompt_password = TRI_NO;
502  break;
503 
504  case 'W':
505  prompt_password = TRI_YES;
506  break;
507 
508  case 'x': /* skip ACL dump */
509  dopt.aclsSkip = true;
510  break;
511 
512  case 'Z': /* Compression Level */
513  compressLevel = atoi(optarg);
514  if (compressLevel < 0 || compressLevel > 9)
515  {
516  write_msg(NULL, "compression level must be in range 0..9\n");
517  exit_nicely(1);
518  }
519  break;
520 
521  case 0:
522  /* This covers the long options. */
523  break;
524 
525  case 2: /* lock-wait-timeout */
526  dopt.lockWaitTimeout = pg_strdup(optarg);
527  break;
528 
529  case 3: /* SET ROLE */
530  use_role = pg_strdup(optarg);
531  break;
532 
533  case 4: /* exclude table(s) data */
534  simple_string_list_append(&tabledata_exclude_patterns, optarg);
535  break;
536 
537  case 5: /* section */
538  set_dump_section(optarg, &dopt.dumpSections);
539  break;
540 
541  case 6: /* snapshot */
542  dumpsnapshot = pg_strdup(optarg);
543  break;
544 
545  case 7: /* no-sync */
546  dosync = false;
547  break;
548 
549  default:
550  fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
551  exit_nicely(1);
552  }
553  }
554 
555  /*
556  * Non-option argument specifies database name as long as it wasn't
557  * already specified with -d / --dbname
558  */
559  if (optind < argc && dopt.dbname == NULL)
560  dopt.dbname = argv[optind++];
561 
562  /* Complain if any arguments remain */
563  if (optind < argc)
564  {
565  fprintf(stderr, _("%s: too many command-line arguments (first is \"%s\")\n"),
566  progname, argv[optind]);
567  fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
568  progname);
569  exit_nicely(1);
570  }
571 
572  /* --column-inserts implies --inserts */
573  if (dopt.column_inserts)
574  dopt.dump_inserts = 1;
575 
576  /*
577  * Binary upgrade mode implies dumping sequence data even in schema-only
578  * mode. This is not exposed as a separate option, but kept separate
579  * internally for clarity.
580  */
581  if (dopt.binary_upgrade)
582  dopt.sequence_data = 1;
583 
584  if (dopt.dataOnly && dopt.schemaOnly)
585  {
586  write_msg(NULL, "options -s/--schema-only and -a/--data-only cannot be used together\n");
587  exit_nicely(1);
588  }
589 
590  if (dopt.dataOnly && dopt.outputClean)
591  {
592  write_msg(NULL, "options -c/--clean and -a/--data-only cannot be used together\n");
593  exit_nicely(1);
594  }
595 
596  if (dopt.dump_inserts && dopt.oids)
597  {
598  write_msg(NULL, "options --inserts/--column-inserts and -o/--oids cannot be used together\n");
599  write_msg(NULL, "(The INSERT command cannot set OIDs.)\n");
600  exit_nicely(1);
601  }
602 
603  if (dopt.if_exists && !dopt.outputClean)
604  exit_horribly(NULL, "option --if-exists requires option -c/--clean\n");
605 
606  /* Identify archive format to emit */
607  archiveFormat = parseArchiveFormat(format, &archiveMode);
608 
609  /* archiveFormat specific setup */
610  if (archiveFormat == archNull)
611  plainText = 1;
612 
613  /* Custom and directory formats are compressed by default, others not */
614  if (compressLevel == -1)
615  {
616 #ifdef HAVE_LIBZ
617  if (archiveFormat == archCustom || archiveFormat == archDirectory)
618  compressLevel = Z_DEFAULT_COMPRESSION;
619  else
620 #endif
621  compressLevel = 0;
622  }
623 
624 #ifndef HAVE_LIBZ
625  if (compressLevel != 0)
626  write_msg(NULL, "WARNING: requested compression not available in this "
627  "installation -- archive will be uncompressed\n");
628  compressLevel = 0;
629 #endif
630 
631  /*
632  * On Windows we can only have at most MAXIMUM_WAIT_OBJECTS (= 64 usually)
633  * parallel jobs because that's the maximum limit for the
634  * WaitForMultipleObjects() call.
635  */
636  if (numWorkers <= 0
637 #ifdef WIN32
638  || numWorkers > MAXIMUM_WAIT_OBJECTS
639 #endif
640  )
641  exit_horribly(NULL, "invalid number of parallel jobs\n");
642 
643  /* Parallel backup only in the directory archive format so far */
644  if (archiveFormat != archDirectory && numWorkers > 1)
645  exit_horribly(NULL, "parallel backup only supported by the directory format\n");
646 
647  /* Open the output file */
648  fout = CreateArchive(filename, archiveFormat, compressLevel, dosync,
649  archiveMode, setupDumpWorker);
650 
651  /* Make dump options accessible right away */
652  SetArchiveOptions(fout, &dopt, NULL);
653 
654  /* Register the cleanup hook */
655  on_exit_close_archive(fout);
656 
657  /* Let the archiver know how noisy to be */
658  fout->verbose = g_verbose;
659 
660  /*
661  * We allow the server to be as far back as 8.0, and up to any minor
662  * release of our own major version. (See also the version check in pg_dumpall.c.)
663  */
664  fout->minRemoteVersion = 80000;
665  fout->maxRemoteVersion = (PG_VERSION_NUM / 100) * 100 + 99;
666 
667  fout->numWorkers = numWorkers;
668 
669  /*
670  * Open the database using the Archiver, so it knows about it. Errors mean
671  * death.
672  */
673  ConnectDatabase(fout, dopt.dbname, dopt.pghost, dopt.pgport, dopt.username, prompt_password);
674  setup_connection(fout, dumpencoding, dumpsnapshot, use_role);
675 
676  /*
677  * Disable security label support if server version < v9.1.x (prevents
678  * access to nonexistent pg_seclabel catalog)
679  */
680  if (fout->remoteVersion < 90100)
681  dopt.no_security_labels = 1;
682 
683  /*
684  * On hot standbys, never try to dump unlogged table data, since it will
685  * just throw an error.
686  */
687  if (fout->isStandby)
688  dopt.no_unlogged_table_data = true;
689 
690  /* Select the appropriate subquery to convert user IDs to names */
691  if (fout->remoteVersion >= 80100)
692  username_subquery = "SELECT rolname FROM pg_catalog.pg_roles WHERE oid =";
693  else
694  username_subquery = "SELECT usename FROM pg_catalog.pg_user WHERE usesysid =";
695 
696  /* check the version for the synchronized snapshots feature */
697  if (numWorkers > 1 && fout->remoteVersion < 90200
698  && !dopt.no_synchronized_snapshots)
699  exit_horribly(NULL,
700  "Synchronized snapshots are not supported by this server version.\n"
701  "Run with --no-synchronized-snapshots instead if you do not need\n"
702  "synchronized snapshots.\n");
703 
704  /* check the version when a snapshot is explicitly specified by user */
705  if (dumpsnapshot && fout->remoteVersion < 90200)
706  exit_horribly(NULL,
707  "Exported snapshots are not supported by this server version.\n");
708 
709  /*
710  * Find the last built-in OID, if needed (prior to 8.1)
711  *
712  * With 8.1 and above, we can just use FirstNormalObjectId - 1.
713  */
714  if (fout->remoteVersion < 80100)
715  g_last_builtin_oid = findLastBuiltinOid_V71(fout,
716  PQdb(GetConnection(fout)));
717  else
718  g_last_builtin_oid = FirstNormalObjectId - 1;
719 
720  if (g_verbose)
721  write_msg(NULL, "last built-in OID is %u\n", g_last_builtin_oid);
722 
723  /* Expand schema selection patterns into OID lists */
724  if (schema_include_patterns.head != NULL)
725  {
726  expand_schema_name_patterns(fout, &schema_include_patterns,
727  &schema_include_oids,
728  strict_names);
729  if (schema_include_oids.head == NULL)
730  exit_horribly(NULL, "no matching schemas were found\n");
731  }
732  expand_schema_name_patterns(fout, &schema_exclude_patterns,
733  &schema_exclude_oids,
734  false);
735  /* non-matching exclusion patterns aren't an error */
736 
737  /* Expand table selection patterns into OID lists */
738  if (table_include_patterns.head != NULL)
739  {
740  expand_table_name_patterns(fout, &table_include_patterns,
741  &table_include_oids,
742  strict_names);
743  if (table_include_oids.head == NULL)
744  exit_horribly(NULL, "no matching tables were found\n");
745  }
746  expand_table_name_patterns(fout, &table_exclude_patterns,
747  &table_exclude_oids,
748  false);
749 
750  expand_table_name_patterns(fout, &tabledata_exclude_patterns,
751  &tabledata_exclude_oids,
752  false);
753 
754  /* non-matching exclusion patterns aren't an error */
755 
756  /*
757  * Dumping blobs is the default for dumps where an inclusion switch is not
758  * used (an "include everything" dump). -B can be used to exclude blobs
759  * from those dumps. -b can be used to include blobs even when an
760  * inclusion switch is used.
761  *
762  * -s means "schema only" and blobs are data, not schema, so we never
763  * include blobs when -s is used.
764  */
765  if (dopt.include_everything && !dopt.schemaOnly && !dopt.dontOutputBlobs)
766  dopt.outputBlobs = true;
767 
768  /*
769  * Now scan the database and create DumpableObject structs for all the
770  * objects we intend to dump.
771  */
772  tblinfo = getSchemaData(fout, &numTables);
773 
774  if (fout->remoteVersion < 80400)
775  guessConstraintInheritance(tblinfo, numTables);
776 
777  if (!dopt.schemaOnly)
778  {
779  getTableData(&dopt, tblinfo, numTables, dopt.oids, 0);
780  buildMatViewRefreshDependencies(fout);
781  if (dopt.dataOnly)
782  getTableDataFKConstraints();
783  }
784 
785  if (dopt.schemaOnly && dopt.sequence_data)
786  getTableData(&dopt, tblinfo, numTables, dopt.oids, RELKIND_SEQUENCE);
787 
788  /*
789  * In binary-upgrade mode, we do not have to worry about the actual blob
790  * data or the associated metadata that resides in the pg_largeobject and
791  * pg_largeobject_metadata tables, respectively.
792  *
793  * However, we do need to collect blob information as there may be
794  * comments or other information on blobs that we do need to dump out.
795  */
796  if (dopt.outputBlobs || dopt.binary_upgrade)
797  getBlobs(fout);
798 
799  /*
800  * Collect dependency data to assist in ordering the objects.
801  */
802  getDependencies(fout);
803 
804  /* Lastly, create dummy objects to represent the section boundaries */
805  boundaryObjs = createBoundaryObjects();
806 
807  /* Get pointers to all the known DumpableObjects */
808  getDumpableObjects(&dobjs, &numObjs);
809 
810  /*
811  * Add dummy dependencies to enforce the dump section ordering.
812  */
813  addBoundaryDependencies(dobjs, numObjs, boundaryObjs);
814 
815  /*
816  * Sort the objects into a safe dump order (no forward references).
817  *
818  * We rely on dependency information to help us determine a safe order, so
819  * the initial sort is mostly for cosmetic purposes: we sort by name to
820  * ensure that logically identical schemas will dump identically.
821  */
822  sortDumpableObjectsByTypeName(dobjs, numObjs);
823 
824  /* If we do a parallel dump, we want the largest tables to go first */
825  if (archiveFormat == archDirectory && numWorkers > 1)
826  sortDataAndIndexObjectsBySize(dobjs, numObjs);
827 
828  sortDumpableObjects(dobjs, numObjs,
829  boundaryObjs[0].dumpId, boundaryObjs[1].dumpId);
830 
831  /*
832  * Create archive TOC entries for all the objects to be dumped, in a safe
833  * order.
834  */
835 
836  /* First the special ENCODING and STDSTRINGS entries. */
837  dumpEncoding(fout);
838  dumpStdStrings(fout);
839 
840  /* The database item is always next, unless we don't want it at all */
841  if (dopt.include_everything && !dopt.dataOnly)
842  dumpDatabase(fout);
843 
844  /* Now the rearrangeable objects. */
845  for (i = 0; i < numObjs; i++)
846  dumpDumpableObject(fout, dobjs[i]);
847 
848  /*
849  * Set up options info to ensure we dump what we want.
850  */
851  ropt = NewRestoreOptions();
852  ropt->filename = filename;
853 
854  /* if you change this list, see dumpOptionsFromRestoreOptions */
855  ropt->dropSchema = dopt.outputClean;
856  ropt->dataOnly = dopt.dataOnly;
857  ropt->schemaOnly = dopt.schemaOnly;
858  ropt->if_exists = dopt.if_exists;
859  ropt->column_inserts = dopt.column_inserts;
860  ropt->dumpSections = dopt.dumpSections;
861  ropt->aclsSkip = dopt.aclsSkip;
862  ropt->superuser = dopt.outputSuperuser;
863  ropt->createDB = dopt.outputCreateDB;
864  ropt->noOwner = dopt.outputNoOwner;
865  ropt->noTablespace = dopt.outputNoTablespaces;
866  ropt->disable_triggers = dopt.disable_triggers;
867  ropt->use_setsessauth = dopt.use_setsessauth;
868  ropt->disable_dollar_quoting = dopt.disable_dollar_quoting;
869  ropt->dump_inserts = dopt.dump_inserts;
870  ropt->no_publications = dopt.no_publications;
871  ropt->no_security_labels = dopt.no_security_labels;
872  ropt->no_subscriptions = dopt.no_subscriptions;
873  ropt->lockWaitTimeout = dopt.lockWaitTimeout;
874  ropt->include_everything = dopt.include_everything;
875  ropt->enable_row_security = dopt.enable_row_security;
876  ropt->sequence_data = dopt.sequence_data;
877  ropt->binary_upgrade = dopt.binary_upgrade;
878 
879  if (compressLevel == -1)
880  ropt->compression = 0;
881  else
882  ropt->compression = compressLevel;
883 
884  ropt->suppressDumpWarnings = true; /* We've already shown them */
885 
886  SetArchiveOptions(fout, &dopt, ropt);
887 
888  /* Mark which entries should be output */
889  ProcessArchiveRestoreOptions(fout);
890 
891  /*
892  * The archive's TOC entries are now marked as to which ones will actually
893  * be output, so we can set up their dependency lists properly. This isn't
894  * necessary for plain-text output, though.
895  */
896  if (!plainText)
897  BuildArchiveDependencies(fout);
898 
899  /*
900  * And finally we can do the actual output.
901  *
902  * Note: for non-plain-text output formats, the output file is written
903  * inside CloseArchive(). This is, um, bizarre; but not worth changing
904  * right now.
905  */
906  if (plainText)
907  RestoreArchive(fout);
908 
909  CloseArchive(fout);
910 
911  exit_nicely(0);
912 }
913 
914 
915 static void
916 help(const char *progname)
917 {
918  printf(_("%s dumps a database as a text file or to other formats.\n\n"), progname);
919  printf(_("Usage:\n"));
920  printf(_(" %s [OPTION]... [DBNAME]\n"), progname);
921 
922  printf(_("\nGeneral options:\n"));
923  printf(_(" -f, --file=FILENAME output file or directory name\n"));
924  printf(_(" -F, --format=c|d|t|p output file format (custom, directory, tar,\n"
925  " plain text (default))\n"));
926  printf(_(" -j, --jobs=NUM use this many parallel jobs to dump\n"));
927  printf(_(" -v, --verbose verbose mode\n"));
928  printf(_(" -V, --version output version information, then exit\n"));
929  printf(_(" -Z, --compress=0-9 compression level for compressed formats\n"));
930  printf(_(" --lock-wait-timeout=TIMEOUT fail after waiting TIMEOUT for a table lock\n"));
931  printf(_(" --no-sync do not wait for changes to be written safely to disk\n"));
932  printf(_(" -?, --help show this help, then exit\n"));
933 
934  printf(_("\nOptions controlling the output content:\n"));
935  printf(_(" -a, --data-only dump only the data, not the schema\n"));
936  printf(_(" -b, --blobs include large objects in dump\n"));
937  printf(_(" -B, --no-blobs exclude large objects in dump\n"));
938  printf(_(" -c, --clean clean (drop) database objects before recreating\n"));
939  printf(_(" -C, --create include commands to create database in dump\n"));
940  printf(_(" -E, --encoding=ENCODING dump the data in encoding ENCODING\n"));
941  printf(_(" -n, --schema=SCHEMA dump the named schema(s) only\n"));
942  printf(_(" -N, --exclude-schema=SCHEMA do NOT dump the named schema(s)\n"));
943  printf(_(" -o, --oids include OIDs in dump\n"));
944  printf(_(" -O, --no-owner skip restoration of object ownership in\n"
945  " plain-text format\n"));
946  printf(_(" -s, --schema-only dump only the schema, no data\n"));
947  printf(_(" -S, --superuser=NAME superuser user name to use in plain-text format\n"));
948  printf(_(" -t, --table=TABLE dump the named table(s) only\n"));
949  printf(_(" -T, --exclude-table=TABLE do NOT dump the named table(s)\n"));
950  printf(_(" -x, --no-privileges do not dump privileges (grant/revoke)\n"));
951  printf(_(" --binary-upgrade for use by upgrade utilities only\n"));
952  printf(_(" --column-inserts dump data as INSERT commands with column names\n"));
953  printf(_(" --disable-dollar-quoting disable dollar quoting, use SQL standard quoting\n"));
954  printf(_(" --disable-triggers disable triggers during data-only restore\n"));
955  printf(_(" --enable-row-security enable row security (dump only content user has\n"
956  " access to)\n"));
957  printf(_(" --exclude-table-data=TABLE do NOT dump data for the named table(s)\n"));
958  printf(_(" --if-exists use IF EXISTS when dropping objects\n"));
959  printf(_(" --inserts dump data as INSERT commands, rather than COPY\n"));
960  printf(_(" --no-publications do not dump publications\n"));
961  printf(_(" --no-security-labels do not dump security label assignments\n"));
962  printf(_(" --no-subscriptions do not dump subscriptions\n"));
963  printf(_(" --no-synchronized-snapshots do not use synchronized snapshots in parallel jobs\n"));
964  printf(_(" --no-tablespaces do not dump tablespace assignments\n"));
965  printf(_(" --no-unlogged-table-data do not dump unlogged table data\n"));
966  printf(_(" --quote-all-identifiers quote all identifiers, even if not key words\n"));
967  printf(_(" --load-via-partition-root load partitions via the root table\n"));
968  printf(_(" --section=SECTION dump named section (pre-data, data, or post-data)\n"));
969  printf(_(" --serializable-deferrable wait until the dump can run without anomalies\n"));
970  printf(_(" --snapshot=SNAPSHOT use given snapshot for the dump\n"));
971  printf(_(" --strict-names require table and/or schema include patterns to\n"
972  " match at least one entity each\n"));
973  printf(_(" --use-set-session-authorization\n"
974  " use SET SESSION AUTHORIZATION commands instead of\n"
975  " ALTER OWNER commands to set ownership\n"));
976 
977  printf(_("\nConnection options:\n"));
978  printf(_(" -d, --dbname=DBNAME database to dump\n"));
979  printf(_(" -h, --host=HOSTNAME database server host or socket directory\n"));
980  printf(_(" -p, --port=PORT database server port number\n"));
981  printf(_(" -U, --username=NAME connect as specified database user\n"));
982  printf(_(" -w, --no-password never prompt for password\n"));
983  printf(_(" -W, --password force password prompt (should happen automatically)\n"));
984  printf(_(" --role=ROLENAME do SET ROLE before dump\n"));
985 
986  printf(_("\nIf no database name is supplied, then the PGDATABASE environment\n"
987  "variable value is used.\n\n"));
988  printf(_("Report bugs to <pgsql-bugs@postgresql.org>.\n"));
989 }
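/*
 * For orientation, a hedged sketch (not part of the upstream file) of how
 * the options documented above combine on the command line; database names,
 * schema/table names, and output paths below are placeholders:
 *
 *   pg_dump mydb > mydb.sql                plain-text script (format "p", the default)
 *   pg_dump -Fc -f mydb.dump mydb          custom-format archive (compressed by default)
 *   pg_dump -Fd -j 4 -f mydb.dir mydb      directory format; the only format that allows -j
 *   pg_dump -n app -T app.audit_log mydb   dump schema "app" but exclude one of its tables
 */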
990 
991 static void
992 setup_connection(Archive *AH, const char *dumpencoding,
993  const char *dumpsnapshot, char *use_role)
994 {
995  DumpOptions *dopt = AH->dopt;
996  PGconn *conn = GetConnection(AH);
997  const char *std_strings;
998 
999  /*
1000  * Set the client encoding if requested.
1001  */
1002  if (dumpencoding)
1003  {
1004  if (PQsetClientEncoding(conn, dumpencoding) < 0)
1005  exit_horribly(NULL, "invalid client encoding \"%s\" specified\n",
1006  dumpencoding);
1007  }
1008 
1009  /*
1010  * Get the active encoding and the standard_conforming_strings setting, so
1011  * we know how to escape strings.
1012  */
1013  AH->encoding = PQclientEncoding(conn);
1014 
1015  std_strings = PQparameterStatus(conn, "standard_conforming_strings");
1016  AH->std_strings = (std_strings && strcmp(std_strings, "on") == 0);
1017 
1018  /*
1019  * Set the role if requested. In a parallel dump worker, we'll be passed
1020  * use_role == NULL, but AH->use_role is already set (if user specified it
1021  * originally) and we should use that.
1022  */
1023  if (!use_role && AH->use_role)
1024  use_role = AH->use_role;
1025 
1026  /* Set the role if requested */
1027  if (use_role && AH->remoteVersion >= 80100)
1028  {
1029  PQExpBuffer query = createPQExpBuffer();
1030 
1031  appendPQExpBuffer(query, "SET ROLE %s", fmtId(use_role));
1032  ExecuteSqlStatement(AH, query->data);
1033  destroyPQExpBuffer(query);
1034 
1035  /* save it for possible later use by parallel workers */
1036  if (!AH->use_role)
1037  AH->use_role = pg_strdup(use_role);
1038  }
1039 
1040  /* Set the datestyle to ISO to ensure the dump's portability */
1041  ExecuteSqlStatement(AH, "SET DATESTYLE = ISO");
1042 
1043  /* Likewise, avoid using sql_standard intervalstyle */
1044  if (AH->remoteVersion >= 80400)
1045  ExecuteSqlStatement(AH, "SET INTERVALSTYLE = POSTGRES");
1046 
1047  /*
1048  * Set extra_float_digits so that we can dump float data exactly (given
1049  * correctly implemented float I/O code, anyway)
1050  */
1051  if (AH->remoteVersion >= 90000)
1052  ExecuteSqlStatement(AH, "SET extra_float_digits TO 3");
1053  else
1054  ExecuteSqlStatement(AH, "SET extra_float_digits TO 2");
1055 
1056  /*
1057  * If synchronized scanning is supported, disable it, to prevent
1058  * unpredictable changes in row ordering across a dump and reload.
1059  */
1060  if (AH->remoteVersion >= 80300)
1061  ExecuteSqlStatement(AH, "SET synchronize_seqscans TO off");
1062 
1063  /*
1064  * Disable timeouts if supported.
1065  */
1066  ExecuteSqlStatement(AH, "SET statement_timeout = 0");
1067  if (AH->remoteVersion >= 90300)
1068  ExecuteSqlStatement(AH, "SET lock_timeout = 0");
1069  if (AH->remoteVersion >= 90600)
1070  ExecuteSqlStatement(AH, "SET idle_in_transaction_session_timeout = 0");
1071 
1072  /*
1073  * Quote all identifiers, if requested.
1074  */
1075  if (quote_all_identifiers && AH->remoteVersion >= 90100)
1076  ExecuteSqlStatement(AH, "SET quote_all_identifiers = true");
1077 
1078  /*
1079  * Adjust row-security mode, if supported.
1080  */
1081  if (AH->remoteVersion >= 90500)
1082  {
1083  if (dopt->enable_row_security)
1084  ExecuteSqlStatement(AH, "SET row_security = on");
1085  else
1086  ExecuteSqlStatement(AH, "SET row_security = off");
1087  }
1088 
1089  /*
1090  * Start transaction-snapshot mode transaction to dump consistent data.
1091  */
1092  ExecuteSqlStatement(AH, "BEGIN");
1093  if (AH->remoteVersion >= 90100)
1094  {
1095  /*
1096  * To support the combination of serializable_deferrable with the jobs
1097  * option we use REPEATABLE READ for the worker connections that are
1098  * passed a snapshot. As long as the snapshot is acquired in a
1099  * SERIALIZABLE, READ ONLY, DEFERRABLE transaction, its use within a
1100  * REPEATABLE READ transaction provides the appropriate integrity
1101  * guarantees. This is a kluge, but safe for back-patching.
1102  */
1103  if (dopt->serializable_deferrable && AH->sync_snapshot_id == NULL)
1104  ExecuteSqlStatement(AH,
1105  "SET TRANSACTION ISOLATION LEVEL "
1106  "SERIALIZABLE, READ ONLY, DEFERRABLE");
1107  else
1108  ExecuteSqlStatement(AH,
1109  "SET TRANSACTION ISOLATION LEVEL "
1110  "REPEATABLE READ, READ ONLY");
1111  }
1112  else
1113  {
1114  ExecuteSqlStatement(AH,
1115  "SET TRANSACTION ISOLATION LEVEL "
1116  "SERIALIZABLE, READ ONLY");
1117  }
1118 
1119  /*
1120  * If user specified a snapshot to use, select that. In a parallel dump
1121  * worker, we'll be passed dumpsnapshot == NULL, but AH->sync_snapshot_id
1122  * is already set (if the server can handle it) and we should use that.
1123  */
1124  if (dumpsnapshot)
1125  AH->sync_snapshot_id = pg_strdup(dumpsnapshot);
1126 
1127  if (AH->sync_snapshot_id)
1128  {
1129  PQExpBuffer query = createPQExpBuffer();
1130 
1131  appendPQExpBuffer(query, "SET TRANSACTION SNAPSHOT ");
1132  appendStringLiteralConn(query, AH->sync_snapshot_id, conn);
1133  ExecuteSqlStatement(AH, query->data);
1134  destroyPQExpBuffer(query);
1135  }
1136  else if (AH->numWorkers > 1 &&
1137  AH->remoteVersion >= 90200 &&
1138  !dopt->no_synchronized_snapshots)
1139  {
1140  if (AH->isStandby && AH->remoteVersion < 100000)
1141  exit_horribly(NULL,
1142  "Synchronized snapshots on standby servers are not supported by this server version.\n"
1143  "Run with --no-synchronized-snapshots instead if you do not need\n"
1144  "synchronized snapshots.\n");
1145 
1146 
1147  AH->sync_snapshot_id = get_synchronized_snapshot(AH);
1148  }
1149 }
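/*
 * A hedged sketch of the session configuration established by
 * setup_connection() above against a modern (9.6+) primary server, expressed
 * as the roughly equivalent SQL; the role name and snapshot identifier are
 * placeholders supplied at run time:
 *
 *   SET ROLE placeholder_role;                -- only when --role was given
 *   SET DATESTYLE = ISO;
 *   SET INTERVALSTYLE = POSTGRES;
 *   SET extra_float_digits TO 3;
 *   SET synchronize_seqscans TO off;
 *   SET statement_timeout = 0;
 *   SET lock_timeout = 0;
 *   SET idle_in_transaction_session_timeout = 0;
 *   SET row_security = off;                   -- "on" with --enable-row-security
 *   BEGIN;
 *   SET TRANSACTION ISOLATION LEVEL REPEATABLE READ, READ ONLY;
 *   SET TRANSACTION SNAPSHOT 'placeholder-snapshot-id';  -- parallel workers only
 */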
1150 
1151 /* Set up connection for a parallel worker process */
1152 static void
1153 setupDumpWorker(Archive *AH)
1154 {
1155  /*
1156  * We want to re-select all the same values the master connection is
1157  * using. We'll have inherited directly-usable values in
1158  * AH->sync_snapshot_id and AH->use_role, but we need to translate the
1159  * inherited encoding value back to a string to pass to setup_connection.
1160  */
1161  setup_connection(AH,
1162  pg_encoding_to_char(AH->encoding),
1163  NULL,
1164  NULL);
1165 }
1166 
1167 static char *
1168 get_synchronized_snapshot(Archive *fout)
1169 {
1170  char *query = "SELECT pg_catalog.pg_export_snapshot()";
1171  char *result;
1172  PGresult *res;
1173 
1174  res = ExecuteSqlQueryForSingleRow(fout, query);
1175  result = pg_strdup(PQgetvalue(res, 0, 0));
1176  PQclear(res);
1177 
1178  return result;
1179 }
1180 
1181 static ArchiveFormat
1182 parseArchiveFormat(const char *format, ArchiveMode *mode)
1183 {
1184  ArchiveFormat archiveFormat;
1185 
1186  *mode = archModeWrite;
1187 
1188  if (pg_strcasecmp(format, "a") == 0 || pg_strcasecmp(format, "append") == 0)
1189  {
1190  /* This is used by pg_dumpall, and is not documented */
1191  archiveFormat = archNull;
1192  *mode = archModeAppend;
1193  }
1194  else if (pg_strcasecmp(format, "c") == 0)
1195  archiveFormat = archCustom;
1196  else if (pg_strcasecmp(format, "custom") == 0)
1197  archiveFormat = archCustom;
1198  else if (pg_strcasecmp(format, "d") == 0)
1199  archiveFormat = archDirectory;
1200  else if (pg_strcasecmp(format, "directory") == 0)
1201  archiveFormat = archDirectory;
1202  else if (pg_strcasecmp(format, "p") == 0)
1203  archiveFormat = archNull;
1204  else if (pg_strcasecmp(format, "plain") == 0)
1205  archiveFormat = archNull;
1206  else if (pg_strcasecmp(format, "t") == 0)
1207  archiveFormat = archTar;
1208  else if (pg_strcasecmp(format, "tar") == 0)
1209  archiveFormat = archTar;
1210  else
1211  exit_horribly(NULL, "invalid output format \"%s\" specified\n", format);
1212  return archiveFormat;
1213 }
1214 
1215 /*
1216  * Find the OIDs of all schemas matching the given list of patterns,
1217  * and append them to the given OID list.
1218  */
1219 static void
1220 expand_schema_name_patterns(Archive *fout,
1221  SimpleStringList *patterns,
1222  SimpleOidList *oids,
1223  bool strict_names)
1224 {
1225  PQExpBuffer query;
1226  PGresult *res;
1227  SimpleStringListCell *cell;
1228  int i;
1229 
1230  if (patterns->head == NULL)
1231  return; /* nothing to do */
1232 
1233  query = createPQExpBuffer();
1234 
1235  /*
1236  * The loop below runs multiple SELECTs, which might sometimes result in
1237  * duplicate entries in the OID list, but we don't care.
1238  */
1239 
1240  for (cell = patterns->head; cell; cell = cell->next)
1241  {
1242  appendPQExpBuffer(query,
1243  "SELECT oid FROM pg_catalog.pg_namespace n\n");
1244  processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1245  false, NULL, "n.nspname", NULL, NULL);
1246 
1247  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1248  if (strict_names && PQntuples(res) == 0)
1249  exit_horribly(NULL, "no matching schemas were found for pattern \"%s\"\n", cell->val);
1250 
1251  for (i = 0; i < PQntuples(res); i++)
1252  {
1253  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1254  }
1255 
1256  PQclear(res);
1257  resetPQExpBuffer(query);
1258  }
1259 
1260  destroyPQExpBuffer(query);
1261 }
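/*
 * For orientation, a hedged sketch (not emitted verbatim by the code above)
 * of the query this function builds for a pattern such as "app*"; the exact
 * WHERE clause comes from processSQLNamePattern(), which translates the
 * shell-style pattern into a regular expression along these lines:
 *
 *   SELECT oid FROM pg_catalog.pg_namespace n
 *   WHERE n.nspname ~ '^(app.*)$'
 */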
1262 
1263 /*
1264  * Find the OIDs of all tables matching the given list of patterns,
1265  * and append them to the given OID list.
1266  */
1267 static void
1268 expand_table_name_patterns(Archive *fout,
1269  SimpleStringList *patterns, SimpleOidList *oids,
1270  bool strict_names)
1271 {
1272  PQExpBuffer query;
1273  PGresult *res;
1274  SimpleStringListCell *cell;
1275  int i;
1276 
1277  if (patterns->head == NULL)
1278  return; /* nothing to do */
1279 
1280  query = createPQExpBuffer();
1281 
1282  /*
1283  * this might sometimes result in duplicate entries in the OID list, but
1284  * we don't care.
1285  */
1286 
1287  for (cell = patterns->head; cell; cell = cell->next)
1288  {
1289  appendPQExpBuffer(query,
1290  "SELECT c.oid"
1291  "\nFROM pg_catalog.pg_class c"
1292  "\n LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace"
1293  "\nWHERE c.relkind in ('%c', '%c', '%c', '%c', '%c', '%c')\n",
1294  RELKIND_RELATION, RELKIND_SEQUENCE, RELKIND_VIEW,
1295  RELKIND_MATVIEW, RELKIND_FOREIGN_TABLE,
1296  RELKIND_PARTITIONED_TABLE);
1297  processSQLNamePattern(GetConnection(fout), query, cell->val, true,
1298  false, "n.nspname", "c.relname", NULL,
1299  "pg_catalog.pg_table_is_visible(c.oid)");
1300 
1301  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1302  if (strict_names && PQntuples(res) == 0)
1303  exit_horribly(NULL, "no matching tables were found for pattern \"%s\"\n", cell->val);
1304 
1305  for (i = 0; i < PQntuples(res); i++)
1306  {
1307  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1308  }
1309 
1310  PQclear(res);
1311  resetPQExpBuffer(query);
1312  }
1313 
1314  destroyPQExpBuffer(query);
1315 }
1316 
1317 /*
1318  * checkExtensionMembership
1319  * Determine whether object is an extension member, and if so,
1320  * record an appropriate dependency and set the object's dump flag.
1321  *
1322  * It's important to call this for each object that could be an extension
1323  * member. Generally, we integrate this with determining the object's
1324  * to-be-dumped-ness, since extension membership overrides other rules for that.
1325  *
1326  * Returns true if object is an extension member, else false.
1327  */
1328 static bool
1329 checkExtensionMembership(DumpableObject *dobj, Archive *fout)
1330 {
1331  ExtensionInfo *ext = findOwningExtension(dobj->catId);
1332 
1333  if (ext == NULL)
1334  return false;
1335 
1336  dobj->ext_member = true;
1337 
1338  /* Record dependency so that getDependencies needn't deal with that */
1339  addObjectDependency(dobj, ext->dobj.dumpId);
1340 
1341  /*
1342  * In 9.6 and above, mark the member object to have any non-initial ACL,
1343  * policies, and security labels dumped.
1344  *
1345  * Note that any initial ACLs (see pg_init_privs) will be removed when we
1346  * extract the information about the object. We don't provide support for
1347  * initial policies and security labels and it seems unlikely for those to
1348  * ever exist, but we may have to revisit this later.
1349  *
1350  * Prior to 9.6, we do not include any extension member components.
1351  *
1352  * In binary upgrades, we still dump all components of the members
1353  * individually, since the idea is to exactly reproduce the database
1354  * contents rather than replace the extension contents with something
1355  * different.
1356  */
1357  if (fout->dopt->binary_upgrade)
1358  dobj->dump = ext->dobj.dump;
1359  else
1360  {
1361  if (fout->remoteVersion < 90600)
1362  dobj->dump = DUMP_COMPONENT_NONE;
1363  else
1364  dobj->dump = ext->dobj.dump_contains & (DUMP_COMPONENT_ACL |
1365  DUMP_COMPONENT_SECLABEL |
1366  DUMP_COMPONENT_POLICY);
1367  }
1368 
1369  return true;
1370 }
1371 
1372 /*
1373  * selectDumpableNamespace: policy-setting subroutine
1374  * Mark a namespace as to be dumped or not
1375  */
1376 static void
1377 selectDumpableNamespace(NamespaceInfo *nsinfo, Archive *fout)
1378 {
1379  /*
1380  * If specific tables are being dumped, do not dump any complete
1381  * namespaces. If specific namespaces are being dumped, dump just those
1382  * namespaces. Otherwise, dump all non-system namespaces.
1383  */
1384  if (table_include_oids.head != NULL)
1385  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1386  else if (schema_include_oids.head != NULL)
1387  nsinfo->dobj.dump_contains = nsinfo->dobj.dump =
1388  simple_oid_list_member(&schema_include_oids,
1389  nsinfo->dobj.catId.oid) ?
1390  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1391  else if (fout->remoteVersion >= 90600 &&
1392  strcmp(nsinfo->dobj.name, "pg_catalog") == 0)
1393  {
1394  /*
1395  * In 9.6 and above, we dump out any ACLs defined in pg_catalog, if
1396  * they are interesting (and not the original ACLs which were set at
1397  * initdb time, see pg_init_privs).
1398  */
1399  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
1400  }
1401  else if (strncmp(nsinfo->dobj.name, "pg_", 3) == 0 ||
1402  strcmp(nsinfo->dobj.name, "information_schema") == 0)
1403  {
1404  /* Other system schemas don't get dumped */
1405  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1406  }
1407  else
1408  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ALL;
1409 
1410  /*
1411  * In any case, a namespace can be excluded by an exclusion switch
1412  */
1413  if (nsinfo->dobj.dump_contains &&
1414  simple_oid_list_member(&schema_exclude_oids,
1415  nsinfo->dobj.catId.oid))
1416  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1417 
1418  /*
1419  * If the schema belongs to an extension, allow extension membership to
1420  * override the dump decision for the schema itself. However, this does
1421  * not change dump_contains, so this won't change what we do with objects
1422  * within the schema. (If they belong to the extension, they'll get
1423  * suppressed by it, otherwise not.)
1424  */
1425  (void) checkExtensionMembership(&nsinfo->dobj, fout);
1426 }
1427 
1428 /*
1429  * selectDumpableTable: policy-setting subroutine
1430  * Mark a table as to be dumped or not
1431  */
1432 static void
1433 selectDumpableTable(TableInfo *tbinfo, Archive *fout)
1434 {
1435  if (checkExtensionMembership(&tbinfo->dobj, fout))
1436  return; /* extension membership overrides all else */
1437 
1438  /*
1439  * If specific tables are being dumped, dump just those tables; else, dump
1440  * according to the parent namespace's dump flag.
1441  */
1442  if (table_include_oids.head != NULL)
1443  tbinfo->dobj.dump = simple_oid_list_member(&table_include_oids,
1444  tbinfo->dobj.catId.oid) ?
1445  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1446  else
1447  tbinfo->dobj.dump = tbinfo->dobj.namespace->dobj.dump_contains;
1448 
1449  /*
1450  * In any case, a table can be excluded by an exclusion switch
1451  */
1452  if (tbinfo->dobj.dump &&
1453  simple_oid_list_member(&table_exclude_oids,
1454  tbinfo->dobj.catId.oid))
1455  tbinfo->dobj.dump = DUMP_COMPONENT_NONE;
1456 }
1457 
1458 /*
1459  * selectDumpableType: policy-setting subroutine
1460  * Mark a type as to be dumped or not
1461  *
1462  * If it's a table's rowtype or an autogenerated array type, we also apply a
1463  * special type code to facilitate sorting into the desired order. (We don't
1464  * want to consider those to be ordinary types because that would bring tables
1465  * up into the datatype part of the dump order.) We still set the object's
1466  * dump flag; that's not going to cause the dummy type to be dumped, but we
1467  * need it so that casts involving such types will be dumped correctly -- see
1468  * dumpCast. This means the flag should be set the same as for the underlying
1469  * object (the table or base type).
1470  */
1471 static void
1472 selectDumpableType(TypeInfo *tyinfo, Archive *fout)
1473 {
1474  /* skip complex types, except for standalone composite types */
1475  if (OidIsValid(tyinfo->typrelid) &&
1476  tyinfo->typrelkind != RELKIND_COMPOSITE_TYPE)
1477  {
1478  TableInfo *tytable = findTableByOid(tyinfo->typrelid);
1479 
1480  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1481  if (tytable != NULL)
1482  tyinfo->dobj.dump = tytable->dobj.dump;
1483  else
1484  tyinfo->dobj.dump = DUMP_COMPONENT_NONE;
1485  return;
1486  }
1487 
1488  /* skip auto-generated array types */
1489  if (tyinfo->isArray)
1490  {
1491  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1492 
1493  /*
1494  * Fall through to set the dump flag; we assume that the subsequent
1495  * rules will do the same thing as they would for the array's base
1496  * type. (We cannot reliably look up the base type here, since
1497  * getTypes may not have processed it yet.)
1498  */
1499  }
1500 
1501  if (checkExtensionMembership(&tyinfo->dobj, fout))
1502  return; /* extension membership overrides all else */
1503 
1504  /* Dump based on if the contents of the namespace are being dumped */
1505  tyinfo->dobj.dump = tyinfo->dobj.namespace->dobj.dump_contains;
1506 }
1507 
1508 /*
1509  * selectDumpableDefaultACL: policy-setting subroutine
1510  * Mark a default ACL as to be dumped or not
1511  *
1512  * For per-schema default ACLs, dump if the schema is to be dumped.
1513  * Otherwise dump if we are dumping "everything". Note that dataOnly
1514  * and aclsSkip are checked separately.
1515  */
1516 static void
1517 selectDumpableDefaultACL(DefaultACLInfo *dinfo, DumpOptions *dopt)
1518 {
1519  /* Default ACLs can't be extension members */
1520 
1521  if (dinfo->dobj.namespace)
1522  /* default ACLs are considered part of the namespace */
1523  dinfo->dobj.dump = dinfo->dobj.namespace->dobj.dump_contains;
1524  else
1525  dinfo->dobj.dump = dopt->include_everything ?
1526  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1527 }
1528 
1529 /*
1530  * selectDumpableCast: policy-setting subroutine
1531  * Mark a cast as to be dumped or not
1532  *
1533  * Casts do not belong to any particular namespace (since they haven't got
1534  * names), nor do they have identifiable owners. To distinguish user-defined
1535  * casts from built-in ones, we must resort to checking whether the cast's
1536  * OID is in the range reserved for initdb.
1537  */
1538 static void
1539 selectDumpableCast(CastInfo *cast, Archive *fout)
1540 {
1541  if (checkExtensionMembership(&cast->dobj, fout))
1542  return; /* extension membership overrides all else */
1543 
1544  /*
1545  * This would be DUMP_COMPONENT_ACL for from-initdb casts, but they do not
1546  * support ACLs currently.
1547  */
1548  if (cast->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1549  cast->dobj.dump = DUMP_COMPONENT_NONE;
1550  else
1551  cast->dobj.dump = fout->dopt->include_everything ?
1552  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1553 }
1554 
1555 /*
1556  * selectDumpableProcLang: policy-setting subroutine
1557  * Mark a procedural language as to be dumped or not
1558  *
1559  * Procedural languages do not belong to any particular namespace. To
1560  * identify built-in languages, we must resort to checking whether the
1561  * language's OID is in the range reserved for initdb.
1562  */
1563 static void
1564 selectDumpableProcLang(ProcLangInfo *plang, Archive *fout)
1565 {
1566  if (checkExtensionMembership(&plang->dobj, fout))
1567  return; /* extension membership overrides all else */
1568 
1569  /*
1570  * Only include procedural languages when we are dumping everything.
1571  *
1572  * For from-initdb procedural languages, only include ACLs, as we do for
1573  * the pg_catalog namespace. We need this because procedural languages do
1574  * not live in any namespace.
1575  */
1576  if (!fout->dopt->include_everything)
1577  plang->dobj.dump = DUMP_COMPONENT_NONE;
1578  else
1579  {
1580  if (plang->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1581  plang->dobj.dump = fout->remoteVersion < 90600 ?
1582  DUMP_COMPONENT_NONE : DUMP_COMPONENT_ACL;
1583  else
1584  plang->dobj.dump = DUMP_COMPONENT_ALL;
1585  }
1586 }
1587 
1588 /*
1589  * selectDumpableAccessMethod: policy-setting subroutine
1590  * Mark an access method as to be dumped or not
1591  *
1592  * Access methods do not belong to any particular namespace. To identify
1593  * built-in access methods, we must resort to checking whether the
1594  * method's OID is in the range reserved for initdb.
1595  */
1596 static void
1597 selectDumpableAccessMethod(AccessMethodInfo *method, Archive *fout)
1598 {
1599  if (checkExtensionMembership(&method->dobj, fout))
1600  return; /* extension membership overrides all else */
1601 
1602  /*
1603  * This would be DUMP_COMPONENT_ACL for from-initdb access methods, but
1604  * they do not support ACLs currently.
1605  */
1606  if (method->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1607  method->dobj.dump = DUMP_COMPONENT_NONE;
1608  else
1609  method->dobj.dump = fout->dopt->include_everything ?
1610  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1611 }
1612 
1613 /*
1614  * selectDumpableExtension: policy-setting subroutine
1615  * Mark an extension as to be dumped or not
1616  *
1617  * Normally, we dump all extensions, or none of them if include_everything
1618  * is false (i.e., a --schema or --table switch was given). However, in
1619  * binary-upgrade mode it's necessary to skip built-in extensions, since we
1620  * assume those will already be installed in the target database. We identify
1621  * such extensions by their having OIDs in the range reserved for initdb.
1622  */
1623 static void
1624 selectDumpableExtension(ExtensionInfo *extinfo, DumpOptions *dopt)
1625 {
1626  /*
1627  * Use DUMP_COMPONENT_ACL for from-initdb extensions, to allow users to
1628  * change permissions on those objects, if they wish to, and have those
1629  * changes preserved.
1630  */
1631  if (dopt->binary_upgrade && extinfo->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1632  extinfo->dobj.dump = extinfo->dobj.dump_contains = DUMP_COMPONENT_ACL;
1633  else
1634  extinfo->dobj.dump = extinfo->dobj.dump_contains =
1635  dopt->include_everything ? DUMP_COMPONENT_ALL :
1636  DUMP_COMPONENT_NONE;
1637 }
1638 
1639 /*
1640  * selectDumpablePublicationTable: policy-setting subroutine
1641  * Mark a publication table as to be dumped or not
1642  *
1643  * Publication tables have schemas, but those are ignored in decision making,
1644  * because publications are only dumped when we are dumping everything.
1645  */
1646 static void
1647 selectDumpablePublicationTable(DumpableObject *dobj, Archive *fout)
1648 {
1649  if (checkExtensionMembership(dobj, fout))
1650  return; /* extension membership overrides all else */
1651 
1652  dobj->dump = fout->dopt->include_everything ?
1653  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1654 }
1655 
1656 /*
1657  * selectDumpableObject: policy-setting subroutine
1658  * Mark a generic dumpable object as to be dumped or not
1659  *
1660  * Use this only for object types without a special-case routine above.
1661  */
1662 static void
1663 selectDumpableObject(DumpableObject *dobj, Archive *fout)
1664 {
1665  if (checkExtensionMembership(dobj, fout))
1666  return; /* extension membership overrides all else */
1667 
1668  /*
1669  * Default policy is to dump if parent namespace is dumpable, or for
1670  * non-namespace-associated items, dump if we're dumping "everything".
1671  */
1672  if (dobj->namespace)
1673  dobj->dump = dobj->namespace->dobj.dump_contains;
1674  else
1675  dobj->dump = fout->dopt->include_everything ?
1676  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1677 }
1678 
1679 /*
1680  * Dump a table's contents for loading using the COPY command
1681  * - this routine is called by the Archiver when it wants the table
1682  * to be dumped.
1683  */
1684 
1685 static int
1686 dumpTableData_copy(Archive *fout, void *dcontext)
1687 {
1688  TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
1689  TableInfo *tbinfo = tdinfo->tdtable;
1690  const char *classname = tbinfo->dobj.name;
1691  const bool hasoids = tbinfo->hasoids;
1692  const bool oids = tdinfo->oids;
1693  PQExpBuffer q = createPQExpBuffer();
1694 
1695  /*
1696  * Note: can't use getThreadLocalPQExpBuffer() here, since we're calling
1697  * fmtId, which uses it already.
1698  */
1699  PQExpBuffer clistBuf = createPQExpBuffer();
1700  PGconn *conn = GetConnection(fout);
1701  PGresult *res;
1702  int ret;
1703  char *copybuf;
1704  const char *column_list;
1705 
1706  if (g_verbose)
1707  write_msg(NULL, "dumping contents of table \"%s.%s\"\n",
1708  tbinfo->dobj.namespace->dobj.name, classname);
1709 
1710  /*
1711  * Make sure we are in proper schema. We will qualify the table name
1712  * below anyway (in case its name conflicts with a pg_catalog table); but
1713  * this ensures reproducible results in case the table contains regproc,
1714  * regclass, etc columns.
1715  */
1716  selectSourceSchema(fout, tbinfo->dobj.namespace->dobj.name);
1717 
1718  /*
1719  * Specify the column list explicitly so that we have no possibility of
1720  * retrieving data in the wrong column order. (The default column
1721  * ordering of COPY will not be what we want in certain corner cases
1722  * involving ADD COLUMN and inheritance.)
1723  */
1724  column_list = fmtCopyColumnList(tbinfo, clistBuf);
1725 
1726  if (oids && hasoids)
1727  {
1728  appendPQExpBuffer(q, "COPY %s %s WITH OIDS TO stdout;",
1730  tbinfo->dobj.namespace->dobj.name,
1731  classname),
1732  column_list);
1733  }
1734  else if (tdinfo->filtercond)
1735  {
1736  /* Note: this syntax is only supported in 8.2 and up */
1737  appendPQExpBufferStr(q, "COPY (SELECT ");
1738  /* klugery to get rid of parens in column list */
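 /*
  * [Editorial note, not part of pg_dump.c] For example, a column_list of
  * "(a, b, c)" is emitted as "a, b, c ": the leading '(' is skipped and
  * the trailing ')' is overwritten with a space just below.
  */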
1739  if (strlen(column_list) > 2)
1740  {
1741  appendPQExpBufferStr(q, column_list + 1);
1742  q->data[q->len - 1] = ' ';
1743  }
1744  else
1745  appendPQExpBufferStr(q, "* ");
1746  appendPQExpBuffer(q, "FROM %s %s) TO stdout;",
1748  tbinfo->dobj.namespace->dobj.name,
1749  classname),
1750  tdinfo->filtercond);
1751  }
1752  else
1753  {
1754  appendPQExpBuffer(q, "COPY %s %s TO stdout;",
1756  tbinfo->dobj.namespace->dobj.name,
1757  classname),
1758  column_list);
1759  }
1760  res = ExecuteSqlQuery(fout, q->data, PGRES_COPY_OUT);
1761  PQclear(res);
1762  destroyPQExpBuffer(clistBuf);
1763 
1764  for (;;)
1765  {
1766  ret = PQgetCopyData(conn, &copybuf, 0);
1767 
1768  if (ret < 0)
1769  break; /* done or error */
1770 
1771  if (copybuf)
1772  {
1773  WriteData(fout, copybuf, ret);
1774  PQfreemem(copybuf);
1775  }
1776 
1777  /* ----------
1778  * THROTTLE:
1779  *
1780  * There was considerable discussion in late July, 2000 regarding
1781  * slowing down pg_dump when backing up large tables. Users with both
1782  * slow & fast (multi-processor) machines experienced performance
1783  * degradation when doing a backup.
1784  *
1785  * Initial attempts based on sleeping for a number of ms for each ms
1786  * of work were deemed too complex, then a simple 'sleep in each loop'
1787  * implementation was suggested. The latter failed because the loop
1788  * was too tight. Finally, the following was implemented:
1789  *
1790  * If throttle is non-zero, then
1791  * See how long since the last sleep.
1792  * Work out how long to sleep (based on ratio).
1793  * If sleep is more than 100ms, then
1794  * sleep
1795  * reset timer
1796  * EndIf
1797  * EndIf
1798  *
1799  * where the throttle value was the number of ms to sleep per ms of
1800  * work. The calculation was done in each loop.
1801  *
1802  * Most of the hard work is done in the backend, and this solution
1803  * still did not work particularly well: on slow machines the ratio
1804  * was 50:1, on medium-paced machines 1:1, and on fast
1805  * multi-processor machines it had little or no effect, for reasons
1806  * that were unclear.
1807  *
1808  * Further discussion ensued, and the proposal was dropped.
1809  *
1810  * For those people who want this feature, it can be implemented using
1811  * gettimeofday in each loop, calculating the time since last sleep,
1812  * multiplying that by the sleep ratio, then if the result is more
1813  * than a preset 'minimum sleep time' (say 100ms), call the 'select'
1814  * function to sleep for a subsecond period, i.e.
1815  *
1816  * select(0, NULL, NULL, NULL, &tvi);
1817  *
1818  * This will return after the interval specified in the structure tvi.
1819  * Finally, call gettimeofday again to save the 'last sleep time'.
1820  * ----------
1821  */
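 /*
  * [Editorial sketch, not part of pg_dump.c] A minimal version of the
  * scheme described above, assuming a hypothetical throttle_ratio (ms of
  * sleep per ms of work) and <sys/time.h>, could look like:
  *
  *	static struct timeval last_sleep;	// set once before the loop
  *	struct timeval now;
  *	long work_ms, sleep_ms;
  *
  *	gettimeofday(&now, NULL);
  *	work_ms = (now.tv_sec - last_sleep.tv_sec) * 1000L +
  *		(now.tv_usec - last_sleep.tv_usec) / 1000L;
  *	sleep_ms = work_ms * throttle_ratio;
  *	if (sleep_ms > 100)			// preset minimum sleep time
  *	{
  *		struct timeval tvi;
  *
  *		tvi.tv_sec = sleep_ms / 1000;
  *		tvi.tv_usec = (sleep_ms % 1000) * 1000;
  *		select(0, NULL, NULL, NULL, &tvi);	// sleep for the interval
  *		gettimeofday(&last_sleep, NULL);	// save the last sleep time
  *	}
  */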
1822  }
1823  archprintf(fout, "\\.\n\n\n");
1824 
1825  if (ret == -2)
1826  {
1827  /* copy data transfer failed */
1828  write_msg(NULL, "Dumping the contents of table \"%s\" failed: PQgetCopyData() failed.\n", classname);
1829  write_msg(NULL, "Error message from server: %s", PQerrorMessage(conn));
1830  write_msg(NULL, "The command was: %s\n", q->data);
1831  exit_nicely(1);
1832  }
1833 
1834  /* Check command status and return to normal libpq state */
1835  res = PQgetResult(conn);
1836  if (PQresultStatus(res) != PGRES_COMMAND_OK)
1837  {
1838  write_msg(NULL, "Dumping the contents of table \"%s\" failed: PQgetResult() failed.\n", classname);
1839  write_msg(NULL, "Error message from server: %s", PQerrorMessage(conn));
1840  write_msg(NULL, "The command was: %s\n", q->data);
1841  exit_nicely(1);
1842  }
1843  PQclear(res);
1844 
1845  /* Do this to ensure we've pumped libpq back to idle state */
1846  if (PQgetResult(conn) != NULL)
1847  write_msg(NULL, "WARNING: unexpected extra results during COPY of table \"%s\"\n",
1848  classname);
1849 
1850  destroyPQExpBuffer(q);
1851  return 1;
1852 }
1853 
1854 /*
1855  * Dump table data using INSERT commands.
1856  *
1857  * Caution: when we restore from an archive file direct to database, the
1858  * INSERT commands emitted by this function have to be parsed by
1859  * pg_backup_db.c's ExecuteSimpleCommands(), which will not handle comments,
1860  * E'' strings, or dollar-quoted strings. So don't emit anything like that.
1861  */
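 /*
  * [Editorial note, not part of pg_dump.c] With hypothetical names, the
  * acceptable output therefore looks like
  *
  *	INSERT INTO mytable (col1, col2) VALUES (1, 'it''s fine');
  *
  * i.e. quotes are doubled inside plain string literals rather than using
  * E'...' escapes or $tag$...$tag$ dollar quoting.
  */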
1862 static int
1863 dumpTableData_insert(Archive *fout, void *dcontext)
1864 {
1865  TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
1866  TableInfo *tbinfo = tdinfo->tdtable;
1867  const char *classname = tbinfo->dobj.name;
1868  DumpOptions *dopt = fout->dopt;
1869  PQExpBuffer q = createPQExpBuffer();
1870  PQExpBuffer insertStmt = NULL;
1871  PGresult *res;
1872  int tuple;
1873  int nfields;
1874  int field;
1875 
1876  /*
1877  * Make sure we are in proper schema. We will qualify the table name
1878  * below anyway (in case its name conflicts with a pg_catalog table); but
1879  * this ensures reproducible results in case the table contains regproc,
1880  * regclass, etc columns.
1881  */
1882  selectSourceSchema(fout, tbinfo->dobj.namespace->dobj.name);
1883 
1884  appendPQExpBuffer(q, "DECLARE _pg_dump_cursor CURSOR FOR "
1885  "SELECT * FROM ONLY %s",
1887  tbinfo->dobj.namespace->dobj.name,
1888  classname));
1889  if (tdinfo->filtercond)
1890  appendPQExpBuffer(q, " %s", tdinfo->filtercond);
1891 
1892  ExecuteSqlStatement(fout, q->data);
1893 
1894  while (1)
1895  {
1896  res = ExecuteSqlQuery(fout, "FETCH 100 FROM _pg_dump_cursor",
1897  PGRES_TUPLES_OK);
1898  nfields = PQnfields(res);
1899  for (tuple = 0; tuple < PQntuples(res); tuple++)
1900  {
1901  /*
1902  * First time through, we build as much of the INSERT statement as
1903  * possible in "insertStmt", which we can then just print for each
1904  * line. If the table happens to have zero columns then this will
1905  * be a complete statement, otherwise it will end in "VALUES(" and
1906  * be ready to have the row's column values appended.
1907  */
1908  if (insertStmt == NULL)
1909  {
1910  insertStmt = createPQExpBuffer();
1911 
1912  /*
1913  * When load-via-partition-root is set, get the root table
1914  * name for the partition table, so that we can reload data
1915  * through the root table.
1916  */
1917  if (dopt->load_via_partition_root && tbinfo->ispartition)
1918  {
1919  TableInfo *parentTbinfo;
1920 
1921  parentTbinfo = getRootTableInfo(tbinfo);
1922 
1923  /*
1924  * When we load data through the root, we will qualify the
1925  * table name, because the search_path set earlier refers to
1926  * the partition table's schema.
1927  */
1928  classname = (char *) fmtQualifiedId(fout->remoteVersion,
1929  parentTbinfo->dobj.namespace->dobj.name,
1930  parentTbinfo->dobj.name);
1931  }
1932  else
1933  classname = fmtId(tbinfo->dobj.name);
1934 
1935  appendPQExpBuffer(insertStmt, "INSERT INTO %s ",
1936  classname);
1937 
1938  /* corner case for zero-column table */
1939  if (nfields == 0)
1940  {
1941  appendPQExpBufferStr(insertStmt, "DEFAULT VALUES;\n");
1942  }
1943  else
1944  {
1945  /* append the list of column names if required */
1946  if (dopt->column_inserts)
1947  {
1948  appendPQExpBufferChar(insertStmt, '(');
1949  for (field = 0; field < nfields; field++)
1950  {
1951  if (field > 0)
1952  appendPQExpBufferStr(insertStmt, ", ");
1953  appendPQExpBufferStr(insertStmt,
1954  fmtId(PQfname(res, field)));
1955  }
1956  appendPQExpBufferStr(insertStmt, ") ");
1957  }
1958 
1959  if (tbinfo->needs_override)
1960  appendPQExpBufferStr(insertStmt, "OVERRIDING SYSTEM VALUE ");
1961 
1962  appendPQExpBufferStr(insertStmt, "VALUES (");
1963  }
1964  }
1965 
1966  archputs(insertStmt->data, fout);
1967 
1968  /* if it is a zero-column table then we're done */
1969  if (nfields == 0)
1970  continue;
1971 
1972  for (field = 0; field < nfields; field++)
1973  {
1974  if (field > 0)
1975  archputs(", ", fout);
1976  if (PQgetisnull(res, tuple, field))
1977  {
1978  archputs("NULL", fout);
1979  continue;
1980  }
1981 
1982  /* XXX This code is partially duplicated in ruleutils.c */
1983  switch (PQftype(res, field))
1984  {
1985  case INT2OID:
1986  case INT4OID:
1987  case INT8OID:
1988  case OIDOID:
1989  case FLOAT4OID:
1990  case FLOAT8OID:
1991  case NUMERICOID:
1992  {
1993  /*
1994  * These types are printed without quotes unless
1995  * they contain values that aren't accepted by the
1996  * scanner unquoted (e.g., 'NaN'). Note that
1997  * strtod() and friends might accept NaN, so we
1998  * can't use that to test.
1999  *
2000  * In reality we only need to defend against
2001  * infinity and NaN, so we need not get too crazy
2002  * about pattern matching here.
2003  */
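 /*
  * [Editorial note, not part of pg_dump.c] For example, "42" and
  * "-1.5e+10" pass the strspn() test below and are emitted bare, while
  * "NaN", "Infinity" and "-Infinity" fall through to the quoted form.
  */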
2004  const char *s = PQgetvalue(res, tuple, field);
2005 
2006  if (strspn(s, "0123456789 +-eE.") == strlen(s))
2007  archputs(s, fout);
2008  else
2009  archprintf(fout, "'%s'", s);
2010  }
2011  break;
2012 
2013  case BITOID:
2014  case VARBITOID:
2015  archprintf(fout, "B'%s'",
2016  PQgetvalue(res, tuple, field));
2017  break;
2018 
2019  case BOOLOID:
2020  if (strcmp(PQgetvalue(res, tuple, field), "t") == 0)
2021  archputs("true", fout);
2022  else
2023  archputs("false", fout);
2024  break;
2025 
2026  default:
2027  /* All other types are printed as string literals. */
2028  resetPQExpBuffer(q);
2029  appendStringLiteralAH(q,
2030  PQgetvalue(res, tuple, field),
2031  fout);
2032  archputs(q->data, fout);
2033  break;
2034  }
2035  }
2036  archputs(");\n", fout);
2037  }
2038 
2039  if (PQntuples(res) <= 0)
2040  {
2041  PQclear(res);
2042  break;
2043  }
2044  PQclear(res);
2045  }
2046 
2047  archputs("\n\n", fout);
2048 
2049  ExecuteSqlStatement(fout, "CLOSE _pg_dump_cursor");
2050 
2051  destroyPQExpBuffer(q);
2052  if (insertStmt != NULL)
2053  destroyPQExpBuffer(insertStmt);
2054 
2055  return 1;
2056 }
2057 
2058 /*
2059  * getRootTableInfo:
2060  * get the root TableInfo for the given partition table.
2061  */
2062 static TableInfo *
2063 getRootTableInfo(TableInfo *tbinfo)
2064 {
2065  TableInfo *parentTbinfo;
2066 
2067  Assert(tbinfo->ispartition);
2068  Assert(tbinfo->numParents == 1);
2069 
2070  parentTbinfo = tbinfo->parents[0];
2071  while (parentTbinfo->ispartition)
2072  {
2073  Assert(parentTbinfo->numParents == 1);
2074  parentTbinfo = parentTbinfo->parents[0];
2075  }
2076 
2077  return parentTbinfo;
2078 }
2079 
2080 /*
2081  * dumpTableData -
2082  * dump the contents of a single table
2083  *
2084  * Actually, this just makes an ArchiveEntry for the table contents.
2085  */
2086 static void
2087 dumpTableData(Archive *fout, TableDataInfo *tdinfo)
2088 {
2089  DumpOptions *dopt = fout->dopt;
2090  TableInfo *tbinfo = tdinfo->tdtable;
2091  PQExpBuffer copyBuf = createPQExpBuffer();
2092  PQExpBuffer clistBuf = createPQExpBuffer();
2093  DataDumperPtr dumpFn;
2094  char *copyStmt;
2095  const char *copyFrom;
2096 
2097  if (!dopt->dump_inserts)
2098  {
2099  /* Dump/restore using COPY */
2100  dumpFn = dumpTableData_copy;
2101 
2102  /*
2103  * When load-via-partition-root is set, get the root table name for
2104  * the partition table, so that we can reload data through the root
2105  * table.
2106  */
2107  if (dopt->load_via_partition_root && tbinfo->ispartition)
2108  {
2109  TableInfo *parentTbinfo;
2110 
2111  parentTbinfo = getRootTableInfo(tbinfo);
2112 
2113  /*
2114  * When we load data through the root, we will qualify the table
2115  * name, because search_path is set for the partition.
2116  */
2117  copyFrom = fmtQualifiedId(fout->remoteVersion,
2118  parentTbinfo->dobj.namespace->dobj.name,
2119  parentTbinfo->dobj.name);
2120  }
2121  else
2122  copyFrom = fmtId(tbinfo->dobj.name);
2123 
2124  /* must use 2 steps here 'cause fmtId is nonreentrant */
2125  appendPQExpBuffer(copyBuf, "COPY %s ",
2126  copyFrom);
2127  appendPQExpBuffer(copyBuf, "%s %sFROM stdin;\n",
2128  fmtCopyColumnList(tbinfo, clistBuf),
2129  (tdinfo->oids && tbinfo->hasoids) ? "WITH OIDS " : "");
2130  copyStmt = copyBuf->data;
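 /*
  * [Editorial note, not part of pg_dump.c] With hypothetical names, the
  * statement assembled above looks like
  *
  *	COPY mytable (col1, col2) FROM stdin;
  *
  * It is stored in the archive entry so the restore side can issue it
  * before replaying the saved data stream.
  */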
2131  }
2132  else
2133  {
2134  /* Restore using INSERT */
2135  dumpFn = dumpTableData_insert;
2136  copyStmt = NULL;
2137  }
2138 
2139  /*
2140  * Note: although the TableDataInfo is a full DumpableObject, we treat its
2141  * dependency on its table as "special" and pass it to ArchiveEntry now.
2142  * See comments for BuildArchiveDependencies.
2143  */
2144  if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2145  ArchiveEntry(fout, tdinfo->dobj.catId, tdinfo->dobj.dumpId,
2146  tbinfo->dobj.name, tbinfo->dobj.namespace->dobj.name,
2147  NULL, tbinfo->rolname,
2148  false, "TABLE DATA", SECTION_DATA,
2149  "", "", copyStmt,
2150  &(tbinfo->dobj.dumpId), 1,
2151  dumpFn, tdinfo);
2152 
2153  destroyPQExpBuffer(copyBuf);
2154  destroyPQExpBuffer(clistBuf);
2155 }
2156 
2157 /*
2158  * refreshMatViewData -
2159  * load or refresh the contents of a single materialized view
2160  *
2161  * Actually, this just makes an ArchiveEntry for the REFRESH MATERIALIZED VIEW
2162  * statement.
2163  */
2164 static void
2165 refreshMatViewData(Archive *fout, TableDataInfo *tdinfo)
2166 {
2167  TableInfo *tbinfo = tdinfo->tdtable;
2168  PQExpBuffer q;
2169 
2170  /* If the materialized view is not flagged as populated, skip this. */
2171  if (!tbinfo->relispopulated)
2172  return;
2173 
2174  q = createPQExpBuffer();
2175 
2176  appendPQExpBuffer(q, "REFRESH MATERIALIZED VIEW %s;\n",
2177  fmtId(tbinfo->dobj.name));
2178 
2179  if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2180  ArchiveEntry(fout,
2181  tdinfo->dobj.catId, /* catalog ID */
2182  tdinfo->dobj.dumpId, /* dump ID */
2183  tbinfo->dobj.name, /* Name */
2184  tbinfo->dobj.namespace->dobj.name, /* Namespace */
2185  NULL, /* Tablespace */
2186  tbinfo->rolname, /* Owner */
2187  false, /* with oids */
2188  "MATERIALIZED VIEW DATA", /* Desc */
2189  SECTION_POST_DATA, /* Section */
2190  q->data, /* Create */
2191  "", /* Del */
2192  NULL, /* Copy */
2193  tdinfo->dobj.dependencies, /* Deps */
2194  tdinfo->dobj.nDeps, /* # Deps */
2195  NULL, /* Dumper */
2196  NULL); /* Dumper Arg */
2197 
2198  destroyPQExpBuffer(q);
2199 }
2200 
2201 /*
2202  * getTableData -
2203  * set up dumpable objects representing the contents of tables
2204  */
2205 static void
2206 getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, bool oids, char relkind)
2207 {
2208  int i;
2209 
2210  for (i = 0; i < numTables; i++)
2211  {
2212  if (tblinfo[i].dobj.dump & DUMP_COMPONENT_DATA &&
2213  (!relkind || tblinfo[i].relkind == relkind))
2214  makeTableDataInfo(dopt, &(tblinfo[i]), oids);
2215  }
2216 }
2217 
2218 /*
2219  * Make a dumpable object for the data of this specific table
2220  *
2221  * Note: we make a TableDataInfo if and only if we are going to dump the
2222  * table data; the "dump" flag in such objects isn't used.
2223  */
2224 static void
2225 makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo, bool oids)
2226 {
2227  TableDataInfo *tdinfo;
2228 
2229  /*
2230  * Nothing to do if we already decided to dump the table. This will
2231  * happen for "config" tables.
2232  */
2233  if (tbinfo->dataObj != NULL)
2234  return;
2235 
2236  /* Skip VIEWs (no data to dump) */
2237  if (tbinfo->relkind == RELKIND_VIEW)
2238  return;
2239  /* Skip FOREIGN TABLEs (no data to dump) */
2240  if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2241  return;
2242  /* Skip partitioned tables (data in partitions) */
2243  if (tbinfo->relkind == RELKIND_PARTITIONED_TABLE)
2244  return;
2245 
2246  /* Don't dump data in unlogged tables, if so requested */
2247  if (tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED &&
2248  dopt->no_unlogged_table_data)
2249  return;
2250 
2251  /* Check that the data is not explicitly excluded */
2252  if (simple_oid_list_member(&tabledata_exclude_oids,
2253  tbinfo->dobj.catId.oid))
2254  return;
2255 
2256  /* OK, let's dump it */
2257  tdinfo = (TableDataInfo *) pg_malloc(sizeof(TableDataInfo));
2258 
2259  if (tbinfo->relkind == RELKIND_MATVIEW)
2260  tdinfo->dobj.objType = DO_REFRESH_MATVIEW;
2261  else if (tbinfo->relkind == RELKIND_SEQUENCE)
2262  tdinfo->dobj.objType = DO_SEQUENCE_SET;
2263  else
2264  tdinfo->dobj.objType = DO_TABLE_DATA;
2265 
2266  /*
2267  * Note: use tableoid 0 so that this object won't be mistaken for
2268  * something that pg_depend entries apply to.
2269  */
2270  tdinfo->dobj.catId.tableoid = 0;
2271  tdinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
2272  AssignDumpId(&tdinfo->dobj);
2273  tdinfo->dobj.name = tbinfo->dobj.name;
2274  tdinfo->dobj.namespace = tbinfo->dobj.namespace;
2275  tdinfo->tdtable = tbinfo;
2276  tdinfo->oids = oids;
2277  tdinfo->filtercond = NULL; /* might get set later */
2278  addObjectDependency(&tdinfo->dobj, tbinfo->dobj.dumpId);
2279 
2280  tbinfo->dataObj = tdinfo;
2281 }
2282 
2283 /*
2284  * The refresh for a materialized view must be dependent on the refresh for
2285  * any materialized view that this one is dependent on.
2286  *
2287  * This must be called after all the objects are created, but before they are
2288  * sorted.
2289  */
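 /*
  * [Editorial note, not part of pg_dump.c] Concretely: if matview A's query
  * reads from matview B, possibly through intervening plain views, then the
  * REFRESH of A must be ordered after the REFRESH of B. The recursive query
  * below walks each matview's rewrite-rule dependencies to find all such
  * underlying matviews.
  */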
2290 static void
2291 buildMatViewRefreshDependencies(Archive *fout)
2292 {
2293  PQExpBuffer query;
2294  PGresult *res;
2295  int ntups,
2296  i;
2297  int i_classid,
2298  i_objid,
2299  i_refobjid;
2300 
2301  /* No Mat Views before 9.3. */
2302  if (fout->remoteVersion < 90300)
2303  return;
2304 
2305  /* Make sure we are in proper schema */
2306  selectSourceSchema(fout, "pg_catalog");
2307 
2308  query = createPQExpBuffer();
2309 
2310  appendPQExpBufferStr(query, "WITH RECURSIVE w AS "
2311  "( "
2312  "SELECT d1.objid, d2.refobjid, c2.relkind AS refrelkind "
2313  "FROM pg_depend d1 "
2314  "JOIN pg_class c1 ON c1.oid = d1.objid "
2315  "AND c1.relkind = " CppAsString2(RELKIND_MATVIEW)
2316  " JOIN pg_rewrite r1 ON r1.ev_class = d1.objid "
2317  "JOIN pg_depend d2 ON d2.classid = 'pg_rewrite'::regclass "
2318  "AND d2.objid = r1.oid "
2319  "AND d2.refobjid <> d1.objid "
2320  "JOIN pg_class c2 ON c2.oid = d2.refobjid "
2321  "AND c2.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
2323  "WHERE d1.classid = 'pg_class'::regclass "
2324  "UNION "
2325  "SELECT w.objid, d3.refobjid, c3.relkind "
2326  "FROM w "
2327  "JOIN pg_rewrite r3 ON r3.ev_class = w.refobjid "
2328  "JOIN pg_depend d3 ON d3.classid = 'pg_rewrite'::regclass "
2329  "AND d3.objid = r3.oid "
2330  "AND d3.refobjid <> w.refobjid "
2331  "JOIN pg_class c3 ON c3.oid = d3.refobjid "
2332  "AND c3.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
2334  ") "
2335  "SELECT 'pg_class'::regclass::oid AS classid, objid, refobjid "
2336  "FROM w "
2337  "WHERE refrelkind = " CppAsString2(RELKIND_MATVIEW));
2338 
2339  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
2340 
2341  ntups = PQntuples(res);
2342 
2343  i_classid = PQfnumber(res, "classid");
2344  i_objid = PQfnumber(res, "objid");
2345  i_refobjid = PQfnumber(res, "refobjid");
2346 
2347  for (i = 0; i < ntups; i++)
2348  {
2349  CatalogId objId;
2350  CatalogId refobjId;
2351  DumpableObject *dobj;
2352  DumpableObject *refdobj;
2353  TableInfo *tbinfo;
2354  TableInfo *reftbinfo;
2355 
2356  objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
2357  objId.oid = atooid(PQgetvalue(res, i, i_objid));
2358  refobjId.tableoid = objId.tableoid;
2359  refobjId.oid = atooid(PQgetvalue(res, i, i_refobjid));
2360 
2361  dobj = findObjectByCatalogId(objId);
2362  if (dobj == NULL)
2363  continue;
2364 
2365  Assert(dobj->objType == DO_TABLE);
2366  tbinfo = (TableInfo *) dobj;
2367  Assert(tbinfo->relkind == RELKIND_MATVIEW);
2368  dobj = (DumpableObject *) tbinfo->dataObj;
2369  if (dobj == NULL)
2370  continue;
2371  Assert(dobj->objType == DO_REFRESH_MATVIEW);
2372 
2373  refdobj = findObjectByCatalogId(refobjId);
2374  if (refdobj == NULL)
2375  continue;
2376 
2377  Assert(refdobj->objType == DO_TABLE);
2378  reftbinfo = (TableInfo *) refdobj;
2379  Assert(reftbinfo->relkind == RELKIND_MATVIEW);
2380  refdobj = (DumpableObject *) reftbinfo->dataObj;
2381  if (refdobj == NULL)
2382  continue;
2383  Assert(refdobj->objType == DO_REFRESH_MATVIEW);
2384 
2385  addObjectDependency(dobj, refdobj->dumpId);
2386 
2387  if (!reftbinfo->relispopulated)
2388  tbinfo->relispopulated = false;
2389  }
2390 
2391  PQclear(res);
2392 
2393  destroyPQExpBuffer(query);
2394 }
2395 
2396 /*
2397  * getTableDataFKConstraints -
2398  * add dump-order dependencies reflecting foreign key constraints
2399  *
2400  * This code is executed only in a data-only dump --- in schema+data dumps
2401  * we handle foreign key issues by not creating the FK constraints until
2402  * after the data is loaded. In a data-only dump, however, we want to
2403  * order the table data objects in such a way that a table's referenced
2404  * tables are restored first. (In the presence of circular references or
2405  * self-references this may be impossible; we'll detect and complain about
2406  * that during the dependency sorting step.)
2407  */
2408 static void
2409 getTableDataFKConstraints(void)
2410 {
2411  DumpableObject **dobjs;
2412  int numObjs;
2413  int i;
2414 
2415  /* Search through all the dumpable objects for FK constraints */
2416  getDumpableObjects(&dobjs, &numObjs);
2417  for (i = 0; i < numObjs; i++)
2418  {
2419  if (dobjs[i]->objType == DO_FK_CONSTRAINT)
2420  {
2421  ConstraintInfo *cinfo = (ConstraintInfo *) dobjs[i];
2422  TableInfo *ftable;
2423 
2424  /* Not interesting unless both tables are to be dumped */
2425  if (cinfo->contable == NULL ||
2426  cinfo->contable->dataObj == NULL)
2427  continue;
2428  ftable = findTableByOid(cinfo->confrelid);
2429  if (ftable == NULL ||
2430  ftable->dataObj == NULL)
2431  continue;
2432 
2433  /*
2434  * Okay, make referencing table's TABLE_DATA object depend on the
2435  * referenced table's TABLE_DATA object.
2436  */
2437  addObjectDependency(&cinfo->contable->dataObj->dobj,
2438  ftable->dataObj->dobj.dumpId);
2439  }
2440  }
2441  free(dobjs);
2442 }
2443 
2444 
2445 /*
2446  * guessConstraintInheritance:
2447  * In pre-8.4 databases, we can't tell for certain which constraints
2448  * are inherited. We assume a CHECK constraint is inherited if its name
2449  * matches the name of any constraint in the parent. Originally this code
2450  * tried to compare the expression texts, but that can fail for various
2451  * reasons --- for example, if the parent and child tables are in different
2452  * schemas, reverse-listing of function calls may produce different text
2453  * (schema-qualified or not) depending on search path.
2454  *
2455  * In 8.4 and up we can rely on the conislocal field to decide which
2456  * constraints must be dumped; much safer.
2457  *
2458  * This function assumes all conislocal flags were initialized to TRUE.
2459  * It clears the flag on anything that seems to be inherited.
2460  */
2461 static void
2462 guessConstraintInheritance(TableInfo *tblinfo, int numTables)
2463 {
2464  int i,
2465  j,
2466  k;
2467 
2468  for (i = 0; i < numTables; i++)
2469  {
2470  TableInfo *tbinfo = &(tblinfo[i]);
2471  int numParents;
2472  TableInfo **parents;
2473  TableInfo *parent;
2474 
2475  /* Sequences and views never have parents */
2476  if (tbinfo->relkind == RELKIND_SEQUENCE ||
2477  tbinfo->relkind == RELKIND_VIEW)
2478  continue;
2479 
2480  /* Don't bother computing anything for non-target tables, either */
2481  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
2482  continue;
2483 
2484  numParents = tbinfo->numParents;
2485  parents = tbinfo->parents;
2486 
2487  if (numParents == 0)
2488  continue; /* nothing to see here, move along */
2489 
2490  /* scan for inherited CHECK constraints */
2491  for (j = 0; j < tbinfo->ncheck; j++)
2492  {
2493  ConstraintInfo *constr;
2494 
2495  constr = &(tbinfo->checkexprs[j]);
2496 
2497  for (k = 0; k < numParents; k++)
2498  {
2499  int l;
2500 
2501  parent = parents[k];
2502  for (l = 0; l < parent->ncheck; l++)
2503  {
2504  ConstraintInfo *pconstr = &(parent->checkexprs[l]);
2505 
2506  if (strcmp(pconstr->dobj.name, constr->dobj.name) == 0)
2507  {
2508  constr->conislocal = false;
2509  break;
2510  }
2511  }
2512  if (!constr->conislocal)
2513  break;
2514  }
2515  }
2516  }
2517 }
2518 
2519 
2520 /*
2521  * dumpDatabase:
2522  * dump the database definition
2523  */
2524 static void
2525 dumpDatabase(Archive *fout)
2526 {
2527  DumpOptions *dopt = fout->dopt;
2528  PQExpBuffer dbQry = createPQExpBuffer();
2529  PQExpBuffer delQry = createPQExpBuffer();
2530  PQExpBuffer creaQry = createPQExpBuffer();
2531  PGconn *conn = GetConnection(fout);
2532  PGresult *res;
2533  int i_tableoid,
2534  i_oid,
2535  i_dba,
2536  i_encoding,
2537  i_collate,
2538  i_ctype,
2539  i_frozenxid,
2540  i_minmxid,
2541  i_tablespace;
2542  CatalogId dbCatId;
2543  DumpId dbDumpId;
2544  const char *datname,
2545  *dba,
2546  *encoding,
2547  *collate,
2548  *ctype,
2549  *tablespace;
2550  uint32 frozenxid,
2551  minmxid;
2552 
2553  datname = PQdb(conn);
2554 
2555  if (g_verbose)
2556  write_msg(NULL, "saving database definition\n");
2557 
2558  /* Make sure we are in proper schema */
2559  selectSourceSchema(fout, "pg_catalog");
2560 
2561  /* Get the database owner and parameters from pg_database */
2562  if (fout->remoteVersion >= 90300)
2563  {
2564  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
2565  "(%s datdba) AS dba, "
2566  "pg_encoding_to_char(encoding) AS encoding, "
2567  "datcollate, datctype, datfrozenxid, datminmxid, "
2568  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2569  "shobj_description(oid, 'pg_database') AS description "
2570 
2571  "FROM pg_database "
2572  "WHERE datname = ",
2574  appendStringLiteralAH(dbQry, datname, fout);
2575  }
2576  else if (fout->remoteVersion >= 80400)
2577  {
2578  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
2579  "(%s datdba) AS dba, "
2580  "pg_encoding_to_char(encoding) AS encoding, "
2581  "datcollate, datctype, datfrozenxid, 0 AS datminmxid, "
2582  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2583  "shobj_description(oid, 'pg_database') AS description "
2584 
2585  "FROM pg_database "
2586  "WHERE datname = ",
2588  appendStringLiteralAH(dbQry, datname, fout);
2589  }
2590  else if (fout->remoteVersion >= 80200)
2591  {
2592  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
2593  "(%s datdba) AS dba, "
2594  "pg_encoding_to_char(encoding) AS encoding, "
2595  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2596  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2597  "shobj_description(oid, 'pg_database') AS description "
2598 
2599  "FROM pg_database "
2600  "WHERE datname = ",
2602  appendStringLiteralAH(dbQry, datname, fout);
2603  }
2604  else
2605  {
2606  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
2607  "(%s datdba) AS dba, "
2608  "pg_encoding_to_char(encoding) AS encoding, "
2609  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2610  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace "
2611  "FROM pg_database "
2612  "WHERE datname = ",
2614  appendStringLiteralAH(dbQry, datname, fout);
2615  }
2616 
2617  res = ExecuteSqlQueryForSingleRow(fout, dbQry->data);
2618 
2619  i_tableoid = PQfnumber(res, "tableoid");
2620  i_oid = PQfnumber(res, "oid");
2621  i_dba = PQfnumber(res, "dba");
2622  i_encoding = PQfnumber(res, "encoding");
2623  i_collate = PQfnumber(res, "datcollate");
2624  i_ctype = PQfnumber(res, "datctype");
2625  i_frozenxid = PQfnumber(res, "datfrozenxid");
2626  i_minmxid = PQfnumber(res, "datminmxid");
2627  i_tablespace = PQfnumber(res, "tablespace");
2628 
2629  dbCatId.tableoid = atooid(PQgetvalue(res, 0, i_tableoid));
2630  dbCatId.oid = atooid(PQgetvalue(res, 0, i_oid));
2631  dba = PQgetvalue(res, 0, i_dba);
2632  encoding = PQgetvalue(res, 0, i_encoding);
2633  collate = PQgetvalue(res, 0, i_collate);
2634  ctype = PQgetvalue(res, 0, i_ctype);
2635  frozenxid = atooid(PQgetvalue(res, 0, i_frozenxid));
2636  minmxid = atooid(PQgetvalue(res, 0, i_minmxid));
2637  tablespace = PQgetvalue(res, 0, i_tablespace);
2638 
2639  appendPQExpBuffer(creaQry, "CREATE DATABASE %s WITH TEMPLATE = template0",
2640  fmtId(datname));
2641  if (strlen(encoding) > 0)
2642  {
2643  appendPQExpBufferStr(creaQry, " ENCODING = ");
2644  appendStringLiteralAH(creaQry, encoding, fout);
2645  }
2646  if (strlen(collate) > 0)
2647  {
2648  appendPQExpBufferStr(creaQry, " LC_COLLATE = ");
2649  appendStringLiteralAH(creaQry, collate, fout);
2650  }
2651  if (strlen(ctype) > 0)
2652  {
2653  appendPQExpBufferStr(creaQry, " LC_CTYPE = ");
2654  appendStringLiteralAH(creaQry, ctype, fout);
2655  }
2656  if (strlen(tablespace) > 0 && strcmp(tablespace, "pg_default") != 0 &&
2657  !dopt->outputNoTablespaces)
2658  appendPQExpBuffer(creaQry, " TABLESPACE = %s",
2659  fmtId(tablespace));
2660  appendPQExpBufferStr(creaQry, ";\n");
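 /*
  * [Editorial note, not part of pg_dump.c] With hypothetical values, the
  * command assembled above looks like
  *
  *	CREATE DATABASE mydb WITH TEMPLATE = template0 ENCODING = 'UTF8'
  *		LC_COLLATE = 'C' LC_CTYPE = 'C' TABLESPACE = myspace;
  */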
2661 
2662  if (dopt->binary_upgrade)
2663  {
2664  appendPQExpBufferStr(creaQry, "\n-- For binary upgrade, set datfrozenxid and datminmxid.\n");
2665  appendPQExpBuffer(creaQry, "UPDATE pg_catalog.pg_database\n"
2666  "SET datfrozenxid = '%u', datminmxid = '%u'\n"
2667  "WHERE datname = ",
2668  frozenxid, minmxid);
2669  appendStringLiteralAH(creaQry, datname, fout);
2670  appendPQExpBufferStr(creaQry, ";\n");
2671 
2672  }
2673 
2674  appendPQExpBuffer(delQry, "DROP DATABASE %s;\n",
2675  fmtId(datname));
2676 
2677  dbDumpId = createDumpId();
2678 
2679  ArchiveEntry(fout,
2680  dbCatId, /* catalog ID */
2681  dbDumpId, /* dump ID */
2682  datname, /* Name */
2683  NULL, /* Namespace */
2684  NULL, /* Tablespace */
2685  dba, /* Owner */
2686  false, /* with oids */
2687  "DATABASE", /* Desc */
2688  SECTION_PRE_DATA, /* Section */
2689  creaQry->data, /* Create */
2690  delQry->data, /* Del */
2691  NULL, /* Copy */
2692  NULL, /* Deps */
2693  0, /* # Deps */
2694  NULL, /* Dumper */
2695  NULL); /* Dumper Arg */
2696 
2697  /*
2698  * pg_largeobject and pg_largeobject_metadata come from the old system
2699  * intact, so set their relfrozenxids and relminmxids.
2700  */
2701  if (dopt->binary_upgrade)
2702  {
2703  PGresult *lo_res;
2704  PQExpBuffer loFrozenQry = createPQExpBuffer();
2705  PQExpBuffer loOutQry = createPQExpBuffer();
2706  int i_relfrozenxid,
2707  i_relminmxid;
2708 
2709  /*
2710  * pg_largeobject
2711  */
2712  if (fout->remoteVersion >= 90300)
2713  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid\n"
2714  "FROM pg_catalog.pg_class\n"
2715  "WHERE oid = %u;\n",
2717  else
2718  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid\n"
2719  "FROM pg_catalog.pg_class\n"
2720  "WHERE oid = %u;\n",
2722 
2723  lo_res = ExecuteSqlQueryForSingleRow(fout, loFrozenQry->data);
2724 
2725  i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
2726  i_relminmxid = PQfnumber(lo_res, "relminmxid");
2727 
2728  appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, set pg_largeobject relfrozenxid and relminmxid\n");
2729  appendPQExpBuffer(loOutQry, "UPDATE pg_catalog.pg_class\n"
2730  "SET relfrozenxid = '%u', relminmxid = '%u'\n"
2731  "WHERE oid = %u;\n",
2732  atoi(PQgetvalue(lo_res, 0, i_relfrozenxid)),
2733  atoi(PQgetvalue(lo_res, 0, i_relminmxid)),
2734  LargeObjectRelationId);
2735  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2736  "pg_largeobject", NULL, NULL, "",
2737  false, "pg_largeobject", SECTION_PRE_DATA,
2738  loOutQry->data, "", NULL,
2739  NULL, 0,
2740  NULL, NULL);
2741 
2742  PQclear(lo_res);
2743 
2744  /*
2745  * pg_largeobject_metadata
2746  */
2747  if (fout->remoteVersion >= 90000)
2748  {
2749  resetPQExpBuffer(loFrozenQry);
2750  resetPQExpBuffer(loOutQry);
2751 
2752  if (fout->remoteVersion >= 90300)
2753  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid\n"
2754  "FROM pg_catalog.pg_class\n"
2755  "WHERE oid = %u;\n",
2757  else
2758  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid\n"
2759  "FROM pg_catalog.pg_class\n"
2760  "WHERE oid = %u;\n",
2762 
2763  lo_res = ExecuteSqlQueryForSingleRow(fout, loFrozenQry->data);
2764 
2765  i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
2766  i_relminmxid = PQfnumber(lo_res, "relminmxid");
2767 
2768  appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, set pg_largeobject_metadata relfrozenxid and relminmxid\n");
2769  appendPQExpBuffer(loOutQry, "UPDATE pg_catalog.pg_class\n"
2770  "SET relfrozenxid = '%u', relminmxid = '%u'\n"
2771  "WHERE oid = %u;\n",
2772  atoi(PQgetvalue(lo_res, 0, i_relfrozenxid)),
2773  atoi(PQgetvalue(lo_res, 0, i_relminmxid)),
2774  LargeObjectMetadataRelationId);
2775  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2776  "pg_largeobject_metadata", NULL, NULL, "",
2777  false, "pg_largeobject_metadata", SECTION_PRE_DATA,
2778  loOutQry->data, "", NULL,
2779  NULL, 0,
2780  NULL, NULL);
2781 
2782  PQclear(lo_res);
2783  }
2784 
2785  destroyPQExpBuffer(loFrozenQry);
2786  destroyPQExpBuffer(loOutQry);
2787  }
2788 
2789  /* Dump DB comment if any */
2790  if (fout->remoteVersion >= 80200)
2791  {
2792  /*
2793  * 8.2 and up keep comments on shared objects in a shared table, so we
2794  * cannot use the dumpComment() code used for other database objects.
2795  */
2796  char *comment = PQgetvalue(res, 0, PQfnumber(res, "description"));
2797 
2798  if (comment && strlen(comment))
2799  {
2800  resetPQExpBuffer(dbQry);
2801 
2802  /*
2803  * Generates warning when loaded into a differently-named
2804  * database.
2805  */
2806  appendPQExpBuffer(dbQry, "COMMENT ON DATABASE %s IS ", fmtId(datname));
2807  appendStringLiteralAH(dbQry, comment, fout);
2808  appendPQExpBufferStr(dbQry, ";\n");
2809 
2810  ArchiveEntry(fout, dbCatId, createDumpId(), datname, NULL, NULL,
2811  dba, false, "COMMENT", SECTION_NONE,
2812  dbQry->data, "", NULL,
2813  &dbDumpId, 1, NULL, NULL);
2814  }
2815  }
2816  else
2817  {
2818  resetPQExpBuffer(dbQry);
2819  appendPQExpBuffer(dbQry, "DATABASE %s", fmtId(datname));
2820  dumpComment(fout, dbQry->data, NULL, "",
2821  dbCatId, 0, dbDumpId);
2822  }
2823 
2824  /* Dump shared security label. */
2825  if (!dopt->no_security_labels && fout->remoteVersion >= 90200)
2826  {
2827  PGresult *shres;
2828  PQExpBuffer seclabelQry;
2829 
2830  seclabelQry = createPQExpBuffer();
2831 
2832  buildShSecLabelQuery(conn, "pg_database", dbCatId.oid, seclabelQry);
2833  shres = ExecuteSqlQuery(fout, seclabelQry->data, PGRES_TUPLES_OK);
2834  resetPQExpBuffer(seclabelQry);
2835  emitShSecLabels(conn, shres, seclabelQry, "DATABASE", datname);
2836  if (strlen(seclabelQry->data))
2837  ArchiveEntry(fout, dbCatId, createDumpId(), datname, NULL, NULL,
2838  dba, false, "SECURITY LABEL", SECTION_NONE,
2839  seclabelQry->data, "", NULL,
2840  &dbDumpId, 1, NULL, NULL);
2841  destroyPQExpBuffer(seclabelQry);
2842  PQclear(shres);
2843  }
2844 
2845  PQclear(res);
2846 
2847  destroyPQExpBuffer(dbQry);
2848  destroyPQExpBuffer(delQry);
2849  destroyPQExpBuffer(creaQry);
2850 }
2851 
2852 /*
2853  * dumpEncoding: put the correct encoding into the archive
2854  */
2855 static void
2856 dumpEncoding(Archive *AH)
2857 {
2858  const char *encname = pg_encoding_to_char(AH->encoding);
2859  PQExpBuffer qry = createPQExpBuffer();
2860 
2861  if (g_verbose)
2862  write_msg(NULL, "saving encoding = %s\n", encname);
2863 
2864  appendPQExpBufferStr(qry, "SET client_encoding = ");
2865  appendStringLiteralAH(qry, encname, AH);
2866  appendPQExpBufferStr(qry, ";\n");
2867 
2868  ArchiveEntry(AH, nilCatalogId, createDumpId(),
2869  "ENCODING", NULL, NULL, "",
2870  false, "ENCODING", SECTION_PRE_DATA,
2871  qry->data, "", NULL,
2872  NULL, 0,
2873  NULL, NULL);
2874 
2875  destroyPQExpBuffer(qry);
2876 }
2877 
2878 
2879 /*
2880  * dumpStdStrings: put the correct escape string behavior into the archive
2881  */
2882 static void
2883 dumpStdStrings(Archive *AH)
2884 {
2885  const char *stdstrings = AH->std_strings ? "on" : "off";
2886  PQExpBuffer qry = createPQExpBuffer();
2887 
2888  if (g_verbose)
2889  write_msg(NULL, "saving standard_conforming_strings = %s\n",
2890  stdstrings);
2891 
2892  appendPQExpBuffer(qry, "SET standard_conforming_strings = '%s';\n",
2893  stdstrings);
2894 
2895  ArchiveEntry(AH, nilCatalogId, createDumpId(),
2896  "STDSTRINGS", NULL, NULL, "",
2897  false, "STDSTRINGS", SECTION_PRE_DATA,
2898  qry->data, "", NULL,
2899  NULL, 0,
2900  NULL, NULL);
2901 
2902  destroyPQExpBuffer(qry);
2903 }
2904 
2905 
2906 /*
2907  * getBlobs:
2908  * Collect schema-level data about large objects
2909  */
2910 static void
2911 getBlobs(Archive *fout)
2912 {
2913  DumpOptions *dopt = fout->dopt;
2914  PQExpBuffer blobQry = createPQExpBuffer();
2915  BlobInfo *binfo;
2916  DumpableObject *bdata;
2917  PGresult *res;
2918  int ntups;
2919  int i;
2920  int i_oid;
2921  int i_lomowner;
2922  int i_lomacl;
2923  int i_rlomacl;
2924  int i_initlomacl;
2925  int i_initrlomacl;
2926 
2927  /* Verbose message */
2928  if (g_verbose)
2929  write_msg(NULL, "reading large objects\n");
2930 
2931  /* Make sure we are in proper schema */
2932  selectSourceSchema(fout, "pg_catalog");
2933 
2934  /* Fetch BLOB OIDs, and owner/ACL data if >= 9.0 */
2935  if (fout->remoteVersion >= 90600)
2936  {
2937  PQExpBuffer acl_subquery = createPQExpBuffer();
2938  PQExpBuffer racl_subquery = createPQExpBuffer();
2939  PQExpBuffer init_acl_subquery = createPQExpBuffer();
2940  PQExpBuffer init_racl_subquery = createPQExpBuffer();
2941 
2942  buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
2943  init_racl_subquery, "l.lomacl", "l.lomowner", "'L'",
2944  dopt->binary_upgrade);
2945 
2946  appendPQExpBuffer(blobQry,
2947  "SELECT l.oid, (%s l.lomowner) AS rolname, "
2948  "%s AS lomacl, "
2949  "%s AS rlomacl, "
2950  "%s AS initlomacl, "
2951  "%s AS initrlomacl "
2952  "FROM pg_largeobject_metadata l "
2953  "LEFT JOIN pg_init_privs pip ON "
2954  "(l.oid = pip.objoid "
2955  "AND pip.classoid = 'pg_largeobject'::regclass "
2956  "AND pip.objsubid = 0) ",
2958  acl_subquery->data,
2959  racl_subquery->data,
2960  init_acl_subquery->data,
2961  init_racl_subquery->data);
2962 
2963  destroyPQExpBuffer(acl_subquery);
2964  destroyPQExpBuffer(racl_subquery);
2965  destroyPQExpBuffer(init_acl_subquery);
2966  destroyPQExpBuffer(init_racl_subquery);
2967  }
2968  else if (fout->remoteVersion >= 90000)
2969  appendPQExpBuffer(blobQry,
2970  "SELECT oid, (%s lomowner) AS rolname, lomacl, "
2971  "NULL AS rlomacl, NULL AS initlomacl, "
2972  "NULL AS initrlomacl "
2973  " FROM pg_largeobject_metadata",
2975  else
2976  appendPQExpBufferStr(blobQry,
2977  "SELECT DISTINCT loid AS oid, "
2978  "NULL::name AS rolname, NULL::oid AS lomacl, "
2979  "NULL::oid AS rlomacl, NULL::oid AS initlomacl, "
2980  "NULL::oid AS initrlomacl "
2981  " FROM pg_largeobject");
2982 
2983  res = ExecuteSqlQuery(fout, blobQry->data, PGRES_TUPLES_OK);
2984 
2985  i_oid = PQfnumber(res, "oid");
2986  i_lomowner = PQfnumber(res, "rolname");
2987  i_lomacl = PQfnumber(res, "lomacl");
2988  i_rlomacl = PQfnumber(res, "rlomacl");
2989  i_initlomacl = PQfnumber(res, "initlomacl");
2990  i_initrlomacl = PQfnumber(res, "initrlomacl");
2991 
2992  ntups = PQntuples(res);
2993 
2994  /*
2995  * Each large object has its own BLOB archive entry.
2996  */
2997  binfo = (BlobInfo *) pg_malloc(ntups * sizeof(BlobInfo));
2998 
2999  for (i = 0; i < ntups; i++)
3000  {
3001  binfo[i].dobj.objType = DO_BLOB;
3002  binfo[i].dobj.catId.tableoid = LargeObjectRelationId;
3003  binfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
3004  AssignDumpId(&binfo[i].dobj);
3005 
3006  binfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oid));
3007  binfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_lomowner));
3008  binfo[i].blobacl = pg_strdup(PQgetvalue(res, i, i_lomacl));
3009  binfo[i].rblobacl = pg_strdup(PQgetvalue(res, i, i_rlomacl));
3010  binfo[i].initblobacl = pg_strdup(PQgetvalue(res, i, i_initlomacl));
3011  binfo[i].initrblobacl = pg_strdup(PQgetvalue(res, i, i_initrlomacl));
3012 
3013  if (PQgetisnull(res, i, i_lomacl) &&
3014  PQgetisnull(res, i, i_rlomacl) &&
3015  PQgetisnull(res, i, i_initlomacl) &&
3016  PQgetisnull(res, i, i_initrlomacl))
3017  binfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
3018 
3019  /*
3020  * In binary-upgrade mode for blobs, we do *not* dump out the data or
3021  * the ACLs, should any exist. The data and ACL (if any) will be
3022  * copied by pg_upgrade, which simply copies the pg_largeobject and
3023  * pg_largeobject_metadata tables.
3024  *
3025  * We *do* dump out the definition of the blob, because we need that
3026  * so that restoring comments (and anything else attached to the blob)
3027  * works; pg_upgrade copies the files behind pg_largeobject and
3028  * pg_largeobject_metadata after the dump is restored.
3029  */
3030  if (dopt->binary_upgrade)
3031  binfo[i].dobj.dump &= ~(DUMP_COMPONENT_DATA | DUMP_COMPONENT_ACL);
3032  }
3033 
3034  /*
3035  * If we have any large objects, a "BLOBS" archive entry is needed. This
3036  * is just a placeholder for sorting; it carries no data now.
3037  */
3038  if (ntups > 0)
3039  {
3040  bdata = (DumpableObject *) pg_malloc(sizeof(DumpableObject));
3041  bdata->objType = DO_BLOB_DATA;
3042  bdata->catId = nilCatalogId;
3043  AssignDumpId(bdata);
3044  bdata->name = pg_strdup("BLOBS");
3045  }
3046 
3047  PQclear(res);
3048  destroyPQExpBuffer(blobQry);
3049 }
3050 
3051 /*
3052  * dumpBlob
3053  *
3054  * dump the definition (metadata) of the given large object
3055  */
3056 static void
3057 dumpBlob(Archive *fout, BlobInfo *binfo)
3058 {
3059  PQExpBuffer cquery = createPQExpBuffer();
3060  PQExpBuffer dquery = createPQExpBuffer();
3061 
3062  appendPQExpBuffer(cquery,
3063  "SELECT pg_catalog.lo_create('%s');\n",
3064  binfo->dobj.name);
3065 
3066  appendPQExpBuffer(dquery,
3067  "SELECT pg_catalog.lo_unlink('%s');\n",
3068  binfo->dobj.name);
3069 
3070  if (binfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
3071  ArchiveEntry(fout, binfo->dobj.catId, binfo->dobj.dumpId,
3072  binfo->dobj.name,
3073  NULL, NULL,
3074  binfo->rolname, false,
3075  "BLOB", SECTION_PRE_DATA,
3076  cquery->data, dquery->data, NULL,
3077  NULL, 0,
3078  NULL, NULL);
3079 
3080  /* set up tag for comment and/or ACL */
3081  resetPQExpBuffer(cquery);
3082  appendPQExpBuffer(cquery, "LARGE OBJECT %s", binfo->dobj.name);
3083 
3084  /* Dump comment if any */
3085  if (binfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3086  dumpComment(fout, cquery->data,
3087  NULL, binfo->rolname,
3088  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3089 
3090  /* Dump security label if any */
3091  if (binfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3092  dumpSecLabel(fout, cquery->data,
3093  NULL, binfo->rolname,
3094  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3095 
3096  /* Dump ACL if any */
3097  if (binfo->blobacl && (binfo->dobj.dump & DUMP_COMPONENT_ACL))
3098  dumpACL(fout, binfo->dobj.catId, binfo->dobj.dumpId, "LARGE OBJECT",
3099  binfo->dobj.name, NULL, cquery->data,
3100  NULL, binfo->rolname, binfo->blobacl, binfo->rblobacl,
3101  binfo->initblobacl, binfo->initrblobacl);
3102 
3103  destroyPQExpBuffer(cquery);
3104  destroyPQExpBuffer(dquery);
3105 }
3106 
3107 /*
3108  * dumpBlobs:
3109  * dump the data contents of all large objects
3110  */
3111 static int
3112 dumpBlobs(Archive *fout, void *arg)
3113 {
3114  const char *blobQry;
3115  const char *blobFetchQry;
3116  PGconn *conn = GetConnection(fout);
3117  PGresult *res;
3118  char buf[LOBBUFSIZE];
3119  int ntups;
3120  int i;
3121  int cnt;
3122 
3123  if (g_verbose)
3124  write_msg(NULL, "saving large objects\n");
3125 
3126  /* Make sure we are in proper schema */
3127  selectSourceSchema(fout, "pg_catalog");
3128 
3129  /*
3130  * Currently, we re-fetch all BLOB OIDs using a cursor. Consider scanning
3131  * the already-in-memory dumpable objects instead...
3132  */
3133  if (fout->remoteVersion >= 90000)
3134  blobQry = "DECLARE bloboid CURSOR FOR SELECT oid FROM pg_largeobject_metadata";
3135  else
3136  blobQry = "DECLARE bloboid CURSOR FOR SELECT DISTINCT loid FROM pg_largeobject";
3137 
3138  ExecuteSqlStatement(fout, blobQry);
3139 
3140  /* Command to fetch from cursor */
3141  blobFetchQry = "FETCH 1000 IN bloboid";
3142 
3143  do
3144  {
3145  /* Do a fetch */
3146  res = ExecuteSqlQuery(fout, blobFetchQry, PGRES_TUPLES_OK);
3147 
3148  /* Process the tuples, if any */
3149  ntups = PQntuples(res);
3150  for (i = 0; i < ntups; i++)
3151  {
3152  Oid blobOid;
3153  int loFd;
3154 
3155  blobOid = atooid(PQgetvalue(res, i, 0));
3156  /* Open the BLOB */
3157  loFd = lo_open(conn, blobOid, INV_READ);
3158  if (loFd == -1)
3159  exit_horribly(NULL, "could not open large object %u: %s",
3160  blobOid, PQerrorMessage(conn));
3161 
3162  StartBlob(fout, blobOid);
3163 
3164  /* Now read it in chunks, sending data to archive */
3165  do
3166  {
3167  cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
3168  if (cnt < 0)
3169  exit_horribly(NULL, "error reading large object %u: %s",
3170  blobOid, PQerrorMessage(conn));
3171 
3172  WriteData(fout, buf, cnt);
3173  } while (cnt > 0);
3174 
3175  lo_close(conn, loFd);
3176 
3177  EndBlob(fout, blobOid);
3178  }
3179 
3180  PQclear(res);
3181  } while (ntups > 0);
3182 
3183  return 1;
3184 }
3185 
3186 /*
3187  * getPolicies
3188  * get information about policies on a dumpable table.
3189  */
3190 void
3191 getPolicies(Archive *fout, TableInfo tblinfo[], int numTables)
3192 {
3193  PQExpBuffer query;
3194  PGresult *res;
3195  PolicyInfo *polinfo;
3196  int i_oid;
3197  int i_tableoid;
3198  int i_polname;
3199  int i_polcmd;
3200  int i_polpermissive;
3201  int i_polroles;
3202  int i_polqual;
3203  int i_polwithcheck;
3204  int i,
3205  j,
3206  ntups;
3207 
3208  if (fout->remoteVersion < 90500)
3209  return;
3210 
3211  query = createPQExpBuffer();
3212 
3213  for (i = 0; i < numTables; i++)
3214  {
3215  TableInfo *tbinfo = &tblinfo[i];
3216 
3217  /* Ignore row security on tables not to be dumped */
3218  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_POLICY))
3219  continue;
3220 
3221  if (g_verbose)
3222  write_msg(NULL, "reading row security enabled for table \"%s.%s\"\n",
3223  tbinfo->dobj.namespace->dobj.name,
3224  tbinfo->dobj.name);
3225 
3226  /*
3227  * Get row-security-enabled information for the table. We represent
3228  * RLS being enabled on a table by creating a PolicyInfo object with
3229  * an empty policy.
3230  */
3231  if (tbinfo->rowsec)
3232  {
3233  /*
3234  * Note: use tableoid 0 so that this object won't be mistaken for
3235  * something that pg_depend entries apply to.
3236  */
3237  polinfo = pg_malloc(sizeof(PolicyInfo));
3238  polinfo->dobj.objType = DO_POLICY;
3239  polinfo->dobj.catId.tableoid = 0;
3240  polinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
3241  AssignDumpId(&polinfo->dobj);
3242  polinfo->dobj.namespace = tbinfo->dobj.namespace;
3243  polinfo->dobj.name = pg_strdup(tbinfo->dobj.name);
3244  polinfo->poltable = tbinfo;
3245  polinfo->polname = NULL;
3246  polinfo->polcmd = '\0';
3247  polinfo->polpermissive = 0;
3248  polinfo->polroles = NULL;
3249  polinfo->polqual = NULL;
3250  polinfo->polwithcheck = NULL;
3251  }
3252 
3253  if (g_verbose)
3254  write_msg(NULL, "reading policies for table \"%s.%s\"\n",
3255  tbinfo->dobj.namespace->dobj.name,
3256  tbinfo->dobj.name);
3257 
3258  /*
3259  * select table schema to ensure regproc name is qualified if needed
3260  */
3261  selectSourceSchema(fout, tbinfo->dobj.namespace->dobj.name);
3262 
3263  resetPQExpBuffer(query);
3264 
3265  /* Get the policies for the table. */
3266  if (fout->remoteVersion >= 100000)
3267  appendPQExpBuffer(query,
3268  "SELECT oid, tableoid, pol.polname, pol.polcmd, pol.polpermissive, "
3269  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
3270  " pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
3271  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
3272  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
3273  "FROM pg_catalog.pg_policy pol "
3274  "WHERE polrelid = '%u'",
3275  tbinfo->dobj.catId.oid);
3276  else
3277  appendPQExpBuffer(query,
3278  "SELECT oid, tableoid, pol.polname, pol.polcmd, 't' as polpermissive, "
3279  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
3280  " pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
3281  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
3282  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
3283  "FROM pg_catalog.pg_policy pol "
3284  "WHERE polrelid = '%u'",
3285  tbinfo->dobj.catId.oid);
3286  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3287 
3288  ntups = PQntuples(res);
3289 
3290  if (ntups == 0)
3291  {
3292  /*
3293  * No explicit policies to handle (only the default-deny policy,
3294  * which is handled as part of the table definition). Clean up
3295  * and return.
3296  */
3297  PQclear(res);
3298  continue;
3299  }
3300 
3301  i_oid = PQfnumber(res, "oid");
3302  i_tableoid = PQfnumber(res, "tableoid");
3303  i_polname = PQfnumber(res, "polname");
3304  i_polcmd = PQfnumber(res, "polcmd");
3305  i_polpermissive = PQfnumber(res, "polpermissive");
3306  i_polroles = PQfnumber(res, "polroles");
3307  i_polqual = PQfnumber(res, "polqual");
3308  i_polwithcheck = PQfnumber(res, "polwithcheck");
3309 
3310  polinfo = pg_malloc(ntups * sizeof(PolicyInfo));
3311 
3312  for (j = 0; j < ntups; j++)
3313  {
3314  polinfo[j].dobj.objType = DO_POLICY;
3315  polinfo[j].dobj.catId.tableoid =
3316  atooid(PQgetvalue(res, j, i_tableoid));
3317  polinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
3318  AssignDumpId(&polinfo[j].dobj);
3319  polinfo[j].dobj.namespace = tbinfo->dobj.namespace;
3320  polinfo[j].poltable = tbinfo;
3321  polinfo[j].polname = pg_strdup(PQgetvalue(res, j, i_polname));
3322  polinfo[j].dobj.name = pg_strdup(polinfo[j].polname);
3323 
3324  polinfo[j].polcmd = *(PQgetvalue(res, j, i_polcmd));
3325  polinfo[j].polpermissive = *(PQgetvalue(res, j, i_polpermissive)) == 't';
3326 
3327  if (PQgetisnull(res, j, i_polroles))
3328  polinfo[j].polroles = NULL;
3329  else
3330  polinfo[j].polroles = pg_strdup(PQgetvalue(res, j, i_polroles));
3331 
3332  if (PQgetisnull(res, j, i_polqual))
3333  polinfo[j].polqual = NULL;
3334  else
3335  polinfo[j].polqual = pg_strdup(PQgetvalue(res, j, i_polqual));
3336 
3337  if (PQgetisnull(res, j, i_polwithcheck))
3338  polinfo[j].polwithcheck = NULL;
3339  else
3340  polinfo[j].polwithcheck
3341  = pg_strdup(PQgetvalue(res, j, i_polwithcheck));
3342  }
3343  PQclear(res);
3344  }
3345  destroyPQExpBuffer(query);
3346 }
3347 
3348 /*
3349  * dumpPolicy
3350  * dump the definition of the given policy
3351  */
3352 static void
3353 dumpPolicy(Archive *fout, PolicyInfo *polinfo)
3354 {
3355  DumpOptions *dopt = fout->dopt;
3356  TableInfo *tbinfo = polinfo->poltable;
3357  PQExpBuffer query;
3358  PQExpBuffer delqry;
3359  const char *cmd;
3360  char *tag;
3361 
3362  if (dopt->dataOnly)
3363  return;
3364 
3365  /*
3366  * If polname is NULL, then this record is just indicating that ROW LEVEL
3367  * SECURITY is enabled for the table. Dump as ALTER TABLE <table> ENABLE
3368  * ROW LEVEL SECURITY.
3369  */
3370  if (polinfo->polname == NULL)
3371  {
3372  query = createPQExpBuffer();
3373 
3374  appendPQExpBuffer(query, "ALTER TABLE %s ENABLE ROW LEVEL SECURITY;",
3375  fmtId(polinfo->dobj.name));
3376 
3377  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3378  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3379  polinfo->dobj.name,
3380  polinfo->dobj.namespace->dobj.name,
3381  NULL,
3382  tbinfo->rolname, false,
3383  "ROW SECURITY", SECTION_POST_DATA,
3384  query->data, "", NULL,
3385  NULL, 0,
3386  NULL, NULL);
3387 
3388  destroyPQExpBuffer(query);
3389  return;
3390  }
3391 
3392  if (polinfo->polcmd == '*')
3393  cmd = "";
3394  else if (polinfo->polcmd == 'r')
3395  cmd = " FOR SELECT";
3396  else if (polinfo->polcmd == 'a')
3397  cmd = " FOR INSERT";
3398  else if (polinfo->polcmd == 'w')
3399  cmd = " FOR UPDATE";
3400  else if (polinfo->polcmd == 'd')
3401  cmd = " FOR DELETE";
3402  else
3403  {
3404  write_msg(NULL, "unexpected policy command type: %c\n",
3405  polinfo->polcmd);
3406  exit_nicely(1);
3407  }
3408 
3409  query = createPQExpBuffer();
3410  delqry = createPQExpBuffer();
3411 
3412  appendPQExpBuffer(query, "CREATE POLICY %s", fmtId(polinfo->polname));
3413 
3414  appendPQExpBuffer(query, " ON %s%s%s", fmtId(tbinfo->dobj.name),
3415  !polinfo->polpermissive ? " AS RESTRICTIVE" : "", cmd);
3416 
3417  if (polinfo->polroles != NULL)
3418  appendPQExpBuffer(query, " TO %s", polinfo->polroles);
3419 
3420  if (polinfo->polqual != NULL)
3421  appendPQExpBuffer(query, " USING (%s)", polinfo->polqual);
3422 
3423  if (polinfo->polwithcheck != NULL)
3424  appendPQExpBuffer(query, " WITH CHECK (%s)", polinfo->polwithcheck);
3425 
3426  appendPQExpBuffer(query, ";\n");
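 /*
  * [Editorial note, not part of pg_dump.c] With hypothetical names, the
  * command assembled above looks like
  *
  *	CREATE POLICY p1 ON mytable AS RESTRICTIVE FOR SELECT TO alice
  *		USING ((owner = CURRENT_USER));
  */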
3427 
3428  appendPQExpBuffer(delqry, "DROP POLICY %s", fmtId(polinfo->polname));
3429  appendPQExpBuffer(delqry, " ON %s;\n", fmtId(tbinfo->dobj.name));
3430 
3431  tag = psprintf("%s %s", tbinfo->dobj.name, polinfo->dobj.name);
3432 
3433  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3434  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3435  tag,
3436  polinfo->dobj.namespace->dobj.name,
3437  NULL,
3438  tbinfo->rolname, false,
3439  "POLICY", SECTION_POST_DATA,
3440  query->data, delqry->data, NULL,
3441  NULL, 0,
3442  NULL, NULL);
3443 
3444  free(tag);
3445  destroyPQExpBuffer(query);
3446  destroyPQExpBuffer(delqry);
3447 }
3448 
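/*
 * For illustration (hypothetical object names): a permissive SELECT policy
 * "p_sel" on table "accounts", limited to role "alice" and carrying the
 * qual (owner = CURRENT_USER), would be registered by the code above as,
 * roughly,
 *
 *     CREATE POLICY p_sel ON accounts FOR SELECT TO alice
 *         USING ((owner = CURRENT_USER));
 *
 * together with the matching drop command
 *
 *     DROP POLICY p_sel ON accounts;
 */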
3449 /*
3450  * getPublications
3451  * get information about publications
3452  */
3453 void
 3454 getPublications(Archive *fout)
 3455 {
3456  DumpOptions *dopt = fout->dopt;
3457  PQExpBuffer query;
3458  PGresult *res;
3459  PublicationInfo *pubinfo;
3460  int i_tableoid;
3461  int i_oid;
3462  int i_pubname;
3463  int i_rolname;
3464  int i_puballtables;
3465  int i_pubinsert;
3466  int i_pubupdate;
3467  int i_pubdelete;
3468  int i,
3469  ntups;
3470 
3471  if (dopt->no_publications || fout->remoteVersion < 100000)
3472  return;
3473 
3474  query = createPQExpBuffer();
3475 
3476  resetPQExpBuffer(query);
3477 
3478  /* Get the publications. */
3479  appendPQExpBuffer(query,
3480  "SELECT p.tableoid, p.oid, p.pubname, "
3481  "(%s p.pubowner) AS rolname, "
3482  "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete "
3483  "FROM pg_catalog.pg_publication p",
 3484  username_subquery);
 3485 
3486  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3487 
3488  ntups = PQntuples(res);
3489 
3490  i_tableoid = PQfnumber(res, "tableoid");
3491  i_oid = PQfnumber(res, "oid");
3492  i_pubname = PQfnumber(res, "pubname");
3493  i_rolname = PQfnumber(res, "rolname");
3494  i_puballtables = PQfnumber(res, "puballtables");
3495  i_pubinsert = PQfnumber(res, "pubinsert");
3496  i_pubupdate = PQfnumber(res, "pubupdate");
3497  i_pubdelete = PQfnumber(res, "pubdelete");
3498 
3499  pubinfo = pg_malloc(ntups * sizeof(PublicationInfo));
3500 
3501  for (i = 0; i < ntups; i++)
3502  {
3503  pubinfo[i].dobj.objType = DO_PUBLICATION;
3504  pubinfo[i].dobj.catId.tableoid =
3505  atooid(PQgetvalue(res, i, i_tableoid));
3506  pubinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
3507  AssignDumpId(&pubinfo[i].dobj);
3508  pubinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_pubname));
3509  pubinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
3510  pubinfo[i].puballtables =
3511  (strcmp(PQgetvalue(res, i, i_puballtables), "t") == 0);
3512  pubinfo[i].pubinsert =
3513  (strcmp(PQgetvalue(res, i, i_pubinsert), "t") == 0);
3514  pubinfo[i].pubupdate =
3515  (strcmp(PQgetvalue(res, i, i_pubupdate), "t") == 0);
3516  pubinfo[i].pubdelete =
3517  (strcmp(PQgetvalue(res, i, i_pubdelete), "t") == 0);
3518 
3519  if (strlen(pubinfo[i].rolname) == 0)
3520  write_msg(NULL, "WARNING: owner of publication \"%s\" appears to be invalid\n",
3521  pubinfo[i].dobj.name);
3522 
3523  /* Decide whether we want to dump it */
3524  selectDumpableObject(&(pubinfo[i].dobj), fout);
3525  }
3526  PQclear(res);
3527 
3528  destroyPQExpBuffer(query);
3529 }
3530 
3531 /*
3532  * dumpPublication
3533  * dump the definition of the given publication
3534  */
3535 static void
 3536 dumpPublication(Archive *fout, PublicationInfo *pubinfo)
 3537 {
3538  PQExpBuffer delq;
3539  PQExpBuffer query;
3540  PQExpBuffer labelq;
3541  bool first = true;
3542 
3543  if (!(pubinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3544  return;
3545 
3546  delq = createPQExpBuffer();
3547  query = createPQExpBuffer();
3548  labelq = createPQExpBuffer();
3549 
3550  appendPQExpBuffer(delq, "DROP PUBLICATION %s;\n",
3551  fmtId(pubinfo->dobj.name));
3552 
3553  appendPQExpBuffer(query, "CREATE PUBLICATION %s",
3554  fmtId(pubinfo->dobj.name));
3555 
3556  appendPQExpBuffer(labelq, "PUBLICATION %s", fmtId(pubinfo->dobj.name));
3557 
3558  if (pubinfo->puballtables)
3559  appendPQExpBufferStr(query, " FOR ALL TABLES");
3560 
3561  appendPQExpBufferStr(query, " WITH (publish = '");
3562  if (pubinfo->pubinsert)
3563  {
3564  appendPQExpBufferStr(query, "insert");
3565  first = false;
3566  }
3567 
3568  if (pubinfo->pubupdate)
3569  {
3570  if (!first)
3571  appendPQExpBufferStr(query, ", ");
3572 
3573  appendPQExpBufferStr(query, "update");
3574  first = false;
3575  }
3576 
3577  if (pubinfo->pubdelete)
3578  {
3579  if (!first)
3580  appendPQExpBufferStr(query, ", ");
3581 
3582  appendPQExpBufferStr(query, "delete");
3583  first = false;
3584  }
3585 
3586  appendPQExpBufferStr(query, "');\n");
3587 
3588  ArchiveEntry(fout, pubinfo->dobj.catId, pubinfo->dobj.dumpId,
3589  pubinfo->dobj.name,
3590  NULL,
3591  NULL,
3592  pubinfo->rolname, false,
3593  "PUBLICATION", SECTION_POST_DATA,
3594  query->data, delq->data, NULL,
3595  NULL, 0,
3596  NULL, NULL);
3597 
3598  if (pubinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3599  dumpComment(fout, labelq->data,
3600  NULL, pubinfo->rolname,
3601  pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
3602 
3603  if (pubinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3604  dumpSecLabel(fout, labelq->data,
3605  NULL, pubinfo->rolname,
3606  pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
3607 
3608  destroyPQExpBuffer(delq);
3609  destroyPQExpBuffer(query);
3610 }
3611 
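/*
 * For illustration (hypothetical publication name): a publication "pub_all"
 * defined FOR ALL TABLES that publishes inserts and updates but not deletes
 * would be archived by the code above roughly as
 *
 *     CREATE PUBLICATION pub_all FOR ALL TABLES
 *         WITH (publish = 'insert, update');
 *
 * with the corresponding drop command
 *
 *     DROP PUBLICATION pub_all;
 */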
3612 /*
3613  * getPublicationTables
3614  * get information about publication membership for dumpable tables.
3615  */
3616 void
 3617 getPublicationTables(Archive *fout, TableInfo tblinfo[], int numTables)
 3618 {
3619  PQExpBuffer query;
3620  PGresult *res;
3621  PublicationRelInfo *pubrinfo;
3622  int i_tableoid;
3623  int i_oid;
3624  int i_pubname;
3625  int i,
3626  j,
3627  ntups;
3628 
3629  if (fout->remoteVersion < 100000)
3630  return;
3631 
3632  query = createPQExpBuffer();
3633 
3634  for (i = 0; i < numTables; i++)
3635  {
3636  TableInfo *tbinfo = &tblinfo[i];
3637 
 3638  /* Only plain tables can be added to publications. */
3639  if (tbinfo->relkind != RELKIND_RELATION)
3640  continue;
3641 
3642  /*
3643  * Ignore publication membership of tables whose definitions are not
3644  * to be dumped.
3645  */
3646  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3647  continue;
3648 
3649  if (g_verbose)
3650  write_msg(NULL, "reading publication membership for table \"%s.%s\"\n",
3651  tbinfo->dobj.namespace->dobj.name,
3652  tbinfo->dobj.name);
3653 
3654  resetPQExpBuffer(query);
3655 
3656  /* Get the publication membership for the table. */
3657  appendPQExpBuffer(query,
3658  "SELECT pr.tableoid, pr.oid, p.pubname "
3659  "FROM pg_catalog.pg_publication_rel pr,"
3660  " pg_catalog.pg_publication p "
3661  "WHERE pr.prrelid = '%u'"
3662  " AND p.oid = pr.prpubid",
3663  tbinfo->dobj.catId.oid);
3664  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3665 
3666  ntups = PQntuples(res);
3667 
3668  if (ntups == 0)
3669  {
3670  /*
 3671  * Table is not a member of any publications. Clean up and continue.
3672  */
3673  PQclear(res);
3674  continue;
3675  }
3676 
3677  i_tableoid = PQfnumber(res, "tableoid");
3678  i_oid = PQfnumber(res, "oid");
3679  i_pubname = PQfnumber(res, "pubname");
3680 
3681  pubrinfo = pg_malloc(ntups * sizeof(PublicationRelInfo));
3682 
3683  for (j = 0; j < ntups; j++)
3684  {
3685  pubrinfo[j].dobj.objType = DO_PUBLICATION_REL;
3686  pubrinfo[j].dobj.catId.tableoid =
3687  atooid(PQgetvalue(res, j, i_tableoid));
3688  pubrinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
3689  AssignDumpId(&pubrinfo[j].dobj);
3690  pubrinfo[j].dobj.namespace = tbinfo->dobj.namespace;
3691  pubrinfo[j].dobj.name = tbinfo->dobj.name;
3692  pubrinfo[j].pubname = pg_strdup(PQgetvalue(res, j, i_pubname));
3693  pubrinfo[j].pubtable = tbinfo;
3694 
3695  /* Decide whether we want to dump it */
3696  selectDumpablePublicationTable(&(pubrinfo[j].dobj), fout);
3697  }
3698  PQclear(res);
3699  }
3700  destroyPQExpBuffer(query);
3701 }
3702 
3703 /*
3704  * dumpPublicationTable
3705  * dump the definition of the given publication table mapping
3706  */
3707 static void
 3708 dumpPublicationTable(Archive *fout, PublicationRelInfo *pubrinfo)
 3709 {
3710  TableInfo *tbinfo = pubrinfo->pubtable;
3711  PQExpBuffer query;
3712  char *tag;
3713 
3714  if (!(pubrinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3715  return;
3716 
3717  tag = psprintf("%s %s", pubrinfo->pubname, tbinfo->dobj.name);
3718 
3719  query = createPQExpBuffer();
3720 
3721  appendPQExpBuffer(query, "ALTER PUBLICATION %s ADD TABLE ONLY",
3722  fmtId(pubrinfo->pubname));
3723  appendPQExpBuffer(query, " %s;",
3724  fmtId(tbinfo->dobj.name));
3725 
3726  /*
 3727  * There is no point in creating a drop query, as the drop is done
 3728  * by the table drop.
3729  */
3730  ArchiveEntry(fout, pubrinfo->dobj.catId, pubrinfo->dobj.dumpId,
3731  tag,
3732  tbinfo->dobj.namespace->dobj.name,
3733  NULL,
3734  "", false,
3735  "PUBLICATION TABLE", SECTION_POST_DATA,
3736  query->data, "", NULL,
3737  NULL, 0,
3738  NULL, NULL);
3739 
3740  free(tag);
3741  destroyPQExpBuffer(query);
3742 }
3743 
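/*
 * For illustration (hypothetical names): membership of table "accounts" in
 * publication "pub_accounts" is dumped as a single statement of the form
 *
 *     ALTER PUBLICATION pub_accounts ADD TABLE ONLY accounts;
 *
 * No drop command is registered, for the reason given in the comment above.
 */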
3744 /*
3745  * Is the currently connected user a superuser?
3746  */
3747 static bool
 3748 is_superuser(Archive *fout)
 3749 {
3750  ArchiveHandle *AH = (ArchiveHandle *) fout;
3751  const char *val;
3752 
3753  val = PQparameterStatus(AH->connection, "is_superuser");
3754 
3755  if (val && strcmp(val, "on") == 0)
3756  return true;
3757 
3758  return false;
3759 }
3760 
3761 /*
3762  * getSubscriptions
3763  * get information about subscriptions
3764  */
3765 void
 3766 getSubscriptions(Archive *fout)
 3767 {
3768  DumpOptions *dopt = fout->dopt;
3769  PQExpBuffer query;
3770  PGresult *res;
3771  SubscriptionInfo *subinfo;
3772  int i_tableoid;
3773  int i_oid;
3774  int i_subname;
3775  int i_rolname;
3776  int i_subconninfo;
3777  int i_subslotname;
3778  int i_subsynccommit;
3779  int i_subpublications;
3780  int i,
3781  ntups;
3782 
3783  if (dopt->no_subscriptions || fout->remoteVersion < 100000)
3784  return;
3785 
3786  if (!is_superuser(fout))
3787  {
3788  int n;
3789 
3790  res = ExecuteSqlQuery(fout,
3791  "SELECT count(*) FROM pg_subscription "
3792  "WHERE subdbid = (SELECT oid FROM pg_catalog.pg_database"
3793  " WHERE datname = current_database())",
3794  PGRES_TUPLES_OK);
3795  n = atoi(PQgetvalue(res, 0, 0));
3796  if (n > 0)
3797  write_msg(NULL, "WARNING: subscriptions not dumped because current user is not a superuser\n");
3798  PQclear(res);
3799  return;
3800  }
3801 
3802  query = createPQExpBuffer();
3803 
3804  resetPQExpBuffer(query);
3805 
 3806  /* Get the subscriptions in the current database. */
3807  appendPQExpBuffer(query,
3808  "SELECT s.tableoid, s.oid, s.subname,"
3809  "(%s s.subowner) AS rolname, "
3810  " s.subconninfo, s.subslotname, s.subsynccommit, "
3811  " s.subpublications "
3812  "FROM pg_catalog.pg_subscription s "
3813  "WHERE s.subdbid = (SELECT oid FROM pg_catalog.pg_database"
3814  " WHERE datname = current_database())",
 3815  username_subquery);
 3816  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3817 
3818  ntups = PQntuples(res);
3819 
3820  i_tableoid = PQfnumber(res, "tableoid");
3821  i_oid = PQfnumber(res, "oid");
3822  i_subname = PQfnumber(res, "subname");
3823  i_rolname = PQfnumber(res, "rolname");
3824  i_subconninfo = PQfnumber(res, "subconninfo");
3825  i_subslotname = PQfnumber(res, "subslotname");
3826  i_subsynccommit = PQfnumber(res, "subsynccommit");
3827  i_subpublications = PQfnumber(res, "subpublications");
3828 
3829  subinfo = pg_malloc(ntups * sizeof(SubscriptionInfo));
3830 
3831  for (i = 0; i < ntups; i++)
3832  {
3833  subinfo[i].dobj.objType = DO_SUBSCRIPTION;
3834  subinfo[i].dobj.catId.tableoid =
3835  atooid(PQgetvalue(res, i, i_tableoid));
3836  subinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
3837  AssignDumpId(&subinfo[i].dobj);
3838  subinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_subname));
3839  subinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
3840  subinfo[i].subconninfo = pg_strdup(PQgetvalue(res, i, i_subconninfo));
3841  if (PQgetisnull(res, i, i_subslotname))
3842  subinfo[i].subslotname = NULL;
3843  else
3844  subinfo[i].subslotname = pg_strdup(PQgetvalue(res, i, i_subslotname));
3845  subinfo[i].subsynccommit =
3846  pg_strdup(PQgetvalue(res, i, i_subsynccommit));
3847  subinfo[i].subpublications =
3848  pg_strdup(PQgetvalue(res, i, i_subpublications));
3849 
3850  if (strlen(subinfo[i].rolname) == 0)
3851  write_msg(NULL, "WARNING: owner of subscription \"%s\" appears to be invalid\n",
3852  subinfo[i].dobj.name);
3853 
3854  /* Decide whether we want to dump it */
3855  selectDumpableObject(&(subinfo[i].dobj), fout);
3856  }
3857  PQclear(res);
3858 
3859  destroyPQExpBuffer(query);
3860 }
3861 
3862 /*
3863  * dumpSubscription
3864  * dump the definition of the given subscription
3865  */
3866 static void
 3867 dumpSubscription(Archive *fout, SubscriptionInfo *subinfo)
 3868 {
3869  PQExpBuffer delq;
3870  PQExpBuffer query;
3871  PQExpBuffer labelq;
3872  PQExpBuffer publications;
3873  char **pubnames = NULL;
3874  int npubnames = 0;
3875  int i;
3876 
3877  if (!(subinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3878  return;
3879 
3880  delq = createPQExpBuffer();
3881  query = createPQExpBuffer();
3882  labelq = createPQExpBuffer();
3883 
3884  appendPQExpBuffer(delq, "DROP SUBSCRIPTION %s;\n",
3885  fmtId(subinfo->dobj.name));
3886 
3887  appendPQExpBuffer(query, "CREATE SUBSCRIPTION %s CONNECTION ",
3888  fmtId(subinfo->dobj.name));
3889  appendStringLiteralAH(query, subinfo->subconninfo, fout);
3890 
3891  /* Build list of quoted publications and append them to query. */
3892  if (!parsePGArray(subinfo->subpublications, &pubnames, &npubnames))
3893  {
3894  write_msg(NULL,
3895  "WARNING: could not parse subpublications array\n");
3896  if (pubnames)
3897  free(pubnames);
3898  pubnames = NULL;
3899  npubnames = 0;
3900  }
3901 
3902  publications = createPQExpBuffer();
3903  for (i = 0; i < npubnames; i++)
3904  {
3905  if (i > 0)
3906  appendPQExpBufferStr(publications, ", ");
3907 
3908  appendPQExpBufferStr(publications, fmtId(pubnames[i]));
3909  }
3910 
3911  appendPQExpBuffer(query, " PUBLICATION %s WITH (connect = false, slot_name = ", publications->data);
3912  if (subinfo->subslotname)
3913  appendStringLiteralAH(query, subinfo->subslotname, fout);
3914  else
3915  appendPQExpBufferStr(query, "NONE");
3916 
3917  if (strcmp(subinfo->subsynccommit, "off") != 0)
3918  appendPQExpBuffer(query, ", synchronous_commit = %s", fmtId(subinfo->subsynccommit));
3919 
3920  appendPQExpBufferStr(query, ");\n");
3921 
3922  appendPQExpBuffer(labelq, "SUBSCRIPTION %s", fmtId(subinfo->dobj.name));
3923 
3924  ArchiveEntry(fout, subinfo->dobj.catId, subinfo->dobj.dumpId,
3925  subinfo->dobj.name,
3926  NULL,
3927  NULL,
3928  subinfo->rolname, false,
3929  "SUBSCRIPTION", SECTION_POST_DATA,
3930  query->data, delq->data, NULL,
3931  NULL, 0,
3932  NULL, NULL);
3933 
3934  if (subinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3935  dumpComment(fout, labelq->data,
3936  NULL, subinfo->rolname,
3937  subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
3938 
3939  if (subinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3940  dumpSecLabel(fout, labelq->data,
3941  NULL, subinfo->rolname,
3942  subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
3943 
3944  destroyPQExpBuffer(publications);
3945  if (pubnames)
3946  free(pubnames);
3947 
3948  destroyPQExpBuffer(delq);
3949  destroyPQExpBuffer(query);
3950 }
3951 
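/*
 * For illustration (hypothetical names and connection string): a
 * subscription "sub1" to publication "pub1" using slot "sub1" and the
 * default synchronous_commit setting would be dumped roughly as
 *
 *     CREATE SUBSCRIPTION sub1 CONNECTION 'host=primary dbname=app'
 *         PUBLICATION pub1 WITH (connect = false, slot_name = 'sub1');
 *
 * Dumping with connect = false means the restore does not contact the
 * publisher or create a replication slot.
 */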
3952 static void
 3953 binary_upgrade_set_type_oids_by_type_oid(Archive *fout,
 3954  PQExpBuffer upgrade_buffer,
3955  Oid pg_type_oid,
3956  bool force_array_type)
3957 {
3958  PQExpBuffer upgrade_query = createPQExpBuffer();
3959  PGresult *res;
3960  Oid pg_type_array_oid;
3961 
3962  appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type oid\n");
3963  appendPQExpBuffer(upgrade_buffer,
3964  "SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('%u'::pg_catalog.oid);\n\n",
3965  pg_type_oid);
3966 
 3967  /* we only support old cluster versions >= 8.3 for binary upgrades */
3968  appendPQExpBuffer(upgrade_query,
3969  "SELECT typarray "
3970  "FROM pg_catalog.pg_type "
3971  "WHERE oid = '%u'::pg_catalog.oid;",
3972  pg_type_oid);
3973 
3974  res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
3975 
3976  pg_type_array_oid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typarray")));
3977 
3978  PQclear(res);
3979 
3980  if (!OidIsValid(pg_type_array_oid) && force_array_type)
3981  {
3982  /*
3983  * If the old version didn't assign an array type, but the new version
3984  * does, we must select an unused type OID to assign. This currently
3985  * only happens for domains, when upgrading pre-v11 to v11 and up.
3986  *
3987  * Note: local state here is kind of ugly, but we must have some,
3988  * since we mustn't choose the same unused OID more than once.
3989  */
3990  static Oid next_possible_free_oid = FirstNormalObjectId;
3991  bool is_dup;
3992 
3993  do
3994  {
3995  ++next_possible_free_oid;
3996  printfPQExpBuffer(upgrade_query,
3997  "SELECT EXISTS(SELECT 1 "
3998  "FROM pg_catalog.pg_type "
3999  "WHERE oid = '%u'::pg_catalog.oid);",
4000  next_possible_free_oid);
4001  res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4002  is_dup = (PQgetvalue(res, 0, 0)[0] == 't');
4003  PQclear(res);
4004  } while (is_dup);
4005 
4006  pg_type_array_oid = next_possible_free_oid;
4007  }
4008 
4009  if (OidIsValid(pg_type_array_oid))
4010  {
4011  appendPQExpBufferStr(upgrade_buffer,
4012  "\n-- For binary upgrade, must preserve pg_type array oid\n");
4013  appendPQExpBuffer(upgrade_buffer,
4014  "SELECT pg_catalog.binary_upgrade_set_next_array_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4015  pg_type_array_oid);
4016  }
4017 
4018  destroyPQExpBuffer(upgrade_query);
4019 }
4020 
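/*
 * For illustration (hypothetical OIDs): for a type whose old OID is 16384
 * and whose array type has OID 16385, the function above appends roughly
 *
 *     -- For binary upgrade, must preserve pg_type oid
 *     SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('16384'::pg_catalog.oid);
 *
 *     -- For binary upgrade, must preserve pg_type array oid
 *     SELECT pg_catalog.binary_upgrade_set_next_array_pg_type_oid('16385'::pg_catalog.oid);
 */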
4021 static bool
 4022 binary_upgrade_set_type_oids_by_rel_oid(Archive *fout,
 4023  PQExpBuffer upgrade_buffer,
4024  Oid pg_rel_oid)
4025 {
4026  PQExpBuffer upgrade_query = createPQExpBuffer();
4027  PGresult *upgrade_res;
4028  Oid pg_type_oid;
4029  bool toast_set = false;
4030 
 4031  /* we only support old cluster versions >= 8.3 for binary upgrades */
4032  appendPQExpBuffer(upgrade_query,
4033  "SELECT c.reltype AS crel, t.reltype AS trel "
4034  "FROM pg_catalog.pg_class c "
4035  "LEFT JOIN pg_catalog.pg_class t ON "
4036  " (c.reltoastrelid = t.oid) "
4037  "WHERE c.oid = '%u'::pg_catalog.oid;",
4038  pg_rel_oid);
4039 
4040  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4041 
4042  pg_type_oid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "crel")));
4043 
4044  binary_upgrade_set_type_oids_by_type_oid(fout, upgrade_buffer,
4045  pg_type_oid, false);
4046 
4047  if (!PQgetisnull(upgrade_res, 0, PQfnumber(upgrade_res, "trel")))
4048  {
4049  /* Toast tables do not have pg_type array rows */
4050  Oid pg_type_toast_oid = atooid(PQgetvalue(upgrade_res, 0,
4051  PQfnumber(upgrade_res, "trel")));
4052 
4053  appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type toast oid\n");
4054  appendPQExpBuffer(upgrade_buffer,
4055  "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4056  pg_type_toast_oid);
4057 
4058  toast_set = true;
4059  }
4060 
4061  PQclear(upgrade_res);
4062  destroyPQExpBuffer(upgrade_query);
4063 
4064  return toast_set;
4065 }
4066 
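/*
 * For illustration (hypothetical OID): when the relation has a TOAST table,
 * the block above additionally emits
 *
 *     -- For binary upgrade, must preserve pg_type toast oid
 *     SELECT pg_catalog.binary_upgrade_set_next_toast_pg_type_oid('16392'::pg_catalog.oid);
 */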
4067 static void
 4068 binary_upgrade_set_pg_class_oids(Archive *fout,
 4069  PQExpBuffer upgrade_buffer, Oid pg_class_oid,
4070  bool is_index)
4071 {
4072  PQExpBuffer upgrade_query = createPQExpBuffer();
4073  PGresult *upgrade_res;
4074  Oid pg_class_reltoastrelid;
4075  Oid pg_index_indexrelid;
4076 
4077  appendPQExpBuffer(upgrade_query,
4078  "SELECT c.reltoastrelid, i.indexrelid "
4079  "FROM pg_catalog.pg_class c LEFT JOIN "
4080  "pg_catalog.pg_index i ON (c.reltoastrelid = i.indrelid AND i.indisvalid) "
4081  "WHERE c.oid = '%u'::pg_catalog.oid;",
4082  pg_class_oid);
4083 
4084  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4085 
4086  pg_class_reltoastrelid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "reltoastrelid")));
4087  pg_index_indexrelid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "indexrelid")));
4088 
4089  appendPQExpBufferStr(upgrade_buffer,
4090  "\n-- For binary upgrade, must preserve pg_class oids\n");
4091 
4092  if (!is_index)
4093  {
4094  appendPQExpBuffer(upgrade_buffer,
4095  "SELECT pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('%u'::pg_catalog.oid);\n",
4096  pg_class_oid);
4097  /* only tables have toast tables, not indexes */
4098  if (OidIsValid(pg_class_reltoastrelid))
4099  {
4100  /*
4101  * One complexity is that the table definition might not require
4102  * the creation of a TOAST table, and the TOAST table might have
4103  * been created long after table creation, when the table was
4104  * loaded with wide data. By setting the TOAST oid we force
4105  * creation of the TOAST heap and TOAST index by the backend so we
4106  * can cleanly copy the files during binary upgrade.
4107  */
4108 
4109  appendPQExpBuffer(upgrade_buffer,
4110  "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('%u'::pg_catalog.oid);\n",
4111  pg_class_reltoastrelid);
4112 
4113  /* every toast table has an index */
4114  appendPQExpBuffer(upgrade_buffer,
4115  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
4116  pg_index_indexrelid);
4117  }
4118  }
4119  else
4120  appendPQExpBuffer(upgrade_buffer,
4121  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
4122  pg_class_oid);
4123 
4124  appendPQExpBufferChar(upgrade_buffer, '\n');
4125 
4126  PQclear(upgrade_res);
4127  destroyPQExpBuffer(upgrade_query);
4128 }
4129 
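/*
 * For illustration (hypothetical OIDs): for a table with pg_class OID 16390
 * whose TOAST table and TOAST index have OIDs 16393 and 16395, the function
 * above emits roughly
 *
 *     -- For binary upgrade, must preserve pg_class oids
 *     SELECT pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('16390'::pg_catalog.oid);
 *     SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('16393'::pg_catalog.oid);
 *     SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('16395'::pg_catalog.oid);
 *
 * For an index, only the binary_upgrade_set_next_index_pg_class_oid() call
 * is emitted, with the index's own OID.
 */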
4130 /*
4131  * If the DumpableObject is a member of an extension, add a suitable
4132  * ALTER EXTENSION ADD command to the creation commands in upgrade_buffer.
4133  */
4134 static void
 4135 binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
 4136  DumpableObject *dobj,
4137  const char *objlabel)
4138 {
4139  DumpableObject *extobj = NULL;
4140  int i;
4141 
4142  if (!dobj->ext_member)
4143  return;
4144 
4145  /*
4146  * Find the parent extension. We could avoid this search if we wanted to
4147  * add a link field to DumpableObject, but the space costs of that would
4148  * be considerable. We assume that member objects could only have a
4149  * direct dependency on their own extension, not any others.
4150  */
4151  for (i = 0; i < dobj->nDeps; i++)
4152  {
4153  extobj = findObjectByDumpId(dobj->dependencies[i]);
4154  if (extobj && extobj->objType == DO_EXTENSION)
4155  break;
4156  extobj = NULL;
4157  }
4158  if (extobj == NULL)
4159  exit_horribly(NULL, "could not find parent extension for %s\n", objlabel);
4160 
4161  appendPQExpBufferStr(upgrade_buffer,
4162  "\n-- For binary upgrade, handle extension membership the hard way\n");
4163  appendPQExpBuffer(upgrade_buffer, "ALTER EXTENSION %s ADD %s;\n",
4164  fmtId(extobj->name),
4165  objlabel);
4166 }
4167 
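/*
 * For illustration (hypothetical extension and function): if the object
 * being dumped is function public.myfunc(integer) belonging to extension
 * "myext", the command appended above is
 *
 *     ALTER EXTENSION myext ADD FUNCTION public.myfunc(integer);
 *
 * where the object description comes from the caller-supplied objlabel.
 */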
4168 /*
4169  * getNamespaces:
4170  * read all namespaces in the system catalogs and return them in the
4171  * NamespaceInfo* structure
4172  *
4173  * numNamespaces is set to the number of namespaces read in
4174  */
4175 NamespaceInfo *
 4176 getNamespaces(Archive *fout, int *numNamespaces)
 4177 {
4178  DumpOptions *dopt = fout->dopt;
4179  PGresult *res;
4180  int ntups;
4181  int i;
4182  PQExpBuffer query;
4183  NamespaceInfo *nsinfo;
4184  int i_tableoid;
4185  int i_oid;
4186  int i_nspname;
4187  int i_rolname;
4188  int i_nspacl;
4189  int i_rnspacl;
4190  int i_initnspacl;
4191  int i_initrnspacl;
4192 
4193  query = createPQExpBuffer();
4194 
4195  /* Make sure we are in proper schema */
4196  selectSourceSchema(fout, "pg_catalog");
4197 
4198  /*
4199  * we fetch all namespaces including system ones, so that every object we
4200  * read in can be linked to a containing namespace.
4201  */
4202  if (fout->remoteVersion >= 90600)
4203  {
4204  PQExpBuffer acl_subquery = createPQExpBuffer();
4205  PQExpBuffer racl_subquery = createPQExpBuffer();
4206  PQExpBuffer init_acl_subquery = createPQExpBuffer();
4207  PQExpBuffer init_racl_subquery = createPQExpBuffer();
4208 
4209  buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
4210  init_racl_subquery, "n.nspacl", "n.nspowner", "'n'",
4211  dopt->binary_upgrade);
4212 
4213  appendPQExpBuffer(query, "SELECT n.tableoid, n.oid, n.nspname, "
4214  "(%s nspowner) AS rolname, "
4215  "%s as nspacl, "
4216  "%s as rnspacl, "
4217  "%s as initnspacl, "
4218  "%s as initrnspacl "
4219  "FROM pg_namespace n "
4220  "LEFT JOIN pg_init_privs pip "
4221  "ON (n.oid = pip.objoid "
4222  "AND pip.classoid = 'pg_namespace'::regclass "
4223  "AND pip.objsubid = 0",
 4224  username_subquery,
 4225  acl_subquery->data,
4226  racl_subquery->data,
4227  init_acl_subquery->data,
4228  init_racl_subquery->data);
4229 
4230  /*
4231  * When we are doing a 'clean' run, we will be dropping and recreating
4232  * the 'public' schema (the only object which has that kind of
4233  * treatment in the backend and which has an entry in pg_init_privs)
4234  * and therefore we should not consider any initial privileges in
4235  * pg_init_privs in that case.
4236  *
4237  * See pg_backup_archiver.c:_printTocEntry() for the details on why
4238  * the public schema is special in this regard.
4239  *
4240  * Note that if the public schema is dropped and re-created, this is
4241  * essentially a no-op because the new public schema won't have an
4242  * entry in pg_init_privs anyway, as the entry will be removed when
4243  * the public schema is dropped.
4244  *
4245  * Further, we have to handle the case where the public schema does
4246  * not exist at all.
4247  */
4248  if (dopt->outputClean)
4249  appendPQExpBuffer(query, " AND pip.objoid <> "
4250  "coalesce((select oid from pg_namespace "
4251  "where nspname = 'public'),0)");
4252 
4253  appendPQExpBuffer(query, ") ");
4254 
4255  destroyPQExpBuffer(acl_subquery);
4256  destroyPQExpBuffer(racl_subquery);
4257  destroyPQExpBuffer(init_acl_subquery);
4258  destroyPQExpBuffer(init_racl_subquery);
4259  }
4260  else
4261  appendPQExpBuffer(query, "SELECT tableoid, oid, nspname, "
4262  "(%s nspowner) AS rolname, "
4263  "nspacl, NULL as rnspacl, "
4264  "NULL AS initnspacl, NULL as initrnspacl "
4265  "FROM pg_namespace",
 4266  username_subquery);
 4267 
4268  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4269 
4270  ntups = PQntuples(res);
4271 
4272  nsinfo = (NamespaceInfo *) pg_malloc(ntups * sizeof(NamespaceInfo));
4273 
4274  i_tableoid = PQfnumber(res, "tableoid");
4275  i_oid = PQfnumber(res, "oid");
4276  i_nspname = PQfnumber(res, "nspname");
4277  i_rolname = PQfnumber(res, "rolname");
4278  i_nspacl = PQfnumber(res, "nspacl");
4279  i_rnspacl = PQfnumber(res, "rnspacl");
4280  i_initnspacl = PQfnumber(res, "initnspacl");
4281  i_initrnspacl = PQfnumber(res, "initrnspacl");
4282 
4283  for (i = 0; i < ntups; i++)
4284  {
4285  nsinfo[i].dobj.objType = DO_NAMESPACE;
4286  nsinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4287  nsinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4288  AssignDumpId(&nsinfo[i].dobj);
4289  nsinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_nspname));
4290  nsinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4291  nsinfo[i].nspacl = pg_strdup(PQgetvalue(res, i, i_nspacl));
4292  nsinfo[i].rnspacl = pg_strdup(PQgetvalue(res, i, i_rnspacl));
4293  nsinfo[i].initnspacl = pg_strdup(PQgetvalue(res, i, i_initnspacl));
4294  nsinfo[i].initrnspacl = pg_strdup(PQgetvalue(res, i, i_initrnspacl));
4295 
4296  /* Decide whether to dump this namespace */
4297  selectDumpableNamespace(&nsinfo[i], fout);
4298 
4299  /*
4300  * Do not try to dump ACL if the ACL is empty or the default.
4301  *
 4302  * This is useful because, for some schemas/objects, the only
 4303  * component we would try to dump is the ACL; if we can remove that
 4304  * component here, 'dump' goes to zero/false and the object is not
 4305  * considered for dumping at all later on.
4306  */
4307  if (PQgetisnull(res, i, i_nspacl) && PQgetisnull(res, i, i_rnspacl) &&
4308  PQgetisnull(res, i, i_initnspacl) &&
4309  PQgetisnull(res, i, i_initrnspacl))
4310  nsinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4311 
4312  if (strlen(nsinfo[i].rolname) == 0)
4313  write_msg(NULL, "WARNING: owner of schema \"%s\" appears to be invalid\n",
4314  nsinfo[i].dobj.name);
4315  }
4316 
4317  PQclear(res);
4318  destroyPQExpBuffer(query);
4319 
4320  *numNamespaces = ntups;
4321 
4322  return nsinfo;
4323 }
4324 
4325 /*
4326  * findNamespace:
4327  * given a namespace OID, look up the info read by getNamespaces
4328  */
4329 static NamespaceInfo *
 4330 findNamespace(Archive *fout, Oid nsoid)
 4331 {
4332  NamespaceInfo *nsinfo;
4333 
4334  nsinfo = findNamespaceByOid(nsoid);
4335  if (nsinfo == NULL)
4336  exit_horribly(NULL, "schema with OID %u does not exist\n", nsoid);
4337  return nsinfo;
4338 }
4339 
4340 /*
4341  * getExtensions:
4342  * read all extensions in the system catalogs and return them in the
4343  * ExtensionInfo* structure
4344  *
4345  * numExtensions is set to the number of extensions read in
4346  */
4347 ExtensionInfo *
 4348 getExtensions(Archive *fout, int *numExtensions)
 4349 {
4350  DumpOptions *dopt = fout->dopt;
4351  PGresult *res;
4352  int ntups;
4353  int i;
4354  PQExpBuffer query;
4355  ExtensionInfo *extinfo;
4356  int i_tableoid;
4357  int i_oid;
4358  int i_extname;
4359  int i_nspname;
4360  int i_extrelocatable;
4361  int i_extversion;
4362  int i_extconfig;
4363  int i_extcondition;
4364 
4365  /*
4366  * Before 9.1, there are no extensions.
4367  */
4368  if (fout->remoteVersion < 90100)
4369  {
4370  *numExtensions = 0;
4371  return NULL;
4372  }
4373 
4374  query = createPQExpBuffer();
4375 
4376  /* Make sure we are in proper schema */
4377  selectSourceSchema(fout, "pg_catalog");
4378 
4379  appendPQExpBufferStr(query, "SELECT x.tableoid, x.oid, "
4380  "x.extname, n.nspname, x.extrelocatable, x.extversion, x.extconfig, x.extcondition "
4381  "FROM pg_extension x "
4382  "JOIN pg_namespace n ON n.oid = x.extnamespace");
4383 
4384  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4385 
4386  ntups = PQntuples(res);
4387 
4388  extinfo = (ExtensionInfo *) pg_malloc(ntups * sizeof(ExtensionInfo));
4389 
4390  i_tableoid = PQfnumber(res, "tableoid");
4391  i_oid = PQfnumber(res, "oid");
4392  i_extname = PQfnumber(res, "extname");
4393  i_nspname = PQfnumber(res, "nspname");
4394  i_extrelocatable = PQfnumber(res, "extrelocatable");
4395  i_extversion = PQfnumber(res, "extversion");
4396  i_extconfig = PQfnumber(res, "extconfig");
4397  i_extcondition = PQfnumber(res, "extcondition");
4398 
4399  for (i = 0; i < ntups; i++)
4400  {
4401  extinfo[i].dobj.objType = DO_EXTENSION;
4402  extinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4403  extinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4404  AssignDumpId(&extinfo[i].dobj);
4405  extinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_extname));
4406  extinfo[i].namespace = pg_strdup(PQgetvalue(res, i, i_nspname));
4407  extinfo[i].relocatable = *(PQgetvalue(res, i, i_extrelocatable)) == 't';
4408  extinfo[i].extversion = pg_strdup(PQgetvalue(res, i, i_extversion));
4409  extinfo[i].extconfig = pg_strdup(PQgetvalue(res, i, i_extconfig));
4410  extinfo[i].extcondition = pg_strdup(PQgetvalue(res, i, i_extcondition));
4411 
4412  /* Decide whether we want to dump it */
4413  selectDumpableExtension(&(extinfo[i]), dopt);
4414  }
4415 
4416  PQclear(res);
4417  destroyPQExpBuffer(query);
4418 
4419  *numExtensions = ntups;
4420 
4421  return extinfo;
4422 }
4423 
4424 /*
4425  * getTypes:
4426  * read all types in the system catalogs and return them in the
4427  * TypeInfo* structure
4428  *
4429  * numTypes is set to the number of types read in
4430  *
4431  * NB: this must run after getFuncs() because we assume we can do
4432  * findFuncByOid().
4433  */
4434 TypeInfo *
 4435 getTypes(Archive *fout, int *numTypes)
 4436 {
4437  DumpOptions *dopt = fout->dopt;
4438  PGresult *res;
4439  int ntups;
4440  int i;
4441  PQExpBuffer query = createPQExpBuffer();
4442  TypeInfo *tyinfo;
4443  ShellTypeInfo *stinfo;
4444  int i_tableoid;
4445  int i_oid;
4446  int i_typname;
4447  int i_typnamespace;
4448  int i_typacl;
4449  int i_rtypacl;
4450  int i_inittypacl;
4451  int i_initrtypacl;
4452  int i_rolname;
4453  int i_typelem;
4454  int i_typrelid;
4455  int i_typrelkind;
4456  int i_typtype;
4457  int i_typisdefined;
4458  int i_isarray;
4459 
4460  /*
4461  * we include even the built-in types because those may be used as array
4462  * elements by user-defined types
4463  *
4464  * we filter out the built-in types when we dump out the types
4465  *
4466  * same approach for undefined (shell) types and array types
4467  *
4468  * Note: as of 8.3 we can reliably detect whether a type is an
4469  * auto-generated array type by checking the element type's typarray.
4470  * (Before that the test is capable of generating false positives.) We
4471  * still check for name beginning with '_', though, so as to avoid the
4472  * cost of the subselect probe for all standard types. This would have to
4473  * be revisited if the backend ever allows renaming of array types.
4474  */
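/*
 * For illustration (hypothetical type name): a user-defined base type
 * "mytype" normally has an auto-generated array type "_mytype", and
 * pg_type.typarray of "mytype" points at "_mytype". The isarray test in
 * the queries below therefore flags "_mytype", so it can be filtered out
 * when the types are dumped.
 */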
4475 
4476  /* Make sure we are in proper schema */
4477  selectSourceSchema(fout, "pg_catalog");
4478 
4479  if (fout->remoteVersion >= 90600)
4480  {
4481  PQExpBuffer acl_subquery = createPQExpBuffer();
4482  PQExpBuffer racl_subquery = createPQExpBuffer();
4483  PQExpBuffer initacl_subquery = createPQExpBuffer();
4484  PQExpBuffer initracl_subquery = createPQExpBuffer();
4485 
4486  buildACLQueries(acl_subquery, racl_subquery, initacl_subquery,
4487  initracl_subquery, "t.typacl", "t.typowner", "'T'",
4488  dopt->binary_upgrade);
4489 
4490  appendPQExpBuffer(query, "SELECT t.tableoid, t.oid, t.typname, "
4491  "t.typnamespace, "
4492  "%s AS typacl, "
4493  "%s AS rtypacl, "
4494  "%s AS inittypacl, "
4495  "%s AS initrtypacl, "
4496  "(%s t.typowner) AS rolname, "
4497  "t.typelem, t.typrelid, "
4498  "CASE WHEN t.typrelid = 0 THEN ' '::\"char\" "
4499  "ELSE (SELECT relkind FROM pg_class WHERE oid = t.typrelid) END AS typrelkind, "
4500  "t.typtype, t.typisdefined, "
4501  "t.typname[0] = '_' AND t.typelem != 0 AND "
4502  "(SELECT typarray FROM pg_type te WHERE oid = t.typelem) = t.oid AS isarray "
4503  "FROM pg_type t "
4504  "LEFT JOIN pg_init_privs pip ON "
4505  "(t.oid = pip.objoid "
4506  "AND pip.classoid = 'pg_type'::regclass "
4507  "AND pip.objsubid = 0) ",
4508  acl_subquery->data,
4509  racl_subquery->data,
4510  initacl_subquery->data,
4511  initracl_subquery->data,
 4512  username_subquery);
 4513 
4514  destroyPQExpBuffer(acl_subquery);
4515  destroyPQExpBuffer(racl_subquery);
4516  destroyPQExpBuffer(initacl_subquery);
4517  destroyPQExpBuffer(initracl_subquery);
4518  }
4519  else if (fout->remoteVersion >= 90200)
4520  {
4521  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4522  "typnamespace, typacl, NULL as rtypacl, "
4523  "NULL AS inittypacl, NULL AS initrtypacl, "
4524  "(%s typowner) AS rolname, "
4525  "typelem, typrelid, "
4526  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4527  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4528  "typtype, typisdefined, "
4529  "typname[0] = '_' AND typelem != 0 AND "
4530  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
4531  "FROM pg_type",
 4532  username_subquery);
 4533  }
4534  else if (fout->remoteVersion >= 80300)
4535  {
4536  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4537  "typnamespace, NULL AS typacl, NULL as rtypacl, "
4538  "NULL AS inittypacl, NULL AS initrtypacl, "
4539  "(%s typowner) AS rolname, "
4540  "typelem, typrelid, "
4541  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4542  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4543  "typtype, typisdefined, "
4544  "typname[0] = '_' AND typelem != 0 AND "
4545  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
4546  "FROM pg_type",
 4547  username_subquery);
 4548  }
4549  else
4550  {
4551  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4552  "typnamespace, NULL AS typacl, NULL as rtypacl, "
4553  "NULL AS inittypacl, NULL AS initrtypacl, "
4554  "(%s typowner) AS rolname, "
4555  "typelem, typrelid, "
4556  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4557  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4558  "typtype, typisdefined, "
4559  "typname[0] = '_' AND typelem != 0 AS isarray "
4560  "FROM pg_type",
 4561  username_subquery);
 4562  }
4563 
4564  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4565 
4566  ntups = PQntuples(res);
4567 
4568  tyinfo = (TypeInfo *) pg_malloc(ntups * sizeof(TypeInfo));
4569 
4570  i_tableoid = PQfnumber(res, "tableoid");
4571  i_oid = PQfnumber(res, "oid");
4572  i_typname = PQfnumber(res, "typname");
4573  i_typnamespace = PQfnumber(res, "typnamespace");
4574  i_typacl = PQfnumber(res, "typacl");
4575  i_rtypacl = PQfnumber(res, "rtypacl");
4576  i_inittypacl = PQfnumber(res, "inittypacl");
4577  i_initrtypacl = PQfnumber(res, "initrtypacl");
4578  i_rolname = PQfnumber(res, "rolname");
4579  i_typelem = PQfnumber(res, "typelem");
4580  i_typrelid = PQfnumber(res, "typrelid");
4581  i_typrelkind = PQfnumber(res, "typrelkind");
4582  i_typtype = PQfnumber(res, "typtype");
4583  i_typisdefined = PQfnumber(res, "typisdefined");
4584  i_isarray = PQfnumber(res, "isarray");
4585 
4586  for (i = 0; i < ntups; i++)
4587  {
4588  tyinfo[i].dobj.objType = DO_TYPE;
4589  tyinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4590  tyinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4591  AssignDumpId(&tyinfo[i].dobj);
4592  tyinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_typname));
4593  tyinfo[i].dobj.namespace =
4594  findNamespace(fout,
4595  atooid(PQgetvalue(res, i, i_typnamespace)));
4596  tyinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4597  tyinfo[i].typacl = pg_strdup(PQgetvalue(res, i, i_typacl));
4598  tyinfo[i].rtypacl = pg_strdup(PQgetvalue(res, i, i_rtypacl));
4599  tyinfo[i].inittypacl = pg_strdup(PQgetvalue(res, i, i_inittypacl));
4600  tyinfo[i].initrtypacl = pg_strdup(PQgetvalue(res, i, i_initrtypacl));
4601  tyinfo[i].typelem = atooid(PQgetvalue(res, i, i_typelem));
4602  tyinfo[i].typrelid = atooid(PQgetvalue(res, i, i_typrelid));
4603  tyinfo[i].typrelkind = *PQgetvalue(res, i, i_typrelkind);
4604  tyinfo[i].typtype = *PQgetvalue(res, i, i_typtype);
4605  tyinfo[i].shellType = NULL;
4606 
4607  if (strcmp(PQgetvalue(res, i, i_typisdefined), "t") == 0)
4608  tyinfo[i].isDefined = true;
4609  else
4610  tyinfo[i].isDefined = false;
4611 
4612  if (strcmp(PQgetvalue(res, i, i_isarray), "t") == 0)
4613  tyinfo[i].isArray = true;
4614  else
4615  tyinfo[i].isArray = false;
4616 
4617  /* Decide whether we want to dump it */
4618  selectDumpableType(&tyinfo[i], fout);
4619 
4620  /* Do not try to dump ACL if no ACL exists. */
4621  if (PQgetisnull(res, i, i_typacl) && PQgetisnull(res, i, i_rtypacl) &&
4622  PQgetisnull(res, i, i_inittypacl) &&
4623  PQgetisnull(res, i, i_initrtypacl))
4624  tyinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4625 
4626  /*
4627  * If it's a domain, fetch info about its constraints, if any
4628  */
4629  tyinfo[i].nDomChecks = 0;
4630  tyinfo[i].domChecks = NULL;
4631  if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
4632  tyinfo[i].typtype == TYPTYPE_DOMAIN)
4633  getDomainConstraints(fout, &(tyinfo[i]));
4634 
4635  /*
4636  * If it's a base type, make a DumpableObject representing a shell
4637  * definition of the type. We will need to dump that ahead of the I/O
4638  * functions for the type. Similarly, range types need a shell
4639  * definition in case they have a canonicalize function.
4640  *
4641  * Note: the shell type doesn't have a catId. You might think it
4642  * should copy the base type's catId, but then it might capture the
4643  * pg_depend entries for the type, which we don't want.
4644  */
4645  if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
4646  (tyinfo[i].typtype == TYPTYPE_BASE ||
4647  tyinfo[i].typtype == TYPTYPE_RANGE))
4648  {
4649  stinfo = (ShellTypeInfo *) pg_malloc(sizeof(ShellTypeInfo));
4650  stinfo->dobj.objType = DO_SHELL_TYPE;
4651  stinfo->dobj.catId = nilCatalogId;
4652  AssignDumpId(&stinfo->dobj);
4653  stinfo->dobj.name = pg_strdup(tyinfo[i].dobj.name);
4654  stinfo->dobj.namespace = tyinfo[i].dobj.namespace;
4655  stinfo->baseType = &(tyinfo[i]);
4656  tyinfo[i].shellType = stinfo;
4657 
4658  /*
4659  * Initially mark the shell type as not to be dumped. We'll only
4660  * dump it if the I/O or canonicalize functions need to be dumped;
4661  * this is taken care of while sorting dependencies.
4662  */
4663  stinfo->dobj.dump = DUMP_COMPONENT_NONE;
4664  }
4665 
4666  if (strlen(tyinfo[i].rolname) == 0)
4667  write_msg(NULL, "WARNING: owner of data type \"%s\" appears to be invalid\n",
4668  tyinfo[i].dobj.name);
4669  }
4670 
4671  *numTypes = ntups;
4672 
4673  PQclear(res);
4674 
4675  destroyPQExpBuffer(query);
4676 
4677  return tyinfo;
4678 }
4679 
4680 /*
4681  * getOperators:
4682  * read all operators in the system catalogs and return them in the
4683  * OprInfo* structure
4684  *
4685  * numOprs is set to the number of operators read in
4686  */
4687 OprInfo *
4688 getOperators(Archive *fout, int *numOprs)
4689 {
4690  PGresult *res;
4691  int ntups;
4692  int i;
4693  PQExpBuffer query = createPQExpBuffer();
4694  OprInfo *oprinfo;
4695  int i_tableoid;
4696  int i_oid;
4697  int i_oprname;
4698  int i_oprnamespace;
4699  int i_rolname;
4700  int i_oprkind;
4701  int i_oprcode;
4702 
4703  /*
4704  * find all operators, including builtin operators; we filter out
4705  * system-defined operators at dump-out time.
4706  */
4707 
4708  /* Make sure we are in proper schema */
4709  selectSourceSchema(fout, "pg_catalog");
4710 
4711  appendPQExpBuffer(query, "SELECT tableoid, oid, oprname, "
4712  "oprnamespace, "
4713  "(%s oprowner) AS rolname, "
4714  "oprkind, "
4715  "oprcode::oid AS oprcode "
4716  "FROM pg_operator",
 4717  username_subquery);
 4718 
4719  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4720 
4721  ntups = PQntuples(res);
4722  *numOprs = ntups;
4723 
4724  oprinfo = (OprInfo *) pg_malloc(ntups * sizeof(OprInfo));
4725 
4726  i_tableoid = PQfnumber(res, "tableoid");
4727  i_oid = PQfnumber(res, "oid");
4728  i_oprname = PQfnumber(res, "oprname");
4729  i_oprnamespace = PQfnumber(res, "oprnamespace");
4730  i_rolname = PQfnumber(res, "rolname");
4731  i_oprkind = PQfnumber(res, "oprkind");
4732  i_oprcode = PQfnumber(res, "oprcode");
4733 
4734  for (i = 0; i < ntups; i++)
4735  {
4736  oprinfo[i].dobj.objType = DO_OPERATOR;
4737  oprinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4738  oprinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4739  AssignDumpId(&oprinfo[i].dobj);
4740  oprinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oprname));
4741  oprinfo[i].dobj.namespace =
4742  findNamespace(fout,
4743  atooid(PQgetvalue(res, i, i_oprnamespace)));
4744  oprinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4745  oprinfo[i].oprkind = (PQgetvalue(res, i, i_oprkind))[0];
4746  oprinfo[i].oprcode = atooid(PQgetvalue(res, i, i_oprcode));
4747 
4748  /* Decide whether we want to dump it */
4749  selectDumpableObject(&(oprinfo[i].dobj), fout);
4750 
4751  /* Operators do not currently have ACLs. */
4752  oprinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4753 
4754  if (strlen(oprinfo[i].rolname) == 0)
4755  write_msg(NULL, "WARNING: owner of operator \"%s\" appears to be invalid\n",
4756  oprinfo[i].dobj.name);
4757  }
4758 
4759  PQclear(res);
4760 
4761  destroyPQExpBuffer(query);
4762 
4763  return oprinfo;
4764 }
4765 
4766 /*
4767  * getCollations:
4768  * read all collations in the system catalogs and return them in the
4769  * CollInfo* structure
4770  *
4771  * numCollations is set to the number of collations read in
4772  */
4773 CollInfo *
 4774 getCollations(Archive *fout, int *numCollations)
 4775 {
4776  PGresult *res;
4777  int ntups;
4778  int i;
4779  PQExpBuffer query;
4780  CollInfo *collinfo;
4781  int i_tableoid;
4782  int i_oid;
4783  int i_collname;
4784  int i_collnamespace;
4785  int i_rolname;
4786 
4787  /* Collations didn't exist pre-9.1 */
4788  if (fout->remoteVersion < 90100)
4789  {
4790  *numCollations = 0;
4791  return NULL;
4792  }
4793 
4794  query = createPQExpBuffer();
4795 
4796  /*
4797  * find all collations, including builtin collations; we filter out
4798  * system-defined collations at dump-out time.
4799  */
4800 
4801  /* Make sure we are in proper schema */
4802  selectSourceSchema(fout, "pg_catalog");
4803 
4804  appendPQExpBuffer(query, "SELECT tableoid, oid, collname, "
4805  "collnamespace, "
4806  "(%s collowner) AS rolname "
4807  "FROM pg_collation",
 4808  username_subquery);
 4809 
4810  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4811 
4812  ntups = PQntuples(res);
4813  *numCollations = ntups;
4814 
4815  collinfo = (CollInfo *) pg_malloc(ntups * sizeof(CollInfo));
4816 
4817  i_tableoid = PQfnumber(res, "tableoid");
4818  i_oid = PQfnumber(res, "oid");
4819  i_collname = PQfnumber(res, "collname");
4820  i_collnamespace = PQfnumber(res, "collnamespace");
4821  i_rolname = PQfnumber(res, "rolname");
4822 
4823  for (i = 0; i < ntups; i++)
4824  {
4825  collinfo[i].dobj.objType = DO_COLLATION;
4826  collinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4827  collinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4828  AssignDumpId(&collinfo[i].dobj);
4829  collinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_collname));
4830  collinfo[i].dobj.namespace =
4831  findNamespace(fout,
4832  atooid(PQgetvalue(res, i, i_collnamespace)));
4833  collinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4834 
4835  /* Decide whether we want to dump it */
4836  selectDumpableObject(&(collinfo[i].dobj), fout);
4837 
4838  /* Collations do not currently have ACLs. */
4839  collinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4840  }
4841 
4842  PQclear(res);
4843 
4844  destroyPQExpBuffer(query);
4845 
4846  return collinfo;
4847 }
4848 
4849 /*
4850  * getConversions:
4851  * read all conversions in the system catalogs and return them in the
4852  * ConvInfo* structure
4853  *
4854  * numConversions is set to the number of conversions read in
4855  */
4856 ConvInfo *
4857 getConversions(Archive *fout, int *numConversions)
4858 {
4859  PGresult *res;
4860  int ntups;
4861  int i;
4862  PQExpBuffer query;
4863  ConvInfo *convinfo;
4864  int i_tableoid;
4865  int i_oid;
4866  int i_conname;
4867  int i_connamespace;
4868  int i_rolname;
4869 
4870  query = createPQExpBuffer();
4871 
4872  /*
4873  * find all conversions, including builtin conversions; we filter out
4874  * system-defined conversions at dump-out time.
4875  */
4876 
4877  /* Make sure we are in proper schema */
4878  selectSourceSchema(fout, "pg_catalog");
4879 
4880  appendPQExpBuffer(query, "SELECT tableoid, oid, conname, "
4881  "connamespace, "
4882  "(%s conowner) AS rolname "
4883  "FROM pg_conversion",
 4884  username_subquery);
 4885 
4886  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4887 
4888  ntups = PQntuples(res);
4889  *numConversions = ntups;
4890 
4891  convinfo = (ConvInfo *) pg_malloc(ntups * sizeof(ConvInfo));
4892 
4893  i_tableoid = PQfnumber(res, "tableoid");
4894  i_oid = PQfnumber(res, "oid");
4895  i_conname = PQfnumber(res, "conname");
4896  i_connamespace = PQfnumber(res, "connamespace");
4897  i_rolname = PQfnumber(res, "rolname");
4898 
4899  for (i = 0; i < ntups; i++)
4900  {
4901  convinfo[i].dobj.objType = DO_CONVERSION;
4902  convinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4903  convinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4904  AssignDumpId(&convinfo[i].dobj);
4905  convinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_conname));
4906  convinfo[i].dobj.namespace =
4907  findNamespace(fout,
4908  atooid(PQgetvalue(res, i, i_connamespace)));
4909  convinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4910 
4911  /* Decide whether we want to dump it */
4912  selectDumpableObject(&(convinfo[i].dobj), fout);
4913 
4914  /* Conversions do not currently have ACLs. */
4915  convinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4916  }
4917 
4918  PQclear(res);
4919 
4920  destroyPQExpBuffer(query);
4921 
4922  return convinfo;
4923 }
4924 
4925 /*
4926  * getAccessMethods:
4927  * read all user-defined access methods in the system catalogs and return
4928  * them in the AccessMethodInfo* structure
4929  *
4930  * numAccessMethods is set to the number of access methods read in
4931  */
 4932 AccessMethodInfo *
 4933 getAccessMethods(Archive *fout, int *numAccessMethods)
4934 {
4935  PGresult *res;
4936  int ntups;
4937  int i;
4938  PQExpBuffer query;
4939  AccessMethodInfo *aminfo;
4940  int i_tableoid;
4941  int i_oid;
4942  int i_amname;
4943  int i_amhandler;
4944  int i_amtype;
4945 
4946  /* Before 9.6, there are no user-defined access methods */
4947  if (fout->remoteVersion < 90600)
4948  {
4949  *numAccessMethods = 0;
4950  return NULL;
4951  }
4952 
4953  query = createPQExpBuffer();
4954 
4955  /* Make sure we are in proper schema */
4956  selectSourceSchema(fout, "pg_catalog");
4957 
4958  /* Select all access methods from pg_am table */
4959  appendPQExpBuffer(query, "SELECT tableoid, oid, amname, amtype, "
4960  "amhandler::pg_catalog.regproc AS amhandler "
4961  "FROM pg_am");
4962 
4963  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4964 
4965  ntups = PQntuples(res);
4966  *numAccessMethods = ntups;
4967 
4968  aminfo = (AccessMethodInfo *) pg_malloc(ntups * sizeof(AccessMethodInfo));
4969 
4970  i_tableoid = PQfnumber(res, "tableoid");
4971  i_oid = PQfnumber(res, "oid");
4972  i_amname = PQfnumber(res, "amname");
4973  i_amhandler = PQfnumber(res, "amhandler");
4974  i_amtype = PQfnumber(res, "amtype");
4975 
4976  for (i = 0; i < ntups; i++)
4977  {
4978  aminfo[i].dobj.objType = DO_ACCESS_METHOD;
4979  aminfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4980  aminfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4981  AssignDumpId(&aminfo[i].dobj);
4982  aminfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_amname));
4983  aminfo[i].dobj.namespace = NULL;
4984  aminfo[i].amhandler = pg_strdup(PQgetvalue(res, i, i_amhandler));
4985  aminfo[i].amtype = *(PQgetvalue(res, i, i_amtype));
4986 
4987  /* Decide whether we want to dump it */
4988  selectDumpableAccessMethod(&(aminfo[i]), fout);
4989 
4990  /* Access methods do not currently have ACLs. */
4991  aminfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4992  }
4993 
4994  PQclear(res);
4995 
4996  destroyPQExpBuffer(query);
4997 
4998  return aminfo;
4999 }
5000 
5001 
5002 /*
5003  * getOpclasses:
5004  * read all opclasses in the system catalogs and return them in the
5005  * OpclassInfo* structure
5006  *
5007  * numOpclasses is set to the number of opclasses read in
5008  */
5009 OpclassInfo *
5010 getOpclasses(Archive *fout, int *numOpclasses)
5011 {
5012  PGresult *res;
5013  int ntups;
5014  int i;
5015  PQExpBuffer query = createPQExpBuffer();
5016  OpclassInfo *opcinfo;
5017  int i_tableoid;
5018  int i_oid;
5019  int i_opcname;
5020  int i_opcnamespace;
5021  int i_rolname;
5022 
5023  /*
5024  * find all opclasses, including builtin opclasses; we filter out
5025  * system-defined opclasses at dump-out time.
5026  */
5027 
5028  /* Make sure we are in proper schema */
5029  selectSourceSchema(fout, "pg_catalog");
5030 
5031  appendPQExpBuffer(query, "SELECT tableoid, oid, opcname, "
5032  "opcnamespace, "
5033  "(%s opcowner) AS rolname "
5034  "FROM pg_opclass",
 5035  username_subquery);
 5036 
5037  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5038 
5039  ntups = PQntuples(res);
5040  *numOpclasses = ntups;
5041 
5042  opcinfo = (OpclassInfo *) pg_malloc(ntups * sizeof(OpclassInfo));
5043 
5044  i_tableoid = PQfnumber(res, "tableoid");
5045  i_oid = PQfnumber(res, "oid");
5046  i_opcname = PQfnumber(res, "opcname");
5047  i_opcnamespace = PQfnumber(res, "opcnamespace");
5048  i_rolname = PQfnumber(res, "rolname");
5049 
5050  for (i = 0; i < ntups; i++)
5051  {
5052  opcinfo[i].dobj.objType = DO_OPCLASS;
5053  opcinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5054  opcinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5055  AssignDumpId(&opcinfo[i].dobj);
5056  opcinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opcname));
5057  opcinfo[i].dobj.namespace =
5058  findNamespace(fout,
5059  atooid(PQgetvalue(res, i, i_opcnamespace)));
5060  opcinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5061 
5062  /* Decide whether we want to dump it */
5063  selectDumpableObject(&(opcinfo[i].dobj), fout);
5064 
 5065  /* Operator classes do not currently have ACLs. */
5066  opcinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5067 
5068  if (strlen(opcinfo[i].rolname) == 0)
5069  write_msg(NULL, "WARNING: owner of operator class \"%s\" appears to be invalid\n",
5070  opcinfo[i].dobj.name);
5071  }
5072 
5073  PQclear(res);
5074 
5075  destroyPQExpBuffer(query);
5076 
5077  return opcinfo;
5078 }
5079 
5080 /*
5081  * getOpfamilies:
5082  * read all opfamilies in the system catalogs and return them in the
5083  * OpfamilyInfo* structure
5084  *
5085  * numOpfamilies is set to the number of opfamilies read in
5086  */
5087 OpfamilyInfo *
5088 getOpfamilies(Archive *fout, int *numOpfamilies)
5089 {
5090  PGresult *res;
5091  int ntups;
5092  int i;
5093  PQExpBuffer query;
5094  OpfamilyInfo *opfinfo;
5095  int i_tableoid;
5096  int i_oid;
5097  int i_opfname;
5098  int i_opfnamespace;
5099  int i_rolname;
5100 
5101  /* Before 8.3, there is no separate concept of opfamilies */
5102  if (fout->remoteVersion < 80300)
5103  {
5104  *numOpfamilies = 0;
5105  return NULL;
5106  }
5107 
5108  query = createPQExpBuffer();
5109 
5110  /*
5111  * find all opfamilies, including builtin opfamilies; we filter out
5112  * system-defined opfamilies at dump-out time.
5113  */
5114 
5115  /* Make sure we are in proper schema */
5116  selectSourceSchema(fout, "pg_catalog");
5117 
5118  appendPQExpBuffer(query, "SELECT tableoid, oid, opfname, "
5119  "opfnamespace, "
5120  "(%s opfowner) AS rolname "
5121  "FROM pg_opfamily",
 5122  username_subquery);
 5123 
5124  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5125 
5126  ntups = PQntuples(res);
5127  *numOpfamilies = ntups;
5128 
5129  opfinfo = (OpfamilyInfo *) pg_malloc(ntups * sizeof(OpfamilyInfo));
5130 
5131  i_tableoid = PQfnumber(res, "tableoid");
5132  i_oid = PQfnumber(res, "oid");
5133  i_opfname = PQfnumber(res, "opfname");
5134  i_opfnamespace = PQfnumber(res, "opfnamespace");
5135  i_rolname = PQfnumber(res, "rolname");
5136 
5137  for (i = 0; i < ntups; i++)
5138  {
5139  opfinfo[i].dobj.objType = DO_OPFAMILY;
5140  opfinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5141  opfinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5142  AssignDumpId(&opfinfo[i].dobj);
5143  opfinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opfname));
5144  opfinfo[i].dobj.namespace =
5145  findNamespace(fout,
5146  atooid(PQgetvalue(res, i, i_opfnamespace)));
5147  opfinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5148 
5149  /* Decide whether we want to dump it */
5150  selectDumpableObject(&(opfinfo[i].dobj), fout);
5151 
 5152  /* Operator families do not currently have ACLs. */
5153  opfinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5154 
5155  if (strlen(opfinfo[i].rolname) == 0)
5156  write_msg(NULL, "WARNING: owner of operator family \"%s\" appears to be invalid\n",
5157  opfinfo[i].dobj.name);
5158  }
5159 
5160  PQclear(res);
5161 
5162  destroyPQExpBuffer(query);
5163 
5164  return opfinfo;
5165 }
5166 
5167 /*
5168  * getAggregates:
5169  * read all the user-defined aggregates in the system catalogs and
5170  * return them in the AggInfo* structure
5171  *
5172  * numAggs is set to the number of aggregates read in
5173  */
5174 AggInfo *
5175 getAggregates(Archive *fout, int *numAggs)
5176 {
5177  DumpOptions *dopt = fout->dopt;
5178  PGresult *res;
5179  int ntups;
5180  int i;
5181  PQExpBuffer query = createPQExpBuffer();
5182  AggInfo *agginfo;
5183  int i_tableoid;
5184  int i_oid;
5185  int i_aggname;
5186  int i_aggnamespace;
5187  int i_pronargs;
5188  int i_proargtypes;
5189  int i_rolname;
5190  int i_aggacl;
5191  int i_raggacl;
5192  int i_initaggacl;
5193  int i_initraggacl;
5194 
5195  /* Make sure we are in proper schema */
5196  selectSourceSchema(fout, "pg_catalog");
5197 
5198  /*
5199  * Find all interesting aggregates. See comment in getFuncs() for the
5200  * rationale behind the filtering logic.
5201  */
5202  if (fout->remoteVersion >= 90600)
5203  {
5204  PQExpBuffer acl_subquery = createPQExpBuffer();
5205  PQExpBuffer racl_subquery = createPQExpBuffer();
5206  PQExpBuffer initacl_subquery = createPQExpBuffer();
5207  PQExpBuffer initracl_subquery = createPQExpBuffer();
5208 
5209  buildACLQueries(acl_subquery, racl_subquery, initacl_subquery,
5210  initracl_subquery, "p.proacl", "p.proowner", "'f'",
5211  dopt->binary_upgrade);
5212 
5213  appendPQExpBuffer(query, "SELECT p.tableoid, p.oid, "
5214  "p.proname AS aggname, "
5215  "p.pronamespace AS aggnamespace, "
5216  "p.pronargs, p.proargtypes, "
5217  "(%s p.proowner) AS rolname, "
5218  "%s AS aggacl, "
5219  "%s AS raggacl, "
5220  "%s AS initaggacl, "
5221  "%s AS initraggacl "
5222  "FROM pg_proc p "
5223  "LEFT JOIN pg_init_privs pip ON "
5224  "(p.oid = pip.objoid "
5225  "AND pip.classoid = 'pg_proc'::regclass "
5226  "AND pip.objsubid = 0) "
5227  "WHERE p.proisagg AND ("
5228  "p.pronamespace != "
5229  "(SELECT oid FROM pg_namespace "
5230  "WHERE nspname = 'pg_catalog') OR "
5231  "p.proacl IS DISTINCT FROM pip.initprivs",
 5232  username_subquery,
 5233  acl_subquery->data,
5234  racl_subquery->data,
5235  initacl_subquery->data,
5236  initracl_subquery->data);
5237  if (dopt->binary_upgrade)
5238  appendPQExpBufferStr(query,
5239  " OR EXISTS(SELECT 1 FROM pg_depend WHERE "
5240  "classid = 'pg_proc'::regclass AND "
5241  "objid = p.oid AND "
5242  "refclassid = 'pg_extension'::regclass AND "
5243  "deptype = 'e')");
5244  appendPQExpBufferChar(query, ')');
5245 
5246  destroyPQExpBuffer(acl_subquery);
5247  destroyPQExpBuffer(racl_subquery);
5248  destroyPQExpBuffer(initacl_subquery);
5249