PostgreSQL Source Code  git master
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros
pg_dump.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * pg_dump.c
4  * pg_dump is a utility for dumping out a postgres database
5  * into a script file.
6  *
7  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994, Regents of the University of California
9  *
10  * pg_dump will read the system catalogs in a database and dump out a
11  * script that reproduces the schema in terms of SQL that is understood
12  * by PostgreSQL
13  *
14  * Note that pg_dump runs in a transaction-snapshot mode transaction,
15  * so it sees a consistent snapshot of the database including system
16  * catalogs. However, it relies in part on various specialized backend
17  * functions like pg_get_indexdef(), and those things tend to look at
18  * the currently committed state. So it is possible to get 'cache
19  * lookup failed' error if someone performs DDL changes while a dump is
20  * happening. The window for this sort of thing is from the acquisition
21  * of the transaction snapshot to getSchemaData() (when pg_dump acquires
22  * AccessShareLock on every table it intends to dump). It isn't very large,
23  * but it can happen.
24  *
25  * http://archives.postgresql.org/pgsql-bugs/2010-02/msg00187.php
26  *
27  * IDENTIFICATION
28  * src/bin/pg_dump/pg_dump.c
29  *
30  *-------------------------------------------------------------------------
31  */
32 #include "postgres_fe.h"
33 
34 #include <unistd.h>
35 #include <ctype.h>
36 #ifdef HAVE_TERMIOS_H
37 #include <termios.h>
38 #endif
39 
40 #include "getopt_long.h"
41 
42 #include "access/attnum.h"
43 #include "access/sysattr.h"
44 #include "access/transam.h"
45 #include "catalog/pg_am.h"
46 #include "catalog/pg_cast.h"
47 #include "catalog/pg_class.h"
48 #include "catalog/pg_default_acl.h"
49 #include "catalog/pg_largeobject.h"
51 #include "catalog/pg_proc.h"
52 #include "catalog/pg_trigger.h"
53 #include "catalog/pg_type.h"
54 #include "libpq/libpq-fs.h"
55 
56 #include "dumputils.h"
57 #include "parallel.h"
58 #include "pg_backup_db.h"
59 #include "pg_backup_utils.h"
60 #include "pg_dump.h"
61 #include "fe_utils/string_utils.h"
62 
63 
64 typedef struct
65 {
66  const char *descr; /* comment for an object */
67  Oid classoid; /* object class (catalog OID) */
68  Oid objoid; /* object OID */
69  int objsubid; /* subobject (table column #) */
70 } CommentItem;
71 
72 typedef struct
73 {
74  const char *provider; /* label provider of this security label */
75  const char *label; /* security label for an object */
76  Oid classoid; /* object class (catalog OID) */
77  Oid objoid; /* object OID */
78  int objsubid; /* subobject (table column #) */
79 } SecLabelItem;
80 
/*
 * OidOptions --- bit flags telling getFormattedTypeName() how to render a
 * type OID of zero (InvalidOid).
 *
 * NOTE(review): the listing only preserved zeroAsAny; the other members
 * were restored from upstream pg_dump.c -- verify against the project tree.
 */
typedef enum OidOptions
{
    zeroAsOpaque = 1,           /* render 0 as the legacy "opaque" type */
    zeroAsAny = 2,              /* render 0 as "'any'" */
    zeroAsStar = 4,             /* render 0 as "*" */
    zeroAsNone = 8              /* render 0 as "NONE" */
} OidOptions;
88 
89 /* global decls */
90 bool g_verbose; /* User wants verbose narration of our
91  * activities. */
92 static bool dosync = true; /* Issue fsync() to make dump durable
93  * on disk. */
94 
95 /* subquery used to convert user ID (eg, datdba) to user name */
96 static const char *username_subquery;
97 
98 /*
99  * For 8.0 and earlier servers, pulled from pg_database, for 8.1+ we use
100  * FirstNormalObjectId - 1.
101  */
102 static Oid g_last_builtin_oid; /* value of the last builtin oid */
103 
104 /* The specified names/patterns should to match at least one entity */
105 static int strict_names = 0;
106 
107 /*
108  * Object inclusion/exclusion lists
109  *
110  * The string lists record the patterns given by command-line switches,
111  * which we then convert to lists of OIDs of matching objects.
112  */
117 
124 
125 
126 char g_opaque_type[10]; /* name for the opaque type */
127 
128 /* placeholders for the delimiters for comments */
130 char g_comment_end[10];
131 
132 static const CatalogId nilCatalogId = {0, 0};
133 
134 static void help(const char *progname);
135 static void setup_connection(Archive *AH,
136  const char *dumpencoding, const char *dumpsnapshot,
137  char *use_role);
138 static ArchiveFormat parseArchiveFormat(const char *format, ArchiveMode *mode);
139 static void expand_schema_name_patterns(Archive *fout,
140  SimpleStringList *patterns,
141  SimpleOidList *oids,
142  bool strict_names);
143 static void expand_table_name_patterns(Archive *fout,
144  SimpleStringList *patterns,
145  SimpleOidList *oids,
146  bool strict_names);
147 static NamespaceInfo *findNamespace(Archive *fout, Oid nsoid);
148 static void dumpTableData(Archive *fout, TableDataInfo *tdinfo);
149 static void refreshMatViewData(Archive *fout, TableDataInfo *tdinfo);
150 static void guessConstraintInheritance(TableInfo *tblinfo, int numTables);
151 static void dumpComment(Archive *fout, const char *target,
152  const char *namespace, const char *owner,
153  CatalogId catalogId, int subid, DumpId dumpId);
154 static int findComments(Archive *fout, Oid classoid, Oid objoid,
155  CommentItem **items);
156 static int collectComments(Archive *fout, CommentItem **items);
157 static void dumpSecLabel(Archive *fout, const char *target,
158  const char *namespace, const char *owner,
159  CatalogId catalogId, int subid, DumpId dumpId);
160 static int findSecLabels(Archive *fout, Oid classoid, Oid objoid,
161  SecLabelItem **items);
162 static int collectSecLabels(Archive *fout, SecLabelItem **items);
163 static void dumpDumpableObject(Archive *fout, DumpableObject *dobj);
164 static void dumpNamespace(Archive *fout, NamespaceInfo *nspinfo);
165 static void dumpExtension(Archive *fout, ExtensionInfo *extinfo);
166 static void dumpType(Archive *fout, TypeInfo *tyinfo);
167 static void dumpBaseType(Archive *fout, TypeInfo *tyinfo);
168 static void dumpEnumType(Archive *fout, TypeInfo *tyinfo);
169 static void dumpRangeType(Archive *fout, TypeInfo *tyinfo);
170 static void dumpUndefinedType(Archive *fout, TypeInfo *tyinfo);
171 static void dumpDomain(Archive *fout, TypeInfo *tyinfo);
172 static void dumpCompositeType(Archive *fout, TypeInfo *tyinfo);
173 static void dumpCompositeTypeColComments(Archive *fout, TypeInfo *tyinfo);
174 static void dumpShellType(Archive *fout, ShellTypeInfo *stinfo);
175 static void dumpProcLang(Archive *fout, ProcLangInfo *plang);
176 static void dumpFunc(Archive *fout, FuncInfo *finfo);
177 static void dumpCast(Archive *fout, CastInfo *cast);
178 static void dumpTransform(Archive *fout, TransformInfo *transform);
179 static void dumpOpr(Archive *fout, OprInfo *oprinfo);
180 static void dumpAccessMethod(Archive *fout, AccessMethodInfo *oprinfo);
181 static void dumpOpclass(Archive *fout, OpclassInfo *opcinfo);
182 static void dumpOpfamily(Archive *fout, OpfamilyInfo *opfinfo);
183 static void dumpCollation(Archive *fout, CollInfo *collinfo);
184 static void dumpConversion(Archive *fout, ConvInfo *convinfo);
185 static void dumpRule(Archive *fout, RuleInfo *rinfo);
186 static void dumpAgg(Archive *fout, AggInfo *agginfo);
187 static void dumpTrigger(Archive *fout, TriggerInfo *tginfo);
188 static void dumpEventTrigger(Archive *fout, EventTriggerInfo *evtinfo);
189 static void dumpTable(Archive *fout, TableInfo *tbinfo);
190 static void dumpTableSchema(Archive *fout, TableInfo *tbinfo);
191 static void dumpAttrDef(Archive *fout, AttrDefInfo *adinfo);
192 static void dumpSequence(Archive *fout, TableInfo *tbinfo);
193 static void dumpSequenceData(Archive *fout, TableDataInfo *tdinfo);
194 static void dumpIndex(Archive *fout, IndxInfo *indxinfo);
195 static void dumpStatisticsExt(Archive *fout, StatsExtInfo *statsextinfo);
196 static void dumpConstraint(Archive *fout, ConstraintInfo *coninfo);
197 static void dumpTableConstraintComment(Archive *fout, ConstraintInfo *coninfo);
198 static void dumpTSParser(Archive *fout, TSParserInfo *prsinfo);
199 static void dumpTSDictionary(Archive *fout, TSDictInfo *dictinfo);
200 static void dumpTSTemplate(Archive *fout, TSTemplateInfo *tmplinfo);
201 static void dumpTSConfig(Archive *fout, TSConfigInfo *cfginfo);
202 static void dumpForeignDataWrapper(Archive *fout, FdwInfo *fdwinfo);
203 static void dumpForeignServer(Archive *fout, ForeignServerInfo *srvinfo);
204 static void dumpUserMappings(Archive *fout,
205  const char *servername, const char *namespace,
206  const char *owner, CatalogId catalogId, DumpId dumpId);
207 static void dumpDefaultACL(Archive *fout, DefaultACLInfo *daclinfo);
208 
209 static void dumpACL(Archive *fout, CatalogId objCatId, DumpId objDumpId,
210  const char *type, const char *name, const char *subname,
211  const char *tag, const char *nspname, const char *owner,
212  const char *acls, const char *racls,
213  const char *initacls, const char *initracls);
214 
215 static void getDependencies(Archive *fout);
216 static void BuildArchiveDependencies(Archive *fout);
218  DumpId **dependencies, int *nDeps, int *allocDeps);
219 
221 static void addBoundaryDependencies(DumpableObject **dobjs, int numObjs,
222  DumpableObject *boundaryObjs);
223 
224 static void getDomainConstraints(Archive *fout, TypeInfo *tyinfo);
225 static void getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, bool oids, char relkind);
226 static void makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo, bool oids);
227 static void buildMatViewRefreshDependencies(Archive *fout);
228 static void getTableDataFKConstraints(void);
229 static char *format_function_arguments(FuncInfo *finfo, char *funcargs,
230  bool is_agg);
231 static char *format_function_arguments_old(Archive *fout,
232  FuncInfo *finfo, int nallargs,
233  char **allargtypes,
234  char **argmodes,
235  char **argnames);
236 static char *format_function_signature(Archive *fout,
237  FuncInfo *finfo, bool honor_quotes);
238 static char *convertRegProcReference(Archive *fout,
239  const char *proc);
240 static char *convertOperatorReference(Archive *fout, const char *opr);
241 static char *convertTSFunction(Archive *fout, Oid funcOid);
242 static Oid findLastBuiltinOid_V71(Archive *fout, const char *);
243 static void selectSourceSchema(Archive *fout, const char *schemaName);
244 static char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts);
245 static void getBlobs(Archive *fout);
246 static void dumpBlob(Archive *fout, BlobInfo *binfo);
247 static int dumpBlobs(Archive *fout, void *arg);
248 static void dumpPolicy(Archive *fout, PolicyInfo *polinfo);
249 static void dumpPublication(Archive *fout, PublicationInfo *pubinfo);
250 static void dumpPublicationTable(Archive *fout, PublicationRelInfo *pubrinfo);
251 static void dumpSubscription(Archive *fout, SubscriptionInfo *subinfo);
252 static void dumpDatabase(Archive *AH);
253 static void dumpEncoding(Archive *AH);
254 static void dumpStdStrings(Archive *AH);
256  PQExpBuffer upgrade_buffer, Oid pg_type_oid);
258  PQExpBuffer upgrade_buffer, Oid pg_rel_oid);
259 static void binary_upgrade_set_pg_class_oids(Archive *fout,
260  PQExpBuffer upgrade_buffer,
261  Oid pg_class_oid, bool is_index);
262 static void binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
263  DumpableObject *dobj,
264  const char *objlabel);
265 static const char *getAttrName(int attrnum, TableInfo *tblInfo);
266 static const char *fmtCopyColumnList(const TableInfo *ti, PQExpBuffer buffer);
267 static bool nonemptyReloptions(const char *reloptions);
268 static void appendReloptionsArrayAH(PQExpBuffer buffer, const char *reloptions,
269  const char *prefix, Archive *fout);
270 static char *get_synchronized_snapshot(Archive *fout);
271 static void setupDumpWorker(Archive *AHX);
272 
273 
274 int
275 main(int argc, char **argv)
276 {
277  int c;
278  const char *filename = NULL;
279  const char *format = "p";
280  TableInfo *tblinfo;
281  int numTables;
282  DumpableObject **dobjs;
283  int numObjs;
284  DumpableObject *boundaryObjs;
285  int i;
286  int optindex;
287  RestoreOptions *ropt;
288  Archive *fout; /* the script file */
289  const char *dumpencoding = NULL;
290  const char *dumpsnapshot = NULL;
291  char *use_role = NULL;
292  int numWorkers = 1;
293  trivalue prompt_password = TRI_DEFAULT;
294  int compressLevel = -1;
295  int plainText = 0;
296  ArchiveFormat archiveFormat = archUnknown;
297  ArchiveMode archiveMode;
298 
299  static DumpOptions dopt;
300 
301  static struct option long_options[] = {
302  {"data-only", no_argument, NULL, 'a'},
303  {"blobs", no_argument, NULL, 'b'},
304  {"no-blobs", no_argument, NULL, 'B'},
305  {"clean", no_argument, NULL, 'c'},
306  {"create", no_argument, NULL, 'C'},
307  {"dbname", required_argument, NULL, 'd'},
308  {"file", required_argument, NULL, 'f'},
309  {"format", required_argument, NULL, 'F'},
310  {"host", required_argument, NULL, 'h'},
311  {"jobs", 1, NULL, 'j'},
312  {"no-reconnect", no_argument, NULL, 'R'},
313  {"oids", no_argument, NULL, 'o'},
314  {"no-owner", no_argument, NULL, 'O'},
315  {"port", required_argument, NULL, 'p'},
316  {"schema", required_argument, NULL, 'n'},
317  {"exclude-schema", required_argument, NULL, 'N'},
318  {"schema-only", no_argument, NULL, 's'},
319  {"superuser", required_argument, NULL, 'S'},
320  {"table", required_argument, NULL, 't'},
321  {"exclude-table", required_argument, NULL, 'T'},
322  {"no-password", no_argument, NULL, 'w'},
323  {"password", no_argument, NULL, 'W'},
324  {"username", required_argument, NULL, 'U'},
325  {"verbose", no_argument, NULL, 'v'},
326  {"no-privileges", no_argument, NULL, 'x'},
327  {"no-acl", no_argument, NULL, 'x'},
328  {"compress", required_argument, NULL, 'Z'},
329  {"encoding", required_argument, NULL, 'E'},
330  {"help", no_argument, NULL, '?'},
331  {"version", no_argument, NULL, 'V'},
332 
333  /*
334  * the following options don't have an equivalent short option letter
335  */
336  {"attribute-inserts", no_argument, &dopt.column_inserts, 1},
337  {"binary-upgrade", no_argument, &dopt.binary_upgrade, 1},
338  {"column-inserts", no_argument, &dopt.column_inserts, 1},
339  {"disable-dollar-quoting", no_argument, &dopt.disable_dollar_quoting, 1},
340  {"disable-triggers", no_argument, &dopt.disable_triggers, 1},
341  {"enable-row-security", no_argument, &dopt.enable_row_security, 1},
342  {"exclude-table-data", required_argument, NULL, 4},
343  {"if-exists", no_argument, &dopt.if_exists, 1},
344  {"include-subscriptions", no_argument, &dopt.include_subscriptions, 1},
345  {"inserts", no_argument, &dopt.dump_inserts, 1},
346  {"lock-wait-timeout", required_argument, NULL, 2},
347  {"no-tablespaces", no_argument, &dopt.outputNoTablespaces, 1},
348  {"quote-all-identifiers", no_argument, &quote_all_identifiers, 1},
349  {"role", required_argument, NULL, 3},
350  {"section", required_argument, NULL, 5},
351  {"serializable-deferrable", no_argument, &dopt.serializable_deferrable, 1},
352  {"snapshot", required_argument, NULL, 6},
353  {"strict-names", no_argument, &strict_names, 1},
354  {"use-set-session-authorization", no_argument, &dopt.use_setsessauth, 1},
355  {"no-security-labels", no_argument, &dopt.no_security_labels, 1},
356  {"no-subscription-connect", no_argument, &dopt.no_subscription_connect, 1},
357  {"no-synchronized-snapshots", no_argument, &dopt.no_synchronized_snapshots, 1},
358  {"no-unlogged-table-data", no_argument, &dopt.no_unlogged_table_data, 1},
359  {"no-sync", no_argument, NULL, 7},
360 
361  {NULL, 0, NULL, 0}
362  };
363 
364  set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_dump"));
365 
366  /*
367  * Initialize what we need for parallel execution, especially for thread
368  * support on Windows.
369  */
371 
372  g_verbose = false;
373 
374  strcpy(g_comment_start, "-- ");
375  g_comment_end[0] = '\0';
376  strcpy(g_opaque_type, "opaque");
377 
378  progname = get_progname(argv[0]);
379 
380  if (argc > 1)
381  {
382  if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
383  {
384  help(progname);
385  exit_nicely(0);
386  }
387  if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
388  {
389  puts("pg_dump (PostgreSQL) " PG_VERSION);
390  exit_nicely(0);
391  }
392  }
393 
394  InitDumpOptions(&dopt);
395 
396  while ((c = getopt_long(argc, argv, "abBcCd:E:f:F:h:j:n:N:oOp:RsS:t:T:U:vwWxZ:",
397  long_options, &optindex)) != -1)
398  {
399  switch (c)
400  {
401  case 'a': /* Dump data only */
402  dopt.dataOnly = true;
403  break;
404 
405  case 'b': /* Dump blobs */
406  dopt.outputBlobs = true;
407  break;
408 
409  case 'B': /* Don't dump blobs */
410  dopt.dontOutputBlobs = true;
411  break;
412 
413  case 'c': /* clean (i.e., drop) schema prior to create */
414  dopt.outputClean = 1;
415  break;
416 
417  case 'C': /* Create DB */
418  dopt.outputCreateDB = 1;
419  break;
420 
421  case 'd': /* database name */
422  dopt.dbname = pg_strdup(optarg);
423  break;
424 
425  case 'E': /* Dump encoding */
426  dumpencoding = pg_strdup(optarg);
427  break;
428 
429  case 'f':
430  filename = pg_strdup(optarg);
431  break;
432 
433  case 'F':
434  format = pg_strdup(optarg);
435  break;
436 
437  case 'h': /* server host */
438  dopt.pghost = pg_strdup(optarg);
439  break;
440 
441  case 'j': /* number of dump jobs */
442  numWorkers = atoi(optarg);
443  break;
444 
445  case 'n': /* include schema(s) */
446  simple_string_list_append(&schema_include_patterns, optarg);
447  dopt.include_everything = false;
448  break;
449 
450  case 'N': /* exclude schema(s) */
451  simple_string_list_append(&schema_exclude_patterns, optarg);
452  break;
453 
454  case 'o': /* Dump oids */
455  dopt.oids = true;
456  break;
457 
458  case 'O': /* Don't reconnect to match owner */
459  dopt.outputNoOwner = 1;
460  break;
461 
462  case 'p': /* server port */
463  dopt.pgport = pg_strdup(optarg);
464  break;
465 
466  case 'R':
467  /* no-op, still accepted for backwards compatibility */
468  break;
469 
470  case 's': /* dump schema only */
471  dopt.schemaOnly = true;
472  break;
473 
474  case 'S': /* Username for superuser in plain text output */
476  break;
477 
478  case 't': /* include table(s) */
479  simple_string_list_append(&table_include_patterns, optarg);
480  dopt.include_everything = false;
481  break;
482 
483  case 'T': /* exclude table(s) */
484  simple_string_list_append(&table_exclude_patterns, optarg);
485  break;
486 
487  case 'U':
488  dopt.username = pg_strdup(optarg);
489  break;
490 
491  case 'v': /* verbose */
492  g_verbose = true;
493  break;
494 
495  case 'w':
496  prompt_password = TRI_NO;
497  break;
498 
499  case 'W':
500  prompt_password = TRI_YES;
501  break;
502 
503  case 'x': /* skip ACL dump */
504  dopt.aclsSkip = true;
505  break;
506 
507  case 'Z': /* Compression Level */
508  compressLevel = atoi(optarg);
509  if (compressLevel < 0 || compressLevel > 9)
510  {
511  write_msg(NULL, "compression level must be in range 0..9\n");
512  exit_nicely(1);
513  }
514  break;
515 
516  case 0:
517  /* This covers the long options. */
518  break;
519 
520  case 2: /* lock-wait-timeout */
522  break;
523 
524  case 3: /* SET ROLE */
525  use_role = pg_strdup(optarg);
526  break;
527 
528  case 4: /* exclude table(s) data */
529  simple_string_list_append(&tabledata_exclude_patterns, optarg);
530  break;
531 
532  case 5: /* section */
534  break;
535 
536  case 6: /* snapshot */
537  dumpsnapshot = pg_strdup(optarg);
538  break;
539 
540  case 7: /* no-sync */
541  dosync = false;
542  break;
543 
544  default:
545  fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
546  exit_nicely(1);
547  }
548  }
549 
550  /*
551  * Non-option argument specifies database name as long as it wasn't
552  * already specified with -d / --dbname
553  */
554  if (optind < argc && dopt.dbname == NULL)
555  dopt.dbname = argv[optind++];
556 
557  /* Complain if any arguments remain */
558  if (optind < argc)
559  {
560  fprintf(stderr, _("%s: too many command-line arguments (first is \"%s\")\n"),
561  progname, argv[optind]);
562  fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
563  progname);
564  exit_nicely(1);
565  }
566 
567  /* --column-inserts implies --inserts */
568  if (dopt.column_inserts)
569  dopt.dump_inserts = 1;
570 
571  /*
572  * Binary upgrade mode implies dumping sequence data even in schema-only
573  * mode. This is not exposed as a separate option, but kept separate
574  * internally for clarity.
575  */
576  if (dopt.binary_upgrade)
577  dopt.sequence_data = 1;
578 
579  if (dopt.dataOnly && dopt.schemaOnly)
580  {
581  write_msg(NULL, "options -s/--schema-only and -a/--data-only cannot be used together\n");
582  exit_nicely(1);
583  }
584 
585  if (dopt.dataOnly && dopt.outputClean)
586  {
587  write_msg(NULL, "options -c/--clean and -a/--data-only cannot be used together\n");
588  exit_nicely(1);
589  }
590 
591  if (dopt.dump_inserts && dopt.oids)
592  {
593  write_msg(NULL, "options --inserts/--column-inserts and -o/--oids cannot be used together\n");
594  write_msg(NULL, "(The INSERT command cannot set OIDs.)\n");
595  exit_nicely(1);
596  }
597 
598  if (dopt.if_exists && !dopt.outputClean)
599  exit_horribly(NULL, "option --if-exists requires option -c/--clean\n");
600 
601  /* Identify archive format to emit */
602  archiveFormat = parseArchiveFormat(format, &archiveMode);
603 
604  /* archiveFormat specific setup */
605  if (archiveFormat == archNull)
606  plainText = 1;
607 
608  /* Custom and directory formats are compressed by default, others not */
609  if (compressLevel == -1)
610  {
611 #ifdef HAVE_LIBZ
612  if (archiveFormat == archCustom || archiveFormat == archDirectory)
613  compressLevel = Z_DEFAULT_COMPRESSION;
614  else
615 #endif
616  compressLevel = 0;
617  }
618 
619 #ifndef HAVE_LIBZ
620  if (compressLevel != 0)
621  write_msg(NULL, "WARNING: requested compression not available in this "
622  "installation -- archive will be uncompressed\n");
623  compressLevel = 0;
624 #endif
625 
626  /*
627  * On Windows we can only have at most MAXIMUM_WAIT_OBJECTS (= 64 usually)
628  * parallel jobs because that's the maximum limit for the
629  * WaitForMultipleObjects() call.
630  */
631  if (numWorkers <= 0
632 #ifdef WIN32
633  || numWorkers > MAXIMUM_WAIT_OBJECTS
634 #endif
635  )
636  exit_horribly(NULL, "invalid number of parallel jobs\n");
637 
638  /* Parallel backup only in the directory archive format so far */
639  if (archiveFormat != archDirectory && numWorkers > 1)
640  exit_horribly(NULL, "parallel backup only supported by the directory format\n");
641 
642  /* Open the output file */
643  fout = CreateArchive(filename, archiveFormat, compressLevel, dosync,
644  archiveMode, setupDumpWorker);
645 
646  /* Make dump options accessible right away */
647  SetArchiveOptions(fout, &dopt, NULL);
648 
649  /* Register the cleanup hook */
650  on_exit_close_archive(fout);
651 
652  /* Let the archiver know how noisy to be */
653  fout->verbose = g_verbose;
654 
655  /*
656  * We allow the server to be back to 8.0, and up to any minor release of
657  * our own major version. (See also version check in pg_dumpall.c.)
658  */
659  fout->minRemoteVersion = 80000;
660  fout->maxRemoteVersion = (PG_VERSION_NUM / 100) * 100 + 99;
661 
662  fout->numWorkers = numWorkers;
663 
664  /*
665  * Open the database using the Archiver, so it knows about it. Errors mean
666  * death.
667  */
668  ConnectDatabase(fout, dopt.dbname, dopt.pghost, dopt.pgport, dopt.username, prompt_password);
669  setup_connection(fout, dumpencoding, dumpsnapshot, use_role);
670 
671  /*
672  * Disable security label support if server version < v9.1.x (prevents
673  * access to nonexistent pg_seclabel catalog)
674  */
675  if (fout->remoteVersion < 90100)
676  dopt.no_security_labels = 1;
677 
678  /*
679  * On hot standby slaves, never try to dump unlogged table data, since it
680  * will just throw an error.
681  */
682  if (fout->isStandby)
683  dopt.no_unlogged_table_data = true;
684 
685  /* Select the appropriate subquery to convert user IDs to names */
686  if (fout->remoteVersion >= 80100)
687  username_subquery = "SELECT rolname FROM pg_catalog.pg_roles WHERE oid =";
688  else
689  username_subquery = "SELECT usename FROM pg_catalog.pg_user WHERE usesysid =";
690 
691  /* check the version for the synchronized snapshots feature */
692  if (numWorkers > 1 && fout->remoteVersion < 90200
693  && !dopt.no_synchronized_snapshots)
695  "Synchronized snapshots are not supported by this server version.\n"
696  "Run with --no-synchronized-snapshots instead if you do not need\n"
697  "synchronized snapshots.\n");
698 
699  /* check the version when a snapshot is explicitly specified by user */
700  if (dumpsnapshot && fout->remoteVersion < 90200)
702  "Exported snapshots are not supported by this server version.\n");
703 
704  /*
705  * Find the last built-in OID, if needed (prior to 8.1)
706  *
707  * With 8.1 and above, we can just use FirstNormalObjectId - 1.
708  */
709  if (fout->remoteVersion < 80100)
711  PQdb(GetConnection(fout)));
712  else
714 
715  if (g_verbose)
716  write_msg(NULL, "last built-in OID is %u\n", g_last_builtin_oid);
717 
718  /* Expand schema selection patterns into OID lists */
719  if (schema_include_patterns.head != NULL)
720  {
721  expand_schema_name_patterns(fout, &schema_include_patterns,
722  &schema_include_oids,
723  strict_names);
724  if (schema_include_oids.head == NULL)
725  exit_horribly(NULL, "no matching schemas were found\n");
726  }
727  expand_schema_name_patterns(fout, &schema_exclude_patterns,
728  &schema_exclude_oids,
729  false);
730  /* non-matching exclusion patterns aren't an error */
731 
732  /* Expand table selection patterns into OID lists */
733  if (table_include_patterns.head != NULL)
734  {
735  expand_table_name_patterns(fout, &table_include_patterns,
736  &table_include_oids,
737  strict_names);
738  if (table_include_oids.head == NULL)
739  exit_horribly(NULL, "no matching tables were found\n");
740  }
741  expand_table_name_patterns(fout, &table_exclude_patterns,
742  &table_exclude_oids,
743  false);
744 
745  expand_table_name_patterns(fout, &tabledata_exclude_patterns,
746  &tabledata_exclude_oids,
747  false);
748 
749  /* non-matching exclusion patterns aren't an error */
750 
751  /*
752  * Dumping blobs is the default for dumps where an inclusion switch is not
753  * used (an "include everything" dump). -B can be used to exclude blobs
754  * from those dumps. -b can be used to include blobs even when an
755  * inclusion switch is used.
756  *
757  * -s means "schema only" and blobs are data, not schema, so we never
758  * include blobs when -s is used.
759  */
760  if (dopt.include_everything && !dopt.schemaOnly && !dopt.dontOutputBlobs)
761  dopt.outputBlobs = true;
762 
763  /*
764  * Now scan the database and create DumpableObject structs for all the
765  * objects we intend to dump.
766  */
767  tblinfo = getSchemaData(fout, &numTables);
768 
769  if (fout->remoteVersion < 80400)
770  guessConstraintInheritance(tblinfo, numTables);
771 
772  if (!dopt.schemaOnly)
773  {
774  getTableData(&dopt, tblinfo, numTables, dopt.oids, 0);
776  if (dopt.dataOnly)
778  }
779 
780  if (dopt.schemaOnly && dopt.sequence_data)
781  getTableData(&dopt, tblinfo, numTables, dopt.oids, RELKIND_SEQUENCE);
782 
783  /*
784  * In binary-upgrade mode, we do not have to worry about the actual blob
785  * data or the associated metadata that resides in the pg_largeobject and
786  * pg_largeobject_metadata tables, respectivly.
787  *
788  * However, we do need to collect blob information as there may be
789  * comments or other information on blobs that we do need to dump out.
790  */
791  if (dopt.outputBlobs || dopt.binary_upgrade)
792  getBlobs(fout);
793 
794  /*
795  * Collect dependency data to assist in ordering the objects.
796  */
797  getDependencies(fout);
798 
799  /* Lastly, create dummy objects to represent the section boundaries */
800  boundaryObjs = createBoundaryObjects();
801 
802  /* Get pointers to all the known DumpableObjects */
803  getDumpableObjects(&dobjs, &numObjs);
804 
805  /*
806  * Add dummy dependencies to enforce the dump section ordering.
807  */
808  addBoundaryDependencies(dobjs, numObjs, boundaryObjs);
809 
810  /*
811  * Sort the objects into a safe dump order (no forward references).
812  *
813  * We rely on dependency information to help us determine a safe order, so
814  * the initial sort is mostly for cosmetic purposes: we sort by name to
815  * ensure that logically identical schemas will dump identically.
816  */
817  sortDumpableObjectsByTypeName(dobjs, numObjs);
818 
819  /* If we do a parallel dump, we want the largest tables to go first */
820  if (archiveFormat == archDirectory && numWorkers > 1)
821  sortDataAndIndexObjectsBySize(dobjs, numObjs);
822 
823  sortDumpableObjects(dobjs, numObjs,
824  boundaryObjs[0].dumpId, boundaryObjs[1].dumpId);
825 
826  /*
827  * Create archive TOC entries for all the objects to be dumped, in a safe
828  * order.
829  */
830 
831  /* First the special ENCODING and STDSTRINGS entries. */
832  dumpEncoding(fout);
833  dumpStdStrings(fout);
834 
835  /* The database item is always next, unless we don't want it at all */
836  if (dopt.include_everything && !dopt.dataOnly)
837  dumpDatabase(fout);
838 
839  /* Now the rearrangeable objects. */
840  for (i = 0; i < numObjs; i++)
841  dumpDumpableObject(fout, dobjs[i]);
842 
843  /*
844  * Set up options info to ensure we dump what we want.
845  */
846  ropt = NewRestoreOptions();
847  ropt->filename = filename;
848 
849  /* if you change this list, see dumpOptionsFromRestoreOptions */
850  ropt->dropSchema = dopt.outputClean;
851  ropt->dataOnly = dopt.dataOnly;
852  ropt->schemaOnly = dopt.schemaOnly;
853  ropt->if_exists = dopt.if_exists;
854  ropt->column_inserts = dopt.column_inserts;
855  ropt->dumpSections = dopt.dumpSections;
856  ropt->aclsSkip = dopt.aclsSkip;
857  ropt->superuser = dopt.outputSuperuser;
858  ropt->createDB = dopt.outputCreateDB;
859  ropt->noOwner = dopt.outputNoOwner;
860  ropt->noTablespace = dopt.outputNoTablespaces;
861  ropt->disable_triggers = dopt.disable_triggers;
862  ropt->use_setsessauth = dopt.use_setsessauth;
864  ropt->dump_inserts = dopt.dump_inserts;
866  ropt->lockWaitTimeout = dopt.lockWaitTimeout;
869  ropt->sequence_data = dopt.sequence_data;
871  ropt->binary_upgrade = dopt.binary_upgrade;
872 
873  if (compressLevel == -1)
874  ropt->compression = 0;
875  else
876  ropt->compression = compressLevel;
877 
878  ropt->suppressDumpWarnings = true; /* We've already shown them */
879 
880  SetArchiveOptions(fout, &dopt, ropt);
881 
882  /* Mark which entries should be output */
884 
885  /*
886  * The archive's TOC entries are now marked as to which ones will actually
887  * be output, so we can set up their dependency lists properly. This isn't
888  * necessary for plain-text output, though.
889  */
890  if (!plainText)
892 
893  /*
894  * And finally we can do the actual output.
895  *
896  * Note: for non-plain-text output formats, the output file is written
897  * inside CloseArchive(). This is, um, bizarre; but not worth changing
898  * right now.
899  */
900  if (plainText)
901  RestoreArchive(fout);
902 
903  CloseArchive(fout);
904 
905  exit_nicely(0);
906 }
907 
908 
/*
 * help
 *	  Print the --help output for pg_dump: usage line plus the general,
 *	  content-control, and connection option groups.
 *
 * All user-visible strings go through _() for translation.  The description
 * column is aligned by padding inside the string literals (translators are
 * expected to preserve the layout).
 */
static void
help(const char *progname)
{
	printf(_("%s dumps a database as a text file or to other formats.\n\n"), progname);
	printf(_("Usage:\n"));
	printf(_("  %s [OPTION]... [DBNAME]\n"), progname);

	printf(_("\nGeneral options:\n"));
	printf(_("  -f, --file=FILENAME          output file or directory name\n"));
	printf(_("  -F, --format=c|d|t|p         output file format (custom, directory, tar,\n"
			 "                               plain text (default))\n"));
	printf(_("  -j, --jobs=NUM               use this many parallel jobs to dump\n"));
	printf(_("  -v, --verbose                verbose mode\n"));
	printf(_("  -V, --version                output version information, then exit\n"));
	printf(_("  -Z, --compress=0-9           compression level for compressed formats\n"));
	printf(_("  --lock-wait-timeout=TIMEOUT  fail after waiting TIMEOUT for a table lock\n"));
	printf(_("  --no-sync                    do not wait for changes to be written safely to disk\n"));
	printf(_("  -?, --help                   show this help, then exit\n"));

	printf(_("\nOptions controlling the output content:\n"));
	printf(_("  -a, --data-only              dump only the data, not the schema\n"));
	printf(_("  -b, --blobs                  include large objects in dump\n"));
	printf(_("  -B, --no-blobs               exclude large objects in dump\n"));
	printf(_("  -c, --clean                  clean (drop) database objects before recreating\n"));
	printf(_("  -C, --create                 include commands to create database in dump\n"));
	printf(_("  -E, --encoding=ENCODING      dump the data in encoding ENCODING\n"));
	printf(_("  -n, --schema=SCHEMA          dump the named schema(s) only\n"));
	printf(_("  -N, --exclude-schema=SCHEMA  do NOT dump the named schema(s)\n"));
	printf(_("  -o, --oids                   include OIDs in dump\n"));
	printf(_("  -O, --no-owner               skip restoration of object ownership in\n"
			 "                               plain-text format\n"));
	printf(_("  -s, --schema-only            dump only the schema, no data\n"));
	printf(_("  -S, --superuser=NAME         superuser user name to use in plain-text format\n"));
	printf(_("  -t, --table=TABLE            dump the named table(s) only\n"));
	printf(_("  -T, --exclude-table=TABLE    do NOT dump the named table(s)\n"));
	printf(_("  -x, --no-privileges          do not dump privileges (grant/revoke)\n"));
	printf(_("  --binary-upgrade             for use by upgrade utilities only\n"));
	printf(_("  --column-inserts             dump data as INSERT commands with column names\n"));
	printf(_("  --disable-dollar-quoting     disable dollar quoting, use SQL standard quoting\n"));
	printf(_("  --disable-triggers           disable triggers during data-only restore\n"));
	printf(_("  --enable-row-security        enable row security (dump only content user has\n"
			 "                               access to)\n"));
	printf(_("  --exclude-table-data=TABLE   do NOT dump data for the named table(s)\n"));
	printf(_("  --if-exists                  use IF EXISTS when dropping objects\n"));
	printf(_("  --include-subscriptions      dump logical replication subscriptions\n"));
	printf(_("  --inserts                    dump data as INSERT commands, rather than COPY\n"));
	printf(_("  --no-security-labels         do not dump security label assignments\n"));
	printf(_("  --no-subscription-connect    dump subscriptions so they don't connect on restore\n"));
	printf(_("  --no-synchronized-snapshots  do not use synchronized snapshots in parallel jobs\n"));
	printf(_("  --no-tablespaces             do not dump tablespace assignments\n"));
	printf(_("  --no-unlogged-table-data     do not dump unlogged table data\n"));
	printf(_("  --quote-all-identifiers      quote all identifiers, even if not key words\n"));
	printf(_("  --section=SECTION            dump named section (pre-data, data, or post-data)\n"));
	printf(_("  --serializable-deferrable    wait until the dump can run without anomalies\n"));
	printf(_("  --snapshot=SNAPSHOT          use given snapshot for the dump\n"));
	printf(_("  --strict-names               require table and/or schema include patterns to\n"
			 "                               match at least one entity each\n"));
	printf(_("  --use-set-session-authorization\n"
			 "                               use SET SESSION AUTHORIZATION commands instead of\n"
			 "                               ALTER OWNER commands to set ownership\n"));

	printf(_("\nConnection options:\n"));
	printf(_("  -d, --dbname=DBNAME      database to dump\n"));
	printf(_("  -h, --host=HOSTNAME      database server host or socket directory\n"));
	printf(_("  -p, --port=PORT          database server port number\n"));
	printf(_("  -U, --username=NAME      connect as specified database user\n"));
	printf(_("  -w, --no-password        never prompt for password\n"));
	printf(_("  -W, --password           force password prompt (should happen automatically)\n"));
	printf(_("  --role=ROLENAME          do SET ROLE before dump\n"));

	printf(_("\nIf no database name is supplied, then the PGDATABASE environment\n"
			 "variable value is used.\n\n"));
	printf(_("Report bugs to <pgsql-bugs@postgresql.org>.\n"));
}
983 
984 static void
985 setup_connection(Archive *AH, const char *dumpencoding,
986  const char *dumpsnapshot, char *use_role)
987 {
988  DumpOptions *dopt = AH->dopt;
989  PGconn *conn = GetConnection(AH);
990  const char *std_strings;
991 
992  /*
993  * Set the client encoding if requested.
994  */
995  if (dumpencoding)
996  {
997  if (PQsetClientEncoding(conn, dumpencoding) < 0)
998  exit_horribly(NULL, "invalid client encoding \"%s\" specified\n",
999  dumpencoding);
1000  }
1001 
1002  /*
1003  * Get the active encoding and the standard_conforming_strings setting, so
1004  * we know how to escape strings.
1005  */
1006  AH->encoding = PQclientEncoding(conn);
1007 
1008  std_strings = PQparameterStatus(conn, "standard_conforming_strings");
1009  AH->std_strings = (std_strings && strcmp(std_strings, "on") == 0);
1010 
1011  /*
1012  * Set the role if requested. In a parallel dump worker, we'll be passed
1013  * use_role == NULL, but AH->use_role is already set (if user specified it
1014  * originally) and we should use that.
1015  */
1016  if (!use_role && AH->use_role)
1017  use_role = AH->use_role;
1018 
1019  /* Set the role if requested */
1020  if (use_role && AH->remoteVersion >= 80100)
1021  {
1022  PQExpBuffer query = createPQExpBuffer();
1023 
1024  appendPQExpBuffer(query, "SET ROLE %s", fmtId(use_role));
1025  ExecuteSqlStatement(AH, query->data);
1026  destroyPQExpBuffer(query);
1027 
1028  /* save it for possible later use by parallel workers */
1029  if (!AH->use_role)
1030  AH->use_role = pg_strdup(use_role);
1031  }
1032 
1033  /* Set the datestyle to ISO to ensure the dump's portability */
1034  ExecuteSqlStatement(AH, "SET DATESTYLE = ISO");
1035 
1036  /* Likewise, avoid using sql_standard intervalstyle */
1037  if (AH->remoteVersion >= 80400)
1038  ExecuteSqlStatement(AH, "SET INTERVALSTYLE = POSTGRES");
1039 
1040  /*
1041  * Set extra_float_digits so that we can dump float data exactly (given
1042  * correctly implemented float I/O code, anyway)
1043  */
1044  if (AH->remoteVersion >= 90000)
1045  ExecuteSqlStatement(AH, "SET extra_float_digits TO 3");
1046  else
1047  ExecuteSqlStatement(AH, "SET extra_float_digits TO 2");
1048 
1049  /*
1050  * If synchronized scanning is supported, disable it, to prevent
1051  * unpredictable changes in row ordering across a dump and reload.
1052  */
1053  if (AH->remoteVersion >= 80300)
1054  ExecuteSqlStatement(AH, "SET synchronize_seqscans TO off");
1055 
1056  /*
1057  * Disable timeouts if supported.
1058  */
1059  ExecuteSqlStatement(AH, "SET statement_timeout = 0");
1060  if (AH->remoteVersion >= 90300)
1061  ExecuteSqlStatement(AH, "SET lock_timeout = 0");
1062  if (AH->remoteVersion >= 90600)
1063  ExecuteSqlStatement(AH, "SET idle_in_transaction_session_timeout = 0");
1064 
1065  /*
1066  * Quote all identifiers, if requested.
1067  */
1068  if (quote_all_identifiers && AH->remoteVersion >= 90100)
1069  ExecuteSqlStatement(AH, "SET quote_all_identifiers = true");
1070 
1071  /*
1072  * Adjust row-security mode, if supported.
1073  */
1074  if (AH->remoteVersion >= 90500)
1075  {
1076  if (dopt->enable_row_security)
1077  ExecuteSqlStatement(AH, "SET row_security = on");
1078  else
1079  ExecuteSqlStatement(AH, "SET row_security = off");
1080  }
1081 
1082  /*
1083  * Start transaction-snapshot mode transaction to dump consistent data.
1084  */
1085  ExecuteSqlStatement(AH, "BEGIN");
1086  if (AH->remoteVersion >= 90100)
1087  {
1088  /*
1089  * To support the combination of serializable_deferrable with the jobs
1090  * option we use REPEATABLE READ for the worker connections that are
1091  * passed a snapshot. As long as the snapshot is acquired in a
1092  * SERIALIZABLE, READ ONLY, DEFERRABLE transaction, its use within a
1093  * REPEATABLE READ transaction provides the appropriate integrity
1094  * guarantees. This is a kluge, but safe for back-patching.
1095  */
1096  if (dopt->serializable_deferrable && AH->sync_snapshot_id == NULL)
1098  "SET TRANSACTION ISOLATION LEVEL "
1099  "SERIALIZABLE, READ ONLY, DEFERRABLE");
1100  else
1102  "SET TRANSACTION ISOLATION LEVEL "
1103  "REPEATABLE READ, READ ONLY");
1104  }
1105  else
1106  {
1108  "SET TRANSACTION ISOLATION LEVEL "
1109  "SERIALIZABLE, READ ONLY");
1110  }
1111 
1112  /*
1113  * If user specified a snapshot to use, select that. In a parallel dump
1114  * worker, we'll be passed dumpsnapshot == NULL, but AH->sync_snapshot_id
1115  * is already set (if the server can handle it) and we should use that.
1116  */
1117  if (dumpsnapshot)
1118  AH->sync_snapshot_id = pg_strdup(dumpsnapshot);
1119 
1120  if (AH->sync_snapshot_id)
1121  {
1122  PQExpBuffer query = createPQExpBuffer();
1123 
1124  appendPQExpBuffer(query, "SET TRANSACTION SNAPSHOT ");
1125  appendStringLiteralConn(query, AH->sync_snapshot_id, conn);
1126  ExecuteSqlStatement(AH, query->data);
1127  destroyPQExpBuffer(query);
1128  }
1129  else if (AH->numWorkers > 1 &&
1130  AH->remoteVersion >= 90200 &&
1132  {
1133  if (AH->isStandby)
1135  "Synchronized snapshots are not supported on standby servers.\n"
1136  "Run with --no-synchronized-snapshots instead if you do not need\n"
1137  "synchronized snapshots.\n");
1138 
1139 
1141  }
1142 }
1143 
1144 /* Set up connection for a parallel worker process */
1145 static void
1147 {
1148  /*
1149  * We want to re-select all the same values the master connection is
1150  * using. We'll have inherited directly-usable values in
1151  * AH->sync_snapshot_id and AH->use_role, but we need to translate the
1152  * inherited encoding value back to a string to pass to setup_connection.
1153  */
1154  setup_connection(AH,
1156  NULL,
1157  NULL);
1158 }
1159 
1160 static char *
1162 {
1163  char *query = "SELECT pg_catalog.pg_export_snapshot()";
1164  char *result;
1165  PGresult *res;
1166 
1167  res = ExecuteSqlQueryForSingleRow(fout, query);
1168  result = pg_strdup(PQgetvalue(res, 0, 0));
1169  PQclear(res);
1170 
1171  return result;
1172 }
1173 
1174 static ArchiveFormat
1176 {
1177  ArchiveFormat archiveFormat;
1178 
1179  *mode = archModeWrite;
1180 
1181  if (pg_strcasecmp(format, "a") == 0 || pg_strcasecmp(format, "append") == 0)
1182  {
1183  /* This is used by pg_dumpall, and is not documented */
1184  archiveFormat = archNull;
1185  *mode = archModeAppend;
1186  }
1187  else if (pg_strcasecmp(format, "c") == 0)
1188  archiveFormat = archCustom;
1189  else if (pg_strcasecmp(format, "custom") == 0)
1190  archiveFormat = archCustom;
1191  else if (pg_strcasecmp(format, "d") == 0)
1192  archiveFormat = archDirectory;
1193  else if (pg_strcasecmp(format, "directory") == 0)
1194  archiveFormat = archDirectory;
1195  else if (pg_strcasecmp(format, "p") == 0)
1196  archiveFormat = archNull;
1197  else if (pg_strcasecmp(format, "plain") == 0)
1198  archiveFormat = archNull;
1199  else if (pg_strcasecmp(format, "t") == 0)
1200  archiveFormat = archTar;
1201  else if (pg_strcasecmp(format, "tar") == 0)
1202  archiveFormat = archTar;
1203  else
1204  exit_horribly(NULL, "invalid output format \"%s\" specified\n", format);
1205  return archiveFormat;
1206 }
1207 
1208 /*
1209  * Find the OIDs of all schemas matching the given list of patterns,
1210  * and append them to the given OID list.
1211  */
1212 static void
1214  SimpleStringList *patterns,
1215  SimpleOidList *oids,
1216  bool strict_names)
1217 {
1218  PQExpBuffer query;
1219  PGresult *res;
1220  SimpleStringListCell *cell;
1221  int i;
1222 
1223  if (patterns->head == NULL)
1224  return; /* nothing to do */
1225 
1226  query = createPQExpBuffer();
1227 
1228  /*
1229  * The loop below runs multiple SELECTs might sometimes result in
1230  * duplicate entries in the OID list, but we don't care.
1231  */
1232 
1233  for (cell = patterns->head; cell; cell = cell->next)
1234  {
1235  appendPQExpBuffer(query,
1236  "SELECT oid FROM pg_catalog.pg_namespace n\n");
1237  processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1238  false, NULL, "n.nspname", NULL, NULL);
1239 
1240  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1241  if (strict_names && PQntuples(res) == 0)
1242  exit_horribly(NULL, "no matching schemas were found for pattern \"%s\"\n", cell->val);
1243 
1244  for (i = 0; i < PQntuples(res); i++)
1245  {
1246  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1247  }
1248 
1249  PQclear(res);
1250  resetPQExpBuffer(query);
1251  }
1252 
1253  destroyPQExpBuffer(query);
1254 }
1255 
1256 /*
1257  * Find the OIDs of all tables matching the given list of patterns,
1258  * and append them to the given OID list.
1259  */
1260 static void
1262  SimpleStringList *patterns, SimpleOidList *oids,
1263  bool strict_names)
1264 {
1265  PQExpBuffer query;
1266  PGresult *res;
1267  SimpleStringListCell *cell;
1268  int i;
1269 
1270  if (patterns->head == NULL)
1271  return; /* nothing to do */
1272 
1273  query = createPQExpBuffer();
1274 
1275  /*
1276  * this might sometimes result in duplicate entries in the OID list, but
1277  * we don't care.
1278  */
1279 
1280  for (cell = patterns->head; cell; cell = cell->next)
1281  {
1282  appendPQExpBuffer(query,
1283  "SELECT c.oid"
1284  "\nFROM pg_catalog.pg_class c"
1285  "\n LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace"
1286  "\nWHERE c.relkind in ('%c', '%c', '%c', '%c', '%c', '%c')\n",
1290  processSQLNamePattern(GetConnection(fout), query, cell->val, true,
1291  false, "n.nspname", "c.relname", NULL,
1292  "pg_catalog.pg_table_is_visible(c.oid)");
1293 
1294  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1295  if (strict_names && PQntuples(res) == 0)
1296  exit_horribly(NULL, "no matching tables were found for pattern \"%s\"\n", cell->val);
1297 
1298  for (i = 0; i < PQntuples(res); i++)
1299  {
1300  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1301  }
1302 
1303  PQclear(res);
1304  resetPQExpBuffer(query);
1305  }
1306 
1307  destroyPQExpBuffer(query);
1308 }
1309 
1310 /*
1311  * checkExtensionMembership
1312  * Determine whether object is an extension member, and if so,
1313  * record an appropriate dependency and set the object's dump flag.
1314  *
1315  * It's important to call this for each object that could be an extension
1316  * member. Generally, we integrate this with determining the object's
1317  * to-be-dumped-ness, since extension membership overrides other rules for that.
1318  *
1319  * Returns true if object is an extension member, else false.
1320  */
1321 static bool
1323 {
1324  ExtensionInfo *ext = findOwningExtension(dobj->catId);
1325 
1326  if (ext == NULL)
1327  return false;
1328 
1329  dobj->ext_member = true;
1330 
1331  /* Record dependency so that getDependencies needn't deal with that */
1332  addObjectDependency(dobj, ext->dobj.dumpId);
1333 
1334  /*
1335  * In 9.6 and above, mark the member object to have any non-initial ACL,
1336  * policies, and security labels dumped.
1337  *
1338  * Note that any initial ACLs (see pg_init_privs) will be removed when we
1339  * extract the information about the object. We don't provide support for
1340  * initial policies and security labels and it seems unlikely for those to
1341  * ever exist, but we may have to revisit this later.
1342  *
1343  * Prior to 9.6, we do not include any extension member components.
1344  *
1345  * In binary upgrades, we still dump all components of the members
1346  * individually, since the idea is to exactly reproduce the database
1347  * contents rather than replace the extension contents with something
1348  * different.
1349  */
1350  if (fout->dopt->binary_upgrade)
1351  dobj->dump = ext->dobj.dump;
1352  else
1353  {
1354  if (fout->remoteVersion < 90600)
1355  dobj->dump = DUMP_COMPONENT_NONE;
1356  else
1357  dobj->dump = ext->dobj.dump_contains & (DUMP_COMPONENT_ACL |
1360  }
1361 
1362  return true;
1363 }
1364 
1365 /*
1366  * selectDumpableNamespace: policy-setting subroutine
1367  * Mark a namespace as to be dumped or not
1368  */
1369 static void
1371 {
1372  /*
1373  * If specific tables are being dumped, do not dump any complete
1374  * namespaces. If specific namespaces are being dumped, dump just those
1375  * namespaces. Otherwise, dump all non-system namespaces.
1376  */
1377  if (table_include_oids.head != NULL)
1378  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1379  else if (schema_include_oids.head != NULL)
1380  nsinfo->dobj.dump_contains = nsinfo->dobj.dump =
1381  simple_oid_list_member(&schema_include_oids,
1382  nsinfo->dobj.catId.oid) ?
1384  else if (fout->remoteVersion >= 90600 &&
1385  strcmp(nsinfo->dobj.name, "pg_catalog") == 0)
1386  {
1387  /*
1388  * In 9.6 and above, we dump out any ACLs defined in pg_catalog, if
1389  * they are interesting (and not the original ACLs which were set at
1390  * initdb time, see pg_init_privs).
1391  */
1392  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
1393  }
1394  else if (strncmp(nsinfo->dobj.name, "pg_", 3) == 0 ||
1395  strcmp(nsinfo->dobj.name, "information_schema") == 0)
1396  {
1397  /* Other system schemas don't get dumped */
1398  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1399  }
1400  else
1401  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ALL;
1402 
1403  /*
1404  * In any case, a namespace can be excluded by an exclusion switch
1405  */
1406  if (nsinfo->dobj.dump_contains &&
1407  simple_oid_list_member(&schema_exclude_oids,
1408  nsinfo->dobj.catId.oid))
1409  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1410 
1411  /*
1412  * If the schema belongs to an extension, allow extension membership to
1413  * override the dump decision for the schema itself. However, this does
1414  * not change dump_contains, so this won't change what we do with objects
1415  * within the schema. (If they belong to the extension, they'll get
1416  * suppressed by it, otherwise not.)
1417  */
1418  (void) checkExtensionMembership(&nsinfo->dobj, fout);
1419 }
1420 
1421 /*
1422  * selectDumpableTable: policy-setting subroutine
1423  * Mark a table as to be dumped or not
1424  */
1425 static void
1427 {
1428  if (checkExtensionMembership(&tbinfo->dobj, fout))
1429  return; /* extension membership overrides all else */
1430 
1431  /*
1432  * If specific tables are being dumped, dump just those tables; else, dump
1433  * according to the parent namespace's dump flag.
1434  */
1435  if (table_include_oids.head != NULL)
1436  tbinfo->dobj.dump = simple_oid_list_member(&table_include_oids,
1437  tbinfo->dobj.catId.oid) ?
1439  else
1440  tbinfo->dobj.dump = tbinfo->dobj.namespace->dobj.dump_contains;
1441 
1442  /*
1443  * In any case, a table can be excluded by an exclusion switch
1444  */
1445  if (tbinfo->dobj.dump &&
1446  simple_oid_list_member(&table_exclude_oids,
1447  tbinfo->dobj.catId.oid))
1448  tbinfo->dobj.dump = DUMP_COMPONENT_NONE;
1449 }
1450 
1451 /*
1452  * selectDumpableType: policy-setting subroutine
1453  * Mark a type as to be dumped or not
1454  *
1455  * If it's a table's rowtype or an autogenerated array type, we also apply a
1456  * special type code to facilitate sorting into the desired order. (We don't
1457  * want to consider those to be ordinary types because that would bring tables
1458  * up into the datatype part of the dump order.) We still set the object's
1459  * dump flag; that's not going to cause the dummy type to be dumped, but we
1460  * need it so that casts involving such types will be dumped correctly -- see
1461  * dumpCast. This means the flag should be set the same as for the underlying
1462  * object (the table or base type).
1463  */
1464 static void
1466 {
1467  /* skip complex types, except for standalone composite types */
1468  if (OidIsValid(tyinfo->typrelid) &&
1470  {
1471  TableInfo *tytable = findTableByOid(tyinfo->typrelid);
1472 
1473  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1474  if (tytable != NULL)
1475  tyinfo->dobj.dump = tytable->dobj.dump;
1476  else
1477  tyinfo->dobj.dump = DUMP_COMPONENT_NONE;
1478  return;
1479  }
1480 
1481  /* skip auto-generated array types */
1482  if (tyinfo->isArray)
1483  {
1484  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1485 
1486  /*
1487  * Fall through to set the dump flag; we assume that the subsequent
1488  * rules will do the same thing as they would for the array's base
1489  * type. (We cannot reliably look up the base type here, since
1490  * getTypes may not have processed it yet.)
1491  */
1492  }
1493 
1494  if (checkExtensionMembership(&tyinfo->dobj, fout))
1495  return; /* extension membership overrides all else */
1496 
1497  /* Dump based on if the contents of the namespace are being dumped */
1498  tyinfo->dobj.dump = tyinfo->dobj.namespace->dobj.dump_contains;
1499 }
1500 
1501 /*
1502  * selectDumpableDefaultACL: policy-setting subroutine
1503  * Mark a default ACL as to be dumped or not
1504  *
1505  * For per-schema default ACLs, dump if the schema is to be dumped.
1506  * Otherwise dump if we are dumping "everything". Note that dataOnly
1507  * and aclsSkip are checked separately.
1508  */
1509 static void
1511 {
1512  /* Default ACLs can't be extension members */
1513 
1514  if (dinfo->dobj.namespace)
1515  /* default ACLs are considered part of the namespace */
1516  dinfo->dobj.dump = dinfo->dobj.namespace->dobj.dump_contains;
1517  else
1518  dinfo->dobj.dump = dopt->include_everything ?
1520 }
1521 
1522 /*
1523  * selectDumpableCast: policy-setting subroutine
1524  * Mark a cast as to be dumped or not
1525  *
1526  * Casts do not belong to any particular namespace (since they haven't got
1527  * names), nor do they have identifiable owners. To distinguish user-defined
1528  * casts from built-in ones, we must resort to checking whether the cast's
1529  * OID is in the range reserved for initdb.
1530  */
1531 static void
1533 {
1534  if (checkExtensionMembership(&cast->dobj, fout))
1535  return; /* extension membership overrides all else */
1536 
1537  /*
1538  * This would be DUMP_COMPONENT_ACL for from-initdb casts, but they do not
1539  * support ACLs currently.
1540  */
1541  if (cast->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1542  cast->dobj.dump = DUMP_COMPONENT_NONE;
1543  else
1544  cast->dobj.dump = fout->dopt->include_everything ?
1546 }
1547 
1548 /*
1549  * selectDumpableProcLang: policy-setting subroutine
1550  * Mark a procedural language as to be dumped or not
1551  *
1552  * Procedural languages do not belong to any particular namespace. To
1553  * identify built-in languages, we must resort to checking whether the
1554  * language's OID is in the range reserved for initdb.
1555  */
1556 static void
1558 {
1559  if (checkExtensionMembership(&plang->dobj, fout))
1560  return; /* extension membership overrides all else */
1561 
1562  /*
1563  * Only include procedural languages when we are dumping everything.
1564  *
1565  * For from-initdb procedural languages, only include ACLs, as we do for
1566  * the pg_catalog namespace. We need this because procedural languages do
1567  * not live in any namespace.
1568  */
1569  if (!fout->dopt->include_everything)
1570  plang->dobj.dump = DUMP_COMPONENT_NONE;
1571  else
1572  {
1573  if (plang->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1574  plang->dobj.dump = fout->remoteVersion < 90600 ?
1576  else
1577  plang->dobj.dump = DUMP_COMPONENT_ALL;
1578  }
1579 }
1580 
1581 /*
1582  * selectDumpableAccessMethod: policy-setting subroutine
1583  * Mark an access method as to be dumped or not
1584  *
1585  * Access methods do not belong to any particular namespace. To identify
1586  * built-in access methods, we must resort to checking whether the
1587  * method's OID is in the range reserved for initdb.
1588  */
1589 static void
1591 {
1592  if (checkExtensionMembership(&method->dobj, fout))
1593  return; /* extension membership overrides all else */
1594 
1595  /*
1596  * This would be DUMP_COMPONENT_ACL for from-initdb access methods, but
1597  * they do not support ACLs currently.
1598  */
1599  if (method->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1600  method->dobj.dump = DUMP_COMPONENT_NONE;
1601  else
1602  method->dobj.dump = fout->dopt->include_everything ?
1604 }
1605 
1606 /*
1607  * selectDumpableExtension: policy-setting subroutine
1608  * Mark an extension as to be dumped or not
1609  *
1610  * Normally, we dump all extensions, or none of them if include_everything
1611  * is false (i.e., a --schema or --table switch was given). However, in
1612  * binary-upgrade mode it's necessary to skip built-in extensions, since we
1613  * assume those will already be installed in the target database. We identify
1614  * such extensions by their having OIDs in the range reserved for initdb.
1615  */
1616 static void
1618 {
1619  /*
1620  * Use DUMP_COMPONENT_ACL for from-initdb extensions, to allow users to
1621  * change permissions on those objects, if they wish to, and have those
1622  * changes preserved.
1623  */
1624  if (dopt->binary_upgrade && extinfo->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1625  extinfo->dobj.dump = extinfo->dobj.dump_contains = DUMP_COMPONENT_ACL;
1626  else
1627  extinfo->dobj.dump = extinfo->dobj.dump_contains =
1630 }
1631 
1632 /*
1633  * selectDumpablePublicationTable: policy-setting subroutine
1634  * Mark a publication table as to be dumped or not
1635  *
1636  * Publication tables have schemas, but those are ignored in decision making,
1637  * because publications are only dumped when we are dumping everything.
1638  */
1639 static void
1641 {
1642  if (checkExtensionMembership(dobj, fout))
1643  return; /* extension membership overrides all else */
1644 
1645  dobj->dump = fout->dopt->include_everything ?
1647 }
1648 
1649 /*
1650  * selectDumpableObject: policy-setting subroutine
1651  * Mark a generic dumpable object as to be dumped or not
1652  *
1653  * Use this only for object types without a special-case routine above.
1654  */
1655 static void
1657 {
1658  if (checkExtensionMembership(dobj, fout))
1659  return; /* extension membership overrides all else */
1660 
1661  /*
1662  * Default policy is to dump if parent namespace is dumpable, or for
1663  * non-namespace-associated items, dump if we're dumping "everything".
1664  */
1665  if (dobj->namespace)
1666  dobj->dump = dobj->namespace->dobj.dump_contains;
1667  else
1668  dobj->dump = fout->dopt->include_everything ?
1670 }
1671 
1672 /*
1673  * Dump a table's contents for loading using the COPY command
1674  * - this routine is called by the Archiver when it wants the table
1675  * to be dumped.
1676  */
1677 
1678 static int
1679 dumpTableData_copy(Archive *fout, void *dcontext)
1680 {
1681  TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
1682  TableInfo *tbinfo = tdinfo->tdtable;
1683  const char *classname = tbinfo->dobj.name;
1684  const bool hasoids = tbinfo->hasoids;
1685  const bool oids = tdinfo->oids;
1687 
1688  /*
1689  * Note: can't use getThreadLocalPQExpBuffer() here, we're calling fmtId
1690  * which uses it already.
1691  */
1692  PQExpBuffer clistBuf = createPQExpBuffer();
1693  PGconn *conn = GetConnection(fout);
1694  PGresult *res;
1695  int ret;
1696  char *copybuf;
1697  const char *column_list;
1698 
1699  if (g_verbose)
1700  write_msg(NULL, "dumping contents of table \"%s.%s\"\n",
1701  tbinfo->dobj.namespace->dobj.name, classname);
1702 
1703  /*
1704  * Make sure we are in proper schema. We will qualify the table name
1705  * below anyway (in case its name conflicts with a pg_catalog table); but
1706  * this ensures reproducible results in case the table contains regproc,
1707  * regclass, etc columns.
1708  */
1709  selectSourceSchema(fout, tbinfo->dobj.namespace->dobj.name);
1710 
1711  /*
1712  * Specify the column list explicitly so that we have no possibility of
1713  * retrieving data in the wrong column order. (The default column
1714  * ordering of COPY will not be what we want in certain corner cases
1715  * involving ADD COLUMN and inheritance.)
1716  */
1717  column_list = fmtCopyColumnList(tbinfo, clistBuf);
1718 
1719  if (oids && hasoids)
1720  {
1721  appendPQExpBuffer(q, "COPY %s %s WITH OIDS TO stdout;",
1723  tbinfo->dobj.namespace->dobj.name,
1724  classname),
1725  column_list);
1726  }
1727  else if (tdinfo->filtercond)
1728  {
1729  /* Note: this syntax is only supported in 8.2 and up */
1730  appendPQExpBufferStr(q, "COPY (SELECT ");
1731  /* klugery to get rid of parens in column list */
1732  if (strlen(column_list) > 2)
1733  {
1734  appendPQExpBufferStr(q, column_list + 1);
1735  q->data[q->len - 1] = ' ';
1736  }
1737  else
1738  appendPQExpBufferStr(q, "* ");
1739  appendPQExpBuffer(q, "FROM %s %s) TO stdout;",
1741  tbinfo->dobj.namespace->dobj.name,
1742  classname),
1743  tdinfo->filtercond);
1744  }
1745  else
1746  {
1747  appendPQExpBuffer(q, "COPY %s %s TO stdout;",
1749  tbinfo->dobj.namespace->dobj.name,
1750  classname),
1751  column_list);
1752  }
1753  res = ExecuteSqlQuery(fout, q->data, PGRES_COPY_OUT);
1754  PQclear(res);
1755  destroyPQExpBuffer(clistBuf);
1756 
1757  for (;;)
1758  {
1759  ret = PQgetCopyData(conn, &copybuf, 0);
1760 
1761  if (ret < 0)
1762  break; /* done or error */
1763 
1764  if (copybuf)
1765  {
1766  WriteData(fout, copybuf, ret);
1767  PQfreemem(copybuf);
1768  }
1769 
1770  /* ----------
1771  * THROTTLE:
1772  *
1773  * There was considerable discussion in late July, 2000 regarding
1774  * slowing down pg_dump when backing up large tables. Users with both
1775  * slow & fast (multi-processor) machines experienced performance
1776  * degradation when doing a backup.
1777  *
1778  * Initial attempts based on sleeping for a number of ms for each ms
1779  * of work were deemed too complex, then a simple 'sleep in each loop'
1780  * implementation was suggested. The latter failed because the loop
1781  * was too tight. Finally, the following was implemented:
1782  *
1783  * If throttle is non-zero, then
1784  * See how long since the last sleep.
1785  * Work out how long to sleep (based on ratio).
1786  * If sleep is more than 100ms, then
1787  * sleep
1788  * reset timer
1789  * EndIf
1790  * EndIf
1791  *
1792  * where the throttle value was the number of ms to sleep per ms of
1793  * work. The calculation was done in each loop.
1794  *
1795  * Most of the hard work is done in the backend, and this solution
1796  * still did not work particularly well: on slow machines, the ratio
1797  * was 50:1, and on medium paced machines, 1:1, and on fast
1798  * multi-processor machines, it had little or no effect, for reasons
1799  * that were unclear.
1800  *
1801  * Further discussion ensued, and the proposal was dropped.
1802  *
1803  * For those people who want this feature, it can be implemented using
1804  * gettimeofday in each loop, calculating the time since last sleep,
1805  * multiplying that by the sleep ratio, then if the result is more
1806  * than a preset 'minimum sleep time' (say 100ms), call the 'select'
1807  * function to sleep for a subsecond period ie.
1808  *
1809  * select(0, NULL, NULL, NULL, &tvi);
1810  *
1811  * This will return after the interval specified in the structure tvi.
1812  * Finally, call gettimeofday again to save the 'last sleep time'.
1813  * ----------
1814  */
1815  }
1816  archprintf(fout, "\\.\n\n\n");
1817 
1818  if (ret == -2)
1819  {
1820  /* copy data transfer failed */
1821  write_msg(NULL, "Dumping the contents of table \"%s\" failed: PQgetCopyData() failed.\n", classname);
1822  write_msg(NULL, "Error message from server: %s", PQerrorMessage(conn));
1823  write_msg(NULL, "The command was: %s\n", q->data);
1824  exit_nicely(1);
1825  }
1826 
1827  /* Check command status and return to normal libpq state */
1828  res = PQgetResult(conn);
1829  if (PQresultStatus(res) != PGRES_COMMAND_OK)
1830  {
1831  write_msg(NULL, "Dumping the contents of table \"%s\" failed: PQgetResult() failed.\n", classname);
1832  write_msg(NULL, "Error message from server: %s", PQerrorMessage(conn));
1833  write_msg(NULL, "The command was: %s\n", q->data);
1834  exit_nicely(1);
1835  }
1836  PQclear(res);
1837 
1838  /* Do this to ensure we've pumped libpq back to idle state */
1839  if (PQgetResult(conn) != NULL)
1840  write_msg(NULL, "WARNING: unexpected extra results during COPY of table \"%s\"\n",
1841  classname);
1842 
1843  destroyPQExpBuffer(q);
1844  return 1;
1845 }
1846 
1847 /*
1848  * Dump table data using INSERT commands.
1849  *
1850  * Caution: when we restore from an archive file direct to database, the
1851  * INSERT commands emitted by this function have to be parsed by
1852  * pg_backup_db.c's ExecuteSimpleCommands(), which will not handle comments,
1853  * E'' strings, or dollar-quoted strings. So don't emit anything like that.
1854  */
1855 static int
1856 dumpTableData_insert(Archive *fout, void *dcontext)
1857 {
1858  TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
1859  TableInfo *tbinfo = tdinfo->tdtable;
1860  const char *classname = tbinfo->dobj.name;
1861  DumpOptions *dopt = fout->dopt;
1863  PQExpBuffer insertStmt = NULL;
1864  PGresult *res;
1865  int tuple;
1866  int nfields;
1867  int field;
1868 
1869  /*
1870  * Make sure we are in proper schema. We will qualify the table name
1871  * below anyway (in case its name conflicts with a pg_catalog table); but
1872  * this ensures reproducible results in case the table contains regproc,
1873  * regclass, etc columns.
1874  */
1875  selectSourceSchema(fout, tbinfo->dobj.namespace->dobj.name);
1876 
1877  appendPQExpBuffer(q, "DECLARE _pg_dump_cursor CURSOR FOR "
1878  "SELECT * FROM ONLY %s",
1880  tbinfo->dobj.namespace->dobj.name,
1881  classname));
1882  if (tdinfo->filtercond)
1883  appendPQExpBuffer(q, " %s", tdinfo->filtercond);
1884 
1885  ExecuteSqlStatement(fout, q->data);
1886 
1887  while (1)
1888  {
1889  res = ExecuteSqlQuery(fout, "FETCH 100 FROM _pg_dump_cursor",
1890  PGRES_TUPLES_OK);
1891  nfields = PQnfields(res);
1892  for (tuple = 0; tuple < PQntuples(res); tuple++)
1893  {
1894  /*
1895  * First time through, we build as much of the INSERT statement as
1896  * possible in "insertStmt", which we can then just print for each
1897  * line. If the table happens to have zero columns then this will
1898  * be a complete statement, otherwise it will end in "VALUES(" and
1899  * be ready to have the row's column values appended.
1900  */
1901  if (insertStmt == NULL)
1902  {
1903  insertStmt = createPQExpBuffer();
1904  appendPQExpBuffer(insertStmt, "INSERT INTO %s ",
1905  fmtId(classname));
1906 
1907  /* corner case for zero-column table */
1908  if (nfields == 0)
1909  {
1910  appendPQExpBufferStr(insertStmt, "DEFAULT VALUES;\n");
1911  }
1912  else
1913  {
1914  /* append the list of column names if required */
1915  if (dopt->column_inserts)
1916  {
1917  appendPQExpBufferChar(insertStmt, '(');
1918  for (field = 0; field < nfields; field++)
1919  {
1920  if (field > 0)
1921  appendPQExpBufferStr(insertStmt, ", ");
1922  appendPQExpBufferStr(insertStmt,
1923  fmtId(PQfname(res, field)));
1924  }
1925  appendPQExpBufferStr(insertStmt, ") ");
1926  }
1927 
1928  appendPQExpBufferStr(insertStmt, "VALUES (");
1929  }
1930  }
1931 
1932  archputs(insertStmt->data, fout);
1933 
1934  /* if it is zero-column table then we're done */
1935  if (nfields == 0)
1936  continue;
1937 
1938  for (field = 0; field < nfields; field++)
1939  {
1940  if (field > 0)
1941  archputs(", ", fout);
1942  if (PQgetisnull(res, tuple, field))
1943  {
1944  archputs("NULL", fout);
1945  continue;
1946  }
1947 
1948  /* XXX This code is partially duplicated in ruleutils.c */
1949  switch (PQftype(res, field))
1950  {
1951  case INT2OID:
1952  case INT4OID:
1953  case INT8OID:
1954  case OIDOID:
1955  case FLOAT4OID:
1956  case FLOAT8OID:
1957  case NUMERICOID:
1958  {
1959  /*
1960  * These types are printed without quotes unless
1961  * they contain values that aren't accepted by the
1962  * scanner unquoted (e.g., 'NaN'). Note that
1963  * strtod() and friends might accept NaN, so we
1964  * can't use that to test.
1965  *
1966  * In reality we only need to defend against
1967  * infinity and NaN, so we need not get too crazy
1968  * about pattern matching here.
1969  */
1970  const char *s = PQgetvalue(res, tuple, field);
1971 
1972  if (strspn(s, "0123456789 +-eE.") == strlen(s))
1973  archputs(s, fout);
1974  else
1975  archprintf(fout, "'%s'", s);
1976  }
1977  break;
1978 
1979  case BITOID:
1980  case VARBITOID:
1981  archprintf(fout, "B'%s'",
1982  PQgetvalue(res, tuple, field));
1983  break;
1984 
1985  case BOOLOID:
1986  if (strcmp(PQgetvalue(res, tuple, field), "t") == 0)
1987  archputs("true", fout);
1988  else
1989  archputs("false", fout);
1990  break;
1991 
1992  default:
1993  /* All other types are printed as string literals. */
1994  resetPQExpBuffer(q);
1996  PQgetvalue(res, tuple, field),
1997  fout);
1998  archputs(q->data, fout);
1999  break;
2000  }
2001  }
2002  archputs(");\n", fout);
2003  }
2004 
2005  if (PQntuples(res) <= 0)
2006  {
2007  PQclear(res);
2008  break;
2009  }
2010  PQclear(res);
2011  }
2012 
2013  archputs("\n\n", fout);
2014 
2015  ExecuteSqlStatement(fout, "CLOSE _pg_dump_cursor");
2016 
2017  destroyPQExpBuffer(q);
2018  if (insertStmt != NULL)
2019  destroyPQExpBuffer(insertStmt);
2020 
2021  return 1;
2022 }
2023 
2024 
2025 /*
2026  * dumpTableData -
2027  * dump the contents of a single table
2028  *
2029  * Actually, this just makes an ArchiveEntry for the table contents.
2030  */
2031 static void
2033 {
2034  DumpOptions *dopt = fout->dopt;
2035  TableInfo *tbinfo = tdinfo->tdtable;
2036  PQExpBuffer copyBuf = createPQExpBuffer();
2037  PQExpBuffer clistBuf = createPQExpBuffer();
2038  DataDumperPtr dumpFn;
2039  char *copyStmt;
2040 
2041  if (!dopt->dump_inserts)
2042  {
2043  /* Dump/restore using COPY */
2044  dumpFn = dumpTableData_copy;
2045  /* must use 2 steps here 'cause fmtId is nonreentrant */
2046  appendPQExpBuffer(copyBuf, "COPY %s ",
2047  fmtId(tbinfo->dobj.name));
2048  appendPQExpBuffer(copyBuf, "%s %sFROM stdin;\n",
2049  fmtCopyColumnList(tbinfo, clistBuf),
2050  (tdinfo->oids && tbinfo->hasoids) ? "WITH OIDS " : "");
2051  copyStmt = copyBuf->data;
2052  }
2053  else
2054  {
2055  /* Restore using INSERT */
2056  dumpFn = dumpTableData_insert;
2057  copyStmt = NULL;
2058  }
2059 
2060  /*
2061  * Note: although the TableDataInfo is a full DumpableObject, we treat its
2062  * dependency on its table as "special" and pass it to ArchiveEntry now.
2063  * See comments for BuildArchiveDependencies.
2064  */
2065  if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2066  ArchiveEntry(fout, tdinfo->dobj.catId, tdinfo->dobj.dumpId,
2067  tbinfo->dobj.name, tbinfo->dobj.namespace->dobj.name,
2068  NULL, tbinfo->rolname,
2069  false, "TABLE DATA", SECTION_DATA,
2070  "", "", copyStmt,
2071  &(tbinfo->dobj.dumpId), 1,
2072  dumpFn, tdinfo);
2073 
2074  destroyPQExpBuffer(copyBuf);
2075  destroyPQExpBuffer(clistBuf);
2076 }
2077 
2078 /*
2079  * refreshMatViewData -
2080  * load or refresh the contents of a single materialized view
2081  *
2082  * Actually, this just makes an ArchiveEntry for the REFRESH MATERIALIZED VIEW
2083  * statement.
2084  */
2085 static void
2087 {
2088  TableInfo *tbinfo = tdinfo->tdtable;
2089  PQExpBuffer q;
2090 
2091  /* If the materialized view is not flagged as populated, skip this. */
2092  if (!tbinfo->relispopulated)
2093  return;
2094 
2095  q = createPQExpBuffer();
2096 
2097  appendPQExpBuffer(q, "REFRESH MATERIALIZED VIEW %s;\n",
2098  fmtId(tbinfo->dobj.name));
2099 
2100  if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2101  ArchiveEntry(fout,
2102  tdinfo->dobj.catId, /* catalog ID */
2103  tdinfo->dobj.dumpId, /* dump ID */
2104  tbinfo->dobj.name, /* Name */
2105  tbinfo->dobj.namespace->dobj.name, /* Namespace */
2106  NULL, /* Tablespace */
2107  tbinfo->rolname, /* Owner */
2108  false, /* with oids */
2109  "MATERIALIZED VIEW DATA", /* Desc */
2110  SECTION_POST_DATA, /* Section */
2111  q->data, /* Create */
2112  "", /* Del */
2113  NULL, /* Copy */
2114  tdinfo->dobj.dependencies, /* Deps */
2115  tdinfo->dobj.nDeps, /* # Deps */
2116  NULL, /* Dumper */
2117  NULL); /* Dumper Arg */
2118 
2119  destroyPQExpBuffer(q);
2120 }
2121 
2122 /*
2123  * getTableData -
2124  * set up dumpable objects representing the contents of tables
2125  */
2126 static void
2127 getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, bool oids, char relkind)
2128 {
2129  int i;
2130 
2131  for (i = 0; i < numTables; i++)
2132  {
2133  if (tblinfo[i].dobj.dump & DUMP_COMPONENT_DATA &&
2134  (!relkind || tblinfo[i].relkind == relkind))
2135  makeTableDataInfo(dopt, &(tblinfo[i]), oids);
2136  }
2137 }
2138 
2139 /*
2140  * Make a dumpable object for the data of this specific table
2141  *
2142  * Note: we make a TableDataInfo if and only if we are going to dump the
2143  * table data; the "dump" flag in such objects isn't used.
2144  */
2145 static void
2146 makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo, bool oids)
2147 {
2148  TableDataInfo *tdinfo;
2149 
2150  /*
2151  * Nothing to do if we already decided to dump the table. This will
2152  * happen for "config" tables.
2153  */
2154  if (tbinfo->dataObj != NULL)
2155  return;
2156 
2157  /* Skip VIEWs (no data to dump) */
2158  if (tbinfo->relkind == RELKIND_VIEW)
2159  return;
2160  /* Skip FOREIGN TABLEs (no data to dump) */
2161  if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2162  return;
2163  /* Skip partitioned tables (data in partitions) */
2164  if (tbinfo->relkind == RELKIND_PARTITIONED_TABLE)
2165  return;
2166 
2167  /* Don't dump data in unlogged tables, if so requested */
2168  if (tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED &&
2169  dopt->no_unlogged_table_data)
2170  return;
2171 
2172  /* Check that the data is not explicitly excluded */
2173  if (simple_oid_list_member(&tabledata_exclude_oids,
2174  tbinfo->dobj.catId.oid))
2175  return;
2176 
2177  /* OK, let's dump it */
2178  tdinfo = (TableDataInfo *) pg_malloc(sizeof(TableDataInfo));
2179 
2180  if (tbinfo->relkind == RELKIND_MATVIEW)
2181  tdinfo->dobj.objType = DO_REFRESH_MATVIEW;
2182  else if (tbinfo->relkind == RELKIND_SEQUENCE)
2183  tdinfo->dobj.objType = DO_SEQUENCE_SET;
2184  else
2185  tdinfo->dobj.objType = DO_TABLE_DATA;
2186 
2187  /*
2188  * Note: use tableoid 0 so that this object won't be mistaken for
2189  * something that pg_depend entries apply to.
2190  */
2191  tdinfo->dobj.catId.tableoid = 0;
2192  tdinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
2193  AssignDumpId(&tdinfo->dobj);
2194  tdinfo->dobj.name = tbinfo->dobj.name;
2195  tdinfo->dobj.namespace = tbinfo->dobj.namespace;
2196  tdinfo->tdtable = tbinfo;
2197  tdinfo->oids = oids;
2198  tdinfo->filtercond = NULL; /* might get set later */
2199  addObjectDependency(&tdinfo->dobj, tbinfo->dobj.dumpId);
2200 
2201  tbinfo->dataObj = tdinfo;
2202 }
2203 
2204 /*
2205  * The refresh for a materialized view must be dependent on the refresh for
2206  * any materialized view that this one is dependent on.
2207  *
2208  * This must be called after all the objects are created, but before they are
2209  * sorted.
2210  */
2211 static void
2213 {
2214  PQExpBuffer query;
2215  PGresult *res;
2216  int ntups,
2217  i;
2218  int i_classid,
2219  i_objid,
2220  i_refobjid;
2221 
2222  /* No Mat Views before 9.3. */
2223  if (fout->remoteVersion < 90300)
2224  return;
2225 
2226  /* Make sure we are in proper schema */
2227  selectSourceSchema(fout, "pg_catalog");
2228 
2229  query = createPQExpBuffer();
2230 
2231  appendPQExpBufferStr(query, "WITH RECURSIVE w AS "
2232  "( "
2233  "SELECT d1.objid, d2.refobjid, c2.relkind AS refrelkind "
2234  "FROM pg_depend d1 "
2235  "JOIN pg_class c1 ON c1.oid = d1.objid "
2236  "AND c1.relkind = " CppAsString2(RELKIND_MATVIEW)
2237  " JOIN pg_rewrite r1 ON r1.ev_class = d1.objid "
2238  "JOIN pg_depend d2 ON d2.classid = 'pg_rewrite'::regclass "
2239  "AND d2.objid = r1.oid "
2240  "AND d2.refobjid <> d1.objid "
2241  "JOIN pg_class c2 ON c2.oid = d2.refobjid "
2242  "AND c2.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
2244  "WHERE d1.classid = 'pg_class'::regclass "
2245  "UNION "
2246  "SELECT w.objid, d3.refobjid, c3.relkind "
2247  "FROM w "
2248  "JOIN pg_rewrite r3 ON r3.ev_class = w.refobjid "
2249  "JOIN pg_depend d3 ON d3.classid = 'pg_rewrite'::regclass "
2250  "AND d3.objid = r3.oid "
2251  "AND d3.refobjid <> w.refobjid "
2252  "JOIN pg_class c3 ON c3.oid = d3.refobjid "
2253  "AND c3.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
2255  ") "
2256  "SELECT 'pg_class'::regclass::oid AS classid, objid, refobjid "
2257  "FROM w "
2258  "WHERE refrelkind = " CppAsString2(RELKIND_MATVIEW));
2259 
2260  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
2261 
2262  ntups = PQntuples(res);
2263 
2264  i_classid = PQfnumber(res, "classid");
2265  i_objid = PQfnumber(res, "objid");
2266  i_refobjid = PQfnumber(res, "refobjid");
2267 
2268  for (i = 0; i < ntups; i++)
2269  {
2270  CatalogId objId;
2271  CatalogId refobjId;
2272  DumpableObject *dobj;
2273  DumpableObject *refdobj;
2274  TableInfo *tbinfo;
2275  TableInfo *reftbinfo;
2276 
2277  objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
2278  objId.oid = atooid(PQgetvalue(res, i, i_objid));
2279  refobjId.tableoid = objId.tableoid;
2280  refobjId.oid = atooid(PQgetvalue(res, i, i_refobjid));
2281 
2282  dobj = findObjectByCatalogId(objId);
2283  if (dobj == NULL)
2284  continue;
2285 
2286  Assert(dobj->objType == DO_TABLE);
2287  tbinfo = (TableInfo *) dobj;
2288  Assert(tbinfo->relkind == RELKIND_MATVIEW);
2289  dobj = (DumpableObject *) tbinfo->dataObj;
2290  if (dobj == NULL)
2291  continue;
2292  Assert(dobj->objType == DO_REFRESH_MATVIEW);
2293 
2294  refdobj = findObjectByCatalogId(refobjId);
2295  if (refdobj == NULL)
2296  continue;
2297 
2298  Assert(refdobj->objType == DO_TABLE);
2299  reftbinfo = (TableInfo *) refdobj;
2300  Assert(reftbinfo->relkind == RELKIND_MATVIEW);
2301  refdobj = (DumpableObject *) reftbinfo->dataObj;
2302  if (refdobj == NULL)
2303  continue;
2304  Assert(refdobj->objType == DO_REFRESH_MATVIEW);
2305 
2306  addObjectDependency(dobj, refdobj->dumpId);
2307 
2308  if (!reftbinfo->relispopulated)
2309  tbinfo->relispopulated = false;
2310  }
2311 
2312  PQclear(res);
2313 
2314  destroyPQExpBuffer(query);
2315 }
2316 
2317 /*
2318  * getTableDataFKConstraints -
2319  * add dump-order dependencies reflecting foreign key constraints
2320  *
2321  * This code is executed only in a data-only dump --- in schema+data dumps
2322  * we handle foreign key issues by not creating the FK constraints until
2323  * after the data is loaded. In a data-only dump, however, we want to
2324  * order the table data objects in such a way that a table's referenced
2325  * tables are restored first. (In the presence of circular references or
2326  * self-references this may be impossible; we'll detect and complain about
2327  * that during the dependency sorting step.)
2328  */
2329 static void
2331 {
2332  DumpableObject **dobjs;
2333  int numObjs;
2334  int i;
2335 
2336  /* Search through all the dumpable objects for FK constraints */
2337  getDumpableObjects(&dobjs, &numObjs);
2338  for (i = 0; i < numObjs; i++)
2339  {
2340  if (dobjs[i]->objType == DO_FK_CONSTRAINT)
2341  {
2342  ConstraintInfo *cinfo = (ConstraintInfo *) dobjs[i];
2343  TableInfo *ftable;
2344 
2345  /* Not interesting unless both tables are to be dumped */
2346  if (cinfo->contable == NULL ||
2347  cinfo->contable->dataObj == NULL)
2348  continue;
2349  ftable = findTableByOid(cinfo->confrelid);
2350  if (ftable == NULL ||
2351  ftable->dataObj == NULL)
2352  continue;
2353 
2354  /*
2355  * Okay, make referencing table's TABLE_DATA object depend on the
2356  * referenced table's TABLE_DATA object.
2357  */
2359  ftable->dataObj->dobj.dumpId);
2360  }
2361  }
2362  free(dobjs);
2363 }
2364 
2365 
2366 /*
2367  * guessConstraintInheritance:
2368  * In pre-8.4 databases, we can't tell for certain which constraints
2369  * are inherited. We assume a CHECK constraint is inherited if its name
2370  * matches the name of any constraint in the parent. Originally this code
2371  * tried to compare the expression texts, but that can fail for various
2372  * reasons --- for example, if the parent and child tables are in different
2373  * schemas, reverse-listing of function calls may produce different text
2374  * (schema-qualified or not) depending on search path.
2375  *
2376  * In 8.4 and up we can rely on the conislocal field to decide which
2377  * constraints must be dumped; much safer.
2378  *
2379  * This function assumes all conislocal flags were initialized to TRUE.
2380  * It clears the flag on anything that seems to be inherited.
2381  */
2382 static void
2384 {
2385  int i,
2386  j,
2387  k;
2388 
2389  for (i = 0; i < numTables; i++)
2390  {
2391  TableInfo *tbinfo = &(tblinfo[i]);
2392  int numParents;
2393  TableInfo **parents;
2394  TableInfo *parent;
2395 
2396  /* Sequences and views never have parents */
2397  if (tbinfo->relkind == RELKIND_SEQUENCE ||
2398  tbinfo->relkind == RELKIND_VIEW)
2399  continue;
2400 
2401  /* Don't bother computing anything for non-target tables, either */
2402  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
2403  continue;
2404 
2405  numParents = tbinfo->numParents;
2406  parents = tbinfo->parents;
2407 
2408  if (numParents == 0)
2409  continue; /* nothing to see here, move along */
2410 
2411  /* scan for inherited CHECK constraints */
2412  for (j = 0; j < tbinfo->ncheck; j++)
2413  {
2414  ConstraintInfo *constr;
2415 
2416  constr = &(tbinfo->checkexprs[j]);
2417 
2418  for (k = 0; k < numParents; k++)
2419  {
2420  int l;
2421 
2422  parent = parents[k];
2423  for (l = 0; l < parent->ncheck; l++)
2424  {
2425  ConstraintInfo *pconstr = &(parent->checkexprs[l]);
2426 
2427  if (strcmp(pconstr->dobj.name, constr->dobj.name) == 0)
2428  {
2429  constr->conislocal = false;
2430  break;
2431  }
2432  }
2433  if (!constr->conislocal)
2434  break;
2435  }
2436  }
2437  }
2438 }
2439 
2440 
2441 /*
2442  * dumpDatabase:
2443  * dump the database definition
2444  */
2445 static void
2447 {
2448  DumpOptions *dopt = fout->dopt;
2449  PQExpBuffer dbQry = createPQExpBuffer();
2450  PQExpBuffer delQry = createPQExpBuffer();
2451  PQExpBuffer creaQry = createPQExpBuffer();
2452  PGconn *conn = GetConnection(fout);
2453  PGresult *res;
2454  int i_tableoid,
2455  i_oid,
2456  i_dba,
2457  i_encoding,
2458  i_collate,
2459  i_ctype,
2460  i_frozenxid,
2461  i_minmxid,
2462  i_tablespace;
2463  CatalogId dbCatId;
2464  DumpId dbDumpId;
2465  const char *datname,
2466  *dba,
2467  *encoding,
2468  *collate,
2469  *ctype,
2470  *tablespace;
2471  uint32 frozenxid,
2472  minmxid;
2473 
2474  datname = PQdb(conn);
2475 
2476  if (g_verbose)
2477  write_msg(NULL, "saving database definition\n");
2478 
2479  /* Make sure we are in proper schema */
2480  selectSourceSchema(fout, "pg_catalog");
2481 
2482  /* Get the database owner and parameters from pg_database */
2483  if (fout->remoteVersion >= 90300)
2484  {
2485  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
2486  "(%s datdba) AS dba, "
2487  "pg_encoding_to_char(encoding) AS encoding, "
2488  "datcollate, datctype, datfrozenxid, datminmxid, "
2489  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2490  "shobj_description(oid, 'pg_database') AS description "
2491 
2492  "FROM pg_database "
2493  "WHERE datname = ",
2495  appendStringLiteralAH(dbQry, datname, fout);
2496  }
2497  else if (fout->remoteVersion >= 80400)
2498  {
2499  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
2500  "(%s datdba) AS dba, "
2501  "pg_encoding_to_char(encoding) AS encoding, "
2502  "datcollate, datctype, datfrozenxid, 0 AS datminmxid, "
2503  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2504  "shobj_description(oid, 'pg_database') AS description "
2505 
2506  "FROM pg_database "
2507  "WHERE datname = ",
2509  appendStringLiteralAH(dbQry, datname, fout);
2510  }
2511  else if (fout->remoteVersion >= 80200)
2512  {
2513  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
2514  "(%s datdba) AS dba, "
2515  "pg_encoding_to_char(encoding) AS encoding, "
2516  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2517  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2518  "shobj_description(oid, 'pg_database') AS description "
2519 
2520  "FROM pg_database "
2521  "WHERE datname = ",
2523  appendStringLiteralAH(dbQry, datname, fout);
2524  }
2525  else
2526  {
2527  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
2528  "(%s datdba) AS dba, "
2529  "pg_encoding_to_char(encoding) AS encoding, "
2530  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2531  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace "
2532  "FROM pg_database "
2533  "WHERE datname = ",
2535  appendStringLiteralAH(dbQry, datname, fout);
2536  }
2537 
2538  res = ExecuteSqlQueryForSingleRow(fout, dbQry->data);
2539 
2540  i_tableoid = PQfnumber(res, "tableoid");
2541  i_oid = PQfnumber(res, "oid");
2542  i_dba = PQfnumber(res, "dba");
2543  i_encoding = PQfnumber(res, "encoding");
2544  i_collate = PQfnumber(res, "datcollate");
2545  i_ctype = PQfnumber(res, "datctype");
2546  i_frozenxid = PQfnumber(res, "datfrozenxid");
2547  i_minmxid = PQfnumber(res, "datminmxid");
2548  i_tablespace = PQfnumber(res, "tablespace");
2549 
2550  dbCatId.tableoid = atooid(PQgetvalue(res, 0, i_tableoid));
2551  dbCatId.oid = atooid(PQgetvalue(res, 0, i_oid));
2552  dba = PQgetvalue(res, 0, i_dba);
2553  encoding = PQgetvalue(res, 0, i_encoding);
2554  collate = PQgetvalue(res, 0, i_collate);
2555  ctype = PQgetvalue(res, 0, i_ctype);
2556  frozenxid = atooid(PQgetvalue(res, 0, i_frozenxid));
2557  minmxid = atooid(PQgetvalue(res, 0, i_minmxid));
2558  tablespace = PQgetvalue(res, 0, i_tablespace);
2559 
2560  appendPQExpBuffer(creaQry, "CREATE DATABASE %s WITH TEMPLATE = template0",
2561  fmtId(datname));
2562  if (strlen(encoding) > 0)
2563  {
2564  appendPQExpBufferStr(creaQry, " ENCODING = ");
2565  appendStringLiteralAH(creaQry, encoding, fout);
2566  }
2567  if (strlen(collate) > 0)
2568  {
2569  appendPQExpBufferStr(creaQry, " LC_COLLATE = ");
2570  appendStringLiteralAH(creaQry, collate, fout);
2571  }
2572  if (strlen(ctype) > 0)
2573  {
2574  appendPQExpBufferStr(creaQry, " LC_CTYPE = ");
2575  appendStringLiteralAH(creaQry, ctype, fout);
2576  }
2577  if (strlen(tablespace) > 0 && strcmp(tablespace, "pg_default") != 0 &&
2578  !dopt->outputNoTablespaces)
2579  appendPQExpBuffer(creaQry, " TABLESPACE = %s",
2580  fmtId(tablespace));
2581  appendPQExpBufferStr(creaQry, ";\n");
2582 
2583  if (dopt->binary_upgrade)
2584  {
2585  appendPQExpBufferStr(creaQry, "\n-- For binary upgrade, set datfrozenxid and datminmxid.\n");
2586  appendPQExpBuffer(creaQry, "UPDATE pg_catalog.pg_database\n"
2587  "SET datfrozenxid = '%u', datminmxid = '%u'\n"
2588  "WHERE datname = ",
2589  frozenxid, minmxid);
2590  appendStringLiteralAH(creaQry, datname, fout);
2591  appendPQExpBufferStr(creaQry, ";\n");
2592 
2593  }
2594 
2595  appendPQExpBuffer(delQry, "DROP DATABASE %s;\n",
2596  fmtId(datname));
2597 
2598  dbDumpId = createDumpId();
2599 
2600  ArchiveEntry(fout,
2601  dbCatId, /* catalog ID */
2602  dbDumpId, /* dump ID */
2603  datname, /* Name */
2604  NULL, /* Namespace */
2605  NULL, /* Tablespace */
2606  dba, /* Owner */
2607  false, /* with oids */
2608  "DATABASE", /* Desc */
2609  SECTION_PRE_DATA, /* Section */
2610  creaQry->data, /* Create */
2611  delQry->data, /* Del */
2612  NULL, /* Copy */
2613  NULL, /* Deps */
2614  0, /* # Deps */
2615  NULL, /* Dumper */
2616  NULL); /* Dumper Arg */
2617 
2618  /*
2619  * pg_largeobject and pg_largeobject_metadata come from the old system
2620  * intact, so set their relfrozenxids and relminmxids.
2621  */
2622  if (dopt->binary_upgrade)
2623  {
2624  PGresult *lo_res;
2625  PQExpBuffer loFrozenQry = createPQExpBuffer();
2626  PQExpBuffer loOutQry = createPQExpBuffer();
2627  int i_relfrozenxid,
2628  i_relminmxid;
2629 
2630  /*
2631  * pg_largeobject
2632  */
2633  if (fout->remoteVersion >= 90300)
2634  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid\n"
2635  "FROM pg_catalog.pg_class\n"
2636  "WHERE oid = %u;\n",
2638  else
2639  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid\n"
2640  "FROM pg_catalog.pg_class\n"
2641  "WHERE oid = %u;\n",
2643 
2644  lo_res = ExecuteSqlQueryForSingleRow(fout, loFrozenQry->data);
2645 
2646  i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
2647  i_relminmxid = PQfnumber(lo_res, "relminmxid");
2648 
2649  appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, set pg_largeobject relfrozenxid and relminmxid\n");
2650  appendPQExpBuffer(loOutQry, "UPDATE pg_catalog.pg_class\n"
2651  "SET relfrozenxid = '%u', relminmxid = '%u'\n"
2652  "WHERE oid = %u;\n",
2653  atoi(PQgetvalue(lo_res, 0, i_relfrozenxid)),
2654  atoi(PQgetvalue(lo_res, 0, i_relminmxid)),
2656  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2657  "pg_largeobject", NULL, NULL, "",
2658  false, "pg_largeobject", SECTION_PRE_DATA,
2659  loOutQry->data, "", NULL,
2660  NULL, 0,
2661  NULL, NULL);
2662 
2663  PQclear(lo_res);
2664 
2665  /*
2666  * pg_largeobject_metadata
2667  */
2668  if (fout->remoteVersion >= 90000)
2669  {
2670  resetPQExpBuffer(loFrozenQry);
2671  resetPQExpBuffer(loOutQry);
2672 
2673  if (fout->remoteVersion >= 90300)
2674  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid\n"
2675  "FROM pg_catalog.pg_class\n"
2676  "WHERE oid = %u;\n",
2678  else
2679  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid\n"
2680  "FROM pg_catalog.pg_class\n"
2681  "WHERE oid = %u;\n",
2683 
2684  lo_res = ExecuteSqlQueryForSingleRow(fout, loFrozenQry->data);
2685 
2686  i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
2687  i_relminmxid = PQfnumber(lo_res, "relminmxid");
2688 
2689  appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, set pg_largeobject_metadata relfrozenxid and relminmxid\n");
2690  appendPQExpBuffer(loOutQry, "UPDATE pg_catalog.pg_class\n"
2691  "SET relfrozenxid = '%u', relminmxid = '%u'\n"
2692  "WHERE oid = %u;\n",
2693  atoi(PQgetvalue(lo_res, 0, i_relfrozenxid)),
2694  atoi(PQgetvalue(lo_res, 0, i_relminmxid)),
2696  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2697  "pg_largeobject_metadata", NULL, NULL, "",
2698  false, "pg_largeobject_metadata", SECTION_PRE_DATA,
2699  loOutQry->data, "", NULL,
2700  NULL, 0,
2701  NULL, NULL);
2702 
2703  PQclear(lo_res);
2704  }
2705 
2706  destroyPQExpBuffer(loFrozenQry);
2707  destroyPQExpBuffer(loOutQry);
2708  }
2709 
2710  /* Dump DB comment if any */
2711  if (fout->remoteVersion >= 80200)
2712  {
2713  /*
2714  * 8.2 keeps comments on shared objects in a shared table, so we
2715  * cannot use the dumpComment used for other database objects.
2716  */
2717  char *comment = PQgetvalue(res, 0, PQfnumber(res, "description"));
2718 
2719  if (comment && strlen(comment))
2720  {
2721  resetPQExpBuffer(dbQry);
2722 
2723  /*
2724  * Generates warning when loaded into a differently-named
2725  * database.
2726  */
2727  appendPQExpBuffer(dbQry, "COMMENT ON DATABASE %s IS ", fmtId(datname));
2728  appendStringLiteralAH(dbQry, comment, fout);
2729  appendPQExpBufferStr(dbQry, ";\n");
2730 
2731  ArchiveEntry(fout, dbCatId, createDumpId(), datname, NULL, NULL,
2732  dba, false, "COMMENT", SECTION_NONE,
2733  dbQry->data, "", NULL,
2734  &dbDumpId, 1, NULL, NULL);
2735  }
2736  }
2737  else
2738  {
2739  resetPQExpBuffer(dbQry);
2740  appendPQExpBuffer(dbQry, "DATABASE %s", fmtId(datname));
2741  dumpComment(fout, dbQry->data, NULL, "",
2742  dbCatId, 0, dbDumpId);
2743  }
2744 
2745  /* Dump shared security label. */
2746  if (!dopt->no_security_labels && fout->remoteVersion >= 90200)
2747  {
2748  PGresult *shres;
2749  PQExpBuffer seclabelQry;
2750 
2751  seclabelQry = createPQExpBuffer();
2752 
2753  buildShSecLabelQuery(conn, "pg_database", dbCatId.oid, seclabelQry);
2754  shres = ExecuteSqlQuery(fout, seclabelQry->data, PGRES_TUPLES_OK);
2755  resetPQExpBuffer(seclabelQry);
2756  emitShSecLabels(conn, shres, seclabelQry, "DATABASE", datname);
2757  if (strlen(seclabelQry->data))
2758  ArchiveEntry(fout, dbCatId, createDumpId(), datname, NULL, NULL,
2759  dba, false, "SECURITY LABEL", SECTION_NONE,
2760  seclabelQry->data, "", NULL,
2761  &dbDumpId, 1, NULL, NULL);
2762  destroyPQExpBuffer(seclabelQry);
2763  PQclear(shres);
2764  }
2765 
2766  PQclear(res);
2767 
2768  destroyPQExpBuffer(dbQry);
2769  destroyPQExpBuffer(delQry);
2770  destroyPQExpBuffer(creaQry);
2771 }
2772 
2773 /*
2774  * dumpEncoding: put the correct encoding into the archive
2775  */
2776 static void
2778 {
2779  const char *encname = pg_encoding_to_char(AH->encoding);
2781 
2782  if (g_verbose)
2783  write_msg(NULL, "saving encoding = %s\n", encname);
2784 
2785  appendPQExpBufferStr(qry, "SET client_encoding = ");
2786  appendStringLiteralAH(qry, encname, AH);
2787  appendPQExpBufferStr(qry, ";\n");
2788 
2789  ArchiveEntry(AH, nilCatalogId, createDumpId(),
2790  "ENCODING", NULL, NULL, "",
2791  false, "ENCODING", SECTION_PRE_DATA,
2792  qry->data, "", NULL,
2793  NULL, 0,
2794  NULL, NULL);
2795 
2796  destroyPQExpBuffer(qry);
2797 }
2798 
2799 
2800 /*
2801  * dumpStdStrings: put the correct escape string behavior into the archive
2802  */
2803 static void
2805 {
2806  const char *stdstrings = AH->std_strings ? "on" : "off";
2808 
2809  if (g_verbose)
2810  write_msg(NULL, "saving standard_conforming_strings = %s\n",
2811  stdstrings);
2812 
2813  appendPQExpBuffer(qry, "SET standard_conforming_strings = '%s';\n",
2814  stdstrings);
2815 
2816  ArchiveEntry(AH, nilCatalogId, createDumpId(),
2817  "STDSTRINGS", NULL, NULL, "",
2818  false, "STDSTRINGS", SECTION_PRE_DATA,
2819  qry->data, "", NULL,
2820  NULL, 0,
2821  NULL, NULL);
2822 
2823  destroyPQExpBuffer(qry);
2824 }
2825 
2826 
2827 /*
2828  * getBlobs:
2829  * Collect schema-level data about large objects
2830  */
2831 static void
2833 {
2834  DumpOptions *dopt = fout->dopt;
2835  PQExpBuffer blobQry = createPQExpBuffer();
2836  BlobInfo *binfo;
2837  DumpableObject *bdata;
2838  PGresult *res;
2839  int ntups;
2840  int i;
2841  int i_oid;
2842  int i_lomowner;
2843  int i_lomacl;
2844  int i_rlomacl;
2845  int i_initlomacl;
2846  int i_initrlomacl;
2847 
2848  /* Verbose message */
2849  if (g_verbose)
2850  write_msg(NULL, "reading large objects\n");
2851 
2852  /* Make sure we are in proper schema */
2853  selectSourceSchema(fout, "pg_catalog");
2854 
2855  /* Fetch BLOB OIDs, and owner/ACL data if >= 9.0 */
2856  if (fout->remoteVersion >= 90600)
2857  {
2858  PQExpBuffer acl_subquery = createPQExpBuffer();
2859  PQExpBuffer racl_subquery = createPQExpBuffer();
2860  PQExpBuffer init_acl_subquery = createPQExpBuffer();
2861  PQExpBuffer init_racl_subquery = createPQExpBuffer();
2862 
2863  buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
2864  init_racl_subquery, "l.lomacl", "l.lomowner", "'L'",
2865  dopt->binary_upgrade);
2866 
2867  appendPQExpBuffer(blobQry,
2868  "SELECT l.oid, (%s l.lomowner) AS rolname, "
2869  "%s AS lomacl, "
2870  "%s AS rlomacl, "
2871  "%s AS initlomacl, "
2872  "%s AS initrlomacl "
2873  "FROM pg_largeobject_metadata l "
2874  "LEFT JOIN pg_init_privs pip ON "
2875  "(l.oid = pip.objoid "
2876  "AND pip.classoid = 'pg_largeobject'::regclass "
2877  "AND pip.objsubid = 0) ",
2879  acl_subquery->data,
2880  racl_subquery->data,
2881  init_acl_subquery->data,
2882  init_racl_subquery->data);
2883 
2884  destroyPQExpBuffer(acl_subquery);
2885  destroyPQExpBuffer(racl_subquery);
2886  destroyPQExpBuffer(init_acl_subquery);
2887  destroyPQExpBuffer(init_racl_subquery);
2888  }
2889  else if (fout->remoteVersion >= 90000)
2890  appendPQExpBuffer(blobQry,
2891  "SELECT oid, (%s lomowner) AS rolname, lomacl, "
2892  "NULL AS rlomacl, NULL AS initlomacl, "
2893  "NULL AS initrlomacl "
2894  " FROM pg_largeobject_metadata",
2896  else
2897  appendPQExpBufferStr(blobQry,
2898  "SELECT DISTINCT loid AS oid, "
2899  "NULL::name AS rolname, NULL::oid AS lomacl, "
2900  "NULL::oid AS rlomacl, NULL::oid AS initlomacl, "
2901  "NULL::oid AS initrlomacl "
2902  " FROM pg_largeobject");
2903 
2904  res = ExecuteSqlQuery(fout, blobQry->data, PGRES_TUPLES_OK);
2905 
2906  i_oid = PQfnumber(res, "oid");
2907  i_lomowner = PQfnumber(res, "rolname");
2908  i_lomacl = PQfnumber(res, "lomacl");
2909  i_rlomacl = PQfnumber(res, "rlomacl");
2910  i_initlomacl = PQfnumber(res, "initlomacl");
2911  i_initrlomacl = PQfnumber(res, "initrlomacl");
2912 
2913  ntups = PQntuples(res);
2914 
2915  /*
2916  * Each large object has its own BLOB archive entry.
2917  */
2918  binfo = (BlobInfo *) pg_malloc(ntups * sizeof(BlobInfo));
2919 
2920  for (i = 0; i < ntups; i++)
2921  {
2922  binfo[i].dobj.objType = DO_BLOB;
2924  binfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
2925  AssignDumpId(&binfo[i].dobj);
2926 
2927  binfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oid));
2928  binfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_lomowner));
2929  binfo[i].blobacl = pg_strdup(PQgetvalue(res, i, i_lomacl));
2930  binfo[i].rblobacl = pg_strdup(PQgetvalue(res, i, i_rlomacl));
2931  binfo[i].initblobacl = pg_strdup(PQgetvalue(res, i, i_initlomacl));
2932  binfo[i].initrblobacl = pg_strdup(PQgetvalue(res, i, i_initrlomacl));
2933 
2934  if (PQgetisnull(res, i, i_lomacl) &&
2935  PQgetisnull(res, i, i_rlomacl) &&
2936  PQgetisnull(res, i, i_initlomacl) &&
2937  PQgetisnull(res, i, i_initrlomacl))
2938  binfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
2939 
2940  /*
2941  * In binary-upgrade mode for blobs, we do *not* dump out the data or
2942  * the ACLs, should any exist. The data and ACL (if any) will be
2943  * copied by pg_upgrade, which simply copies the pg_largeobject and
2944  * pg_largeobject_metadata tables.
2945  *
2946  * We *do* dump out the definition of the blob because we need that to
2947  * make the restoration of the comments, and anything else, work since
2948  * pg_upgrade copies the files behind pg_largeobject and
2949  * pg_largeobject_metadata after the dump is restored.
2950  */
2951  if (dopt->binary_upgrade)
2953  }
2954 
2955  /*
2956  * If we have any large objects, a "BLOBS" archive entry is needed. This
2957  * is just a placeholder for sorting; it carries no data now.
2958  */
2959  if (ntups > 0)
2960  {
2961  bdata = (DumpableObject *) pg_malloc(sizeof(DumpableObject));
2962  bdata->objType = DO_BLOB_DATA;
2963  bdata->catId = nilCatalogId;
2964  AssignDumpId(bdata);
2965  bdata->name = pg_strdup("BLOBS");
2966  }
2967 
2968  PQclear(res);
2969  destroyPQExpBuffer(blobQry);
2970 }
2971 
2972 /*
2973  * dumpBlob
2974  *
2975  * dump the definition (metadata) of the given large object
2976  */
2977 static void
2978 dumpBlob(Archive *fout, BlobInfo *binfo)
2979 {
2980  PQExpBuffer cquery = createPQExpBuffer();
2981  PQExpBuffer dquery = createPQExpBuffer();
2982 
2983  appendPQExpBuffer(cquery,
2984  "SELECT pg_catalog.lo_create('%s');\n",
2985  binfo->dobj.name);
2986 
2987  appendPQExpBuffer(dquery,
2988  "SELECT pg_catalog.lo_unlink('%s');\n",
2989  binfo->dobj.name);
2990 
2991  if (binfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
2992  ArchiveEntry(fout, binfo->dobj.catId, binfo->dobj.dumpId,
2993  binfo->dobj.name,
2994  NULL, NULL,
2995  binfo->rolname, false,
2996  "BLOB", SECTION_PRE_DATA,
2997  cquery->data, dquery->data, NULL,
2998  NULL, 0,
2999  NULL, NULL);
3000 
3001  /* set up tag for comment and/or ACL */
3002  resetPQExpBuffer(cquery);
3003  appendPQExpBuffer(cquery, "LARGE OBJECT %s", binfo->dobj.name);
3004 
3005  /* Dump comment if any */
3006  if (binfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3007  dumpComment(fout, cquery->data,
3008  NULL, binfo->rolname,
3009  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3010 
3011  /* Dump security label if any */
3012  if (binfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3013  dumpSecLabel(fout, cquery->data,
3014  NULL, binfo->rolname,
3015  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3016 
3017  /* Dump ACL if any */
3018  if (binfo->blobacl && (binfo->dobj.dump & DUMP_COMPONENT_ACL))
3019  dumpACL(fout, binfo->dobj.catId, binfo->dobj.dumpId, "LARGE OBJECT",
3020  binfo->dobj.name, NULL, cquery->data,
3021  NULL, binfo->rolname, binfo->blobacl, binfo->rblobacl,
3022  binfo->initblobacl, binfo->initrblobacl);
3023 
3024  destroyPQExpBuffer(cquery);
3025  destroyPQExpBuffer(dquery);
3026 }
3027 
3028 /*
3029  * dumpBlobs:
3030  * dump the data contents of all large objects
3031  */
3032 static int
3033 dumpBlobs(Archive *fout, void *arg)
3034 {
3035  const char *blobQry;
3036  const char *blobFetchQry;
3037  PGconn *conn = GetConnection(fout);
3038  PGresult *res;
3039  char buf[LOBBUFSIZE];
3040  int ntups;
3041  int i;
3042  int cnt;
3043 
3044  if (g_verbose)
3045  write_msg(NULL, "saving large objects\n");
3046 
3047  /* Make sure we are in proper schema */
3048  selectSourceSchema(fout, "pg_catalog");
3049 
3050  /*
3051  * Currently, we re-fetch all BLOB OIDs using a cursor. Consider scanning
3052  * the already-in-memory dumpable objects instead...
3053  */
3054  if (fout->remoteVersion >= 90000)
3055  blobQry = "DECLARE bloboid CURSOR FOR SELECT oid FROM pg_largeobject_metadata";
3056  else
3057  blobQry = "DECLARE bloboid CURSOR FOR SELECT DISTINCT loid FROM pg_largeobject";
3058 
3059  ExecuteSqlStatement(fout, blobQry);
3060 
3061  /* Command to fetch from cursor */
3062  blobFetchQry = "FETCH 1000 IN bloboid";
3063 
3064  do
3065  {
3066  /* Do a fetch */
3067  res = ExecuteSqlQuery(fout, blobFetchQry, PGRES_TUPLES_OK);
3068 
3069  /* Process the tuples, if any */
3070  ntups = PQntuples(res);
3071  for (i = 0; i < ntups; i++)
3072  {
3073  Oid blobOid;
3074  int loFd;
3075 
3076  blobOid = atooid(PQgetvalue(res, i, 0));
3077  /* Open the BLOB */
3078  loFd = lo_open(conn, blobOid, INV_READ);
3079  if (loFd == -1)
3080  exit_horribly(NULL, "could not open large object %u: %s",
3081  blobOid, PQerrorMessage(conn));
3082 
3083  StartBlob(fout, blobOid);
3084 
3085  /* Now read it in chunks, sending data to archive */
3086  do
3087  {
3088  cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
3089  if (cnt < 0)
3090  exit_horribly(NULL, "error reading large object %u: %s",
3091  blobOid, PQerrorMessage(conn));
3092 
3093  WriteData(fout, buf, cnt);
3094  } while (cnt > 0);
3095 
3096  lo_close(conn, loFd);
3097 
3098  EndBlob(fout, blobOid);
3099  }
3100 
3101  PQclear(res);
3102  } while (ntups > 0);
3103 
3104  return 1;
3105 }
3106 
3107 /*
3108  * getPolicies
3109  * get information about policies on a dumpable table.
3110  */
3111 void
3112 getPolicies(Archive *fout, TableInfo tblinfo[], int numTables)
3113 {
3114  PQExpBuffer query;
3115  PGresult *res;
3116  PolicyInfo *polinfo;
3117  int i_oid;
3118  int i_tableoid;
3119  int i_polname;
3120  int i_polcmd;
3121  int i_polpermissive;
3122  int i_polroles;
3123  int i_polqual;
3124  int i_polwithcheck;
3125  int i,
3126  j,
3127  ntups;
3128 
3129  if (fout->remoteVersion < 90500)
3130  return;
3131 
3132  query = createPQExpBuffer();
3133 
3134  for (i = 0; i < numTables; i++)
3135  {
3136  TableInfo *tbinfo = &tblinfo[i];
3137 
3138  /* Ignore row security on tables not to be dumped */
3139  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_POLICY))
3140  continue;
3141 
3142  if (g_verbose)
3143  write_msg(NULL, "reading row security enabled for table \"%s.%s\"\n",
3144  tbinfo->dobj.namespace->dobj.name,
3145  tbinfo->dobj.name);
3146 
3147  /*
3148  * Get row security enabled information for the table. We represent
3149  * RLS enabled on a table by creating PolicyInfo object with an empty
3150  * policy.
3151  */
3152  if (tbinfo->rowsec)
3153  {
3154  /*
3155  * Note: use tableoid 0 so that this object won't be mistaken for
3156  * something that pg_depend entries apply to.
3157  */
3158  polinfo = pg_malloc(sizeof(PolicyInfo));
3159  polinfo->dobj.objType = DO_POLICY;
3160  polinfo->dobj.catId.tableoid = 0;
3161  polinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
3162  AssignDumpId(&polinfo->dobj);
3163  polinfo->dobj.namespace = tbinfo->dobj.namespace;
3164  polinfo->dobj.name = pg_strdup(tbinfo->dobj.name);
3165  polinfo->poltable = tbinfo;
3166  polinfo->polname = NULL;
3167  polinfo->polcmd = '\0';
3168  polinfo->polpermissive = 0;
3169  polinfo->polroles = NULL;
3170  polinfo->polqual = NULL;
3171  polinfo->polwithcheck = NULL;
3172  }
3173 
3174  if (g_verbose)
3175  write_msg(NULL, "reading policies for table \"%s.%s\"\n",
3176  tbinfo->dobj.namespace->dobj.name,
3177  tbinfo->dobj.name);
3178 
3179  /*
3180  * select table schema to ensure regproc name is qualified if needed
3181  */
3182  selectSourceSchema(fout, tbinfo->dobj.namespace->dobj.name);
3183 
3184  resetPQExpBuffer(query);
3185 
3186  /* Get the policies for the table. */
3187  if (fout->remoteVersion >= 100000)
3188  appendPQExpBuffer(query,
3189  "SELECT oid, tableoid, pol.polname, pol.polcmd, pol.polpermissive, "
3190  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
3191  " pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
3192  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
3193  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
3194  "FROM pg_catalog.pg_policy pol "
3195  "WHERE polrelid = '%u'",
3196  tbinfo->dobj.catId.oid);
3197  else
3198  appendPQExpBuffer(query,
3199  "SELECT oid, tableoid, pol.polname, pol.polcmd, 't' as polpermissive, "
3200  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
3201  " pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
3202  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
3203  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
3204  "FROM pg_catalog.pg_policy pol "
3205  "WHERE polrelid = '%u'",
3206  tbinfo->dobj.catId.oid);
3207  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3208 
3209  ntups = PQntuples(res);
3210 
3211  if (ntups == 0)
3212  {
3213  /*
3214  * No explicit policies to handle (only the default-deny policy,
3215  * which is handled as part of the table definition). Clean up
3216  * and return.
3217  */
3218  PQclear(res);
3219  continue;
3220  }
3221 
3222  i_oid = PQfnumber(res, "oid");
3223  i_tableoid = PQfnumber(res, "tableoid");
3224  i_polname = PQfnumber(res, "polname");
3225  i_polcmd = PQfnumber(res, "polcmd");
3226  i_polpermissive = PQfnumber(res, "polpermissive");
3227  i_polroles = PQfnumber(res, "polroles");
3228  i_polqual = PQfnumber(res, "polqual");
3229  i_polwithcheck = PQfnumber(res, "polwithcheck");
3230 
3231  polinfo = pg_malloc(ntups * sizeof(PolicyInfo));
3232 
3233  for (j = 0; j < ntups; j++)
3234  {
3235  polinfo[j].dobj.objType = DO_POLICY;
3236  polinfo[j].dobj.catId.tableoid =
3237  atooid(PQgetvalue(res, j, i_tableoid));
3238  polinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
3239  AssignDumpId(&polinfo[j].dobj);
3240  polinfo[j].dobj.namespace = tbinfo->dobj.namespace;
3241  polinfo[j].poltable = tbinfo;
3242  polinfo[j].polname = pg_strdup(PQgetvalue(res, j, i_polname));
3243  polinfo[j].dobj.name = pg_strdup(polinfo[j].polname);
3244 
3245  polinfo[j].polcmd = *(PQgetvalue(res, j, i_polcmd));
3246  polinfo[j].polpermissive = *(PQgetvalue(res, j, i_polpermissive)) == 't';
3247 
3248  if (PQgetisnull(res, j, i_polroles))
3249  polinfo[j].polroles = NULL;
3250  else
3251  polinfo[j].polroles = pg_strdup(PQgetvalue(res, j, i_polroles));
3252 
3253  if (PQgetisnull(res, j, i_polqual))
3254  polinfo[j].polqual = NULL;
3255  else
3256  polinfo[j].polqual = pg_strdup(PQgetvalue(res, j, i_polqual));
3257 
3258  if (PQgetisnull(res, j, i_polwithcheck))
3259  polinfo[j].polwithcheck = NULL;
3260  else
3261  polinfo[j].polwithcheck
3262  = pg_strdup(PQgetvalue(res, j, i_polwithcheck));
3263  }
3264  PQclear(res);
3265  }
3266  destroyPQExpBuffer(query);
3267 }
3268 
3269 /*
3270  * dumpPolicy
3271  * dump the definition of the given policy
3272  */
3273 static void
3275 {
3276  DumpOptions *dopt = fout->dopt;
3277  TableInfo *tbinfo = polinfo->poltable;
3278  PQExpBuffer query;
3279  PQExpBuffer delqry;
3280  const char *cmd;
3281  char *tag;
3282 
3283  if (dopt->dataOnly)
3284  return;
3285 
3286  /*
3287  * If polname is NULL, then this record is just indicating that ROW LEVEL
3288  * SECURITY is enabled for the table. Dump as ALTER TABLE <table> ENABLE
3289  * ROW LEVEL SECURITY.
3290  */
3291  if (polinfo->polname == NULL)
3292  {
3293  query = createPQExpBuffer();
3294 
3295  appendPQExpBuffer(query, "ALTER TABLE %s ENABLE ROW LEVEL SECURITY;",
3296  fmtId(polinfo->dobj.name));
3297 
3298  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3299  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3300  polinfo->dobj.name,
3301  polinfo->dobj.namespace->dobj.name,
3302  NULL,
3303  tbinfo->rolname, false,
3304  "ROW SECURITY", SECTION_POST_DATA,
3305  query->data, "", NULL,
3306  NULL, 0,
3307  NULL, NULL);
3308 
3309  destroyPQExpBuffer(query);
3310  return;
3311  }
3312 
3313  if (polinfo->polcmd == '*')
3314  cmd = "";
3315  else if (polinfo->polcmd == 'r')
3316  cmd = " FOR SELECT";
3317  else if (polinfo->polcmd == 'a')
3318  cmd = " FOR INSERT";
3319  else if (polinfo->polcmd == 'w')
3320  cmd = " FOR UPDATE";
3321  else if (polinfo->polcmd == 'd')
3322  cmd = " FOR DELETE";
3323  else
3324  {
3325  write_msg(NULL, "unexpected policy command type: %c\n",
3326  polinfo->polcmd);
3327  exit_nicely(1);
3328  }
3329 
3330  query = createPQExpBuffer();
3331  delqry = createPQExpBuffer();
3332 
3333  appendPQExpBuffer(query, "CREATE POLICY %s", fmtId(polinfo->polname));
3334 
3335  appendPQExpBuffer(query, " ON %s%s%s", fmtId(tbinfo->dobj.name),
3336  !polinfo->polpermissive ? " AS RESTRICTIVE" : "", cmd);
3337 
3338  if (polinfo->polroles != NULL)
3339  appendPQExpBuffer(query, " TO %s", polinfo->polroles);
3340 
3341  if (polinfo->polqual != NULL)
3342  appendPQExpBuffer(query, " USING (%s)", polinfo->polqual);
3343 
3344  if (polinfo->polwithcheck != NULL)
3345  appendPQExpBuffer(query, " WITH CHECK (%s)", polinfo->polwithcheck);
3346 
3347  appendPQExpBuffer(query, ";\n");
3348 
3349  appendPQExpBuffer(delqry, "DROP POLICY %s", fmtId(polinfo->polname));
3350  appendPQExpBuffer(delqry, " ON %s;\n", fmtId(tbinfo->dobj.name));
3351 
3352  tag = psprintf("%s %s", tbinfo->dobj.name, polinfo->dobj.name);
3353 
3354  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3355  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3356  tag,
3357  polinfo->dobj.namespace->dobj.name,
3358  NULL,
3359  tbinfo->rolname, false,
3360  "POLICY", SECTION_POST_DATA,
3361  query->data, delqry->data, NULL,
3362  NULL, 0,
3363  NULL, NULL);
3364 
3365  free(tag);
3366  destroyPQExpBuffer(query);
3367  destroyPQExpBuffer(delqry);
3368 }
3369 
3370 /*
3371  * getPublications
3372  * get information about publications
3373  */
3374 void
3376 {
3377  PQExpBuffer query;
3378  PGresult *res;
3379  PublicationInfo *pubinfo;
3380  int i_tableoid;
3381  int i_oid;
3382  int i_pubname;
3383  int i_rolname;
3384  int i_puballtables;
3385  int i_pubinsert;
3386  int i_pubupdate;
3387  int i_pubdelete;
3388  int i,
3389  ntups;
3390 
3391  if (fout->remoteVersion < 100000)
3392  return;
3393 
3394  query = createPQExpBuffer();
3395 
3396  resetPQExpBuffer(query);
3397 
3398  /* Get the publications. */
3399  appendPQExpBuffer(query,
3400  "SELECT p.tableoid, p.oid, p.pubname, "
3401  "(%s p.pubowner) AS rolname, "
3402  "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete "
3403  "FROM pg_catalog.pg_publication p",
3405 
3406  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3407 
3408  ntups = PQntuples(res);
3409 
3410  i_tableoid = PQfnumber(res, "tableoid");
3411  i_oid = PQfnumber(res, "oid");
3412  i_pubname = PQfnumber(res, "pubname");
3413  i_rolname = PQfnumber(res, "rolname");
3414  i_puballtables = PQfnumber(res, "puballtables");
3415  i_pubinsert = PQfnumber(res, "pubinsert");
3416  i_pubupdate = PQfnumber(res, "pubupdate");
3417  i_pubdelete = PQfnumber(res, "pubdelete");
3418 
3419  pubinfo = pg_malloc(ntups * sizeof(PublicationInfo));
3420 
3421  for (i = 0; i < ntups; i++)
3422  {
3423  pubinfo[i].dobj.objType = DO_PUBLICATION;
3424  pubinfo[i].dobj.catId.tableoid =
3425  atooid(PQgetvalue(res, i, i_tableoid));
3426  pubinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
3427  AssignDumpId(&pubinfo[i].dobj);
3428  pubinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_pubname));
3429  pubinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
3430  pubinfo[i].puballtables =
3431  (strcmp(PQgetvalue(res, i, i_puballtables), "t") == 0);
3432  pubinfo[i].pubinsert =
3433  (strcmp(PQgetvalue(res, i, i_pubinsert), "t") == 0);
3434  pubinfo[i].pubupdate =
3435  (strcmp(PQgetvalue(res, i, i_pubupdate), "t") == 0);
3436  pubinfo[i].pubdelete =
3437  (strcmp(PQgetvalue(res, i, i_pubdelete), "t") == 0);
3438 
3439  if (strlen(pubinfo[i].rolname) == 0)
3440  write_msg(NULL, "WARNING: owner of publication \"%s\" appears to be invalid\n",
3441  pubinfo[i].dobj.name);
3442 
3443  /* Decide whether we want to dump it */
3444  selectDumpableObject(&(pubinfo[i].dobj), fout);
3445  }
3446  PQclear(res);
3447 
3448  destroyPQExpBuffer(query);
3449 }
3450 
3451 /*
3452  * dumpPublication
3453  * dump the definition of the given publication
3454  */
3455 static void
3457 {
3458  PQExpBuffer delq;
3459  PQExpBuffer query;
3460 
3461  if (!(pubinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3462  return;
3463 
3464  delq = createPQExpBuffer();
3465  query = createPQExpBuffer();
3466 
3467  appendPQExpBuffer(delq, "DROP PUBLICATION %s;\n",
3468  fmtId(pubinfo->dobj.name));
3469 
3470  appendPQExpBuffer(query, "CREATE PUBLICATION %s",
3471  fmtId(pubinfo->dobj.name));
3472 
3473  if (pubinfo->puballtables)
3474  appendPQExpBufferStr(query, " FOR ALL TABLES");
3475 
3476  appendPQExpBufferStr(query, " WITH (");
3477  if (pubinfo->pubinsert)
3478  appendPQExpBufferStr(query, "PUBLISH INSERT");
3479  else
3480  appendPQExpBufferStr(query, "NOPUBLISH INSERT");
3481 
3482  if (pubinfo->pubupdate)
3483  appendPQExpBufferStr(query, ", PUBLISH UPDATE");
3484  else
3485  appendPQExpBufferStr(query, ", NOPUBLISH UPDATE");
3486 
3487  if (pubinfo->pubdelete)
3488  appendPQExpBufferStr(query, ", PUBLISH DELETE");
3489  else
3490  appendPQExpBufferStr(query, ", NOPUBLISH DELETE");
3491 
3492  appendPQExpBufferStr(query, ");\n");
3493 
3494  ArchiveEntry(fout, pubinfo->dobj.catId, pubinfo->dobj.dumpId,
3495  pubinfo->dobj.name,
3496  NULL,
3497  NULL,
3498  pubinfo->rolname, false,
3499  "PUBLICATION", SECTION_POST_DATA,
3500  query->data, delq->data, NULL,
3501  NULL, 0,
3502  NULL, NULL);
3503 
3504  destroyPQExpBuffer(delq);
3505  destroyPQExpBuffer(query);
3506 }
3507 
3508 /*
3509  * getPublicationTables
3510  * get information about publication membership for dumpable tables.
3511  */
3512 void
3514 {
3515  PQExpBuffer query;
3516  PGresult *res;
3517  PublicationRelInfo *pubrinfo;
3518  int i_tableoid;
3519  int i_oid;
3520  int i_pubname;
3521  int i,
3522  j,
3523  ntups;
3524 
3525  if (fout->remoteVersion < 100000)
3526  return;
3527 
3528  query = createPQExpBuffer();
3529 
3530  for (i = 0; i < numTables; i++)
3531  {
3532  TableInfo *tbinfo = &tblinfo[i];
3533 
3534  /* Only plain tables can be aded to publications. */
3535  if (tbinfo->relkind != RELKIND_RELATION)
3536  continue;
3537 
3538  /*
3539  * Ignore publication membership of tables whose definitions are
3540  * not to be dumped.
3541  */
3542  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3543  continue;
3544 
3545  if (g_verbose)
3546  write_msg(NULL, "reading publication membership for table \"%s.%s\"\n",
3547  tbinfo->dobj.namespace->dobj.name,
3548  tbinfo->dobj.name);
3549 
3550  resetPQExpBuffer(query);
3551 
3552  /* Get the publication membership for the table. */
3553  appendPQExpBuffer(query,
3554  "SELECT pr.tableoid, pr.oid, p.pubname "
3555  "FROM pg_catalog.pg_publication_rel pr,"
3556  " pg_catalog.pg_publication p "
3557  "WHERE pr.prrelid = '%u'"
3558  " AND p.oid = pr.prpubid",
3559  tbinfo->dobj.catId.oid);
3560  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3561 
3562  ntups = PQntuples(res);
3563 
3564  if (ntups == 0)
3565  {
3566  /*
3567  * Table is not member of any publications. Clean up and return.
3568  */
3569  PQclear(res);
3570  continue;
3571  }
3572 
3573  i_tableoid = PQfnumber(res, "tableoid");
3574  i_oid = PQfnumber(res, "oid");
3575  i_pubname = PQfnumber(res, "pubname");
3576 
3577  pubrinfo = pg_malloc(ntups * sizeof(PublicationRelInfo));
3578 
3579  for (j = 0; j < ntups; j++)
3580  {
3581  pubrinfo[j].dobj.objType = DO_PUBLICATION_REL;
3582  pubrinfo[j].dobj.catId.tableoid =
3583  atooid(PQgetvalue(res, j, i_tableoid));
3584  pubrinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
3585  AssignDumpId(&pubrinfo[j].dobj);
3586  pubrinfo[j].dobj.namespace = tbinfo->dobj.namespace;
3587  pubrinfo[j].dobj.name = tbinfo->dobj.name;
3588  pubrinfo[j].pubname = pg_strdup(PQgetvalue(res, j, i_pubname));
3589  pubrinfo[j].pubtable = tbinfo;
3590 
3591  /* Decide whether we want to dump it */
3592  selectDumpablePublicationTable(&(pubrinfo[j].dobj), fout);
3593  }
3594  PQclear(res);
3595  }
3596  destroyPQExpBuffer(query);
3597 }
3598 
3599 /*
3600  * dumpPublicationTable
3601  * dump the definition of the given publication table mapping
3602  */
3603 static void
3605 {
3606  TableInfo *tbinfo = pubrinfo->pubtable;
3607  PQExpBuffer query;
3608  char *tag;
3609 
3610  if (!(pubrinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3611  return;
3612 
3613  tag = psprintf("%s %s", pubrinfo->pubname, tbinfo->dobj.name);
3614 
3615  query = createPQExpBuffer();
3616 
3617  appendPQExpBuffer(query, "ALTER PUBLICATION %s ADD TABLE",
3618  fmtId(pubrinfo->pubname));
3619  appendPQExpBuffer(query, " %s;",
3620  fmtId(tbinfo->dobj.name));
3621 
3622  /*
3623  * There is no point in creating drop query as drop query as the drop
3624  * is done by table drop.
3625  */
3626  ArchiveEntry(fout, pubrinfo->dobj.catId, pubrinfo->dobj.dumpId,
3627  tag,
3628  tbinfo->dobj.namespace->dobj.name,
3629  NULL,
3630  "", false,
3631  "PUBLICATION TABLE", SECTION_POST_DATA,
3632  query->data, "", NULL,
3633  NULL, 0,
3634  NULL, NULL);
3635 
3636  free(tag);
3637  destroyPQExpBuffer(query);
3638 }
3639 
3640 
3641 /*
3642  * getSubscriptions
3643  * get information about subscriptions
3644  */
3645 void
3647 {
3648  DumpOptions *dopt = fout->dopt;
3649  PQExpBuffer query;
3650  PGresult *res;
3651  SubscriptionInfo *subinfo;
3652  int i_tableoid;
3653  int i_oid;
3654  int i_subname;
3655  int i_rolname;
3656  int i_subenabled;
3657  int i_subconninfo;
3658  int i_subslotname;
3659  int i_subpublications;
3660  int i,
3661  ntups;
3662 
3663  if (!dopt->include_subscriptions || fout->remoteVersion < 100000)
3664  return;
3665 
3666  query = createPQExpBuffer();
3667 
3668  resetPQExpBuffer(query);
3669 
3670  /* Get the subscriptions in current database. */
3671  appendPQExpBuffer(query,
3672  "SELECT s.tableoid, s.oid, s.subname,"
3673  "(%s s.subowner) AS rolname, s.subenabled, "
3674  " s.subconninfo, s.subslotname, s.subpublications "
3675  "FROM pg_catalog.pg_subscription s "
3676  "WHERE s.subdbid = (SELECT oid FROM pg_catalog.pg_database"
3677  " WHERE datname = current_database())",
3679  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3680 
3681  ntups = PQntuples(res);
3682 
3683  i_tableoid = PQfnumber(res, "tableoid");
3684  i_oid = PQfnumber(res, "oid");
3685  i_subname = PQfnumber(res, "subname");
3686  i_rolname = PQfnumber(res, "rolname");
3687  i_subenabled = PQfnumber(res, "subenabled");
3688  i_subconninfo = PQfnumber(res, "subconninfo");
3689  i_subslotname = PQfnumber(res, "subslotname");
3690  i_subpublications = PQfnumber(res, "subpublications");
3691 
3692  subinfo = pg_malloc(ntups * sizeof(SubscriptionInfo));
3693 
3694  for (i = 0; i < ntups; i++)
3695  {
3696  subinfo[i].dobj.objType = DO_SUBSCRIPTION;
3697  subinfo[i].dobj.catId.tableoid =
3698  atooid(PQgetvalue(res, i, i_tableoid));
3699  subinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
3700  AssignDumpId(&subinfo[i].dobj);
3701  subinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_subname));
3702  subinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
3703  subinfo[i].subenabled =
3704  (strcmp(PQgetvalue(res, i, i_subenabled), "t") == 0);
3705  subinfo[i].subconninfo = pg_strdup(PQgetvalue(res, i, i_subconninfo));
3706  subinfo[i].subslotname = pg_strdup(PQgetvalue(res, i, i_subslotname));
3707  subinfo[i].subpublications =
3708  pg_strdup(PQgetvalue(res, i, i_subpublications));
3709 
3710  if (strlen(subinfo[i].rolname) == 0)
3711  write_msg(NULL, "WARNING: owner of subscription \"%s\" appears to be invalid\n",
3712  subinfo[i].dobj.name);
3713  }
3714  PQclear(res);
3715 
3716  destroyPQExpBuffer(query);
3717 }
3718 
3719 /*
3720  * dumpSubscription
3721  * dump the definition of the given subscription
3722  */
3723 static void
3725 {
3726  DumpOptions *dopt = fout->dopt;
3727  PQExpBuffer delq;
3728  PQExpBuffer query;
3729  PQExpBuffer publications;
3730  char **pubnames = NULL;
3731  int npubnames = 0;
3732  int i;
3733 
3734  if (dopt->dataOnly)
3735  return;
3736 
3737  delq = createPQExpBuffer();
3738  query = createPQExpBuffer();
3739 
3740  appendPQExpBuffer(delq, "DROP SUBSCRIPTION %s;\n",
3741  fmtId(subinfo->dobj.name));
3742 
3743  appendPQExpBuffer(query, "CREATE SUBSCRIPTION %s CONNECTION ",
3744  fmtId(subinfo->dobj.name));
3745  appendStringLiteralAH(query, subinfo->subconninfo, fout);
3746 
3747  /* Build list of quoted publications and append them to query. */
3748  if (!parsePGArray(subinfo->subpublications, &pubnames, &npubnames))
3749  {
3750  write_msg(NULL,
3751  "WARNING: could not parse subpublications array\n");
3752  if (pubnames)
3753  free(pubnames);
3754  pubnames = NULL;
3755  npubnames = 0;
3756  }
3757 
3758  publications = createPQExpBuffer();
3759  for (i = 0; i < npubnames; i++)
3760  {
3761  if (i > 0)
3762  appendPQExpBufferStr(publications, ", ");
3763 
3764  appendPQExpBufferStr(publications, fmtId(pubnames[i]));
3765  }
3766 
3767  appendPQExpBuffer(query, " PUBLICATION %s WITH (", publications->data);
3768 
3769  if (subinfo->subenabled)
3770  appendPQExpBufferStr(query, "ENABLED");
3771  else
3772  appendPQExpBufferStr(query, "DISABLED");
3773 
3774  appendPQExpBufferStr(query, ", SLOT NAME = ");
3775  appendStringLiteralAH(query, subinfo->subslotname, fout);
3776 
3777  if (dopt->no_subscription_connect)
3778  appendPQExpBufferStr(query, ", NOCONNECT");
3779 
3780  appendPQExpBufferStr(query, ");\n");
3781 
3782  ArchiveEntry(fout, subinfo->dobj.catId, subinfo->dobj.dumpId,
3783  subinfo->dobj.name,
3784  NULL,
3785  NULL,
3786  subinfo->rolname, false,
3787  "SUBSCRIPTION", SECTION_POST_DATA,
3788  query->data, delq->data, NULL,
3789  NULL, 0,
3790  NULL, NULL);
3791 
3792  destroyPQExpBuffer(publications);
3793  if (pubnames)
3794  free(pubnames);
3795 
3796  destroyPQExpBuffer(delq);
3797  destroyPQExpBuffer(query);
3798 }
3799 
3800 static void
3802  PQExpBuffer upgrade_buffer,
3803  Oid pg_type_oid)
3804 {
3805  PQExpBuffer upgrade_query = createPQExpBuffer();
3806  PGresult *upgrade_res;
3807  Oid pg_type_array_oid;
3808 
3809  appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type oid\n");
3810  appendPQExpBuffer(upgrade_buffer,
3811  "SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('%u'::pg_catalog.oid);\n\n",
3812  pg_type_oid);
3813 
3814  /* we only support old >= 8.3 for binary upgrades */
3815  appendPQExpBuffer(upgrade_query,
3816  "SELECT typarray "
3817  "FROM pg_catalog.pg_type "
3818  "WHERE pg_type.oid = '%u'::pg_catalog.oid;",
3819  pg_type_oid);
3820 
3821  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
3822 
3823  pg_type_array_oid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "typarray")));
3824 
3825  if (OidIsValid(pg_type_array_oid))
3826  {
3827  appendPQExpBufferStr(upgrade_buffer,
3828  "\n-- For binary upgrade, must preserve pg_type array oid\n");
3829  appendPQExpBuffer(upgrade_buffer,
3830  "SELECT pg_catalog.binary_upgrade_set_next_array_pg_type_oid('%u'::pg_catalog.oid);\n\n",
3831  pg_type_array_oid);
3832  }
3833 
3834  PQclear(upgrade_res);
3835  destroyPQExpBuffer(upgrade_query);
3836 }
3837 
3838 static bool
3840  PQExpBuffer upgrade_buffer,
3841  Oid pg_rel_oid)
3842 {
3843  PQExpBuffer upgrade_query = createPQExpBuffer();
3844  PGresult *upgrade_res;
3845  Oid pg_type_oid;
3846  bool toast_set = false;
3847 
3848  /* we only support old >= 8.3 for binary upgrades */
3849  appendPQExpBuffer(upgrade_query,
3850  "SELECT c.reltype AS crel, t.reltype AS trel "
3851  "FROM pg_catalog.pg_class c "
3852  "LEFT JOIN pg_catalog.pg_class t ON "
3853  " (c.reltoastrelid = t.oid) "
3854  "WHERE c.oid = '%u'::pg_catalog.oid;",
3855  pg_rel_oid);
3856 
3857  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
3858 
3859  pg_type_oid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "crel")));
3860 
3861  binary_upgrade_set_type_oids_by_type_oid(fout, upgrade_buffer,
3862  pg_type_oid);
3863 
3864  if (!PQgetisnull(upgrade_res, 0, PQfnumber(upgrade_res, "trel")))
3865  {
3866  /* Toast tables do not have pg_type array rows */
3867  Oid pg_type_toast_oid = atooid(PQgetvalue(upgrade_res, 0,
3868  PQfnumber(upgrade_res, "trel")));
3869 
3870  appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type toast oid\n");
3871  appendPQExpBuffer(upgrade_buffer,
3872  "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_type_oid('%u'::pg_catalog.oid);\n\n",
3873  pg_type_toast_oid);
3874 
3875  toast_set = true;
3876  }
3877 
3878  PQclear(upgrade_res);
3879  destroyPQExpBuffer(upgrade_query);
3880 
3881  return toast_set;
3882 }
3883 
3884 static void
3886  PQExpBuffer upgrade_buffer, Oid pg_class_oid,
3887  bool is_index)
3888 {
3889  PQExpBuffer upgrade_query = createPQExpBuffer();
3890  PGresult *upgrade_res;
3891  Oid pg_class_reltoastrelid;
3892  Oid pg_index_indexrelid;
3893 
3894  appendPQExpBuffer(upgrade_query,
3895  "SELECT c.reltoastrelid, i.indexrelid "
3896  "FROM pg_catalog.pg_class c LEFT JOIN "
3897  "pg_catalog.pg_index i ON (c.reltoastrelid = i.indrelid AND i.indisvalid) "
3898  "WHERE c.oid = '%u'::pg_catalog.oid;",
3899  pg_class_oid);
3900 
3901  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
3902 
3903  pg_class_reltoastrelid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "reltoastrelid")));
3904  pg_index_indexrelid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "indexrelid")));
3905 
3906  appendPQExpBufferStr(upgrade_buffer,
3907  "\n-- For binary upgrade, must preserve pg_class oids\n");
3908 
3909  if (!is_index)
3910  {
3911  appendPQExpBuffer(upgrade_buffer,
3912  "SELECT pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('%u'::pg_catalog.oid);\n",
3913  pg_class_oid);
3914  /* only tables have toast tables, not indexes */
3915  if (OidIsValid(pg_class_reltoastrelid))
3916  {
3917  /*
3918  * One complexity is that the table definition might not require
3919  * the creation of a TOAST table, and the TOAST table might have
3920  * been created long after table creation, when the table was
3921  * loaded with wide data. By setting the TOAST oid we force
3922  * creation of the TOAST heap and TOAST index by the backend so we
3923  * can cleanly copy the files during binary upgrade.
3924  */
3925 
3926  appendPQExpBuffer(upgrade_buffer,
3927  "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('%u'::pg_catalog.oid);\n",
3928  pg_class_reltoastrelid);
3929 
3930  /* every toast table has an index */
3931  appendPQExpBuffer(upgrade_buffer,
3932  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
3933  pg_index_indexrelid);
3934  }
3935  }
3936  else
3937  appendPQExpBuffer(upgrade_buffer,
3938  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
3939  pg_class_oid);
3940 
3941  appendPQExpBufferChar(upgrade_buffer, '\n');
3942 
3943  PQclear(upgrade_res);
3944  destroyPQExpBuffer(upgrade_query);
3945 }
3946 
3947 /*
3948  * If the DumpableObject is a member of an extension, add a suitable
3949  * ALTER EXTENSION ADD command to the creation commands in upgrade_buffer.
3950  */
3951 static void
3953  DumpableObject *dobj,
3954  const char *objlabel)
3955 {
3956  DumpableObject *extobj = NULL;
3957  int i;
3958 
3959  if (!dobj->ext_member)
3960  return;
3961 
3962  /*
3963  * Find the parent extension. We could avoid this search if we wanted to
3964  * add a link field to DumpableObject, but the space costs of that would
3965  * be considerable. We assume that member objects could only have a
3966  * direct dependency on their own extension, not any others.
3967  */
3968  for (i = 0; i < dobj->nDeps; i++)
3969  {
3970  extobj = findObjectByDumpId(dobj->dependencies[i]);
3971  if (extobj && extobj->objType == DO_EXTENSION)
3972  break;
3973  extobj = NULL;
3974  }
3975  if (extobj == NULL)
3976  exit_horribly(NULL, "could not find parent extension for %s\n", objlabel);
3977 
3978  appendPQExpBufferStr(upgrade_buffer,
3979  "\n-- For binary upgrade, handle extension membership the hard way\n");
3980  appendPQExpBuffer(upgrade_buffer, "ALTER EXTENSION %s ADD %s;\n",
3981  fmtId(extobj->name),
3982  objlabel);
3983 }
3984 
3985 /*
3986  * getNamespaces:
3987  * read all namespaces in the system catalogs and return them in the
3988  * NamespaceInfo* structure
3989  *
3990  * numNamespaces is set to the number of namespaces read in
3991  */
3992 NamespaceInfo *
3994 {
3995  DumpOptions *dopt = fout->dopt;
3996  PGresult *res;
3997  int ntups;
3998  int i;
3999  PQExpBuffer query;
4000  NamespaceInfo *nsinfo;
4001  int i_tableoid;
4002  int i_oid;
4003  int i_nspname;
4004  int i_rolname;
4005  int i_nspacl;
4006  int i_rnspacl;
4007  int i_initnspacl;
4008  int i_initrnspacl;
4009 
4010  query = createPQExpBuffer();
4011 
4012  /* Make sure we are in proper schema */
4013  selectSourceSchema(fout, "pg_catalog");
4014 
4015  /*
4016  * we fetch all namespaces including system ones, so that every object we
4017  * read in can be linked to a containing namespace.
4018  */
4019  if (fout->remoteVersion >= 90600)
4020  {
4021  PQExpBuffer acl_subquery = createPQExpBuffer();
4022  PQExpBuffer racl_subquery = createPQExpBuffer();
4023  PQExpBuffer init_acl_subquery = createPQExpBuffer();
4024  PQExpBuffer init_racl_subquery = createPQExpBuffer();
4025 
4026  buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
4027  init_racl_subquery, "n.nspacl", "n.nspowner", "'n'",
4028  dopt->binary_upgrade);
4029 
4030  appendPQExpBuffer(query, "SELECT n.tableoid, n.oid, n.nspname, "
4031  "(%s nspowner) AS rolname, "
4032  "%s as nspacl, "
4033  "%s as rnspacl, "
4034  "%s as initnspacl, "
4035  "%s as initrnspacl "
4036  "FROM pg_namespace n "
4037  "LEFT JOIN pg_init_privs pip "
4038  "ON (n.oid = pip.objoid "
4039  "AND pip.classoid = 'pg_namespace'::regclass "
4040  "AND pip.objsubid = 0",
4042  acl_subquery->data,
4043  racl_subquery->data,
4044  init_acl_subquery->data,
4045  init_racl_subquery->data);
4046 
4047  /*
4048  * When we are doing a 'clean' run, we will be dropping and recreating
4049  * the 'public' schema (the only object which has that kind of
4050  * treatment in the backend and which has an entry in pg_init_privs)
4051  * and therefore we should not consider any initial privileges in
4052  * pg_init_privs in that case.
4053  *
4054  * See pg_backup_archiver.c:_printTocEntry() for the details on why
4055  * the public schema is special in this regard.
4056  *
4057  * Note that if the public schema is dropped and re-created, this is
4058  * essentially a no-op because the new public schema won't have an
4059  * entry in pg_init_privs anyway, as the entry will be removed when
4060  * the public schema is dropped.
4061  */
4062  if (dopt->outputClean)
4063  appendPQExpBuffer(query," AND pip.objoid <> 'public'::regnamespace");
4064 
4065  appendPQExpBuffer(query,") ");
4066 
4067  destroyPQExpBuffer(acl_subquery);
4068  destroyPQExpBuffer(racl_subquery);
4069  destroyPQExpBuffer(init_acl_subquery);
4070  destroyPQExpBuffer(init_racl_subquery);
4071  }
4072  else
4073  appendPQExpBuffer(query, "SELECT tableoid, oid, nspname, "
4074  "(%s nspowner) AS rolname, "
4075  "nspacl, NULL as rnspacl, "
4076  "NULL AS initnspacl, NULL as initrnspacl "
4077  "FROM pg_namespace",
4079 
4080  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4081 
4082  ntups = PQntuples(res);
4083 
4084  nsinfo = (NamespaceInfo *) pg_malloc(ntups * sizeof(NamespaceInfo));
4085 
4086  i_tableoid = PQfnumber(res, "tableoid");
4087  i_oid = PQfnumber(res, "oid");
4088  i_nspname = PQfnumber(res, "nspname");
4089  i_rolname = PQfnumber(res, "rolname");
4090  i_nspacl = PQfnumber(res, "nspacl");
4091  i_rnspacl = PQfnumber(res, "rnspacl");
4092  i_initnspacl = PQfnumber(res, "initnspacl");
4093  i_initrnspacl = PQfnumber(res, "initrnspacl");
4094 
4095  for (i = 0; i < ntups; i++)
4096  {
4097  nsinfo[i].dobj.objType = DO_NAMESPACE;
4098  nsinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4099  nsinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4100  AssignDumpId(&nsinfo[i].dobj);
4101  nsinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_nspname));
4102  nsinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4103  nsinfo[i].nspacl = pg_strdup(PQgetvalue(res, i, i_nspacl));
4104  nsinfo[i].rnspacl = pg_strdup(PQgetvalue(res, i, i_rnspacl));
4105  nsinfo[i].initnspacl = pg_strdup(PQgetvalue(res, i, i_initnspacl));
4106  nsinfo[i].initrnspacl = pg_strdup(PQgetvalue(res, i, i_initrnspacl));
4107 
4108  /* Decide whether to dump this namespace */
4109  selectDumpableNamespace(&nsinfo[i], fout);
4110 
4111  /*
4112  * Do not try to dump ACL if the ACL is empty or the default.
4113  *
4114  * This is useful because, for some schemas/objects, the only
4115  * component we are going to try and dump is the ACL and if we can
4116  * remove that then 'dump' goes to zero/false and we don't consider
4117  * this object for dumping at all later on.
4118  */
4119  if (PQgetisnull(res, i, i_nspacl) && PQgetisnull(res, i, i_rnspacl) &&
4120  PQgetisnull(res, i, i_initnspacl) &&
4121  PQgetisnull(res, i, i_initrnspacl))
4122  nsinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4123 
4124  if (strlen(nsinfo[i].rolname) == 0)
4125  write_msg(NULL, "WARNING: owner of schema \"%s\" appears to be invalid\n",
4126  nsinfo[i].dobj.name);
4127  }
4128 
4129  PQclear(res);
4130  destroyPQExpBuffer(query);
4131 
4132  *numNamespaces = ntups;
4133 
4134  return nsinfo;
4135 }
4136 
4137 /*
4138  * findNamespace:
4139  * given a namespace OID, look up the info read by getNamespaces
4140  */
4141 static NamespaceInfo *
4143 {
4144  NamespaceInfo *nsinfo;
4145 
4146  nsinfo = findNamespaceByOid(nsoid);
4147  if (nsinfo == NULL)
4148  exit_horribly(NULL, "schema with OID %u does not exist\n", nsoid);
4149  return nsinfo;
4150 }
4151 
4152 /*
4153  * getExtensions:
4154  * read all extensions in the system catalogs and return them in the
4155  * ExtensionInfo* structure
4156  *
4157  * numExtensions is set to the number of extensions read in
4158  */
4159 ExtensionInfo *
4161 {
4162  DumpOptions *dopt = fout->dopt;
4163  PGresult *res;
4164  int ntups;
4165  int i;
4166  PQExpBuffer query;
4167  ExtensionInfo *extinfo;
4168  int i_tableoid;
4169  int i_oid;
4170  int i_extname;
4171  int i_nspname;
4172  int i_extrelocatable;
4173  int i_extversion;
4174  int i_extconfig;
4175  int i_extcondition;
4176 
4177  /*
4178  * Before 9.1, there are no extensions.
4179  */
4180  if (fout->remoteVersion < 90100)
4181  {
4182  *numExtensions = 0;
4183  return NULL;
4184  }
4185 
4186  query = createPQExpBuffer();
4187 
4188  /* Make sure we are in proper schema */
4189  selectSourceSchema(fout, "pg_catalog");
4190 
4191  appendPQExpBufferStr(query, "SELECT x.tableoid, x.oid, "
4192  "x.extname, n.nspname, x.extrelocatable, x.extversion, x.extconfig, x.extcondition "
4193  "FROM pg_extension x "
4194  "JOIN pg_namespace n ON n.oid = x.extnamespace");
4195 
4196  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4197 
4198  ntups = PQntuples(res);
4199 
4200  extinfo = (ExtensionInfo *) pg_malloc(ntups * sizeof(ExtensionInfo));
4201 
4202  i_tableoid = PQfnumber(res, "tableoid");
4203  i_oid = PQfnumber(res, "oid");
4204  i_extname = PQfnumber(res, "extname");
4205  i_nspname = PQfnumber(res, "nspname");
4206  i_extrelocatable = PQfnumber(res, "extrelocatable");
4207  i_extversion = PQfnumber(res, "extversion");
4208  i_extconfig = PQfnumber(res, "extconfig");
4209  i_extcondition = PQfnumber(res, "extcondition");
4210 
4211  for (i = 0; i < ntups; i++)
4212  {
4213  extinfo[i].dobj.objType = DO_EXTENSION;
4214  extinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4215  extinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4216  AssignDumpId(&extinfo[i].dobj);
4217  extinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_extname));
4218  extinfo[i].namespace = pg_strdup(PQgetvalue(res, i, i_nspname));
4219  extinfo[i].relocatable = *(PQgetvalue(res, i, i_extrelocatable)) == 't';
4220  extinfo[i].extversion = pg_strdup(PQgetvalue(res, i, i_extversion));
4221  extinfo[i].extconfig = pg_strdup(PQgetvalue(res, i, i_extconfig));
4222  extinfo[i].extcondition = pg_strdup(PQgetvalue(res, i, i_extcondition));
4223 
4224  /* Decide whether we want to dump it */
4225  selectDumpableExtension(&(extinfo[i]), dopt);
4226  }
4227 
4228  PQclear(res);
4229  destroyPQExpBuffer(query);
4230 
4231  *numExtensions = ntups;
4232 
4233  return extinfo;
4234 }
4235 
4236 /*
4237  * getTypes:
4238  * read all types in the system catalogs and return them in the
4239  * TypeInfo* structure
4240  *
4241  * numTypes is set to the number of types read in
4242  *
4243  * NB: this must run after getFuncs() because we assume we can do
4244  * findFuncByOid().
4245  */
4246 TypeInfo *
4248 {
4249  DumpOptions *dopt = fout->dopt;
4250  PGresult *res;
4251  int ntups;
4252  int i;
4253  PQExpBuffer query = createPQExpBuffer();
4254  TypeInfo *tyinfo;
4255  ShellTypeInfo *stinfo;
4256  int i_tableoid;
4257  int i_oid;
4258  int i_typname;
4259  int i_typnamespace;
4260  int i_typacl;
4261  int i_rtypacl;
4262  int i_inittypacl;
4263  int i_initrtypacl;
4264  int i_rolname;
4265  int i_typelem;
4266  int i_typrelid;
4267  int i_typrelkind;
4268  int i_typtype;
4269  int i_typisdefined;
4270  int i_isarray;
4271 
4272  /*
4273  * we include even the built-in types because those may be used as array
4274  * elements by user-defined types
4275  *
4276  * we filter out the built-in types when we dump out the types
4277  *
4278  * same approach for undefined (shell) types and array types
4279  *
4280  * Note: as of 8.3 we can reliably detect whether a type is an
4281  * auto-generated array type by checking the element type's typarray.
4282  * (Before that the test is capable of generating false positives.) We
4283  * still check for name beginning with '_', though, so as to avoid the
4284  * cost of the subselect probe for all standard types. This would have to
4285  * be revisited if the backend ever allows renaming of array types.
4286  */
4287 
4288  /* Make sure we are in proper schema */
4289  selectSourceSchema(fout, "pg_catalog");
4290 
4291  if (fout->remoteVersion >= 90600)
4292  {
4293  PQExpBuffer acl_subquery = createPQExpBuffer();
4294  PQExpBuffer racl_subquery = createPQExpBuffer();
4295  PQExpBuffer initacl_subquery = createPQExpBuffer();
4296  PQExpBuffer initracl_subquery = createPQExpBuffer();
4297 
4298  buildACLQueries(acl_subquery, racl_subquery, initacl_subquery,
4299  initracl_subquery, "t.typacl", "t.typowner", "'T'",
4300  dopt->binary_upgrade);
4301 
4302  appendPQExpBuffer(query, "SELECT t.tableoid, t.oid, t.typname, "
4303  "t.typnamespace, "
4304  "%s AS typacl, "
4305  "%s AS rtypacl, "
4306  "%s AS inittypacl, "
4307  "%s AS initrtypacl, "
4308  "(%s t.typowner) AS rolname, "
4309  "t.typelem, t.typrelid, "
4310  "CASE WHEN t.typrelid = 0 THEN ' '::\"char\" "
4311  "ELSE (SELECT relkind FROM pg_class WHERE oid = t.typrelid) END AS typrelkind, "
4312  "t.typtype, t.typisdefined, "
4313  "t.typname[0] = '_' AND t.typelem != 0 AND "
4314  "(SELECT typarray FROM pg_type te WHERE oid = t.typelem) = t.oid AS isarray "
4315  "FROM pg_type t "
4316  "LEFT JOIN pg_init_privs pip ON "
4317  "(t.oid = pip.objoid "
4318  "AND pip.classoid = 'pg_type'::regclass "
4319  "AND pip.objsubid = 0) ",
4320  acl_subquery->data,
4321  racl_subquery->data,
4322  initacl_subquery->data,
4323  initracl_subquery->data,
4325 
4326  destroyPQExpBuffer(acl_subquery);
4327  destroyPQExpBuffer(racl_subquery);
4328  destroyPQExpBuffer(initacl_subquery);
4329  destroyPQExpBuffer(initracl_subquery);
4330  }
4331  else if (fout->remoteVersion >= 90200)
4332  {
4333  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4334  "typnamespace, typacl, NULL as rtypacl, "
4335  "NULL AS inittypacl, NULL AS initrtypacl, "
4336  "(%s typowner) AS rolname, "
4337  "typelem, typrelid, "
4338  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4339  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4340  "typtype, typisdefined, "
4341  "typname[0] = '_' AND typelem != 0 AND "
4342  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
4343  "FROM pg_type",
4345  }
4346  else if (fout->remoteVersion >= 80300)
4347  {
4348  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4349  "typnamespace, NULL AS typacl, NULL as rtypacl, "
4350  "NULL AS inittypacl, NULL AS initrtypacl, "
4351  "(%s typowner) AS rolname, "
4352  "typelem, typrelid, "
4353  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4354  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4355  "typtype, typisdefined, "
4356  "typname[0] = '_' AND typelem != 0 AND "
4357  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
4358  "FROM pg_type",
4360  }
4361  else
4362  {
4363  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4364  "typnamespace, NULL AS typacl, NULL as rtypacl, "
4365  "NULL AS inittypacl, NULL AS initrtypacl, "
4366  "(%s typowner) AS rolname, "
4367  "typelem, typrelid, "
4368  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4369  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4370  "typtype, typisdefined, "
4371  "typname[0] = '_' AND typelem != 0 AS isarray "
4372  "FROM pg_type",
4374  }
4375 
4376  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4377 
4378  ntups = PQntuples(res);
4379 
4380  tyinfo = (TypeInfo *) pg_malloc(ntups * sizeof(TypeInfo));
4381 
4382  i_tableoid = PQfnumber(res, "tableoid");
4383  i_oid = PQfnumber(res, "oid");
4384  i_typname = PQfnumber(res, "typname");
4385  i_typnamespace = PQfnumber(res, "typnamespace");
4386  i_typacl = PQfnumber(res, "typacl");
4387  i_rtypacl = PQfnumber(res, "rtypacl");
4388  i_inittypacl = PQfnumber(res, "inittypacl");
4389  i_initrtypacl = PQfnumber(res, "initrtypacl");
4390  i_rolname = PQfnumber(res, "rolname");
4391  i_typelem = PQfnumber(res, "typelem");
4392  i_typrelid = PQfnumber(res, "typrelid");
4393  i_typrelkind = PQfnumber(res, "typrelkind");
4394  i_typtype = PQfnumber(res, "typtype");
4395  i_typisdefined = PQfnumber(res, "typisdefined");
4396  i_isarray = PQfnumber(res, "isarray");
4397 
4398  for (i = 0; i < ntups; i++)
4399  {
4400  tyinfo[i].dobj.objType = DO_TYPE;
4401  tyinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4402  tyinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4403  AssignDumpId(&tyinfo[i].dobj);
4404  tyinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_typname));
4405  tyinfo[i].dobj.namespace =
4406  findNamespace(fout,
4407  atooid(PQgetvalue(res, i, i_typnamespace)));
4408  tyinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4409  tyinfo[i].typacl = pg_strdup(PQgetvalue(res, i, i_typacl));
4410  tyinfo[i].rtypacl = pg_strdup(PQgetvalue(res, i, i_rtypacl));
4411  tyinfo[i].inittypacl = pg_strdup(PQgetvalue(res, i, i_inittypacl));
4412  tyinfo[i].initrtypacl = pg_strdup(PQgetvalue(res, i, i_initrtypacl));
4413  tyinfo[i].typelem = atooid(PQgetvalue(res, i, i_typelem));
4414  tyinfo[i].typrelid = atooid(PQgetvalue(res, i, i_typrelid));
4415  tyinfo[i].typrelkind = *PQgetvalue(res, i, i_typrelkind);
4416  tyinfo[i].typtype = *PQgetvalue(res, i, i_typtype);
4417  tyinfo[i].shellType = NULL;
4418 
4419  if (strcmp(PQgetvalue(res, i, i_typisdefined), "t") == 0)
4420  tyinfo[i].isDefined = true;
4421  else
4422  tyinfo[i].isDefined = false;
4423 
4424  if (strcmp(PQgetvalue(res, i, i_isarray), "t") == 0)
4425  tyinfo[i].isArray = true;
4426  else
4427  tyinfo[i].isArray = false;
4428 
4429  /* Decide whether we want to dump it */
4430  selectDumpableType(&tyinfo[i], fout);
4431 
4432  /* Do not try to dump ACL if no ACL exists. */
4433  if (PQgetisnull(res, i, i_typacl) && PQgetisnull(res, i, i_rtypacl) &&
4434  PQgetisnull(res, i, i_inittypacl) &&
4435  PQgetisnull(res, i, i_initrtypacl))
4436  tyinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4437 
4438  /*
4439  * If it's a domain, fetch info about its constraints, if any
4440  */
4441  tyinfo[i].nDomChecks = 0;
4442  tyinfo[i].domChecks = NULL;
4443  if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
4444  tyinfo[i].typtype == TYPTYPE_DOMAIN)
4445  getDomainConstraints(fout, &(tyinfo[i]));
4446 
4447  /*
4448  * If it's a base type, make a DumpableObject representing a shell
4449  * definition of the type. We will need to dump that ahead of the I/O
4450  * functions for the type. Similarly, range types need a shell
4451  * definition in case they have a canonicalize function.
4452  *
4453  * Note: the shell type doesn't have a catId. You might think it
4454  * should copy the base type's catId, but then it might capture the
4455  * pg_depend entries for the type, which we don't want.
4456  */
4457  if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
4458  (tyinfo[i].typtype == TYPTYPE_BASE ||
4459  tyinfo[i].typtype == TYPTYPE_RANGE))
4460  {
4461  stinfo = (ShellTypeInfo *) pg_malloc(sizeof(ShellTypeInfo));
4462  stinfo->dobj.objType = DO_SHELL_TYPE;
4463  stinfo->dobj.catId = nilCatalogId;
4464  AssignDumpId(&stinfo->dobj);
4465  stinfo->dobj.name = pg_strdup(tyinfo[i].dobj.name);
4466  stinfo->dobj.namespace = tyinfo[i].dobj.namespace;
4467  stinfo->baseType = &(tyinfo[i]);
4468  tyinfo[i].shellType = stinfo;
4469 
4470  /*
4471  * Initially mark the shell type as not to be dumped. We'll only
4472  * dump it if the I/O or canonicalize functions need to be dumped;
4473  * this is taken care of while sorting dependencies.
4474  */
4475  stinfo->dobj.dump = DUMP_COMPONENT_NONE;
4476  }
4477 
4478  if (strlen(tyinfo[i].rolname) == 0)
4479  write_msg(NULL, "WARNING: owner of data type \"%s\" appears to be invalid\n",
4480  tyinfo[i].dobj.name);
4481  }
4482 
4483  *numTypes = ntups;
4484 
4485  PQclear(res);
4486 
4487  destroyPQExpBuffer(query);
4488 
4489  return tyinfo;
4490 }
4491 
4492 /*
4493  * getOperators:
4494  * read all operators in the system catalogs and return them in the
4495  * OprInfo* structure
4496  *
4497  * numOprs is set to the number of operators read in
4498  */
4499 OprInfo *
4500 getOperators(Archive *fout, int *numOprs)
4501 {
4502  PGresult *res;
4503  int ntups;
4504  int i;
4505  PQExpBuffer query = createPQExpBuffer();
4506  OprInfo *oprinfo;
4507  int i_tableoid;
4508  int i_oid;
4509  int i_oprname;
4510  int i_oprnamespace;
4511  int i_rolname;
4512  int i_oprkind;
4513  int i_oprcode;
4514 
4515  /*
4516  * find all operators, including builtin operators; we filter out
4517  * system-defined operators at dump-out time.
4518  */
4519 
4520  /* Make sure we are in proper schema */
4521  selectSourceSchema(fout, "pg_catalog");
4522 
4523  appendPQExpBuffer(query, "SELECT tableoid, oid, oprname, "
4524  "oprnamespace, "
4525  "(%s oprowner) AS rolname, "
4526  "oprkind, "
4527  "oprcode::oid AS oprcode "
4528  "FROM pg_operator",
4530 
4531  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4532 
4533  ntups = PQntuples(res);
4534  *numOprs = ntups;
4535 
4536  oprinfo = (OprInfo *) pg_malloc(ntups * sizeof(OprInfo));
4537 
4538  i_tableoid = PQfnumber(res, "tableoid");
4539  i_oid = PQfnumber(res, "oid");
4540  i_oprname = PQfnumber(res, "oprname");
4541  i_oprnamespace = PQfnumber(res, "oprnamespace");
4542  i_rolname = PQfnumber(res, "rolname");
4543  i_oprkind = PQfnumber(res, "oprkind");
4544  i_oprcode = PQfnumber(res, "oprcode");
4545 
4546  for (i = 0; i < ntups; i++)
4547  {
4548  oprinfo[i].dobj.objType = DO_OPERATOR;
4549  oprinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4550  oprinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4551  AssignDumpId(&oprinfo[i].dobj);
4552  oprinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oprname));
4553  oprinfo[i].dobj.namespace =
4554  findNamespace(fout,
4555  atooid(PQgetvalue(res, i, i_oprnamespace)));
4556  oprinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4557  oprinfo[i].oprkind = (PQgetvalue(res, i, i_oprkind))[0];
4558  oprinfo[i].oprcode = atooid(PQgetvalue(res, i, i_oprcode));
4559 
4560  /* Decide whether we want to dump it */
4561  selectDumpableObject(&(oprinfo[i].dobj), fout);
4562 
4563  /* Operators do not currently have ACLs. */
4564  oprinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4565 
4566  if (strlen(oprinfo[i].rolname) == 0)
4567  write_msg(NULL, "WARNING: owner of operator \"%s\" appears to be invalid\n",
4568  oprinfo[i].dobj.name);
4569  }
4570 
4571  PQclear(res);
4572 
4573  destroyPQExpBuffer(query);
4574 
4575  return oprinfo;
4576 }
4577 
4578 /*
4579  * getCollations:
4580  * read all collations in the system catalogs and return them in the
4581  * CollInfo* structure
4582  *
4583  * numCollations is set to the number of collations read in
4584  */
4585 CollInfo *
4587 {
4588  PGresult *res;
4589  int ntups;
4590  int i;
4591  PQExpBuffer query;
4592  CollInfo *collinfo;
4593  int i_tableoid;
4594  int i_oid;
4595  int i_collname;
4596  int i_collnamespace;
4597  int i_rolname;
4598 
4599  /* Collations didn't exist pre-9.1 */
4600  if (fout->remoteVersion < 90100)
4601  {
4602  *numCollations = 0;
4603  return NULL;
4604  }
4605 
4606  query = createPQExpBuffer();
4607 
4608  /*
4609  * find all collations, including builtin collations; we filter out
4610  * system-defined collations at dump-out time.
4611  */
4612 
4613  /* Make sure we are in proper schema */
4614  selectSourceSchema(fout, "pg_catalog");
4615 
4616  appendPQExpBuffer(query, "SELECT tableoid, oid, collname, "
4617  "collnamespace, "
4618  "(%s collowner) AS rolname "
4619  "FROM pg_collation",
4621 
4622  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4623 
4624  ntups = PQntuples(res);
4625  *numCollations = ntups;
4626 
4627  collinfo = (CollInfo *) pg_malloc(ntups * sizeof(CollInfo));
4628 
4629  i_tableoid = PQfnumber(res, "tableoid");
4630  i_oid = PQfnumber(res, "oid");
4631  i_collname = PQfnumber(res, "collname");
4632  i_collnamespace = PQfnumber(res, "collnamespace");
4633  i_rolname = PQfnumber(res, "rolname");
4634 
4635  for (i = 0; i < ntups; i++)
4636  {
4637  collinfo[i].dobj.objType = DO_COLLATION;
4638  collinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4639  collinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4640  AssignDumpId(&collinfo[i].dobj);
4641  collinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_collname));
4642  collinfo[i].dobj.namespace =
4643  findNamespace(fout,
4644  atooid(PQgetvalue(res, i, i_collnamespace)));
4645  collinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4646 
4647  /* Decide whether we want to dump it */
4648  selectDumpableObject(&(collinfo[i].dobj), fout);
4649 
4650  /* Collations do not currently have ACLs. */
4651  collinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4652  }
4653 
4654  PQclear(res);
4655 
4656  destroyPQExpBuffer(query);
4657 
4658  return collinfo;
4659 }
4660 
4661 /*
4662  * getConversions:
4663  * read all conversions in the system catalogs and return them in the
4664  * ConvInfo* structure
4665  *
4666  * numConversions is set to the number of conversions read in
4667  */
4668 ConvInfo *
4669 getConversions(Archive *fout, int *numConversions)
4670 {
4671  PGresult *res;
4672  int ntups;
4673  int i;
4674  PQExpBuffer query;
4675  ConvInfo *convinfo;
4676  int i_tableoid;
4677  int i_oid;
4678  int i_conname;
4679  int i_connamespace;
4680  int i_rolname;
4681 
4682  query = createPQExpBuffer();
4683 
4684  /*
4685  * find all conversions, including builtin conversions; we filter out
4686  * system-defined conversions at dump-out time.
4687  */
4688 
4689  /* Make sure we are in proper schema */
4690  selectSourceSchema(fout, "pg_catalog");
4691 
4692  appendPQExpBuffer(query, "SELECT tableoid, oid, conname, "
4693  "connamespace, "
4694  "(%s conowner) AS rolname "
4695  "FROM pg_conversion",
4697 
4698  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4699 
4700  ntups = PQntuples(res);
4701  *numConversions = ntups;
4702 
4703  convinfo = (ConvInfo *) pg_malloc(ntups * sizeof(ConvInfo));
4704 
4705  i_tableoid = PQfnumber(res, "tableoid");
4706  i_oid = PQfnumber(res, "oid");
4707  i_conname = PQfnumber(res, "conname");
4708  i_connamespace = PQfnumber(res, "connamespace");
4709  i_rolname = PQfnumber(res, "rolname");
4710 
4711  for (i = 0; i < ntups; i++)
4712  {
4713  convinfo[i].dobj.objType = DO_CONVERSION;
4714  convinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4715  convinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4716  AssignDumpId(&convinfo[i].dobj);
4717  convinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_conname));
4718  convinfo[i].dobj.namespace =
4719  findNamespace(fout,
4720  atooid(PQgetvalue(res, i, i_connamespace)));
4721  convinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4722 
4723  /* Decide whether we want to dump it */
4724  selectDumpableObject(&(convinfo[i].dobj), fout);
4725 
4726  /* Conversions do not currently have ACLs. */
4727  convinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4728  }
4729 
4730  PQclear(res);
4731 
4732  destroyPQExpBuffer(query);
4733 
4734  return convinfo;
4735 }
4736 
4737 /*
4738  * getAccessMethods:
4739  * read all user-defined access methods in the system catalogs and return
4740  * them in the AccessMethodInfo* structure
4741  *
4742  * numAccessMethods is set to the number of access methods read in
4743  */
4745 getAccessMethods(Archive *fout, int *numAccessMethods)
4746 {
4747  PGresult *res;
4748  int ntups;
4749  int i;
4750  PQExpBuffer query;
4751  AccessMethodInfo *aminfo;
4752  int i_tableoid;
4753  int i_oid;
4754  int i_amname;
4755  int i_amhandler;
4756  int i_amtype;
4757 
4758  /* Before 9.6, there are no user-defined access methods */
4759  if (fout->remoteVersion < 90600)
4760  {
4761  *numAccessMethods = 0;
4762  return NULL;
4763  }
4764 
4765  query = createPQExpBuffer();
4766 
4767  /* Make sure we are in proper schema */
4768  selectSourceSchema(fout, "pg_catalog");
4769 
4770  /* Select all access methods from pg_am table */
4771  appendPQExpBuffer(query, "SELECT tableoid, oid, amname, amtype, "
4772  "amhandler::pg_catalog.regproc AS amhandler "
4773  "FROM pg_am");
4774 
4775  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4776 
4777  ntups = PQntuples(res);
4778  *numAccessMethods = ntups;
4779 
4780  aminfo = (AccessMethodInfo *) pg_malloc(ntups * sizeof(AccessMethodInfo));
4781 
4782  i_tableoid = PQfnumber(res, "tableoid");
4783  i_oid = PQfnumber(res, "oid");
4784  i_amname = PQfnumber(res, "amname");
4785  i_amhandler = PQfnumber(res, "amhandler");
4786  i_amtype = PQfnumber(res, "amtype");
4787 
4788  for (i = 0; i < ntups; i++)
4789  {
4790  aminfo[i].dobj.objType = DO_ACCESS_METHOD;
4791  aminfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4792  aminfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4793  AssignDumpId(&aminfo[i].dobj);
4794  aminfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_amname));
4795  aminfo[i].dobj.namespace = NULL;
4796  aminfo[i].amhandler = pg_strdup(PQgetvalue(res, i, i_amhandler));
4797  aminfo[i].amtype = *(PQgetvalue(res, i, i_amtype));
4798 
4799  /* Decide whether we want to dump it */
4800  selectDumpableAccessMethod(&(aminfo[i]), fout);
4801 
4802  /* Access methods do not currently have ACLs. */
4803  aminfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4804  }
4805 
4806  PQclear(res);
4807 
4808  destroyPQExpBuffer(query);
4809 
4810  return aminfo;
4811 }
4812 
4813 
4814 /*
4815  * getOpclasses:
4816  * read all opclasses in the system catalogs and return them in the
4817  * OpclassInfo* structure
4818  *
4819  * numOpclasses is set to the number of opclasses read in
4820  */
4821 OpclassInfo *
4822 getOpclasses(Archive *fout, int *numOpclasses)
4823 {
4824  PGresult *res;
4825  int ntups;
4826  int i;
4827  PQExpBuffer query = createPQExpBuffer();
4828  OpclassInfo *opcinfo;
4829  int i_tableoid;
4830  int i_oid;
4831  int i_opcname;
4832  int i_opcnamespace;
4833  int i_rolname;
4834 
4835  /*
4836  * find all opclasses, including builtin opclasses; we filter out
4837  * system-defined opclasses at dump-out time.
4838  */
4839 
4840  /* Make sure we are in proper schema */
4841  selectSourceSchema(fout, "pg_catalog");
4842 
4843  appendPQExpBuffer(query, "SELECT tableoid, oid, opcname, "
4844  "opcnamespace, "
4845  "(%s opcowner) AS rolname "
4846  "FROM pg_opclass",
4848 
4849  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4850 
4851  ntups = PQntuples(res);
4852  *numOpclasses = ntups;
4853 
4854  opcinfo = (OpclassInfo *) pg_malloc(ntups * sizeof(OpclassInfo));
4855 
4856  i_tableoid = PQfnumber(res, "tableoid");
4857  i_oid = PQfnumber(res, "oid");
4858  i_opcname = PQfnumber(res, "opcname");
4859  i_opcnamespace = PQfnumber(res, "opcnamespace");
4860  i_rolname = PQfnumber(res, "rolname");
4861 
4862  for (i = 0; i < ntups; i++)
4863  {
4864  opcinfo[i].dobj.objType = DO_OPCLASS;
4865  opcinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4866  opcinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4867  AssignDumpId(&opcinfo[i].dobj);
4868  opcinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opcname));
4869  opcinfo[i].dobj.namespace =
4870  findNamespace(fout,
4871  atooid(PQgetvalue(res, i, i_opcnamespace)));
4872  opcinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4873 
4874  /* Decide whether we want to dump it */
4875  selectDumpableObject(&(opcinfo[i].dobj), fout);
4876 
4877  /* Op Classes do not currently have ACLs. */
4878  opcinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4879 
4880  if (strlen(opcinfo[i].rolname) == 0)
4881  write_msg(NULL, "WARNING: owner of operator class \"%s\" appears to be invalid\n",
4882  opcinfo[i].dobj.name);
4883  }
4884 
4885  PQclear(res);
4886 
4887  destroyPQExpBuffer(query);
4888 
4889  return opcinfo;
4890 }
4891 
4892 /*
4893  * getOpfamilies:
4894  * read all opfamilies in the system catalogs and return them in the
4895  * OpfamilyInfo* structure
4896  *
4897  * numOpfamilies is set to the number of opfamilies read in
4898  */
4899 OpfamilyInfo *
4900 getOpfamilies(Archive *fout, int *numOpfamilies)
4901 {
4902  PGresult *res;
4903  int ntups;
4904  int i;
4905  PQExpBuffer query;
4906  OpfamilyInfo *opfinfo;
4907  int i_tableoid;
4908  int i_oid;
4909  int i_opfname;
4910  int i_opfnamespace;
4911  int i_rolname;
4912 
4913  /* Before 8.3, there is no separate concept of opfamilies */
4914  if (fout->remoteVersion < 80300)
4915  {
4916  *numOpfamilies = 0;
4917  return NULL;
4918  }
4919 
4920  query = createPQExpBuffer();
4921 
4922  /*
4923  * find all opfamilies, including builtin opfamilies; we filter out
4924  * system-defined opfamilies at dump-out time.
4925  */
4926 
4927  /* Make sure we are in proper schema */
4928  selectSourceSchema(fout, "pg_catalog");
4929 
4930  appendPQExpBuffer(query, "SELECT tableoid, oid, opfname, "
4931  "opfnamespace, "
4932  "(%s opfowner) AS rolname "
4933  "FROM pg_opfamily",
4935 
4936  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4937 
4938  ntups = PQntuples(res);
4939  *numOpfamilies = ntups;
4940 
4941  opfinfo = (OpfamilyInfo *) pg_malloc(ntups * sizeof(OpfamilyInfo));
4942 
4943  i_tableoid = PQfnumber(res, "tableoid");
4944  i_oid = PQfnumber(res, "oid");
4945  i_opfname = PQfnumber(res, "opfname");
4946  i_opfnamespace = PQfnumber(res, "opfnamespace");
4947  i_rolname = PQfnumber(res, "rolname");
4948 
4949  for (i = 0; i < ntups; i++)
4950  {
4951  opfinfo[i].dobj.objType = DO_OPFAMILY;
4952  opfinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4953  opfinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4954  AssignDumpId(&opfinfo[i].dobj);
4955  opfinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opfname));
4956  opfinfo[i].dobj.namespace =
4957  findNamespace(fout,
4958  atooid(PQgetvalue(res, i, i_opfnamespace)));
4959  opfinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4960 
4961  /* Decide whether we want to dump it */
4962  selectDumpableObject(&(opfinfo[i].dobj), fout);
4963 
4964  /* Extensions do not currently have ACLs. */
4965  opfinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4966 
4967  if (strlen(opfinfo[i].rolname) == 0)
4968  write_msg(NULL, "WARNING: owner of operator family \"%s\" appears to be invalid\n",
4969  opfinfo[i].dobj.name);
4970  }
4971 
4972  PQclear(res);
4973 
4974  destroyPQExpBuffer(query);
4975 
4976  return opfinfo;
4977 }
4978 
4979 /*
4980  * getAggregates:
4981  * read all the user-defined aggregates in the system catalogs and
4982  * return them in the AggInfo* structure
4983  *
4984  * numAggs is set to the number of aggregates read in
4985  */
4986 AggInfo *
4987 getAggregates(Archive *fout, int *numAggs)
4988 {
4989  DumpOptions *dopt = fout->dopt;
4990  PGresult *res;
4991  int ntups;
4992  int i;
4993  PQExpBuffer query = createPQExpBuffer();
4994  AggInfo *agginfo;
4995  int i_tableoid;
4996  int i_oid;
4997  int i_aggname;
4998  int i_aggnamespace;
4999  int i_pronargs;
5000  int i_proargtypes;
5001  int i_rolname;
5002  int i_aggacl;
5003  int i_raggacl;
5004  int i_initaggacl;
5005  int i_initraggacl;
5006 
5007  /* Make sure we are in proper schema */
5008  selectSourceSchema(fout, "pg_catalog");
5009 
5010  /*
5011  * Find all interesting aggregates. See comment in getFuncs() for the
5012  * rationale behind the filtering logic.
5013  */
5014  if (fout->remoteVersion >= 90600)
5015  {
5016  PQExpBuffer acl_subquery = createPQExpBuffer();
5017  PQExpBuffer racl_subquery = createPQExpBuffer();
5018  PQExpBuffer initacl_subquery = createPQExpBuffer();
5019  PQExpBuffer initracl_subquery = createPQExpBuffer();
5020 
5021  buildACLQueries(acl_subquery, racl_subquery, initacl_subquery,
5022  initracl_subquery, "p.proacl", "p.proowner", "'f'",
5023  dopt->binary_upgrade);
5024 
5025  appendPQExpBuffer(query, "SELECT p.tableoid, p.oid, "
5026  "p.proname AS aggname, "
5027  "p.pronamespace AS aggnamespace, "
5028  "p.pronargs, p.proargtypes, "
5029  "(%s p.proowner) AS rolname, "
5030  "%s AS aggacl, "
5031  "%s AS raggacl, "
5032  "%s AS initaggacl, "
5033  "%s AS initraggacl "
5034  "FROM pg_proc p "
5035  "LEFT JOIN pg_init_privs pip ON "
5036  "(p.oid = pip.objoid "
5037  "AND pip.classoid = 'pg_proc'::regclass "
5038  "AND pip.objsubid = 0) "
5039  "WHERE p.proisagg AND ("
5040  "p.pronamespace != "
5041  "(SELECT oid FROM pg_namespace "
5042  "WHERE nspname = 'pg_catalog') OR "
5043  "p.proacl IS DISTINCT FROM pip.initprivs",
5045  acl_subquery->data,
5046  racl_subquery->data,
5047  initacl_subquery->data,
5048  initracl_subquery->data);
5049  if (dopt->binary_upgrade)
5050  appendPQExpBufferStr(query,
5051  " OR EXISTS(SELECT 1 FROM pg_depend WHERE "
5052  "classid = 'pg_proc'::regclass AND "
5053  "objid = p.oid AND "
5054  "refclassid = 'pg_extension'::regclass AND "
5055  "deptype = 'e')");
5056  appendPQExpBufferChar(query, ')');
5057 
5058  destroyPQExpBuffer(acl_subquery);
5059  destroyPQExpBuffer(racl_subquery);
5060  destroyPQExpBuffer(initacl_subquery);
5061  destroyPQExpBuffer(initracl_subquery);
5062  }
5063  else if (fout->remoteVersion >= 80200)
5064  {
5065  appendPQExpBuffer(query, "SELECT tableoid, oid, proname AS aggname, "
5066  "pronamespace AS aggnamespace, "
5067  "pronargs, proargtypes, "
5068  "(%s proowner) AS rolname, "
5069  "proacl AS aggacl, "
5070  "NULL AS raggacl, "
5071  "NULL AS initaggacl, NULL AS initraggacl "
5072  "FROM pg_proc p "
5073  "WHERE proisagg AND ("
5074  "pronamespace != "
5075  "(SELECT oid FROM pg_namespace "
5076  "WHERE nspname = 'pg_catalog')",
5078  if (dopt->binary_upgrade && fout->remoteVersion >= 90100)
5079  appendPQExpBufferStr(query,
5080  " OR EXISTS(SELECT 1 FROM pg_depend WHERE "
5081  "classid = 'pg_proc'::regclass AND "
5082  "objid = p.oid AND "
5083  "refclassid = 'pg_extension'::regclass AND "
5084  "deptype = 'e')");
5085  appendPQExpBufferChar(query, ')');
5086  }
5087  else
5088  {
5089  appendPQExpBuffer(query, "SELECT tableoid, oid, proname AS aggname, "
5090  "pronamespace AS aggnamespace, "
5091  "CASE WHEN proargtypes[0] = 'pg_catalog.\"any\"'::pg_catalog.regtype THEN 0 ELSE 1 END AS pronargs, "
5092  "proargtypes, "
5093  "(%s proowner) AS rolname, "
5094  "proacl AS aggacl, "
5095  "NULL AS raggacl, "
5096  "NULL AS initaggacl, NULL AS initraggacl "
5097  "FROM pg_proc "
5098  "WHERE proisagg "
5099  "AND pronamespace != "
5100  "(SELECT oid FROM pg_namespace WHERE nspname = 'pg_catalog')",
5102  }
5103 
5104  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5105 
5106  ntups = PQntuples(res);
5107  *numAggs = ntups;
5108 
5109  agginfo = (AggInfo *) pg_malloc(ntups * sizeof(AggInfo));
5110 
5111  i_tableoid = PQfnumber(res, "tableoid");
5112  i_oid = PQfnumber(res, "oid");
5113  i_aggname = PQfnumber(res, "aggname");
5114  i_aggnamespace = PQfnumber(res, "aggnamespace");
5115  i_pronargs = PQfnumber(res, "pronargs");
5116  i_proargtypes = PQfnumber(res, "proargtypes");
5117  i_rolname = PQfnumber(res, "rolname");
5118  i_aggacl = PQfnumber(res, "aggacl");
5119  i_raggacl = PQfnumber(res, "raggacl");
5120  i_initaggacl = PQfnumber(res, "initaggacl");
5121  i_initraggacl = PQfnumber(res, "initraggacl");
5122 
5123  for (i = 0; i < ntups; i++)
5124  {
5125  agginfo[i].aggfn.dobj.objType = DO_AGG;
5126  agginfo[i].aggfn.dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5127  agginfo[i].aggfn.dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5128  AssignDumpId(&agginfo[i].aggfn.dobj);
5129  agginfo[i].aggfn.dobj.name = pg_strdup(PQgetvalue(res, i, i_aggname));
5130  agginfo[i].aggfn.dobj.namespace =
5131  findNamespace(fout,
5132  atooid(PQgetvalue(res, i, i_aggnamespace)));
5133  agginfo[i].aggfn.rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5134  if (strlen(agginfo[i].aggfn.rolname) == 0)
5135  write_msg(NULL, "WARNING: owner of aggregate function \"%s\" appears to be invalid\n",
5136  agginfo[i].aggfn.dobj.name);
5137  agginfo[i].aggfn.lang = InvalidOid; /* not currently interesting */
5138  agginfo[i].aggfn.prorettype = InvalidOid;