pg_dump.c
1 /*-------------------------------------------------------------------------
2  *
3  * pg_dump.c
4  * pg_dump is a utility for dumping out a postgres database
5  * into a script file.
6  *
7  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994, Regents of the University of California
9  *
10  * pg_dump will read the system catalogs in a database and dump out a
11  * script that reproduces the schema in terms of SQL that is understood
12  * by PostgreSQL
13  *
14  * Note that pg_dump runs in a transaction-snapshot mode transaction,
15  * so it sees a consistent snapshot of the database including system
16  * catalogs. However, it relies in part on various specialized backend
17  * functions like pg_get_indexdef(), and those things tend to look at
18  * the currently committed state. So it is possible to get a 'cache
19  * lookup failed' error if someone performs DDL changes while a dump is
20  * happening. The window for this sort of thing is from the acquisition
21  * of the transaction snapshot to getSchemaData() (when pg_dump acquires
22  * AccessShareLock on every table it intends to dump). It isn't very large,
23  * but it can happen.
24  *
25  * http://archives.postgresql.org/pgsql-bugs/2010-02/msg00187.php
26  *
27  * IDENTIFICATION
28  * src/bin/pg_dump/pg_dump.c
29  *
30  *-------------------------------------------------------------------------
31  */
32 #include "postgres_fe.h"
33 
34 #include <unistd.h>
35 #include <ctype.h>
36 #ifdef HAVE_TERMIOS_H
37 #include <termios.h>
38 #endif
39 
40 #include "getopt_long.h"
41 
42 #include "access/attnum.h"
43 #include "access/sysattr.h"
44 #include "access/transam.h"
45 #include "catalog/pg_am.h"
46 #include "catalog/pg_attribute.h"
47 #include "catalog/pg_cast.h"
48 #include "catalog/pg_class.h"
49 #include "catalog/pg_default_acl.h"
50 #include "catalog/pg_largeobject.h"
51 #include "catalog/pg_largeobject_metadata.h"
52 #include "catalog/pg_proc.h"
53 #include "catalog/pg_trigger.h"
54 #include "catalog/pg_type.h"
55 #include "libpq/libpq-fs.h"
56 
57 #include "dumputils.h"
58 #include "parallel.h"
59 #include "pg_backup_db.h"
60 #include "pg_backup_utils.h"
61 #include "pg_dump.h"
62 #include "fe_utils/string_utils.h"
63 
64 
65 typedef struct
66 {
67  const char *descr; /* comment for an object */
68  Oid classoid; /* object class (catalog OID) */
69  Oid objoid; /* object OID */
70  int objsubid; /* subobject (table column #) */
71 } CommentItem;
72 
73 typedef struct
74 {
75  const char *provider; /* label provider of this security label */
76  const char *label; /* security label for an object */
77  Oid classoid; /* object class (catalog OID) */
78  Oid objoid; /* object OID */
79  int objsubid; /* subobject (table column #) */
80 } SecLabelItem;
81 
82 typedef enum OidOptions
83 {
84  zeroAsOpaque = 1,
85  zeroAsAny = 2,
86  zeroAsStar = 4,
87  zeroAsNone = 8
88 } OidOptions;
89 
90 /* global decls */
91 bool g_verbose; /* User wants verbose narration of our
92  * activities. */
93 static bool dosync = true; /* Issue fsync() to make dump durable on disk. */
94 
95 /* subquery used to convert user ID (eg, datdba) to user name */
96 static const char *username_subquery;
97 
98 /*
99  * For 8.0 and earlier servers, this is pulled from pg_database; for 8.1 and
100  * later we use FirstNormalObjectId - 1.
101  */
102 static Oid g_last_builtin_oid; /* value of the last builtin oid */
103 
104 /* The specified names/patterns should match at least one entity */
105 static int strict_names = 0;
106 
107 /*
108  * Object inclusion/exclusion lists
109  *
110  * The string lists record the patterns given by command-line switches,
111  * which we then convert to lists of OIDs of matching objects.
112  */
113 static SimpleStringList schema_include_patterns = {NULL, NULL};
114 static SimpleOidList schema_include_oids = {NULL, NULL};
115 static SimpleStringList schema_exclude_patterns = {NULL, NULL};
116 static SimpleOidList schema_exclude_oids = {NULL, NULL};
117 
118 static SimpleStringList table_include_patterns = {NULL, NULL};
119 static SimpleOidList table_include_oids = {NULL, NULL};
120 static SimpleStringList table_exclude_patterns = {NULL, NULL};
121 static SimpleOidList table_exclude_oids = {NULL, NULL};
122 static SimpleStringList tabledata_exclude_patterns = {NULL, NULL};
123 static SimpleOidList tabledata_exclude_oids = {NULL, NULL};
124 
125 
126 char g_opaque_type[10]; /* name for the opaque type */
127 
128 /* placeholders for the delimiters for comments */
129 char g_comment_start[10];
130 char g_comment_end[10];
131 
132 static const CatalogId nilCatalogId = {0, 0};
133 
134 static void help(const char *progname);
135 static void setup_connection(Archive *AH,
136  const char *dumpencoding, const char *dumpsnapshot,
137  char *use_role);
138 static ArchiveFormat parseArchiveFormat(const char *format, ArchiveMode *mode);
139 static void expand_schema_name_patterns(Archive *fout,
140  SimpleStringList *patterns,
141  SimpleOidList *oids,
142  bool strict_names);
143 static void expand_table_name_patterns(Archive *fout,
144  SimpleStringList *patterns,
145  SimpleOidList *oids,
146  bool strict_names);
147 static NamespaceInfo *findNamespace(Archive *fout, Oid nsoid);
148 static void dumpTableData(Archive *fout, TableDataInfo *tdinfo);
149 static void refreshMatViewData(Archive *fout, TableDataInfo *tdinfo);
150 static void guessConstraintInheritance(TableInfo *tblinfo, int numTables);
151 static void dumpComment(Archive *fout, const char *target,
152  const char *namespace, const char *owner,
153  CatalogId catalogId, int subid, DumpId dumpId);
154 static int findComments(Archive *fout, Oid classoid, Oid objoid,
155  CommentItem **items);
156 static int collectComments(Archive *fout, CommentItem **items);
157 static void dumpSecLabel(Archive *fout, const char *target,
158  const char *namespace, const char *owner,
159  CatalogId catalogId, int subid, DumpId dumpId);
160 static int findSecLabels(Archive *fout, Oid classoid, Oid objoid,
161  SecLabelItem **items);
162 static int collectSecLabels(Archive *fout, SecLabelItem **items);
163 static void dumpDumpableObject(Archive *fout, DumpableObject *dobj);
164 static void dumpNamespace(Archive *fout, NamespaceInfo *nspinfo);
165 static void dumpExtension(Archive *fout, ExtensionInfo *extinfo);
166 static void dumpType(Archive *fout, TypeInfo *tyinfo);
167 static void dumpBaseType(Archive *fout, TypeInfo *tyinfo);
168 static void dumpEnumType(Archive *fout, TypeInfo *tyinfo);
169 static void dumpRangeType(Archive *fout, TypeInfo *tyinfo);
170 static void dumpUndefinedType(Archive *fout, TypeInfo *tyinfo);
171 static void dumpDomain(Archive *fout, TypeInfo *tyinfo);
172 static void dumpCompositeType(Archive *fout, TypeInfo *tyinfo);
173 static void dumpCompositeTypeColComments(Archive *fout, TypeInfo *tyinfo);
174 static void dumpShellType(Archive *fout, ShellTypeInfo *stinfo);
175 static void dumpProcLang(Archive *fout, ProcLangInfo *plang);
176 static void dumpFunc(Archive *fout, FuncInfo *finfo);
177 static void dumpCast(Archive *fout, CastInfo *cast);
178 static void dumpTransform(Archive *fout, TransformInfo *transform);
179 static void dumpOpr(Archive *fout, OprInfo *oprinfo);
180 static void dumpAccessMethod(Archive *fout, AccessMethodInfo *oprinfo);
181 static void dumpOpclass(Archive *fout, OpclassInfo *opcinfo);
182 static void dumpOpfamily(Archive *fout, OpfamilyInfo *opfinfo);
183 static void dumpCollation(Archive *fout, CollInfo *collinfo);
184 static void dumpConversion(Archive *fout, ConvInfo *convinfo);
185 static void dumpRule(Archive *fout, RuleInfo *rinfo);
186 static void dumpAgg(Archive *fout, AggInfo *agginfo);
187 static void dumpTrigger(Archive *fout, TriggerInfo *tginfo);
188 static void dumpEventTrigger(Archive *fout, EventTriggerInfo *evtinfo);
189 static void dumpTable(Archive *fout, TableInfo *tbinfo);
190 static void dumpTableSchema(Archive *fout, TableInfo *tbinfo);
191 static void dumpAttrDef(Archive *fout, AttrDefInfo *adinfo);
192 static void dumpSequence(Archive *fout, TableInfo *tbinfo);
193 static void dumpSequenceData(Archive *fout, TableDataInfo *tdinfo);
194 static void dumpIndex(Archive *fout, IndxInfo *indxinfo);
195 static void dumpStatisticsExt(Archive *fout, StatsExtInfo *statsextinfo);
196 static void dumpConstraint(Archive *fout, ConstraintInfo *coninfo);
197 static void dumpTableConstraintComment(Archive *fout, ConstraintInfo *coninfo);
198 static void dumpTSParser(Archive *fout, TSParserInfo *prsinfo);
199 static void dumpTSDictionary(Archive *fout, TSDictInfo *dictinfo);
200 static void dumpTSTemplate(Archive *fout, TSTemplateInfo *tmplinfo);
201 static void dumpTSConfig(Archive *fout, TSConfigInfo *cfginfo);
202 static void dumpForeignDataWrapper(Archive *fout, FdwInfo *fdwinfo);
203 static void dumpForeignServer(Archive *fout, ForeignServerInfo *srvinfo);
204 static void dumpUserMappings(Archive *fout,
205  const char *servername, const char *namespace,
206  const char *owner, CatalogId catalogId, DumpId dumpId);
207 static void dumpDefaultACL(Archive *fout, DefaultACLInfo *daclinfo);
208 
209 static void dumpACL(Archive *fout, CatalogId objCatId, DumpId objDumpId,
210  const char *type, const char *name, const char *subname,
211  const char *tag, const char *nspname, const char *owner,
212  const char *acls, const char *racls,
213  const char *initacls, const char *initracls);
214 
215 static void getDependencies(Archive *fout);
216 static void BuildArchiveDependencies(Archive *fout);
217 static void findDumpableDependencies(ArchiveHandle *AH, DumpableObject *dobj,
218  DumpId **dependencies, int *nDeps, int *allocDeps);
219 
220 static DumpableObject *createBoundaryObjects(void);
221 static void addBoundaryDependencies(DumpableObject **dobjs, int numObjs,
222  DumpableObject *boundaryObjs);
223 
224 static void getDomainConstraints(Archive *fout, TypeInfo *tyinfo);
225 static void getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, bool oids, char relkind);
226 static void makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo, bool oids);
227 static void buildMatViewRefreshDependencies(Archive *fout);
228 static void getTableDataFKConstraints(void);
229 static char *format_function_arguments(FuncInfo *finfo, char *funcargs,
230  bool is_agg);
231 static char *format_function_arguments_old(Archive *fout,
232  FuncInfo *finfo, int nallargs,
233  char **allargtypes,
234  char **argmodes,
235  char **argnames);
236 static char *format_function_signature(Archive *fout,
237  FuncInfo *finfo, bool honor_quotes);
238 static char *convertRegProcReference(Archive *fout,
239  const char *proc);
240 static char *convertOperatorReference(Archive *fout, const char *opr);
241 static char *convertTSFunction(Archive *fout, Oid funcOid);
242 static Oid findLastBuiltinOid_V71(Archive *fout, const char *);
243 static void selectSourceSchema(Archive *fout, const char *schemaName);
244 static char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts);
245 static void getBlobs(Archive *fout);
246 static void dumpBlob(Archive *fout, BlobInfo *binfo);
247 static int dumpBlobs(Archive *fout, void *arg);
248 static void dumpPolicy(Archive *fout, PolicyInfo *polinfo);
249 static void dumpPublication(Archive *fout, PublicationInfo *pubinfo);
250 static void dumpPublicationTable(Archive *fout, PublicationRelInfo *pubrinfo);
251 static void dumpSubscription(Archive *fout, SubscriptionInfo *subinfo);
252 static void dumpDatabase(Archive *AH);
253 static void dumpEncoding(Archive *AH);
254 static void dumpStdStrings(Archive *AH);
255 static void binary_upgrade_set_type_oids_by_type_oid(Archive *fout,
256  PQExpBuffer upgrade_buffer, Oid pg_type_oid);
257 static bool binary_upgrade_set_type_oids_by_rel_oid(Archive *fout,
258  PQExpBuffer upgrade_buffer, Oid pg_rel_oid);
259 static void binary_upgrade_set_pg_class_oids(Archive *fout,
260  PQExpBuffer upgrade_buffer,
261  Oid pg_class_oid, bool is_index);
262 static void binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
263  DumpableObject *dobj,
264  const char *objlabel);
265 static const char *getAttrName(int attrnum, TableInfo *tblInfo);
266 static const char *fmtCopyColumnList(const TableInfo *ti, PQExpBuffer buffer);
267 static bool nonemptyReloptions(const char *reloptions);
268 static void appendReloptionsArrayAH(PQExpBuffer buffer, const char *reloptions,
269  const char *prefix, Archive *fout);
270 static char *get_synchronized_snapshot(Archive *fout);
271 static void setupDumpWorker(Archive *AHX);
272 
273 
274 int
275 main(int argc, char **argv)
276 {
277  int c;
278  const char *filename = NULL;
279  const char *format = "p";
280  TableInfo *tblinfo;
281  int numTables;
282  DumpableObject **dobjs;
283  int numObjs;
284  DumpableObject *boundaryObjs;
285  int i;
286  int optindex;
287  RestoreOptions *ropt;
288  Archive *fout; /* the script file */
289  const char *dumpencoding = NULL;
290  const char *dumpsnapshot = NULL;
291  char *use_role = NULL;
292  int numWorkers = 1;
293  trivalue prompt_password = TRI_DEFAULT;
294  int compressLevel = -1;
295  int plainText = 0;
296  ArchiveFormat archiveFormat = archUnknown;
297  ArchiveMode archiveMode;
298 
299  static DumpOptions dopt;
300 
301  static struct option long_options[] = {
302  {"data-only", no_argument, NULL, 'a'},
303  {"blobs", no_argument, NULL, 'b'},
304  {"no-blobs", no_argument, NULL, 'B'},
305  {"clean", no_argument, NULL, 'c'},
306  {"create", no_argument, NULL, 'C'},
307  {"dbname", required_argument, NULL, 'd'},
308  {"file", required_argument, NULL, 'f'},
309  {"format", required_argument, NULL, 'F'},
310  {"host", required_argument, NULL, 'h'},
311  {"jobs", 1, NULL, 'j'},
312  {"no-reconnect", no_argument, NULL, 'R'},
313  {"oids", no_argument, NULL, 'o'},
314  {"no-owner", no_argument, NULL, 'O'},
315  {"port", required_argument, NULL, 'p'},
316  {"schema", required_argument, NULL, 'n'},
317  {"exclude-schema", required_argument, NULL, 'N'},
318  {"schema-only", no_argument, NULL, 's'},
319  {"superuser", required_argument, NULL, 'S'},
320  {"table", required_argument, NULL, 't'},
321  {"exclude-table", required_argument, NULL, 'T'},
322  {"no-password", no_argument, NULL, 'w'},
323  {"password", no_argument, NULL, 'W'},
324  {"username", required_argument, NULL, 'U'},
325  {"verbose", no_argument, NULL, 'v'},
326  {"no-privileges", no_argument, NULL, 'x'},
327  {"no-acl", no_argument, NULL, 'x'},
328  {"compress", required_argument, NULL, 'Z'},
329  {"encoding", required_argument, NULL, 'E'},
330  {"help", no_argument, NULL, '?'},
331  {"version", no_argument, NULL, 'V'},
332 
333  /*
334  * the following options don't have an equivalent short option letter
335  */
336  {"attribute-inserts", no_argument, &dopt.column_inserts, 1},
337  {"binary-upgrade", no_argument, &dopt.binary_upgrade, 1},
338  {"column-inserts", no_argument, &dopt.column_inserts, 1},
339  {"disable-dollar-quoting", no_argument, &dopt.disable_dollar_quoting, 1},
340  {"disable-triggers", no_argument, &dopt.disable_triggers, 1},
341  {"enable-row-security", no_argument, &dopt.enable_row_security, 1},
342  {"exclude-table-data", required_argument, NULL, 4},
343  {"if-exists", no_argument, &dopt.if_exists, 1},
344  {"inserts", no_argument, &dopt.dump_inserts, 1},
345  {"lock-wait-timeout", required_argument, NULL, 2},
346  {"no-tablespaces", no_argument, &dopt.outputNoTablespaces, 1},
347  {"quote-all-identifiers", no_argument, &quote_all_identifiers, 1},
348  {"role", required_argument, NULL, 3},
349  {"section", required_argument, NULL, 5},
350  {"serializable-deferrable", no_argument, &dopt.serializable_deferrable, 1},
351  {"snapshot", required_argument, NULL, 6},
352  {"strict-names", no_argument, &strict_names, 1},
353  {"use-set-session-authorization", no_argument, &dopt.use_setsessauth, 1},
354  {"no-publications", no_argument, &dopt.no_publications, 1},
355  {"no-security-labels", no_argument, &dopt.no_security_labels, 1},
356  {"no-synchronized-snapshots", no_argument, &dopt.no_synchronized_snapshots, 1},
357  {"no-unlogged-table-data", no_argument, &dopt.no_unlogged_table_data, 1},
358  {"no-subscriptions", no_argument, &dopt.no_subscriptions, 1},
359  {"no-sync", no_argument, NULL, 7},
360 
361  {NULL, 0, NULL, 0}
362  };
363 
364  set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_dump"));
365 
366  /*
367  * Initialize what we need for parallel execution, especially for thread
368  * support on Windows.
369  */
370  init_parallel_dump_utils();
371 
372  g_verbose = false;
373 
374  strcpy(g_comment_start, "-- ");
375  g_comment_end[0] = '\0';
376  strcpy(g_opaque_type, "opaque");
377 
378  progname = get_progname(argv[0]);
379 
380  if (argc > 1)
381  {
382  if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
383  {
384  help(progname);
385  exit_nicely(0);
386  }
387  if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
388  {
389  puts("pg_dump (PostgreSQL) " PG_VERSION);
390  exit_nicely(0);
391  }
392  }
393 
394  InitDumpOptions(&dopt);
395 
396  while ((c = getopt_long(argc, argv, "abBcCd:E:f:F:h:j:n:N:oOp:RsS:t:T:U:vwWxZ:",
397  long_options, &optindex)) != -1)
398  {
399  switch (c)
400  {
401  case 'a': /* Dump data only */
402  dopt.dataOnly = true;
403  break;
404 
405  case 'b': /* Dump blobs */
406  dopt.outputBlobs = true;
407  break;
408 
409  case 'B': /* Don't dump blobs */
410  dopt.dontOutputBlobs = true;
411  break;
412 
413  case 'c': /* clean (i.e., drop) schema prior to create */
414  dopt.outputClean = 1;
415  break;
416 
417  case 'C': /* Create DB */
418  dopt.outputCreateDB = 1;
419  break;
420 
421  case 'd': /* database name */
422  dopt.dbname = pg_strdup(optarg);
423  break;
424 
425  case 'E': /* Dump encoding */
426  dumpencoding = pg_strdup(optarg);
427  break;
428 
429  case 'f':
430  filename = pg_strdup(optarg);
431  break;
432 
433  case 'F':
434  format = pg_strdup(optarg);
435  break;
436 
437  case 'h': /* server host */
438  dopt.pghost = pg_strdup(optarg);
439  break;
440 
441  case 'j': /* number of dump jobs */
442  numWorkers = atoi(optarg);
443  break;
444 
445  case 'n': /* include schema(s) */
446  simple_string_list_append(&schema_include_patterns, optarg);
447  dopt.include_everything = false;
448  break;
449 
450  case 'N': /* exclude schema(s) */
451  simple_string_list_append(&schema_exclude_patterns, optarg);
452  break;
453 
454  case 'o': /* Dump oids */
455  dopt.oids = true;
456  break;
457 
458  case 'O': /* Don't reconnect to match owner */
459  dopt.outputNoOwner = 1;
460  break;
461 
462  case 'p': /* server port */
463  dopt.pgport = pg_strdup(optarg);
464  break;
465 
466  case 'R':
467  /* no-op, still accepted for backwards compatibility */
468  break;
469 
470  case 's': /* dump schema only */
471  dopt.schemaOnly = true;
472  break;
473 
474  case 'S': /* Username for superuser in plain text output */
475  dopt.outputSuperuser = pg_strdup(optarg);
476  break;
477 
478  case 't': /* include table(s) */
479  simple_string_list_append(&table_include_patterns, optarg);
480  dopt.include_everything = false;
481  break;
482 
483  case 'T': /* exclude table(s) */
484  simple_string_list_append(&table_exclude_patterns, optarg);
485  break;
486 
487  case 'U':
488  dopt.username = pg_strdup(optarg);
489  break;
490 
491  case 'v': /* verbose */
492  g_verbose = true;
493  break;
494 
495  case 'w':
496  prompt_password = TRI_NO;
497  break;
498 
499  case 'W':
500  prompt_password = TRI_YES;
501  break;
502 
503  case 'x': /* skip ACL dump */
504  dopt.aclsSkip = true;
505  break;
506 
507  case 'Z': /* Compression Level */
508  compressLevel = atoi(optarg);
509  if (compressLevel < 0 || compressLevel > 9)
510  {
511  write_msg(NULL, "compression level must be in range 0..9\n");
512  exit_nicely(1);
513  }
514  break;
515 
516  case 0:
517  /* This covers the long options. */
518  break;
519 
520  case 2: /* lock-wait-timeout */
521  dopt.lockWaitTimeout = pg_strdup(optarg);
522  break;
523 
524  case 3: /* SET ROLE */
525  use_role = pg_strdup(optarg);
526  break;
527 
528  case 4: /* exclude table(s) data */
529  simple_string_list_append(&tabledata_exclude_patterns, optarg);
530  break;
531 
532  case 5: /* section */
533  set_dump_section(optarg, &dopt.dumpSections);
534  break;
535 
536  case 6: /* snapshot */
537  dumpsnapshot = pg_strdup(optarg);
538  break;
539 
540  case 7: /* no-sync */
541  dosync = false;
542  break;
543 
544  default:
545  fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
546  exit_nicely(1);
547  }
548  }
549 
550  /*
551  * Non-option argument specifies database name as long as it wasn't
552  * already specified with -d / --dbname
553  */
554  if (optind < argc && dopt.dbname == NULL)
555  dopt.dbname = argv[optind++];
556 
557  /* Complain if any arguments remain */
558  if (optind < argc)
559  {
560  fprintf(stderr, _("%s: too many command-line arguments (first is \"%s\")\n"),
561  progname, argv[optind]);
562  fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
563  progname);
564  exit_nicely(1);
565  }
566 
567  /* --column-inserts implies --inserts */
568  if (dopt.column_inserts)
569  dopt.dump_inserts = 1;
570 
571  /*
572  * Binary upgrade mode implies dumping sequence data even in schema-only
573  * mode. This is not exposed as a separate option, but kept separate
574  * internally for clarity.
575  */
576  if (dopt.binary_upgrade)
577  dopt.sequence_data = 1;
578 
579  if (dopt.dataOnly && dopt.schemaOnly)
580  {
581  write_msg(NULL, "options -s/--schema-only and -a/--data-only cannot be used together\n");
582  exit_nicely(1);
583  }
584 
585  if (dopt.dataOnly && dopt.outputClean)
586  {
587  write_msg(NULL, "options -c/--clean and -a/--data-only cannot be used together\n");
588  exit_nicely(1);
589  }
590 
591  if (dopt.dump_inserts && dopt.oids)
592  {
593  write_msg(NULL, "options --inserts/--column-inserts and -o/--oids cannot be used together\n");
594  write_msg(NULL, "(The INSERT command cannot set OIDs.)\n");
595  exit_nicely(1);
596  }
597 
598  if (dopt.if_exists && !dopt.outputClean)
599  exit_horribly(NULL, "option --if-exists requires option -c/--clean\n");
600 
601  /* Identify archive format to emit */
602  archiveFormat = parseArchiveFormat(format, &archiveMode);
603 
604  /* archiveFormat specific setup */
605  if (archiveFormat == archNull)
606  plainText = 1;
607 
608  /* Custom and directory formats are compressed by default, others not */
609  if (compressLevel == -1)
610  {
611 #ifdef HAVE_LIBZ
612  if (archiveFormat == archCustom || archiveFormat == archDirectory)
613  compressLevel = Z_DEFAULT_COMPRESSION;
614  else
615 #endif
616  compressLevel = 0;
617  }
618 
619 #ifndef HAVE_LIBZ
620  if (compressLevel != 0)
621  write_msg(NULL, "WARNING: requested compression not available in this "
622  "installation -- archive will be uncompressed\n");
623  compressLevel = 0;
624 #endif
625 
626  /*
627  * On Windows we can only have at most MAXIMUM_WAIT_OBJECTS (= 64 usually)
628  * parallel jobs because that's the maximum limit for the
629  * WaitForMultipleObjects() call.
630  */
631  if (numWorkers <= 0
632 #ifdef WIN32
633  || numWorkers > MAXIMUM_WAIT_OBJECTS
634 #endif
635  )
636  exit_horribly(NULL, "invalid number of parallel jobs\n");
637 
638  /* Parallel backup only in the directory archive format so far */
639  if (archiveFormat != archDirectory && numWorkers > 1)
640  exit_horribly(NULL, "parallel backup only supported by the directory format\n");
641 
642  /* Open the output file */
643  fout = CreateArchive(filename, archiveFormat, compressLevel, dosync,
644  archiveMode, setupDumpWorker);
645 
646  /* Make dump options accessible right away */
647  SetArchiveOptions(fout, &dopt, NULL);
648 
649  /* Register the cleanup hook */
650  on_exit_close_archive(fout);
651 
652  /* Let the archiver know how noisy to be */
653  fout->verbose = g_verbose;
654 
655  /*
656  * We allow the server to be back to 8.0, and up to any minor release of
657  * our own major version. (See also version check in pg_dumpall.c.)
658  */
659  fout->minRemoteVersion = 80000;
660  fout->maxRemoteVersion = (PG_VERSION_NUM / 100) * 100 + 99;
661 
662  fout->numWorkers = numWorkers;
663 
664  /*
665  * Open the database using the Archiver, so it knows about it. Errors mean
666  * death.
667  */
668  ConnectDatabase(fout, dopt.dbname, dopt.pghost, dopt.pgport, dopt.username, prompt_password);
669  setup_connection(fout, dumpencoding, dumpsnapshot, use_role);
670 
671  /*
672  * Disable security label support if server version < v9.1.x (prevents
673  * access to nonexistent pg_seclabel catalog)
674  */
675  if (fout->remoteVersion < 90100)
676  dopt.no_security_labels = 1;
677 
678  /*
679  * On hot standby slaves, never try to dump unlogged table data, since it
680  * will just throw an error.
681  */
682  if (fout->isStandby)
683  dopt.no_unlogged_table_data = true;
684 
685  /* Select the appropriate subquery to convert user IDs to names */
686  if (fout->remoteVersion >= 80100)
687  username_subquery = "SELECT rolname FROM pg_catalog.pg_roles WHERE oid =";
688  else
689  username_subquery = "SELECT usename FROM pg_catalog.pg_user WHERE usesysid =";
690 
691  /* check the version for the synchronized snapshots feature */
692  if (numWorkers > 1 && fout->remoteVersion < 90200
693  && !dopt.no_synchronized_snapshots)
695  "Synchronized snapshots are not supported by this server version.\n"
696  "Run with --no-synchronized-snapshots instead if you do not need\n"
697  "synchronized snapshots.\n");
698 
699  /* check the version when a snapshot is explicitly specified by user */
700  if (dumpsnapshot && fout->remoteVersion < 90200)
702  "Exported snapshots are not supported by this server version.\n");
703 
704  /*
705  * Find the last built-in OID, if needed (prior to 8.1)
706  *
707  * With 8.1 and above, we can just use FirstNormalObjectId - 1.
708  */
709  if (fout->remoteVersion < 80100)
710  g_last_builtin_oid = findLastBuiltinOid_V71(fout,
711  PQdb(GetConnection(fout)));
712  else
713  g_last_builtin_oid = FirstNormalObjectId - 1;
714 
715  if (g_verbose)
716  write_msg(NULL, "last built-in OID is %u\n", g_last_builtin_oid);
717 
718  /* Expand schema selection patterns into OID lists */
719  if (schema_include_patterns.head != NULL)
720  {
721  expand_schema_name_patterns(fout, &schema_include_patterns,
722  &schema_include_oids,
723  strict_names);
724  if (schema_include_oids.head == NULL)
725  exit_horribly(NULL, "no matching schemas were found\n");
726  }
727  expand_schema_name_patterns(fout, &schema_exclude_patterns,
728  &schema_exclude_oids,
729  false);
730  /* non-matching exclusion patterns aren't an error */
731 
732  /* Expand table selection patterns into OID lists */
733  if (table_include_patterns.head != NULL)
734  {
735  expand_table_name_patterns(fout, &table_include_patterns,
736  &table_include_oids,
737  strict_names);
738  if (table_include_oids.head == NULL)
739  exit_horribly(NULL, "no matching tables were found\n");
740  }
741  expand_table_name_patterns(fout, &table_exclude_patterns,
742  &table_exclude_oids,
743  false);
744 
745  expand_table_name_patterns(fout, &tabledata_exclude_patterns,
746  &tabledata_exclude_oids,
747  false);
748 
749  /* non-matching exclusion patterns aren't an error */
750 
751  /*
752  * Dumping blobs is the default for dumps where an inclusion switch is not
753  * used (an "include everything" dump). -B can be used to exclude blobs
754  * from those dumps. -b can be used to include blobs even when an
755  * inclusion switch is used.
756  *
757  * -s means "schema only" and blobs are data, not schema, so we never
758  * include blobs when -s is used.
759  */
760  if (dopt.include_everything && !dopt.schemaOnly && !dopt.dontOutputBlobs)
761  dopt.outputBlobs = true;
762 
763  /*
764  * Now scan the database and create DumpableObject structs for all the
765  * objects we intend to dump.
766  */
767  tblinfo = getSchemaData(fout, &numTables);
768 
769  if (fout->remoteVersion < 80400)
770  guessConstraintInheritance(tblinfo, numTables);
771 
772  if (!dopt.schemaOnly)
773  {
774  getTableData(&dopt, tblinfo, numTables, dopt.oids, 0);
775  buildMatViewRefreshDependencies(fout);
776  if (dopt.dataOnly)
777  getTableDataFKConstraints();
778  }
779 
780  if (dopt.schemaOnly && dopt.sequence_data)
781  getTableData(&dopt, tblinfo, numTables, dopt.oids, RELKIND_SEQUENCE);
782 
783  /*
784  * In binary-upgrade mode, we do not have to worry about the actual blob
785  * data or the associated metadata that resides in the pg_largeobject and
786  * pg_largeobject_metadata tables, respectively.
787  *
788  * However, we do need to collect blob information as there may be
789  * comments or other information on blobs that we do need to dump out.
790  */
791  if (dopt.outputBlobs || dopt.binary_upgrade)
792  getBlobs(fout);
793 
794  /*
795  * Collect dependency data to assist in ordering the objects.
796  */
797  getDependencies(fout);
798 
799  /* Lastly, create dummy objects to represent the section boundaries */
800  boundaryObjs = createBoundaryObjects();
801 
802  /* Get pointers to all the known DumpableObjects */
803  getDumpableObjects(&dobjs, &numObjs);
804 
805  /*
806  * Add dummy dependencies to enforce the dump section ordering.
807  */
808  addBoundaryDependencies(dobjs, numObjs, boundaryObjs);
809 
810  /*
811  * Sort the objects into a safe dump order (no forward references).
812  *
813  * We rely on dependency information to help us determine a safe order, so
814  * the initial sort is mostly for cosmetic purposes: we sort by name to
815  * ensure that logically identical schemas will dump identically.
816  */
817  sortDumpableObjectsByTypeName(dobjs, numObjs);
818 
819  /* If we do a parallel dump, we want the largest tables to go first */
820  if (archiveFormat == archDirectory && numWorkers > 1)
821  sortDataAndIndexObjectsBySize(dobjs, numObjs);
822 
823  sortDumpableObjects(dobjs, numObjs,
824  boundaryObjs[0].dumpId, boundaryObjs[1].dumpId);
825 
826  /*
827  * Create archive TOC entries for all the objects to be dumped, in a safe
828  * order.
829  */
830 
831  /* First the special ENCODING and STDSTRINGS entries. */
832  dumpEncoding(fout);
833  dumpStdStrings(fout);
834 
835  /* The database item is always next, unless we don't want it at all */
836  if (dopt.include_everything && !dopt.dataOnly)
837  dumpDatabase(fout);
838 
839  /* Now the rearrangeable objects. */
840  for (i = 0; i < numObjs; i++)
841  dumpDumpableObject(fout, dobjs[i]);
842 
843  /*
844  * Set up options info to ensure we dump what we want.
845  */
846  ropt = NewRestoreOptions();
847  ropt->filename = filename;
848 
849  /* if you change this list, see dumpOptionsFromRestoreOptions */
850  ropt->dropSchema = dopt.outputClean;
851  ropt->dataOnly = dopt.dataOnly;
852  ropt->schemaOnly = dopt.schemaOnly;
853  ropt->if_exists = dopt.if_exists;
854  ropt->column_inserts = dopt.column_inserts;
855  ropt->dumpSections = dopt.dumpSections;
856  ropt->aclsSkip = dopt.aclsSkip;
857  ropt->superuser = dopt.outputSuperuser;
858  ropt->createDB = dopt.outputCreateDB;
859  ropt->noOwner = dopt.outputNoOwner;
860  ropt->noTablespace = dopt.outputNoTablespaces;
861  ropt->disable_triggers = dopt.disable_triggers;
862  ropt->use_setsessauth = dopt.use_setsessauth;
863  ropt->disable_dollar_quoting = dopt.disable_dollar_quoting;
864  ropt->dump_inserts = dopt.dump_inserts;
865  ropt->no_publications = dopt.no_publications;
866  ropt->no_security_labels = dopt.no_security_labels;
867  ropt->no_subscriptions = dopt.no_subscriptions;
868  ropt->lockWaitTimeout = dopt.lockWaitTimeout;
869  ropt->include_everything = dopt.include_everything;
870  ropt->enable_row_security = dopt.enable_row_security;
871  ropt->sequence_data = dopt.sequence_data;
872  ropt->binary_upgrade = dopt.binary_upgrade;
873 
874  if (compressLevel == -1)
875  ropt->compression = 0;
876  else
877  ropt->compression = compressLevel;
878 
879  ropt->suppressDumpWarnings = true; /* We've already shown them */
880 
881  SetArchiveOptions(fout, &dopt, ropt);
882 
883  /* Mark which entries should be output */
884  ProcessArchiveRestoreOptions(fout);
885 
886  /*
887  * The archive's TOC entries are now marked as to which ones will actually
888  * be output, so we can set up their dependency lists properly. This isn't
889  * necessary for plain-text output, though.
890  */
891  if (!plainText)
892  BuildArchiveDependencies(fout);
893 
894  /*
895  * And finally we can do the actual output.
896  *
897  * Note: for non-plain-text output formats, the output file is written
898  * inside CloseArchive(). This is, um, bizarre; but not worth changing
899  * right now.
900  */
901  if (plainText)
902  RestoreArchive(fout);
903 
904  CloseArchive(fout);
905 
906  exit_nicely(0);
907 }
908 
909 
910 static void
911 help(const char *progname)
912 {
913  printf(_("%s dumps a database as a text file or to other formats.\n\n"), progname);
914  printf(_("Usage:\n"));
915  printf(_(" %s [OPTION]... [DBNAME]\n"), progname);
916 
917  printf(_("\nGeneral options:\n"));
918  printf(_(" -f, --file=FILENAME output file or directory name\n"));
919  printf(_(" -F, --format=c|d|t|p output file format (custom, directory, tar,\n"
920  " plain text (default))\n"));
921  printf(_(" -j, --jobs=NUM use this many parallel jobs to dump\n"));
922  printf(_(" -v, --verbose verbose mode\n"));
923  printf(_(" -V, --version output version information, then exit\n"));
924  printf(_(" -Z, --compress=0-9 compression level for compressed formats\n"));
925  printf(_(" --lock-wait-timeout=TIMEOUT fail after waiting TIMEOUT for a table lock\n"));
926  printf(_(" --no-sync do not wait for changes to be written safely to disk\n"));
927  printf(_(" -?, --help show this help, then exit\n"));
928 
929  printf(_("\nOptions controlling the output content:\n"));
930  printf(_(" -a, --data-only dump only the data, not the schema\n"));
931  printf(_(" -b, --blobs include large objects in dump\n"));
932  printf(_(" -B, --no-blobs exclude large objects in dump\n"));
933  printf(_(" -c, --clean clean (drop) database objects before recreating\n"));
934  printf(_(" -C, --create include commands to create database in dump\n"));
935  printf(_(" -E, --encoding=ENCODING dump the data in encoding ENCODING\n"));
936  printf(_(" -n, --schema=SCHEMA dump the named schema(s) only\n"));
937  printf(_(" -N, --exclude-schema=SCHEMA do NOT dump the named schema(s)\n"));
938  printf(_(" -o, --oids include OIDs in dump\n"));
939  printf(_(" -O, --no-owner skip restoration of object ownership in\n"
940  " plain-text format\n"));
941  printf(_(" -s, --schema-only dump only the schema, no data\n"));
942  printf(_(" -S, --superuser=NAME superuser user name to use in plain-text format\n"));
943  printf(_(" -t, --table=TABLE dump the named table(s) only\n"));
944  printf(_(" -T, --exclude-table=TABLE do NOT dump the named table(s)\n"));
945  printf(_(" -x, --no-privileges do not dump privileges (grant/revoke)\n"));
946  printf(_(" --binary-upgrade for use by upgrade utilities only\n"));
947  printf(_(" --column-inserts dump data as INSERT commands with column names\n"));
948  printf(_(" --disable-dollar-quoting disable dollar quoting, use SQL standard quoting\n"));
949  printf(_(" --disable-triggers disable triggers during data-only restore\n"));
950  printf(_(" --enable-row-security enable row security (dump only content user has\n"
951  " access to)\n"));
952  printf(_(" --exclude-table-data=TABLE do NOT dump data for the named table(s)\n"));
953  printf(_(" --if-exists use IF EXISTS when dropping objects\n"));
954  printf(_(" --inserts dump data as INSERT commands, rather than COPY\n"));
955  printf(_(" --no-publications do not dump publications\n"));
956  printf(_(" --no-security-labels do not dump security label assignments\n"));
957  printf(_(" --no-subscriptions do not dump subscriptions\n"));
958  printf(_(" --no-synchronized-snapshots do not use synchronized snapshots in parallel jobs\n"));
959  printf(_(" --no-tablespaces do not dump tablespace assignments\n"));
960  printf(_(" --no-unlogged-table-data do not dump unlogged table data\n"));
961  printf(_(" --quote-all-identifiers quote all identifiers, even if not key words\n"));
962  printf(_(" --section=SECTION dump named section (pre-data, data, or post-data)\n"));
963  printf(_(" --serializable-deferrable wait until the dump can run without anomalies\n"));
964  printf(_(" --snapshot=SNAPSHOT use given snapshot for the dump\n"));
965  printf(_(" --strict-names require table and/or schema include patterns to\n"
966  " match at least one entity each\n"));
967  printf(_(" --use-set-session-authorization\n"
968  " use SET SESSION AUTHORIZATION commands instead of\n"
969  " ALTER OWNER commands to set ownership\n"));
970 
971  printf(_("\nConnection options:\n"));
972  printf(_(" -d, --dbname=DBNAME database to dump\n"));
973  printf(_(" -h, --host=HOSTNAME database server host or socket directory\n"));
974  printf(_(" -p, --port=PORT database server port number\n"));
975  printf(_(" -U, --username=NAME connect as specified database user\n"));
976  printf(_(" -w, --no-password never prompt for password\n"));
977  printf(_(" -W, --password force password prompt (should happen automatically)\n"));
978  printf(_(" --role=ROLENAME do SET ROLE before dump\n"));
979 
980  printf(_("\nIf no database name is supplied, then the PGDATABASE environment\n"
981  "variable value is used.\n\n"));
982  printf(_("Report bugs to <pgsql-bugs@postgresql.org>.\n"));
983 }
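/*
 * For illustration, a few typical invocations of the options parsed and
 * documented above (the database name "mydb" and the schema pattern are
 * hypothetical):
 *
 *   pg_dump mydb > mydb.sql                -- plain-text script on stdout
 *   pg_dump -Fc -Z 5 -f mydb.dump mydb     -- custom format, compression level 5
 *   pg_dump -Fd -j 4 -f dumpdir mydb       -- directory format, 4 parallel workers
 *   pg_dump -n 'app_*' --strict-names mydb -- only schemas matching the pattern
 */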
984 
985 static void
986 setup_connection(Archive *AH, const char *dumpencoding,
987  const char *dumpsnapshot, char *use_role)
988 {
989  DumpOptions *dopt = AH->dopt;
990  PGconn *conn = GetConnection(AH);
991  const char *std_strings;
992 
993  /*
994  * Set the client encoding if requested.
995  */
996  if (dumpencoding)
997  {
998  if (PQsetClientEncoding(conn, dumpencoding) < 0)
999  exit_horribly(NULL, "invalid client encoding \"%s\" specified\n",
1000  dumpencoding);
1001  }
1002 
1003  /*
1004  * Get the active encoding and the standard_conforming_strings setting, so
1005  * we know how to escape strings.
1006  */
1007  AH->encoding = PQclientEncoding(conn);
1008 
1009  std_strings = PQparameterStatus(conn, "standard_conforming_strings");
1010  AH->std_strings = (std_strings && strcmp(std_strings, "on") == 0);
1011 
1012  /*
1013  * Set the role if requested. In a parallel dump worker, we'll be passed
1014  * use_role == NULL, but AH->use_role is already set (if user specified it
1015  * originally) and we should use that.
1016  */
1017  if (!use_role && AH->use_role)
1018  use_role = AH->use_role;
1019 
1020  /* Set the role if requested */
1021  if (use_role && AH->remoteVersion >= 80100)
1022  {
1023  PQExpBuffer query = createPQExpBuffer();
1024 
1025  appendPQExpBuffer(query, "SET ROLE %s", fmtId(use_role));
1026  ExecuteSqlStatement(AH, query->data);
1027  destroyPQExpBuffer(query);
1028 
1029  /* save it for possible later use by parallel workers */
1030  if (!AH->use_role)
1031  AH->use_role = pg_strdup(use_role);
1032  }
1033 
1034  /* Set the datestyle to ISO to ensure the dump's portability */
1035  ExecuteSqlStatement(AH, "SET DATESTYLE = ISO");
1036 
1037  /* Likewise, avoid using sql_standard intervalstyle */
1038  if (AH->remoteVersion >= 80400)
1039  ExecuteSqlStatement(AH, "SET INTERVALSTYLE = POSTGRES");
1040 
1041  /*
1042  * Set extra_float_digits so that we can dump float data exactly (given
1043  * correctly implemented float I/O code, anyway)
1044  */
1045  if (AH->remoteVersion >= 90000)
1046  ExecuteSqlStatement(AH, "SET extra_float_digits TO 3");
1047  else
1048  ExecuteSqlStatement(AH, "SET extra_float_digits TO 2");
1049 
1050  /*
1051  * If synchronized scanning is supported, disable it, to prevent
1052  * unpredictable changes in row ordering across a dump and reload.
1053  */
1054  if (AH->remoteVersion >= 80300)
1055  ExecuteSqlStatement(AH, "SET synchronize_seqscans TO off");
1056 
1057  /*
1058  * Disable timeouts if supported.
1059  */
1060  ExecuteSqlStatement(AH, "SET statement_timeout = 0");
1061  if (AH->remoteVersion >= 90300)
1062  ExecuteSqlStatement(AH, "SET lock_timeout = 0");
1063  if (AH->remoteVersion >= 90600)
1064  ExecuteSqlStatement(AH, "SET idle_in_transaction_session_timeout = 0");
1065 
1066  /*
1067  * Quote all identifiers, if requested.
1068  */
1069  if (quote_all_identifiers && AH->remoteVersion >= 90100)
1070  ExecuteSqlStatement(AH, "SET quote_all_identifiers = true");
1071 
1072  /*
1073  * Adjust row-security mode, if supported.
1074  */
1075  if (AH->remoteVersion >= 90500)
1076  {
1077  if (dopt->enable_row_security)
1078  ExecuteSqlStatement(AH, "SET row_security = on");
1079  else
1080  ExecuteSqlStatement(AH, "SET row_security = off");
1081  }
1082 
1083  /*
1084  * Start transaction-snapshot mode transaction to dump consistent data.
1085  */
1086  ExecuteSqlStatement(AH, "BEGIN");
1087  if (AH->remoteVersion >= 90100)
1088  {
1089  /*
1090  * To support the combination of serializable_deferrable with the jobs
1091  * option we use REPEATABLE READ for the worker connections that are
1092  * passed a snapshot. As long as the snapshot is acquired in a
1093  * SERIALIZABLE, READ ONLY, DEFERRABLE transaction, its use within a
1094  * REPEATABLE READ transaction provides the appropriate integrity
1095  * guarantees. This is a kluge, but safe for back-patching.
1096  */
1097  if (dopt->serializable_deferrable && AH->sync_snapshot_id == NULL)
1099  "SET TRANSACTION ISOLATION LEVEL "
1100  "SERIALIZABLE, READ ONLY, DEFERRABLE");
1101  else
1103  "SET TRANSACTION ISOLATION LEVEL "
1104  "REPEATABLE READ, READ ONLY");
1105  }
1106  else
1107  {
1109  "SET TRANSACTION ISOLATION LEVEL "
1110  "SERIALIZABLE, READ ONLY");
1111  }
1112 
1113  /*
1114  * If user specified a snapshot to use, select that. In a parallel dump
1115  * worker, we'll be passed dumpsnapshot == NULL, but AH->sync_snapshot_id
1116  * is already set (if the server can handle it) and we should use that.
1117  */
1118  if (dumpsnapshot)
1119  AH->sync_snapshot_id = pg_strdup(dumpsnapshot);
1120 
1121  if (AH->sync_snapshot_id)
1122  {
1123  PQExpBuffer query = createPQExpBuffer();
1124 
1125  appendPQExpBuffer(query, "SET TRANSACTION SNAPSHOT ");
1126  appendStringLiteralConn(query, AH->sync_snapshot_id, conn);
1127  ExecuteSqlStatement(AH, query->data);
1128  destroyPQExpBuffer(query);
1129  }
1130  else if (AH->numWorkers > 1 &&
1131  AH->remoteVersion >= 90200 &&
1132  !dopt->no_synchronized_snapshots)
1133  {
1134  if (AH->isStandby)
1136  "Synchronized snapshots are not supported on standby servers.\n"
1137  "Run with --no-synchronized-snapshots instead if you do not need\n"
1138  "synchronized snapshots.\n");
1139 
1140 
1141  AH->sync_snapshot_id = get_synchronized_snapshot(AH);
1142  }
1143 }
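/*
 * For a >= 9.1 server without --serializable-deferrable, the statements sent
 * by setup_connection() above boil down to roughly:
 *
 *   BEGIN;
 *   SET TRANSACTION ISOLATION LEVEL REPEATABLE READ, READ ONLY;
 *   SET TRANSACTION SNAPSHOT '...';   -- only if a synchronized snapshot is in use
 */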
1144 
1145 /* Set up connection for a parallel worker process */
1146 static void
1147 setupDumpWorker(Archive *AH)
1148 {
1149  /*
1150  * We want to re-select all the same values the master connection is
1151  * using. We'll have inherited directly-usable values in
1152  * AH->sync_snapshot_id and AH->use_role, but we need to translate the
1153  * inherited encoding value back to a string to pass to setup_connection.
1154  */
1155  setup_connection(AH,
1156  pg_encoding_to_char(AH->encoding),
1157  NULL,
1158  NULL);
1159 }
1160 
1161 static char *
1162 get_synchronized_snapshot(Archive *fout)
1163 {
1164  char *query = "SELECT pg_catalog.pg_export_snapshot()";
1165  char *result;
1166  PGresult *res;
1167 
1168  res = ExecuteSqlQueryForSingleRow(fout, query);
1169  result = pg_strdup(PQgetvalue(res, 0, 0));
1170  PQclear(res);
1171 
1172  return result;
1173 }
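/*
 * pg_export_snapshot() returns a snapshot identifier as text (for example,
 * something of the form '00000003-0000001B-1'); parallel workers later replay
 * it with SET TRANSACTION SNAPSHOT in setup_connection().
 */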
1174 
1175 static ArchiveFormat
1176 parseArchiveFormat(const char *format, ArchiveMode *mode)
1177 {
1178  ArchiveFormat archiveFormat;
1179 
1180  *mode = archModeWrite;
1181 
1182  if (pg_strcasecmp(format, "a") == 0 || pg_strcasecmp(format, "append") == 0)
1183  {
1184  /* This is used by pg_dumpall, and is not documented */
1185  archiveFormat = archNull;
1186  *mode = archModeAppend;
1187  }
1188  else if (pg_strcasecmp(format, "c") == 0)
1189  archiveFormat = archCustom;
1190  else if (pg_strcasecmp(format, "custom") == 0)
1191  archiveFormat = archCustom;
1192  else if (pg_strcasecmp(format, "d") == 0)
1193  archiveFormat = archDirectory;
1194  else if (pg_strcasecmp(format, "directory") == 0)
1195  archiveFormat = archDirectory;
1196  else if (pg_strcasecmp(format, "p") == 0)
1197  archiveFormat = archNull;
1198  else if (pg_strcasecmp(format, "plain") == 0)
1199  archiveFormat = archNull;
1200  else if (pg_strcasecmp(format, "t") == 0)
1201  archiveFormat = archTar;
1202  else if (pg_strcasecmp(format, "tar") == 0)
1203  archiveFormat = archTar;
1204  else
1205  exit_horribly(NULL, "invalid output format \"%s\" specified\n", format);
1206  return archiveFormat;
1207 }
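/*
 * Summary of the -F/--format values accepted above:
 *   p / plain     -> archNull      (SQL script output)
 *   c / custom    -> archCustom
 *   d / directory -> archDirectory (the only format that supports -j parallelism)
 *   t / tar       -> archTar
 *   a / append    -> archNull in append mode (undocumented, used by pg_dumpall)
 */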
1208 
1209 /*
1210  * Find the OIDs of all schemas matching the given list of patterns,
1211  * and append them to the given OID list.
1212  */
1213 static void
1214 expand_schema_name_patterns(Archive *fout,
1215  SimpleStringList *patterns,
1216  SimpleOidList *oids,
1217  bool strict_names)
1218 {
1219  PQExpBuffer query;
1220  PGresult *res;
1221  SimpleStringListCell *cell;
1222  int i;
1223 
1224  if (patterns->head == NULL)
1225  return; /* nothing to do */
1226 
1227  query = createPQExpBuffer();
1228 
1229  /*
1230  * The loop below runs multiple SELECTs, which might sometimes result in
1231  * duplicate entries in the OID list, but we don't care.
1232  */
1233 
1234  for (cell = patterns->head; cell; cell = cell->next)
1235  {
1236  appendPQExpBuffer(query,
1237  "SELECT oid FROM pg_catalog.pg_namespace n\n");
1238  processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1239  false, NULL, "n.nspname", NULL, NULL);
1240 
1241  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1242  if (strict_names && PQntuples(res) == 0)
1243  exit_horribly(NULL, "no matching schemas were found for pattern \"%s\"\n", cell->val);
1244 
1245  for (i = 0; i < PQntuples(res); i++)
1246  {
1247  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1248  }
1249 
1250  PQclear(res);
1251  resetPQExpBuffer(query);
1252  }
1253 
1254  destroyPQExpBuffer(query);
1255 }
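/*
 * For a pattern such as 'app*' (hypothetical), processSQLNamePattern() extends
 * the query built above into roughly:
 *
 *   SELECT oid FROM pg_catalog.pg_namespace n
 *   WHERE n.nspname ~ '^(app.*)$'
 */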
1256 
1257 /*
1258  * Find the OIDs of all tables matching the given list of patterns,
1259  * and append them to the given OID list.
1260  */
1261 static void
1262 expand_table_name_patterns(Archive *fout,
1263  SimpleStringList *patterns, SimpleOidList *oids,
1264  bool strict_names)
1265 {
1266  PQExpBuffer query;
1267  PGresult *res;
1268  SimpleStringListCell *cell;
1269  int i;
1270 
1271  if (patterns->head == NULL)
1272  return; /* nothing to do */
1273 
1274  query = createPQExpBuffer();
1275 
1276  /*
1277  * this might sometimes result in duplicate entries in the OID list, but
1278  * we don't care.
1279  */
1280 
1281  for (cell = patterns->head; cell; cell = cell->next)
1282  {
1283  appendPQExpBuffer(query,
1284  "SELECT c.oid"
1285  "\nFROM pg_catalog.pg_class c"
1286  "\n LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace"
1287  "\nWHERE c.relkind in ('%c', '%c', '%c', '%c', '%c', '%c')\n",
1288  RELKIND_RELATION, RELKIND_SEQUENCE, RELKIND_VIEW,
1289  RELKIND_MATVIEW, RELKIND_FOREIGN_TABLE,
1290  RELKIND_PARTITIONED_TABLE);
1291  processSQLNamePattern(GetConnection(fout), query, cell->val, true,
1292  false, "n.nspname", "c.relname", NULL,
1293  "pg_catalog.pg_table_is_visible(c.oid)");
1294 
1295  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1296  if (strict_names && PQntuples(res) == 0)
1297  exit_horribly(NULL, "no matching tables were found for pattern \"%s\"\n", cell->val);
1298 
1299  for (i = 0; i < PQntuples(res); i++)
1300  {
1301  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1302  }
1303 
1304  PQclear(res);
1305  resetPQExpBuffer(query);
1306  }
1307 
1308  destroyPQExpBuffer(query);
1309 }
1310 
1311 /*
1312  * checkExtensionMembership
1313  * Determine whether object is an extension member, and if so,
1314  * record an appropriate dependency and set the object's dump flag.
1315  *
1316  * It's important to call this for each object that could be an extension
1317  * member. Generally, we integrate this with determining the object's
1318  * to-be-dumped-ness, since extension membership overrides other rules for that.
1319  *
1320  * Returns true if object is an extension member, else false.
1321  */
1322 static bool
1323 checkExtensionMembership(DumpableObject *dobj, Archive *fout)
1324 {
1325  ExtensionInfo *ext = findOwningExtension(dobj->catId);
1326 
1327  if (ext == NULL)
1328  return false;
1329 
1330  dobj->ext_member = true;
1331 
1332  /* Record dependency so that getDependencies needn't deal with that */
1333  addObjectDependency(dobj, ext->dobj.dumpId);
1334 
1335  /*
1336  * In 9.6 and above, mark the member object to have any non-initial ACL,
1337  * policies, and security labels dumped.
1338  *
1339  * Note that any initial ACLs (see pg_init_privs) will be removed when we
1340  * extract the information about the object. We don't provide support for
1341  * initial policies and security labels and it seems unlikely for those to
1342  * ever exist, but we may have to revisit this later.
1343  *
1344  * Prior to 9.6, we do not include any extension member components.
1345  *
1346  * In binary upgrades, we still dump all components of the members
1347  * individually, since the idea is to exactly reproduce the database
1348  * contents rather than replace the extension contents with something
1349  * different.
1350  */
1351  if (fout->dopt->binary_upgrade)
1352  dobj->dump = ext->dobj.dump;
1353  else
1354  {
1355  if (fout->remoteVersion < 90600)
1356  dobj->dump = DUMP_COMPONENT_NONE;
1357  else
1358  dobj->dump = ext->dobj.dump_contains & (DUMP_COMPONENT_ACL |
1359  DUMP_COMPONENT_SECLABEL |
1360  DUMP_COMPONENT_POLICY);
1361  }
1362 
1363  return true;
1364 }
1365 
1366 /*
1367  * selectDumpableNamespace: policy-setting subroutine
1368  * Mark a namespace as to be dumped or not
1369  */
1370 static void
1371 selectDumpableNamespace(NamespaceInfo *nsinfo, Archive *fout)
1372 {
1373  /*
1374  * If specific tables are being dumped, do not dump any complete
1375  * namespaces. If specific namespaces are being dumped, dump just those
1376  * namespaces. Otherwise, dump all non-system namespaces.
1377  */
1378  if (table_include_oids.head != NULL)
1379  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1380  else if (schema_include_oids.head != NULL)
1381  nsinfo->dobj.dump_contains = nsinfo->dobj.dump =
1382  simple_oid_list_member(&schema_include_oids,
1383  nsinfo->dobj.catId.oid) ?
1384  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1385  else if (fout->remoteVersion >= 90600 &&
1386  strcmp(nsinfo->dobj.name, "pg_catalog") == 0)
1387  {
1388  /*
1389  * In 9.6 and above, we dump out any ACLs defined in pg_catalog, if
1390  * they are interesting (and not the original ACLs which were set at
1391  * initdb time, see pg_init_privs).
1392  */
1393  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
1394  }
1395  else if (strncmp(nsinfo->dobj.name, "pg_", 3) == 0 ||
1396  strcmp(nsinfo->dobj.name, "information_schema") == 0)
1397  {
1398  /* Other system schemas don't get dumped */
1399  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1400  }
1401  else
1402  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ALL;
1403 
1404  /*
1405  * In any case, a namespace can be excluded by an exclusion switch
1406  */
1407  if (nsinfo->dobj.dump_contains &&
1408  simple_oid_list_member(&schema_exclude_oids,
1409  nsinfo->dobj.catId.oid))
1410  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1411 
1412  /*
1413  * If the schema belongs to an extension, allow extension membership to
1414  * override the dump decision for the schema itself. However, this does
1415  * not change dump_contains, so this won't change what we do with objects
1416  * within the schema. (If they belong to the extension, they'll get
1417  * suppressed by it, otherwise not.)
1418  */
1419  (void) checkExtensionMembership(&nsinfo->dobj, fout);
1420 }
1421 
1422 /*
1423  * selectDumpableTable: policy-setting subroutine
1424  * Mark a table as to be dumped or not
1425  */
1426 static void
1427 selectDumpableTable(TableInfo *tbinfo, Archive *fout)
1428 {
1429  if (checkExtensionMembership(&tbinfo->dobj, fout))
1430  return; /* extension membership overrides all else */
1431 
1432  /*
1433  * If specific tables are being dumped, dump just those tables; else, dump
1434  * according to the parent namespace's dump flag.
1435  */
1436  if (table_include_oids.head != NULL)
1437  tbinfo->dobj.dump = simple_oid_list_member(&table_include_oids,
1438  tbinfo->dobj.catId.oid) ?
1439  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1440  else
1441  tbinfo->dobj.dump = tbinfo->dobj.namespace->dobj.dump_contains;
1442 
1443  /*
1444  * In any case, a table can be excluded by an exclusion switch
1445  */
1446  if (tbinfo->dobj.dump &&
1447  simple_oid_list_member(&table_exclude_oids,
1448  tbinfo->dobj.catId.oid))
1449  tbinfo->dobj.dump = DUMP_COMPONENT_NONE;
1450 }
1451 
1452 /*
1453  * selectDumpableType: policy-setting subroutine
1454  * Mark a type as to be dumped or not
1455  *
1456  * If it's a table's rowtype or an autogenerated array type, we also apply a
1457  * special type code to facilitate sorting into the desired order. (We don't
1458  * want to consider those to be ordinary types because that would bring tables
1459  * up into the datatype part of the dump order.) We still set the object's
1460  * dump flag; that's not going to cause the dummy type to be dumped, but we
1461  * need it so that casts involving such types will be dumped correctly -- see
1462  * dumpCast. This means the flag should be set the same as for the underlying
1463  * object (the table or base type).
1464  */
1465 static void
1466 selectDumpableType(TypeInfo *tyinfo, Archive *fout)
1467 {
1468  /* skip complex types, except for standalone composite types */
1469  if (OidIsValid(tyinfo->typrelid) &&
1470  tyinfo->typrelkind != RELKIND_COMPOSITE_TYPE)
1471  {
1472  TableInfo *tytable = findTableByOid(tyinfo->typrelid);
1473 
1474  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1475  if (tytable != NULL)
1476  tyinfo->dobj.dump = tytable->dobj.dump;
1477  else
1478  tyinfo->dobj.dump = DUMP_COMPONENT_NONE;
1479  return;
1480  }
1481 
1482  /* skip auto-generated array types */
1483  if (tyinfo->isArray)
1484  {
1485  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1486 
1487  /*
1488  * Fall through to set the dump flag; we assume that the subsequent
1489  * rules will do the same thing as they would for the array's base
1490  * type. (We cannot reliably look up the base type here, since
1491  * getTypes may not have processed it yet.)
1492  */
1493  }
1494 
1495  if (checkExtensionMembership(&tyinfo->dobj, fout))
1496  return; /* extension membership overrides all else */
1497 
1498  /* Dump based on if the contents of the namespace are being dumped */
1499  tyinfo->dobj.dump = tyinfo->dobj.namespace->dobj.dump_contains;
1500 }
1501 
1502 /*
1503  * selectDumpableDefaultACL: policy-setting subroutine
1504  * Mark a default ACL as to be dumped or not
1505  *
1506  * For per-schema default ACLs, dump if the schema is to be dumped.
1507  * Otherwise dump if we are dumping "everything". Note that dataOnly
1508  * and aclsSkip are checked separately.
1509  */
1510 static void
1511 selectDumpableDefaultACL(DefaultACLInfo *dinfo, DumpOptions *dopt)
1512 {
1513  /* Default ACLs can't be extension members */
1514 
1515  if (dinfo->dobj.namespace)
1516  /* default ACLs are considered part of the namespace */
1517  dinfo->dobj.dump = dinfo->dobj.namespace->dobj.dump_contains;
1518  else
1519  dinfo->dobj.dump = dopt->include_everything ?
1520  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1521 }
1522 
1523 /*
1524  * selectDumpableCast: policy-setting subroutine
1525  * Mark a cast as to be dumped or not
1526  *
1527  * Casts do not belong to any particular namespace (since they haven't got
1528  * names), nor do they have identifiable owners. To distinguish user-defined
1529  * casts from built-in ones, we must resort to checking whether the cast's
1530  * OID is in the range reserved for initdb.
1531  */
1532 static void
1533 selectDumpableCast(CastInfo *cast, Archive *fout)
1534 {
1535  if (checkExtensionMembership(&cast->dobj, fout))
1536  return; /* extension membership overrides all else */
1537 
1538  /*
1539  * This would be DUMP_COMPONENT_ACL for from-initdb casts, but they do not
1540  * support ACLs currently.
1541  */
1542  if (cast->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1543  cast->dobj.dump = DUMP_COMPONENT_NONE;
1544  else
1545  cast->dobj.dump = fout->dopt->include_everything ?
1546  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1547 }
1548 
1549 /*
1550  * selectDumpableProcLang: policy-setting subroutine
1551  * Mark a procedural language as to be dumped or not
1552  *
1553  * Procedural languages do not belong to any particular namespace. To
1554  * identify built-in languages, we must resort to checking whether the
1555  * language's OID is in the range reserved for initdb.
1556  */
1557 static void
1558 selectDumpableProcLang(ProcLangInfo *plang, Archive *fout)
1559 {
1560  if (checkExtensionMembership(&plang->dobj, fout))
1561  return; /* extension membership overrides all else */
1562 
1563  /*
1564  * Only include procedural languages when we are dumping everything.
1565  *
1566  * For from-initdb procedural languages, only include ACLs, as we do for
1567  * the pg_catalog namespace. We need this because procedural languages do
1568  * not live in any namespace.
1569  */
1570  if (!fout->dopt->include_everything)
1571  plang->dobj.dump = DUMP_COMPONENT_NONE;
1572  else
1573  {
1574  if (plang->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1575  plang->dobj.dump = fout->remoteVersion < 90600 ?
1576  DUMP_COMPONENT_NONE : DUMP_COMPONENT_ACL;
1577  else
1578  plang->dobj.dump = DUMP_COMPONENT_ALL;
1579  }
1580 }
1581 
1582 /*
1583  * selectDumpableAccessMethod: policy-setting subroutine
1584  * Mark an access method as to be dumped or not
1585  *
1586  * Access methods do not belong to any particular namespace. To identify
1587  * built-in access methods, we must resort to checking whether the
1588  * method's OID is in the range reserved for initdb.
1589  */
1590 static void
1591 selectDumpableAccessMethod(AccessMethodInfo *method, Archive *fout)
1592 {
1593  if (checkExtensionMembership(&method->dobj, fout))
1594  return; /* extension membership overrides all else */
1595 
1596  /*
1597  * This would be DUMP_COMPONENT_ACL for from-initdb access methods, but
1598  * they do not support ACLs currently.
1599  */
1600  if (method->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1601  method->dobj.dump = DUMP_COMPONENT_NONE;
1602  else
1603  method->dobj.dump = fout->dopt->include_everything ?
1604  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1605 }
1606 
1607 /*
1608  * selectDumpableExtension: policy-setting subroutine
1609  * Mark an extension as to be dumped or not
1610  *
1611  * Normally, we dump all extensions, or none of them if include_everything
1612  * is false (i.e., a --schema or --table switch was given). However, in
1613  * binary-upgrade mode it's necessary to skip built-in extensions, since we
1614  * assume those will already be installed in the target database. We identify
1615  * such extensions by their having OIDs in the range reserved for initdb.
1616  */
1617 static void
1618 selectDumpableExtension(ExtensionInfo *extinfo, DumpOptions *dopt)
1619 {
1620  /*
1621  * Use DUMP_COMPONENT_ACL for from-initdb extensions, to allow users to
1622  * change permissions on those objects, if they wish to, and have those
1623  * changes preserved.
1624  */
1625  if (dopt->binary_upgrade && extinfo->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1626  extinfo->dobj.dump = extinfo->dobj.dump_contains = DUMP_COMPONENT_ACL;
1627  else
1628  extinfo->dobj.dump = extinfo->dobj.dump_contains =
1629  dopt->include_everything ? DUMP_COMPONENT_ALL :
1630  DUMP_COMPONENT_NONE;
1631 }
1632 
1633 /*
1634  * selectDumpablePublicationTable: policy-setting subroutine
1635  * Mark a publication table as to be dumped or not
1636  *
1637  * Publication tables have schemas, but those are ignored in decision making,
1638  * because publications are only dumped when we are dumping everything.
1639  */
1640 static void
1641 selectDumpablePublicationTable(DumpableObject *dobj, Archive *fout)
1642 {
1643  if (checkExtensionMembership(dobj, fout))
1644  return; /* extension membership overrides all else */
1645 
1646  dobj->dump = fout->dopt->include_everything ?
1647  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1648 }
1649 
1650 /*
1651  * selectDumpableObject: policy-setting subroutine
1652  * Mark a generic dumpable object as to be dumped or not
1653  *
1654  * Use this only for object types without a special-case routine above.
1655  */
1656 static void
1657 selectDumpableObject(DumpableObject *dobj, Archive *fout)
1658 {
1659  if (checkExtensionMembership(dobj, fout))
1660  return; /* extension membership overrides all else */
1661 
1662  /*
1663  * Default policy is to dump if parent namespace is dumpable, or for
1664  * non-namespace-associated items, dump if we're dumping "everything".
1665  */
1666  if (dobj->namespace)
1667  dobj->dump = dobj->namespace->dobj.dump_contains;
1668  else
1669  dobj->dump = fout->dopt->include_everything ?
1670  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1671 }
1672 
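/*
 * Illustrative sketch, not part of pg_dump: the selectDumpable* routines
 * above only set per-component bits; the dump* routines later test single
 * bits before emitting anything.  A minimal example of that consumer-side
 * pattern, using the DUMP_COMPONENT_* flags from pg_dump.h
 * ("object_wants_definition" is a hypothetical helper name, not pg_dump code):
 */
static inline bool
object_wants_definition(const DumpableObject *dobj)
{
	/* emit this object's DDL only when its DEFINITION component is wanted */
	return (dobj->dump & DUMP_COMPONENT_DEFINITION) != 0;
}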
1673 /*
1674  * Dump a table's contents for loading using the COPY command
1675  * - this routine is called by the Archiver when it wants the table
1676  * to be dumped.
1677  */
1678 
1679 static int
1680 dumpTableData_copy(Archive *fout, void *dcontext)
1681 {
1682  TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
1683  TableInfo *tbinfo = tdinfo->tdtable;
1684  const char *classname = tbinfo->dobj.name;
1685  const bool hasoids = tbinfo->hasoids;
1686  const bool oids = tdinfo->oids;
1687  PQExpBuffer q = createPQExpBuffer();
1688 
1689  /*
1690  * Note: can't use getThreadLocalPQExpBuffer() here, since we're calling fmtId,
1691  * which uses it already.
1692  */
1693  PQExpBuffer clistBuf = createPQExpBuffer();
1694  PGconn *conn = GetConnection(fout);
1695  PGresult *res;
1696  int ret;
1697  char *copybuf;
1698  const char *column_list;
1699 
1700  if (g_verbose)
1701  write_msg(NULL, "dumping contents of table \"%s.%s\"\n",
1702  tbinfo->dobj.namespace->dobj.name, classname);
1703 
1704  /*
1705  * Make sure we are in proper schema. We will qualify the table name
1706  * below anyway (in case its name conflicts with a pg_catalog table); but
1707  * this ensures reproducible results in case the table contains regproc,
1708  * regclass, etc columns.
1709  */
1710  selectSourceSchema(fout, tbinfo->dobj.namespace->dobj.name);
1711 
1712  /*
1713  * Specify the column list explicitly so that we have no possibility of
1714  * retrieving data in the wrong column order. (The default column
1715  * ordering of COPY will not be what we want in certain corner cases
1716  * involving ADD COLUMN and inheritance.)
1717  */
1718  column_list = fmtCopyColumnList(tbinfo, clistBuf);
1719 
1720  if (oids && hasoids)
1721  {
1722  appendPQExpBuffer(q, "COPY %s %s WITH OIDS TO stdout;",
1723  fmtQualifiedId(fout->remoteVersion,
1724  tbinfo->dobj.namespace->dobj.name,
1725  classname),
1726  column_list);
1727  }
1728  else if (tdinfo->filtercond)
1729  {
1730  /* Note: this syntax is only supported in 8.2 and up */
1731  appendPQExpBufferStr(q, "COPY (SELECT ");
1732  /* klugery to get rid of parens in column list */
1733  if (strlen(column_list) > 2)
1734  {
1735  appendPQExpBufferStr(q, column_list + 1);
1736  q->data[q->len - 1] = ' ';
1737  }
1738  else
1739  appendPQExpBufferStr(q, "* ");
1740  appendPQExpBuffer(q, "FROM %s %s) TO stdout;",
1741  fmtQualifiedId(fout->remoteVersion,
1742  tbinfo->dobj.namespace->dobj.name,
1743  classname),
1744  tdinfo->filtercond);
1745  }
1746  else
1747  {
1748  appendPQExpBuffer(q, "COPY %s %s TO stdout;",
1749  fmtQualifiedId(fout->remoteVersion,
1750  tbinfo->dobj.namespace->dobj.name,
1751  classname),
1752  column_list);
1753  }
1754  res = ExecuteSqlQuery(fout, q->data, PGRES_COPY_OUT);
1755  PQclear(res);
1756  destroyPQExpBuffer(clistBuf);
1757 
1758  for (;;)
1759  {
1760  ret = PQgetCopyData(conn, &copybuf, 0);
1761 
1762  if (ret < 0)
1763  break; /* done or error */
1764 
1765  if (copybuf)
1766  {
1767  WriteData(fout, copybuf, ret);
1768  PQfreemem(copybuf);
1769  }
1770 
1771  /* ----------
1772  * THROTTLE:
1773  *
1774  * There was considerable discussion in late July, 2000 regarding
1775  * slowing down pg_dump when backing up large tables. Users with both
1776  * slow & fast (multi-processor) machines experienced performance
1777  * degradation when doing a backup.
1778  *
1779  * Initial attempts based on sleeping for a number of ms for each ms
1780  * of work were deemed too complex, then a simple 'sleep in each loop'
1781  * implementation was suggested. The latter failed because the loop
1782  * was too tight. Finally, the following was implemented:
1783  *
1784  * If throttle is non-zero, then
1785  * See how long since the last sleep.
1786  * Work out how long to sleep (based on ratio).
1787  * If sleep is more than 100ms, then
1788  * sleep
1789  * reset timer
1790  * EndIf
1791  * EndIf
1792  *
1793  * where the throttle value was the number of ms to sleep per ms of
1794  * work. The calculation was done in each loop.
1795  *
1796  * Most of the hard work is done in the backend, and this solution
1797  * still did not work particularly well: on slow machines, the ratio
1798  * was 50:1, and on medium paced machines, 1:1, and on fast
1799  * multi-processor machines, it had little or no effect, for reasons
1800  * that were unclear.
1801  *
1802  * Further discussion ensued, and the proposal was dropped.
1803  *
1804  * For those people who want this feature, it can be implemented using
1805  * gettimeofday in each loop, calculating the time since last sleep,
1806  * multiplying that by the sleep ratio, then if the result is more
1807  * than a preset 'minimum sleep time' (say 100ms), call the 'select'
1808  * function to sleep for a subsecond period, i.e.
1809  *
1810  * select(0, NULL, NULL, NULL, &tvi);
1811  *
1812  * This will return after the interval specified in the structure tvi.
1813  * Finally, call gettimeofday again to save the 'last sleep time'.
1814  * ----------
1815  */
1816  }
1817  archprintf(fout, "\\.\n\n\n");
1818 
1819  if (ret == -2)
1820  {
1821  /* copy data transfer failed */
1822  write_msg(NULL, "Dumping the contents of table \"%s\" failed: PQgetCopyData() failed.\n", classname);
1823  write_msg(NULL, "Error message from server: %s", PQerrorMessage(conn));
1824  write_msg(NULL, "The command was: %s\n", q->data);
1825  exit_nicely(1);
1826  }
1827 
1828  /* Check command status and return to normal libpq state */
1829  res = PQgetResult(conn);
1830  if (PQresultStatus(res) != PGRES_COMMAND_OK)
1831  {
1832  write_msg(NULL, "Dumping the contents of table \"%s\" failed: PQgetResult() failed.\n", classname);
1833  write_msg(NULL, "Error message from server: %s", PQerrorMessage(conn));
1834  write_msg(NULL, "The command was: %s\n", q->data);
1835  exit_nicely(1);
1836  }
1837  PQclear(res);
1838 
1839  /* Do this to ensure we've pumped libpq back to idle state */
1840  if (PQgetResult(conn) != NULL)
1841  write_msg(NULL, "WARNING: unexpected extra results during COPY of table \"%s\"\n",
1842  classname);
1843 
1844  destroyPQExpBuffer(q);
1845  return 1;
1846 }
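/*
 * Illustrative sketch, not part of pg_dump: one way to implement the
 * throttling idea described in the THROTTLE comment inside
 * dumpTableData_copy() above.  "throttle" (ms of sleep per ms of work) is a
 * hypothetical setting -- pg_dump has no such option -- and the helper name
 * is made up.
 */
#ifdef THROTTLE_SKETCH
#include <sys/time.h>
#include <sys/select.h>

static void
maybe_throttle(struct timeval *last_sleep, int throttle)
{
	struct timeval now;
	long		work_ms,
				sleep_ms;

	if (throttle <= 0)
		return;

	/* see how long since the last sleep */
	gettimeofday(&now, NULL);
	work_ms = (now.tv_sec - last_sleep->tv_sec) * 1000L +
		(now.tv_usec - last_sleep->tv_usec) / 1000L;

	/* work out how long to sleep, based on the ratio */
	sleep_ms = work_ms * throttle;

	/* only sleep once the computed amount exceeds the 100ms minimum */
	if (sleep_ms > 100)
	{
		struct timeval tvi;

		tvi.tv_sec = sleep_ms / 1000;
		tvi.tv_usec = (sleep_ms % 1000) * 1000;
		select(0, NULL, NULL, NULL, &tvi);	/* returns after the interval */
		gettimeofday(last_sleep, NULL);		/* save the 'last sleep time' */
	}
}
#endif   /* THROTTLE_SKETCH */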
1847 
1848 /*
1849  * Dump table data using INSERT commands.
1850  *
1851  * Caution: when we restore from an archive file direct to database, the
1852  * INSERT commands emitted by this function have to be parsed by
1853  * pg_backup_db.c's ExecuteSimpleCommands(), which will not handle comments,
1854  * E'' strings, or dollar-quoted strings. So don't emit anything like that.
1855  */
1856 static int
1857 dumpTableData_insert(Archive *fout, void *dcontext)
1858 {
1859  TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
1860  TableInfo *tbinfo = tdinfo->tdtable;
1861  const char *classname = tbinfo->dobj.name;
1862  DumpOptions *dopt = fout->dopt;
1863  PQExpBuffer q = createPQExpBuffer();
1864  PQExpBuffer insertStmt = NULL;
1865  PGresult *res;
1866  int tuple;
1867  int nfields;
1868  int field;
1869 
1870  /*
1871  * Make sure we are in proper schema. We will qualify the table name
1872  * below anyway (in case its name conflicts with a pg_catalog table); but
1873  * this ensures reproducible results in case the table contains regproc,
1874  * regclass, etc columns.
1875  */
1876  selectSourceSchema(fout, tbinfo->dobj.namespace->dobj.name);
1877 
1878  appendPQExpBuffer(q, "DECLARE _pg_dump_cursor CURSOR FOR "
1879  "SELECT * FROM ONLY %s",
1880  fmtQualifiedId(fout->remoteVersion,
1881  tbinfo->dobj.namespace->dobj.name,
1882  classname));
1883  if (tdinfo->filtercond)
1884  appendPQExpBuffer(q, " %s", tdinfo->filtercond);
1885 
1886  ExecuteSqlStatement(fout, q->data);
1887 
1888  while (1)
1889  {
1890  res = ExecuteSqlQuery(fout, "FETCH 100 FROM _pg_dump_cursor",
1891  PGRES_TUPLES_OK);
1892  nfields = PQnfields(res);
1893  for (tuple = 0; tuple < PQntuples(res); tuple++)
1894  {
1895  /*
1896  * First time through, we build as much of the INSERT statement as
1897  * possible in "insertStmt", which we can then just print for each
1898  * line. If the table happens to have zero columns then this will
1899  * be a complete statement, otherwise it will end in "VALUES(" and
1900  * be ready to have the row's column values appended.
1901  */
1902  if (insertStmt == NULL)
1903  {
1904  insertStmt = createPQExpBuffer();
1905  appendPQExpBuffer(insertStmt, "INSERT INTO %s ",
1906  fmtId(classname));
1907 
1908  /* corner case for zero-column table */
1909  if (nfields == 0)
1910  {
1911  appendPQExpBufferStr(insertStmt, "DEFAULT VALUES;\n");
1912  }
1913  else
1914  {
1915  /* append the list of column names if required */
1916  if (dopt->column_inserts)
1917  {
1918  appendPQExpBufferChar(insertStmt, '(');
1919  for (field = 0; field < nfields; field++)
1920  {
1921  if (field > 0)
1922  appendPQExpBufferStr(insertStmt, ", ");
1923  appendPQExpBufferStr(insertStmt,
1924  fmtId(PQfname(res, field)));
1925  }
1926  appendPQExpBufferStr(insertStmt, ") ");
1927  }
1928 
1929  if (tbinfo->needs_override)
1930  appendPQExpBufferStr(insertStmt, "OVERRIDING SYSTEM VALUE ");
1931 
1932  appendPQExpBufferStr(insertStmt, "VALUES (");
1933  }
1934  }
1935 
1936  archputs(insertStmt->data, fout);
1937 
1938  /* if it is a zero-column table then we're done */
1939  if (nfields == 0)
1940  continue;
1941 
1942  for (field = 0; field < nfields; field++)
1943  {
1944  if (field > 0)
1945  archputs(", ", fout);
1946  if (PQgetisnull(res, tuple, field))
1947  {
1948  archputs("NULL", fout);
1949  continue;
1950  }
1951 
1952  /* XXX This code is partially duplicated in ruleutils.c */
1953  switch (PQftype(res, field))
1954  {
1955  case INT2OID:
1956  case INT4OID:
1957  case INT8OID:
1958  case OIDOID:
1959  case FLOAT4OID:
1960  case FLOAT8OID:
1961  case NUMERICOID:
1962  {
1963  /*
1964  * These types are printed without quotes unless
1965  * they contain values that aren't accepted by the
1966  * scanner unquoted (e.g., 'NaN'). Note that
1967  * strtod() and friends might accept NaN, so we
1968  * can't use that to test.
1969  *
1970  * In reality we only need to defend against
1971  * infinity and NaN, so we need not get too crazy
1972  * about pattern matching here.
1973  */
1974  const char *s = PQgetvalue(res, tuple, field);
1975 
1976  if (strspn(s, "0123456789 +-eE.") == strlen(s))
1977  archputs(s, fout);
1978  else
1979  archprintf(fout, "'%s'", s);
1980  }
1981  break;
1982 
1983  case BITOID:
1984  case VARBITOID:
1985  archprintf(fout, "B'%s'",
1986  PQgetvalue(res, tuple, field));
1987  break;
1988 
1989  case BOOLOID:
1990  if (strcmp(PQgetvalue(res, tuple, field), "t") == 0)
1991  archputs("true", fout);
1992  else
1993  archputs("false", fout);
1994  break;
1995 
1996  default:
1997  /* All other types are printed as string literals. */
1998  resetPQExpBuffer(q);
1999  appendStringLiteralAH(q,
2000  PQgetvalue(res, tuple, field),
2001  fout);
2002  archputs(q->data, fout);
2003  break;
2004  }
2005  }
2006  archputs(");\n", fout);
2007  }
2008 
2009  if (PQntuples(res) <= 0)
2010  {
2011  PQclear(res);
2012  break;
2013  }
2014  PQclear(res);
2015  }
2016 
2017  archputs("\n\n", fout);
2018 
2019  ExecuteSqlStatement(fout, "CLOSE _pg_dump_cursor");
2020 
2021  destroyPQExpBuffer(q);
2022  if (insertStmt != NULL)
2023  destroyPQExpBuffer(insertStmt);
2024 
2025  return 1;
2026 }
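/*
 * Illustrative only: the statements emitted by dumpTableData_insert() above
 * take roughly these shapes (the table and values here are made up).  With
 * --inserts:
 *
 *		INSERT INTO mytable VALUES (1, 'some text');
 *
 * and with --column-inserts the column list is added:
 *
 *		INSERT INTO mytable (id, note) VALUES (1, 'some text');
 *
 * Per the caution at the head of the function, only plain string literals
 * are used -- no E'' strings, dollar quoting, or comments.
 */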
2027 
2028 
2029 /*
2030  * dumpTableData -
2031  * dump the contents of a single table
2032  *
2033  * Actually, this just makes an ArchiveEntry for the table contents.
2034  */
2035 static void
2036 dumpTableData(Archive *fout, TableDataInfo *tdinfo)
2037 {
2038  DumpOptions *dopt = fout->dopt;
2039  TableInfo *tbinfo = tdinfo->tdtable;
2040  PQExpBuffer copyBuf = createPQExpBuffer();
2041  PQExpBuffer clistBuf = createPQExpBuffer();
2042  DataDumperPtr dumpFn;
2043  char *copyStmt;
2044 
2045  if (!dopt->dump_inserts)
2046  {
2047  /* Dump/restore using COPY */
2048  dumpFn = dumpTableData_copy;
2049  /* must use 2 steps here 'cause fmtId is nonreentrant */
2050  appendPQExpBuffer(copyBuf, "COPY %s ",
2051  fmtId(tbinfo->dobj.name));
2052  appendPQExpBuffer(copyBuf, "%s %sFROM stdin;\n",
2053  fmtCopyColumnList(tbinfo, clistBuf),
2054  (tdinfo->oids && tbinfo->hasoids) ? "WITH OIDS " : "");
2055  copyStmt = copyBuf->data;
2056  }
2057  else
2058  {
2059  /* Restore using INSERT */
2060  dumpFn = dumpTableData_insert;
2061  copyStmt = NULL;
2062  }
2063 
2064  /*
2065  * Note: although the TableDataInfo is a full DumpableObject, we treat its
2066  * dependency on its table as "special" and pass it to ArchiveEntry now.
2067  * See comments for BuildArchiveDependencies.
2068  */
2069  if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2070  ArchiveEntry(fout, tdinfo->dobj.catId, tdinfo->dobj.dumpId,
2071  tbinfo->dobj.name, tbinfo->dobj.namespace->dobj.name,
2072  NULL, tbinfo->rolname,
2073  false, "TABLE DATA", SECTION_DATA,
2074  "", "", copyStmt,
2075  &(tbinfo->dobj.dumpId), 1,
2076  dumpFn, tdinfo);
2077 
2078  destroyPQExpBuffer(copyBuf);
2079  destroyPQExpBuffer(clistBuf);
2080 }
2081 
2082 /*
2083  * refreshMatViewData -
2084  * load or refresh the contents of a single materialized view
2085  *
2086  * Actually, this just makes an ArchiveEntry for the REFRESH MATERIALIZED VIEW
2087  * statement.
2088  */
2089 static void
2090 refreshMatViewData(Archive *fout, TableDataInfo *tdinfo)
2091 {
2092  TableInfo *tbinfo = tdinfo->tdtable;
2093  PQExpBuffer q;
2094 
2095  /* If the materialized view is not flagged as populated, skip this. */
2096  if (!tbinfo->relispopulated)
2097  return;
2098 
2099  q = createPQExpBuffer();
2100 
2101  appendPQExpBuffer(q, "REFRESH MATERIALIZED VIEW %s;\n",
2102  fmtId(tbinfo->dobj.name));
2103 
2104  if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2105  ArchiveEntry(fout,
2106  tdinfo->dobj.catId, /* catalog ID */
2107  tdinfo->dobj.dumpId, /* dump ID */
2108  tbinfo->dobj.name, /* Name */
2109  tbinfo->dobj.namespace->dobj.name, /* Namespace */
2110  NULL, /* Tablespace */
2111  tbinfo->rolname, /* Owner */
2112  false, /* with oids */
2113  "MATERIALIZED VIEW DATA", /* Desc */
2114  SECTION_POST_DATA, /* Section */
2115  q->data, /* Create */
2116  "", /* Del */
2117  NULL, /* Copy */
2118  tdinfo->dobj.dependencies, /* Deps */
2119  tdinfo->dobj.nDeps, /* # Deps */
2120  NULL, /* Dumper */
2121  NULL); /* Dumper Arg */
2122 
2123  destroyPQExpBuffer(q);
2124 }
2125 
2126 /*
2127  * getTableData -
2128  * set up dumpable objects representing the contents of tables
2129  */
2130 static void
2131 getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, bool oids, char relkind)
2132 {
2133  int i;
2134 
2135  for (i = 0; i < numTables; i++)
2136  {
2137  if (tblinfo[i].dobj.dump & DUMP_COMPONENT_DATA &&
2138  (!relkind || tblinfo[i].relkind == relkind))
2139  makeTableDataInfo(dopt, &(tblinfo[i]), oids);
2140  }
2141 }
2142 
2143 /*
2144  * Make a dumpable object for the data of this specific table
2145  *
2146  * Note: we make a TableDataInfo if and only if we are going to dump the
2147  * table data; the "dump" flag in such objects isn't used.
2148  */
2149 static void
2150 makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo, bool oids)
2151 {
2152  TableDataInfo *tdinfo;
2153 
2154  /*
2155  * Nothing to do if we already decided to dump the table. This will
2156  * happen for "config" tables.
2157  */
2158  if (tbinfo->dataObj != NULL)
2159  return;
2160 
2161  /* Skip VIEWs (no data to dump) */
2162  if (tbinfo->relkind == RELKIND_VIEW)
2163  return;
2164  /* Skip FOREIGN TABLEs (no data to dump) */
2165  if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2166  return;
2167  /* Skip partitioned tables (data in partitions) */
2168  if (tbinfo->relkind == RELKIND_PARTITIONED_TABLE)
2169  return;
2170 
2171  /* Don't dump data in unlogged tables, if so requested */
2172  if (tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED &&
2173  dopt->no_unlogged_table_data)
2174  return;
2175 
2176  /* Check that the data is not explicitly excluded */
2177  if (simple_oid_list_member(&tabledata_exclude_oids,
2178  tbinfo->dobj.catId.oid))
2179  return;
2180 
2181  /* OK, let's dump it */
2182  tdinfo = (TableDataInfo *) pg_malloc(sizeof(TableDataInfo));
2183 
2184  if (tbinfo->relkind == RELKIND_MATVIEW)
2185  tdinfo->dobj.objType = DO_REFRESH_MATVIEW;
2186  else if (tbinfo->relkind == RELKIND_SEQUENCE)
2187  tdinfo->dobj.objType = DO_SEQUENCE_SET;
2188  else
2189  tdinfo->dobj.objType = DO_TABLE_DATA;
2190 
2191  /*
2192  * Note: use tableoid 0 so that this object won't be mistaken for
2193  * something that pg_depend entries apply to.
2194  */
2195  tdinfo->dobj.catId.tableoid = 0;
2196  tdinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
2197  AssignDumpId(&tdinfo->dobj);
2198  tdinfo->dobj.name = tbinfo->dobj.name;
2199  tdinfo->dobj.namespace = tbinfo->dobj.namespace;
2200  tdinfo->tdtable = tbinfo;
2201  tdinfo->oids = oids;
2202  tdinfo->filtercond = NULL; /* might get set later */
2203  addObjectDependency(&tdinfo->dobj, tbinfo->dobj.dumpId);
2204 
2205  tbinfo->dataObj = tdinfo;
2206 }
2207 
2208 /*
2209  * The refresh for a materialized view must be dependent on the refresh for
2210  * any materialized view that this one is dependent on.
2211  *
2212  * This must be called after all the objects are created, but before they are
2213  * sorted.
2214  */
2215 static void
2216 buildMatViewRefreshDependencies(Archive *fout)
2217 {
2218  PQExpBuffer query;
2219  PGresult *res;
2220  int ntups,
2221  i;
2222  int i_classid,
2223  i_objid,
2224  i_refobjid;
2225 
2226  /* No Mat Views before 9.3. */
2227  if (fout->remoteVersion < 90300)
2228  return;
2229 
2230  /* Make sure we are in proper schema */
2231  selectSourceSchema(fout, "pg_catalog");
2232 
2233  query = createPQExpBuffer();
2234 
2235  appendPQExpBufferStr(query, "WITH RECURSIVE w AS "
2236  "( "
2237  "SELECT d1.objid, d2.refobjid, c2.relkind AS refrelkind "
2238  "FROM pg_depend d1 "
2239  "JOIN pg_class c1 ON c1.oid = d1.objid "
2240  "AND c1.relkind = " CppAsString2(RELKIND_MATVIEW)
2241  " JOIN pg_rewrite r1 ON r1.ev_class = d1.objid "
2242  "JOIN pg_depend d2 ON d2.classid = 'pg_rewrite'::regclass "
2243  "AND d2.objid = r1.oid "
2244  "AND d2.refobjid <> d1.objid "
2245  "JOIN pg_class c2 ON c2.oid = d2.refobjid "
2246  "AND c2.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
2247  CppAsString2(RELKIND_VIEW) ") "
2248  "WHERE d1.classid = 'pg_class'::regclass "
2249  "UNION "
2250  "SELECT w.objid, d3.refobjid, c3.relkind "
2251  "FROM w "
2252  "JOIN pg_rewrite r3 ON r3.ev_class = w.refobjid "
2253  "JOIN pg_depend d3 ON d3.classid = 'pg_rewrite'::regclass "
2254  "AND d3.objid = r3.oid "
2255  "AND d3.refobjid <> w.refobjid "
2256  "JOIN pg_class c3 ON c3.oid = d3.refobjid "
2257  "AND c3.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
2258  CppAsString2(RELKIND_VIEW) ") "
2259  ") "
2260  "SELECT 'pg_class'::regclass::oid AS classid, objid, refobjid "
2261  "FROM w "
2262  "WHERE refrelkind = " CppAsString2(RELKIND_MATVIEW));
2263 
2264  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
2265 
2266  ntups = PQntuples(res);
2267 
2268  i_classid = PQfnumber(res, "classid");
2269  i_objid = PQfnumber(res, "objid");
2270  i_refobjid = PQfnumber(res, "refobjid");
2271 
2272  for (i = 0; i < ntups; i++)
2273  {
2274  CatalogId objId;
2275  CatalogId refobjId;
2276  DumpableObject *dobj;
2277  DumpableObject *refdobj;
2278  TableInfo *tbinfo;
2279  TableInfo *reftbinfo;
2280 
2281  objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
2282  objId.oid = atooid(PQgetvalue(res, i, i_objid));
2283  refobjId.tableoid = objId.tableoid;
2284  refobjId.oid = atooid(PQgetvalue(res, i, i_refobjid));
2285 
2286  dobj = findObjectByCatalogId(objId);
2287  if (dobj == NULL)
2288  continue;
2289 
2290  Assert(dobj->objType == DO_TABLE);
2291  tbinfo = (TableInfo *) dobj;
2292  Assert(tbinfo->relkind == RELKIND_MATVIEW);
2293  dobj = (DumpableObject *) tbinfo->dataObj;
2294  if (dobj == NULL)
2295  continue;
2296  Assert(dobj->objType == DO_REFRESH_MATVIEW);
2297 
2298  refdobj = findObjectByCatalogId(refobjId);
2299  if (refdobj == NULL)
2300  continue;
2301 
2302  Assert(refdobj->objType == DO_TABLE);
2303  reftbinfo = (TableInfo *) refdobj;
2304  Assert(reftbinfo->relkind == RELKIND_MATVIEW);
2305  refdobj = (DumpableObject *) reftbinfo->dataObj;
2306  if (refdobj == NULL)
2307  continue;
2308  Assert(refdobj->objType == DO_REFRESH_MATVIEW);
2309 
2310  addObjectDependency(dobj, refdobj->dumpId);
2311 
2312  if (!reftbinfo->relispopulated)
2313  tbinfo->relispopulated = false;
2314  }
2315 
2316  PQclear(res);
2317 
2318  destroyPQExpBuffer(query);
2319 }
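/*
 * Illustrative only: if materialized view m2 selects from materialized view
 * m1 (made-up names), buildMatViewRefreshDependencies() above adds a dump
 * dependency from m2's REFRESH entry to m1's, so m1 is refreshed first; and
 * if m1 is unpopulated, m2 is treated as unpopulated as well.
 */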
2320 
2321 /*
2322  * getTableDataFKConstraints -
2323  * add dump-order dependencies reflecting foreign key constraints
2324  *
2325  * This code is executed only in a data-only dump --- in schema+data dumps
2326  * we handle foreign key issues by not creating the FK constraints until
2327  * after the data is loaded. In a data-only dump, however, we want to
2328  * order the table data objects in such a way that a table's referenced
2329  * tables are restored first. (In the presence of circular references or
2330  * self-references this may be impossible; we'll detect and complain about
2331  * that during the dependency sorting step.)
2332  */
2333 static void
2334 getTableDataFKConstraints(void)
2335 {
2336  DumpableObject **dobjs;
2337  int numObjs;
2338  int i;
2339 
2340  /* Search through all the dumpable objects for FK constraints */
2341  getDumpableObjects(&dobjs, &numObjs);
2342  for (i = 0; i < numObjs; i++)
2343  {
2344  if (dobjs[i]->objType == DO_FK_CONSTRAINT)
2345  {
2346  ConstraintInfo *cinfo = (ConstraintInfo *) dobjs[i];
2347  TableInfo *ftable;
2348 
2349  /* Not interesting unless both tables are to be dumped */
2350  if (cinfo->contable == NULL ||
2351  cinfo->contable->dataObj == NULL)
2352  continue;
2353  ftable = findTableByOid(cinfo->confrelid);
2354  if (ftable == NULL ||
2355  ftable->dataObj == NULL)
2356  continue;
2357 
2358  /*
2359  * Okay, make referencing table's TABLE_DATA object depend on the
2360  * referenced table's TABLE_DATA object.
2361  */
2362  addObjectDependency(&cinfo->contable->dataObj->dobj,
2363  ftable->dataObj->dobj.dumpId);
2364  }
2365  }
2366  free(dobjs);
2367 }
2368 
2369 
2370 /*
2371  * guessConstraintInheritance:
2372  * In pre-8.4 databases, we can't tell for certain which constraints
2373  * are inherited. We assume a CHECK constraint is inherited if its name
2374  * matches the name of any constraint in the parent. Originally this code
2375  * tried to compare the expression texts, but that can fail for various
2376  * reasons --- for example, if the parent and child tables are in different
2377  * schemas, reverse-listing of function calls may produce different text
2378  * (schema-qualified or not) depending on search path.
2379  *
2380  * In 8.4 and up we can rely on the conislocal field to decide which
2381  * constraints must be dumped; much safer.
2382  *
2383  * This function assumes all conislocal flags were initialized to TRUE.
2384  * It clears the flag on anything that seems to be inherited.
2385  */
2386 static void
2387 guessConstraintInheritance(TableInfo *tblinfo, int numTables)
2388 {
2389  int i,
2390  j,
2391  k;
2392 
2393  for (i = 0; i < numTables; i++)
2394  {
2395  TableInfo *tbinfo = &(tblinfo[i]);
2396  int numParents;
2397  TableInfo **parents;
2398  TableInfo *parent;
2399 
2400  /* Sequences and views never have parents */
2401  if (tbinfo->relkind == RELKIND_SEQUENCE ||
2402  tbinfo->relkind == RELKIND_VIEW)
2403  continue;
2404 
2405  /* Don't bother computing anything for non-target tables, either */
2406  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
2407  continue;
2408 
2409  numParents = tbinfo->numParents;
2410  parents = tbinfo->parents;
2411 
2412  if (numParents == 0)
2413  continue; /* nothing to see here, move along */
2414 
2415  /* scan for inherited CHECK constraints */
2416  for (j = 0; j < tbinfo->ncheck; j++)
2417  {
2418  ConstraintInfo *constr;
2419 
2420  constr = &(tbinfo->checkexprs[j]);
2421 
2422  for (k = 0; k < numParents; k++)
2423  {
2424  int l;
2425 
2426  parent = parents[k];
2427  for (l = 0; l < parent->ncheck; l++)
2428  {
2429  ConstraintInfo *pconstr = &(parent->checkexprs[l]);
2430 
2431  if (strcmp(pconstr->dobj.name, constr->dobj.name) == 0)
2432  {
2433  constr->conislocal = false;
2434  break;
2435  }
2436  }
2437  if (!constr->conislocal)
2438  break;
2439  }
2440  }
2441  }
2442 }
2443 
2444 
2445 /*
2446  * dumpDatabase:
2447  * dump the database definition
2448  */
2449 static void
2450 dumpDatabase(Archive *fout)
2451 {
2452  DumpOptions *dopt = fout->dopt;
2453  PQExpBuffer dbQry = createPQExpBuffer();
2454  PQExpBuffer delQry = createPQExpBuffer();
2455  PQExpBuffer creaQry = createPQExpBuffer();
2456  PGconn *conn = GetConnection(fout);
2457  PGresult *res;
2458  int i_tableoid,
2459  i_oid,
2460  i_dba,
2461  i_encoding,
2462  i_collate,
2463  i_ctype,
2464  i_frozenxid,
2465  i_minmxid,
2466  i_tablespace;
2467  CatalogId dbCatId;
2468  DumpId dbDumpId;
2469  const char *datname,
2470  *dba,
2471  *encoding,
2472  *collate,
2473  *ctype,
2474  *tablespace;
2475  uint32 frozenxid,
2476  minmxid;
2477 
2478  datname = PQdb(conn);
2479 
2480  if (g_verbose)
2481  write_msg(NULL, "saving database definition\n");
2482 
2483  /* Make sure we are in proper schema */
2484  selectSourceSchema(fout, "pg_catalog");
2485 
2486  /* Get the database owner and parameters from pg_database */
2487  if (fout->remoteVersion >= 90300)
2488  {
2489  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
2490  "(%s datdba) AS dba, "
2491  "pg_encoding_to_char(encoding) AS encoding, "
2492  "datcollate, datctype, datfrozenxid, datminmxid, "
2493  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2494  "shobj_description(oid, 'pg_database') AS description "
2495 
2496  "FROM pg_database "
2497  "WHERE datname = ",
2498  username_subquery);
2499  appendStringLiteralAH(dbQry, datname, fout);
2500  }
2501  else if (fout->remoteVersion >= 80400)
2502  {
2503  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
2504  "(%s datdba) AS dba, "
2505  "pg_encoding_to_char(encoding) AS encoding, "
2506  "datcollate, datctype, datfrozenxid, 0 AS datminmxid, "
2507  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2508  "shobj_description(oid, 'pg_database') AS description "
2509 
2510  "FROM pg_database "
2511  "WHERE datname = ",
2512  username_subquery);
2513  appendStringLiteralAH(dbQry, datname, fout);
2514  }
2515  else if (fout->remoteVersion >= 80200)
2516  {
2517  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
2518  "(%s datdba) AS dba, "
2519  "pg_encoding_to_char(encoding) AS encoding, "
2520  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2521  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2522  "shobj_description(oid, 'pg_database') AS description "
2523 
2524  "FROM pg_database "
2525  "WHERE datname = ",
2526  username_subquery);
2527  appendStringLiteralAH(dbQry, datname, fout);
2528  }
2529  else
2530  {
2531  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
2532  "(%s datdba) AS dba, "
2533  "pg_encoding_to_char(encoding) AS encoding, "
2534  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2535  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace "
2536  "FROM pg_database "
2537  "WHERE datname = ",
2538  username_subquery);
2539  appendStringLiteralAH(dbQry, datname, fout);
2540  }
2541 
2542  res = ExecuteSqlQueryForSingleRow(fout, dbQry->data);
2543 
2544  i_tableoid = PQfnumber(res, "tableoid");
2545  i_oid = PQfnumber(res, "oid");
2546  i_dba = PQfnumber(res, "dba");
2547  i_encoding = PQfnumber(res, "encoding");
2548  i_collate = PQfnumber(res, "datcollate");
2549  i_ctype = PQfnumber(res, "datctype");
2550  i_frozenxid = PQfnumber(res, "datfrozenxid");
2551  i_minmxid = PQfnumber(res, "datminmxid");
2552  i_tablespace = PQfnumber(res, "tablespace");
2553 
2554  dbCatId.tableoid = atooid(PQgetvalue(res, 0, i_tableoid));
2555  dbCatId.oid = atooid(PQgetvalue(res, 0, i_oid));
2556  dba = PQgetvalue(res, 0, i_dba);
2557  encoding = PQgetvalue(res, 0, i_encoding);
2558  collate = PQgetvalue(res, 0, i_collate);
2559  ctype = PQgetvalue(res, 0, i_ctype);
2560  frozenxid = atooid(PQgetvalue(res, 0, i_frozenxid));
2561  minmxid = atooid(PQgetvalue(res, 0, i_minmxid));
2562  tablespace = PQgetvalue(res, 0, i_tablespace);
2563 
2564  appendPQExpBuffer(creaQry, "CREATE DATABASE %s WITH TEMPLATE = template0",
2565  fmtId(datname));
2566  if (strlen(encoding) > 0)
2567  {
2568  appendPQExpBufferStr(creaQry, " ENCODING = ");
2569  appendStringLiteralAH(creaQry, encoding, fout);
2570  }
2571  if (strlen(collate) > 0)
2572  {
2573  appendPQExpBufferStr(creaQry, " LC_COLLATE = ");
2574  appendStringLiteralAH(creaQry, collate, fout);
2575  }
2576  if (strlen(ctype) > 0)
2577  {
2578  appendPQExpBufferStr(creaQry, " LC_CTYPE = ");
2579  appendStringLiteralAH(creaQry, ctype, fout);
2580  }
2581  if (strlen(tablespace) > 0 && strcmp(tablespace, "pg_default") != 0 &&
2582  !dopt->outputNoTablespaces)
2583  appendPQExpBuffer(creaQry, " TABLESPACE = %s",
2584  fmtId(tablespace));
2585  appendPQExpBufferStr(creaQry, ";\n");
2586 
2587  if (dopt->binary_upgrade)
2588  {
2589  appendPQExpBufferStr(creaQry, "\n-- For binary upgrade, set datfrozenxid and datminmxid.\n");
2590  appendPQExpBuffer(creaQry, "UPDATE pg_catalog.pg_database\n"
2591  "SET datfrozenxid = '%u', datminmxid = '%u'\n"
2592  "WHERE datname = ",
2593  frozenxid, minmxid);
2594  appendStringLiteralAH(creaQry, datname, fout);
2595  appendPQExpBufferStr(creaQry, ";\n");
2596 
2597  }
2598 
2599  appendPQExpBuffer(delQry, "DROP DATABASE %s;\n",
2600  fmtId(datname));
2601 
2602  dbDumpId = createDumpId();
2603 
2604  ArchiveEntry(fout,
2605  dbCatId, /* catalog ID */
2606  dbDumpId, /* dump ID */
2607  datname, /* Name */
2608  NULL, /* Namespace */
2609  NULL, /* Tablespace */
2610  dba, /* Owner */
2611  false, /* with oids */
2612  "DATABASE", /* Desc */
2613  SECTION_PRE_DATA, /* Section */
2614  creaQry->data, /* Create */
2615  delQry->data, /* Del */
2616  NULL, /* Copy */
2617  NULL, /* Deps */
2618  0, /* # Deps */
2619  NULL, /* Dumper */
2620  NULL); /* Dumper Arg */
2621 
2622  /*
2623  * pg_largeobject and pg_largeobject_metadata come from the old system
2624  * intact, so set their relfrozenxids and relminmxids.
2625  */
2626  if (dopt->binary_upgrade)
2627  {
2628  PGresult *lo_res;
2629  PQExpBuffer loFrozenQry = createPQExpBuffer();
2630  PQExpBuffer loOutQry = createPQExpBuffer();
2631  int i_relfrozenxid,
2632  i_relminmxid;
2633 
2634  /*
2635  * pg_largeobject
2636  */
2637  if (fout->remoteVersion >= 90300)
2638  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid\n"
2639  "FROM pg_catalog.pg_class\n"
2640  "WHERE oid = %u;\n",
2641  LargeObjectRelationId);
2642  else
2643  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid\n"
2644  "FROM pg_catalog.pg_class\n"
2645  "WHERE oid = %u;\n",
2646  LargeObjectRelationId);
2647 
2648  lo_res = ExecuteSqlQueryForSingleRow(fout, loFrozenQry->data);
2649 
2650  i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
2651  i_relminmxid = PQfnumber(lo_res, "relminmxid");
2652 
2653  appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, set pg_largeobject relfrozenxid and relminmxid\n");
2654  appendPQExpBuffer(loOutQry, "UPDATE pg_catalog.pg_class\n"
2655  "SET relfrozenxid = '%u', relminmxid = '%u'\n"
2656  "WHERE oid = %u;\n",
2657  atoi(PQgetvalue(lo_res, 0, i_relfrozenxid)),
2658  atoi(PQgetvalue(lo_res, 0, i_relminmxid)),
2659  LargeObjectRelationId);
2660  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2661  "pg_largeobject", NULL, NULL, "",
2662  false, "pg_largeobject", SECTION_PRE_DATA,
2663  loOutQry->data, "", NULL,
2664  NULL, 0,
2665  NULL, NULL);
2666 
2667  PQclear(lo_res);
2668 
2669  /*
2670  * pg_largeobject_metadata
2671  */
2672  if (fout->remoteVersion >= 90000)
2673  {
2674  resetPQExpBuffer(loFrozenQry);
2675  resetPQExpBuffer(loOutQry);
2676 
2677  if (fout->remoteVersion >= 90300)
2678  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid\n"
2679  "FROM pg_catalog.pg_class\n"
2680  "WHERE oid = %u;\n",
2681  LargeObjectMetadataRelationId);
2682  else
2683  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid\n"
2684  "FROM pg_catalog.pg_class\n"
2685  "WHERE oid = %u;\n",
2686  LargeObjectMetadataRelationId);
2687 
2688  lo_res = ExecuteSqlQueryForSingleRow(fout, loFrozenQry->data);
2689 
2690  i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
2691  i_relminmxid = PQfnumber(lo_res, "relminmxid");
2692 
2693  appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, set pg_largeobject_metadata relfrozenxid and relminmxid\n");
2694  appendPQExpBuffer(loOutQry, "UPDATE pg_catalog.pg_class\n"
2695  "SET relfrozenxid = '%u', relminmxid = '%u'\n"
2696  "WHERE oid = %u;\n",
2697  atoi(PQgetvalue(lo_res, 0, i_relfrozenxid)),
2698  atoi(PQgetvalue(lo_res, 0, i_relminmxid)),
2699  LargeObjectMetadataRelationId);
2700  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2701  "pg_largeobject_metadata", NULL, NULL, "",
2702  false, "pg_largeobject_metadata", SECTION_PRE_DATA,
2703  loOutQry->data, "", NULL,
2704  NULL, 0,
2705  NULL, NULL);
2706 
2707  PQclear(lo_res);
2708  }
2709 
2710  destroyPQExpBuffer(loFrozenQry);
2711  destroyPQExpBuffer(loOutQry);
2712  }
2713 
2714  /* Dump DB comment if any */
2715  if (fout->remoteVersion >= 80200)
2716  {
2717  /*
2718  * 8.2 keeps comments on shared objects in a shared table, so we
2719  * cannot use the dumpComment used for other database objects.
2720  */
2721  char *comment = PQgetvalue(res, 0, PQfnumber(res, "description"));
2722 
2723  if (comment && strlen(comment))
2724  {
2725  resetPQExpBuffer(dbQry);
2726 
2727  /*
2728  * Generates warning when loaded into a differently-named
2729  * database.
2730  */
2731  appendPQExpBuffer(dbQry, "COMMENT ON DATABASE %s IS ", fmtId(datname));
2732  appendStringLiteralAH(dbQry, comment, fout);
2733  appendPQExpBufferStr(dbQry, ";\n");
2734 
2735  ArchiveEntry(fout, dbCatId, createDumpId(), datname, NULL, NULL,
2736  dba, false, "COMMENT", SECTION_NONE,
2737  dbQry->data, "", NULL,
2738  &dbDumpId, 1, NULL, NULL);
2739  }
2740  }
2741  else
2742  {
2743  resetPQExpBuffer(dbQry);
2744  appendPQExpBuffer(dbQry, "DATABASE %s", fmtId(datname));
2745  dumpComment(fout, dbQry->data, NULL, "",
2746  dbCatId, 0, dbDumpId);
2747  }
2748 
2749  /* Dump shared security label. */
2750  if (!dopt->no_security_labels && fout->remoteVersion >= 90200)
2751  {
2752  PGresult *shres;
2753  PQExpBuffer seclabelQry;
2754 
2755  seclabelQry = createPQExpBuffer();
2756 
2757  buildShSecLabelQuery(conn, "pg_database", dbCatId.oid, seclabelQry);
2758  shres = ExecuteSqlQuery(fout, seclabelQry->data, PGRES_TUPLES_OK);
2759  resetPQExpBuffer(seclabelQry);
2760  emitShSecLabels(conn, shres, seclabelQry, "DATABASE", datname);
2761  if (strlen(seclabelQry->data))
2762  ArchiveEntry(fout, dbCatId, createDumpId(), datname, NULL, NULL,
2763  dba, false, "SECURITY LABEL", SECTION_NONE,
2764  seclabelQry->data, "", NULL,
2765  &dbDumpId, 1, NULL, NULL);
2766  destroyPQExpBuffer(seclabelQry);
2767  PQclear(shres);
2768  }
2769 
2770  PQclear(res);
2771 
2772  destroyPQExpBuffer(dbQry);
2773  destroyPQExpBuffer(delQry);
2774  destroyPQExpBuffer(creaQry);
2775 }
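/*
 * Illustrative only: for a database created with non-default settings, the
 * creation command assembled by dumpDatabase() above looks roughly like this
 * (names and values are made up):
 *
 *		CREATE DATABASE mydb WITH TEMPLATE = template0 ENCODING = 'UTF8'
 *			LC_COLLATE = 'en_US.UTF-8' LC_CTYPE = 'en_US.UTF-8'
 *			TABLESPACE = myspace;
 */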
2776 
2777 /*
2778  * dumpEncoding: put the correct encoding into the archive
2779  */
2780 static void
2781 dumpEncoding(Archive *AH)
2782 {
2783  const char *encname = pg_encoding_to_char(AH->encoding);
2784  PQExpBuffer qry = createPQExpBuffer();
2785 
2786  if (g_verbose)
2787  write_msg(NULL, "saving encoding = %s\n", encname);
2788 
2789  appendPQExpBufferStr(qry, "SET client_encoding = ");
2790  appendStringLiteralAH(qry, encname, AH);
2791  appendPQExpBufferStr(qry, ";\n");
2792 
2793  ArchiveEntry(AH, nilCatalogId, createDumpId(),
2794  "ENCODING", NULL, NULL, "",
2795  false, "ENCODING", SECTION_PRE_DATA,
2796  qry->data, "", NULL,
2797  NULL, 0,
2798  NULL, NULL);
2799 
2800  destroyPQExpBuffer(qry);
2801 }
2802 
2803 
2804 /*
2805  * dumpStdStrings: put the correct escape string behavior into the archive
2806  */
2807 static void
2808 dumpStdStrings(Archive *AH)
2809 {
2810  const char *stdstrings = AH->std_strings ? "on" : "off";
2811  PQExpBuffer qry = createPQExpBuffer();
2812 
2813  if (g_verbose)
2814  write_msg(NULL, "saving standard_conforming_strings = %s\n",
2815  stdstrings);
2816 
2817  appendPQExpBuffer(qry, "SET standard_conforming_strings = '%s';\n",
2818  stdstrings);
2819 
2820  ArchiveEntry(AH, nilCatalogId, createDumpId(),
2821  "STDSTRINGS", NULL, NULL, "",
2822  false, "STDSTRINGS", SECTION_PRE_DATA,
2823  qry->data, "", NULL,
2824  NULL, 0,
2825  NULL, NULL);
2826 
2827  destroyPQExpBuffer(qry);
2828 }
2829 
2830 
2831 /*
2832  * getBlobs:
2833  * Collect schema-level data about large objects
2834  */
2835 static void
2836 getBlobs(Archive *fout)
2837 {
2838  DumpOptions *dopt = fout->dopt;
2839  PQExpBuffer blobQry = createPQExpBuffer();
2840  BlobInfo *binfo;
2841  DumpableObject *bdata;
2842  PGresult *res;
2843  int ntups;
2844  int i;
2845  int i_oid;
2846  int i_lomowner;
2847  int i_lomacl;
2848  int i_rlomacl;
2849  int i_initlomacl;
2850  int i_initrlomacl;
2851 
2852  /* Verbose message */
2853  if (g_verbose)
2854  write_msg(NULL, "reading large objects\n");
2855 
2856  /* Make sure we are in proper schema */
2857  selectSourceSchema(fout, "pg_catalog");
2858 
2859  /* Fetch BLOB OIDs, and owner/ACL data if >= 9.0 */
2860  if (fout->remoteVersion >= 90600)
2861  {
2862  PQExpBuffer acl_subquery = createPQExpBuffer();
2863  PQExpBuffer racl_subquery = createPQExpBuffer();
2864  PQExpBuffer init_acl_subquery = createPQExpBuffer();
2865  PQExpBuffer init_racl_subquery = createPQExpBuffer();
2866 
2867  buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
2868  init_racl_subquery, "l.lomacl", "l.lomowner", "'L'",
2869  dopt->binary_upgrade);
2870 
2871  appendPQExpBuffer(blobQry,
2872  "SELECT l.oid, (%s l.lomowner) AS rolname, "
2873  "%s AS lomacl, "
2874  "%s AS rlomacl, "
2875  "%s AS initlomacl, "
2876  "%s AS initrlomacl "
2877  "FROM pg_largeobject_metadata l "
2878  "LEFT JOIN pg_init_privs pip ON "
2879  "(l.oid = pip.objoid "
2880  "AND pip.classoid = 'pg_largeobject'::regclass "
2881  "AND pip.objsubid = 0) ",
2883  acl_subquery->data,
2884  racl_subquery->data,
2885  init_acl_subquery->data,
2886  init_racl_subquery->data);
2887 
2888  destroyPQExpBuffer(acl_subquery);
2889  destroyPQExpBuffer(racl_subquery);
2890  destroyPQExpBuffer(init_acl_subquery);
2891  destroyPQExpBuffer(init_racl_subquery);
2892  }
2893  else if (fout->remoteVersion >= 90000)
2894  appendPQExpBuffer(blobQry,
2895  "SELECT oid, (%s lomowner) AS rolname, lomacl, "
2896  "NULL AS rlomacl, NULL AS initlomacl, "
2897  "NULL AS initrlomacl "
2898  " FROM pg_largeobject_metadata",
2899  username_subquery);
2900  else
2901  appendPQExpBufferStr(blobQry,
2902  "SELECT DISTINCT loid AS oid, "
2903  "NULL::name AS rolname, NULL::oid AS lomacl, "
2904  "NULL::oid AS rlomacl, NULL::oid AS initlomacl, "
2905  "NULL::oid AS initrlomacl "
2906  " FROM pg_largeobject");
2907 
2908  res = ExecuteSqlQuery(fout, blobQry->data, PGRES_TUPLES_OK);
2909 
2910  i_oid = PQfnumber(res, "oid");
2911  i_lomowner = PQfnumber(res, "rolname");
2912  i_lomacl = PQfnumber(res, "lomacl");
2913  i_rlomacl = PQfnumber(res, "rlomacl");
2914  i_initlomacl = PQfnumber(res, "initlomacl");
2915  i_initrlomacl = PQfnumber(res, "initrlomacl");
2916 
2917  ntups = PQntuples(res);
2918 
2919  /*
2920  * Each large object has its own BLOB archive entry.
2921  */
2922  binfo = (BlobInfo *) pg_malloc(ntups * sizeof(BlobInfo));
2923 
2924  for (i = 0; i < ntups; i++)
2925  {
2926  binfo[i].dobj.objType = DO_BLOB;
2927  binfo[i].dobj.catId.tableoid = LargeObjectRelationId;
2928  binfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
2929  AssignDumpId(&binfo[i].dobj);
2930 
2931  binfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oid));
2932  binfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_lomowner));
2933  binfo[i].blobacl = pg_strdup(PQgetvalue(res, i, i_lomacl));
2934  binfo[i].rblobacl = pg_strdup(PQgetvalue(res, i, i_rlomacl));
2935  binfo[i].initblobacl = pg_strdup(PQgetvalue(res, i, i_initlomacl));
2936  binfo[i].initrblobacl = pg_strdup(PQgetvalue(res, i, i_initrlomacl));
2937 
2938  if (PQgetisnull(res, i, i_lomacl) &&
2939  PQgetisnull(res, i, i_rlomacl) &&
2940  PQgetisnull(res, i, i_initlomacl) &&
2941  PQgetisnull(res, i, i_initrlomacl))
2942  binfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
2943 
2944  /*
2945  * In binary-upgrade mode for blobs, we do *not* dump out the data or
2946  * the ACLs, should any exist. The data and ACL (if any) will be
2947  * copied by pg_upgrade, which simply copies the pg_largeobject and
2948  * pg_largeobject_metadata tables.
2949  *
2950  * We *do* dump out the definition of the blob because we need that to
2951  * make the restoration of the comments, and anything else, work since
2952  * pg_upgrade copies the files behind pg_largeobject and
2953  * pg_largeobject_metadata after the dump is restored.
2954  */
2955  if (dopt->binary_upgrade)
2956  binfo[i].dobj.dump &= ~(DUMP_COMPONENT_DATA | DUMP_COMPONENT_ACL);
2957  }
2958 
2959  /*
2960  * If we have any large objects, a "BLOBS" archive entry is needed. This
2961  * is just a placeholder for sorting; it carries no data now.
2962  */
2963  if (ntups > 0)
2964  {
2965  bdata = (DumpableObject *) pg_malloc(sizeof(DumpableObject));
2966  bdata->objType = DO_BLOB_DATA;
2967  bdata->catId = nilCatalogId;
2968  AssignDumpId(bdata);
2969  bdata->name = pg_strdup("BLOBS");
2970  }
2971 
2972  PQclear(res);
2973  destroyPQExpBuffer(blobQry);
2974 }
2975 
2976 /*
2977  * dumpBlob
2978  *
2979  * dump the definition (metadata) of the given large object
2980  */
2981 static void
2982 dumpBlob(Archive *fout, BlobInfo *binfo)
2983 {
2984  PQExpBuffer cquery = createPQExpBuffer();
2985  PQExpBuffer dquery = createPQExpBuffer();
2986 
2987  appendPQExpBuffer(cquery,
2988  "SELECT pg_catalog.lo_create('%s');\n",
2989  binfo->dobj.name);
2990 
2991  appendPQExpBuffer(dquery,
2992  "SELECT pg_catalog.lo_unlink('%s');\n",
2993  binfo->dobj.name);
2994 
2995  if (binfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
2996  ArchiveEntry(fout, binfo->dobj.catId, binfo->dobj.dumpId,
2997  binfo->dobj.name,
2998  NULL, NULL,
2999  binfo->rolname, false,
3000  "BLOB", SECTION_PRE_DATA,
3001  cquery->data, dquery->data, NULL,
3002  NULL, 0,
3003  NULL, NULL);
3004 
3005  /* set up tag for comment and/or ACL */
3006  resetPQExpBuffer(cquery);
3007  appendPQExpBuffer(cquery, "LARGE OBJECT %s", binfo->dobj.name);
3008 
3009  /* Dump comment if any */
3010  if (binfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3011  dumpComment(fout, cquery->data,
3012  NULL, binfo->rolname,
3013  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3014 
3015  /* Dump security label if any */
3016  if (binfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3017  dumpSecLabel(fout, cquery->data,
3018  NULL, binfo->rolname,
3019  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3020 
3021  /* Dump ACL if any */
3022  if (binfo->blobacl && (binfo->dobj.dump & DUMP_COMPONENT_ACL))
3023  dumpACL(fout, binfo->dobj.catId, binfo->dobj.dumpId, "LARGE OBJECT",
3024  binfo->dobj.name, NULL, cquery->data,
3025  NULL, binfo->rolname, binfo->blobacl, binfo->rblobacl,
3026  binfo->initblobacl, binfo->initrblobacl);
3027 
3028  destroyPQExpBuffer(cquery);
3029  destroyPQExpBuffer(dquery);
3030 }
3031 
3032 /*
3033  * dumpBlobs:
3034  * dump the data contents of all large objects
3035  */
3036 static int
3037 dumpBlobs(Archive *fout, void *arg)
3038 {
3039  const char *blobQry;
3040  const char *blobFetchQry;
3041  PGconn *conn = GetConnection(fout);
3042  PGresult *res;
3043  char buf[LOBBUFSIZE];
3044  int ntups;
3045  int i;
3046  int cnt;
3047 
3048  if (g_verbose)
3049  write_msg(NULL, "saving large objects\n");
3050 
3051  /* Make sure we are in proper schema */
3052  selectSourceSchema(fout, "pg_catalog");
3053 
3054  /*
3055  * Currently, we re-fetch all BLOB OIDs using a cursor. Consider scanning
3056  * the already-in-memory dumpable objects instead...
3057  */
3058  if (fout->remoteVersion >= 90000)
3059  blobQry = "DECLARE bloboid CURSOR FOR SELECT oid FROM pg_largeobject_metadata";
3060  else
3061  blobQry = "DECLARE bloboid CURSOR FOR SELECT DISTINCT loid FROM pg_largeobject";
3062 
3063  ExecuteSqlStatement(fout, blobQry);
3064 
3065  /* Command to fetch from cursor */
3066  blobFetchQry = "FETCH 1000 IN bloboid";
3067 
3068  do
3069  {
3070  /* Do a fetch */
3071  res = ExecuteSqlQuery(fout, blobFetchQry, PGRES_TUPLES_OK);
3072 
3073  /* Process the tuples, if any */
3074  ntups = PQntuples(res);
3075  for (i = 0; i < ntups; i++)
3076  {
3077  Oid blobOid;
3078  int loFd;
3079 
3080  blobOid = atooid(PQgetvalue(res, i, 0));
3081  /* Open the BLOB */
3082  loFd = lo_open(conn, blobOid, INV_READ);
3083  if (loFd == -1)
3084  exit_horribly(NULL, "could not open large object %u: %s",
3085  blobOid, PQerrorMessage(conn));
3086 
3087  StartBlob(fout, blobOid);
3088 
3089  /* Now read it in chunks, sending data to archive */
3090  do
3091  {
3092  cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
3093  if (cnt < 0)
3094  exit_horribly(NULL, "error reading large object %u: %s",
3095  blobOid, PQerrorMessage(conn));
3096 
3097  WriteData(fout, buf, cnt);
3098  } while (cnt > 0);
3099 
3100  lo_close(conn, loFd);
3101 
3102  EndBlob(fout, blobOid);
3103  }
3104 
3105  PQclear(res);
3106  } while (ntups > 0);
3107 
3108  return 1;
3109 }
3110 
3111 /*
3112  * getPolicies
3113  * get information about policies on a dumpable table.
3114  */
3115 void
3116 getPolicies(Archive *fout, TableInfo tblinfo[], int numTables)
3117 {
3118  PQExpBuffer query;
3119  PGresult *res;
3120  PolicyInfo *polinfo;
3121  int i_oid;
3122  int i_tableoid;
3123  int i_polname;
3124  int i_polcmd;
3125  int i_polpermissive;
3126  int i_polroles;
3127  int i_polqual;
3128  int i_polwithcheck;
3129  int i,
3130  j,
3131  ntups;
3132 
3133  if (fout->remoteVersion < 90500)
3134  return;
3135 
3136  query = createPQExpBuffer();
3137 
3138  for (i = 0; i < numTables; i++)
3139  {
3140  TableInfo *tbinfo = &tblinfo[i];
3141 
3142  /* Ignore row security on tables not to be dumped */
3143  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_POLICY))
3144  continue;
3145 
3146  if (g_verbose)
3147  write_msg(NULL, "reading row security enabled for table \"%s.%s\"\n",
3148  tbinfo->dobj.namespace->dobj.name,
3149  tbinfo->dobj.name);
3150 
3151  /*
3152  * Get row security enabled information for the table. We represent
3153  * RLS enabled on a table by creating a PolicyInfo object with an empty
3154  * policy.
3155  */
3156  if (tbinfo->rowsec)
3157  {
3158  /*
3159  * Note: use tableoid 0 so that this object won't be mistaken for
3160  * something that pg_depend entries apply to.
3161  */
3162  polinfo = pg_malloc(sizeof(PolicyInfo));
3163  polinfo->dobj.objType = DO_POLICY;
3164  polinfo->dobj.catId.tableoid = 0;
3165  polinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
3166  AssignDumpId(&polinfo->dobj);
3167  polinfo->dobj.namespace = tbinfo->dobj.namespace;
3168  polinfo->dobj.name = pg_strdup(tbinfo->dobj.name);
3169  polinfo->poltable = tbinfo;
3170  polinfo->polname = NULL;
3171  polinfo->polcmd = '\0';
3172  polinfo->polpermissive = 0;
3173  polinfo->polroles = NULL;
3174  polinfo->polqual = NULL;
3175  polinfo->polwithcheck = NULL;
3176  }
3177 
3178  if (g_verbose)
3179  write_msg(NULL, "reading policies for table \"%s.%s\"\n",
3180  tbinfo->dobj.namespace->dobj.name,
3181  tbinfo->dobj.name);
3182 
3183  /*
3184  * select table schema to ensure regproc name is qualified if needed
3185  */
3186  selectSourceSchema(fout, tbinfo->dobj.namespace->dobj.name);
3187 
3188  resetPQExpBuffer(query);
3189 
3190  /* Get the policies for the table. */
3191  if (fout->remoteVersion >= 100000)
3192  appendPQExpBuffer(query,
3193  "SELECT oid, tableoid, pol.polname, pol.polcmd, pol.polpermissive, "
3194  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
3195  " pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
3196  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
3197  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
3198  "FROM pg_catalog.pg_policy pol "
3199  "WHERE polrelid = '%u'",
3200  tbinfo->dobj.catId.oid);
3201  else
3202  appendPQExpBuffer(query,
3203  "SELECT oid, tableoid, pol.polname, pol.polcmd, 't' as polpermissive, "
3204  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
3205  " pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
3206  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
3207  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
3208  "FROM pg_catalog.pg_policy pol "
3209  "WHERE polrelid = '%u'",
3210  tbinfo->dobj.catId.oid);
3211  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3212 
3213  ntups = PQntuples(res);
3214 
3215  if (ntups == 0)
3216  {
3217  /*
3218  * No explicit policies to handle (only the default-deny policy,
3219  * which is handled as part of the table definition). Clean up
3220  * and return.
3221  */
3222  PQclear(res);
3223  continue;
3224  }
3225 
3226  i_oid = PQfnumber(res, "oid");
3227  i_tableoid = PQfnumber(res, "tableoid");
3228  i_polname = PQfnumber(res, "polname");
3229  i_polcmd = PQfnumber(res, "polcmd");
3230  i_polpermissive = PQfnumber(res, "polpermissive");
3231  i_polroles = PQfnumber(res, "polroles");
3232  i_polqual = PQfnumber(res, "polqual");
3233  i_polwithcheck = PQfnumber(res, "polwithcheck");
3234 
3235  polinfo = pg_malloc(ntups * sizeof(PolicyInfo));
3236 
3237  for (j = 0; j < ntups; j++)
3238  {
3239  polinfo[j].dobj.objType = DO_POLICY;
3240  polinfo[j].dobj.catId.tableoid =
3241  atooid(PQgetvalue(res, j, i_tableoid));
3242  polinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
3243  AssignDumpId(&polinfo[j].dobj);
3244  polinfo[j].dobj.namespace = tbinfo->dobj.namespace;
3245  polinfo[j].poltable = tbinfo;
3246  polinfo[j].polname = pg_strdup(PQgetvalue(res, j, i_polname));
3247  polinfo[j].dobj.name = pg_strdup(polinfo[j].polname);
3248 
3249  polinfo[j].polcmd = *(PQgetvalue(res, j, i_polcmd));
3250  polinfo[j].polpermissive = *(PQgetvalue(res, j, i_polpermissive)) == 't';
3251 
3252  if (PQgetisnull(res, j, i_polroles))
3253  polinfo[j].polroles = NULL;
3254  else
3255  polinfo[j].polroles = pg_strdup(PQgetvalue(res, j, i_polroles));
3256 
3257  if (PQgetisnull(res, j, i_polqual))
3258  polinfo[j].polqual = NULL;
3259  else
3260  polinfo[j].polqual = pg_strdup(PQgetvalue(res, j, i_polqual));
3261 
3262  if (PQgetisnull(res, j, i_polwithcheck))
3263  polinfo[j].polwithcheck = NULL;
3264  else
3265  polinfo[j].polwithcheck
3266  = pg_strdup(PQgetvalue(res, j, i_polwithcheck));
3267  }
3268  PQclear(res);
3269  }
3270  destroyPQExpBuffer(query);
3271 }
3272 
3273 /*
3274  * dumpPolicy
3275  * dump the definition of the given policy
3276  */
3277 static void
3278 dumpPolicy(Archive *fout, PolicyInfo *polinfo)
3279 {
3280  DumpOptions *dopt = fout->dopt;
3281  TableInfo *tbinfo = polinfo->poltable;
3282  PQExpBuffer query;
3283  PQExpBuffer delqry;
3284  const char *cmd;
3285  char *tag;
3286 
3287  if (dopt->dataOnly)
3288  return;
3289 
3290  /*
3291  * If polname is NULL, then this record is just indicating that ROW LEVEL
3292  * SECURITY is enabled for the table. Dump as ALTER TABLE <table> ENABLE
3293  * ROW LEVEL SECURITY.
3294  */
3295  if (polinfo->polname == NULL)
3296  {
3297  query = createPQExpBuffer();
3298 
3299  appendPQExpBuffer(query, "ALTER TABLE %s ENABLE ROW LEVEL SECURITY;",
3300  fmtId(polinfo->dobj.name));
3301 
3302  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3303  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3304  polinfo->dobj.name,
3305  polinfo->dobj.namespace->dobj.name,
3306  NULL,
3307  tbinfo->rolname, false,
3308  "ROW SECURITY", SECTION_POST_DATA,
3309  query->data, "", NULL,
3310  NULL, 0,
3311  NULL, NULL);
3312 
3313  destroyPQExpBuffer(query);
3314  return;
3315  }
3316 
3317  if (polinfo->polcmd == '*')
3318  cmd = "";
3319  else if (polinfo->polcmd == 'r')
3320  cmd = " FOR SELECT";
3321  else if (polinfo->polcmd == 'a')
3322  cmd = " FOR INSERT";
3323  else if (polinfo->polcmd == 'w')
3324  cmd = " FOR UPDATE";
3325  else if (polinfo->polcmd == 'd')
3326  cmd = " FOR DELETE";
3327  else
3328  {
3329  write_msg(NULL, "unexpected policy command type: %c\n",
3330  polinfo->polcmd);
3331  exit_nicely(1);
3332  }
3333 
3334  query = createPQExpBuffer();
3335  delqry = createPQExpBuffer();
3336 
3337  appendPQExpBuffer(query, "CREATE POLICY %s", fmtId(polinfo->polname));
3338 
3339  appendPQExpBuffer(query, " ON %s%s%s", fmtId(tbinfo->dobj.name),
3340  !polinfo->polpermissive ? " AS RESTRICTIVE" : "", cmd);
3341 
3342  if (polinfo->polroles != NULL)
3343  appendPQExpBuffer(query, " TO %s", polinfo->polroles);
3344 
3345  if (polinfo->polqual != NULL)
3346  appendPQExpBuffer(query, " USING (%s)", polinfo->polqual);
3347 
3348  if (polinfo->polwithcheck != NULL)
3349  appendPQExpBuffer(query, " WITH CHECK (%s)", polinfo->polwithcheck);
3350 
3351  appendPQExpBuffer(query, ";\n");
3352 
3353  appendPQExpBuffer(delqry, "DROP POLICY %s", fmtId(polinfo->polname));
3354  appendPQExpBuffer(delqry, " ON %s;\n", fmtId(tbinfo->dobj.name));
3355 
3356  tag = psprintf("%s %s", tbinfo->dobj.name, polinfo->dobj.name);
3357 
3358  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3359  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3360  tag,
3361  polinfo->dobj.namespace->dobj.name,
3362  NULL,
3363  tbinfo->rolname, false,
3364  "POLICY", SECTION_POST_DATA,
3365  query->data, delqry->data, NULL,
3366  NULL, 0,
3367  NULL, NULL);
3368 
3369  free(tag);
3370  destroyPQExpBuffer(query);
3371  destroyPQExpBuffer(delqry);
3372 }
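/*
 * Illustrative only: for a policy restricting SELECT to rows owned by the
 * current user (made-up names), dumpPolicy() above emits roughly:
 *
 *		CREATE POLICY p_owner ON mytable FOR SELECT TO staff
 *			USING ((owner = CURRENT_USER));
 *
 * with " AS RESTRICTIVE" added when the policy is not permissive.
 */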
3373 
3374 /*
3375  * getPublications
3376  * get information about publications
3377  */
3378 void
3379 getPublications(Archive *fout)
3380 {
3381  DumpOptions *dopt = fout->dopt;
3382  PQExpBuffer query;
3383  PGresult *res;
3384  PublicationInfo *pubinfo;
3385  int i_tableoid;
3386  int i_oid;
3387  int i_pubname;
3388  int i_rolname;
3389  int i_puballtables;
3390  int i_pubinsert;
3391  int i_pubupdate;
3392  int i_pubdelete;
3393  int i,
3394  ntups;
3395 
3396  if (dopt->no_publications || fout->remoteVersion < 100000)
3397  return;
3398 
3399  query = createPQExpBuffer();
3400 
3401  resetPQExpBuffer(query);
3402 
3403  /* Get the publications. */
3404  appendPQExpBuffer(query,
3405  "SELECT p.tableoid, p.oid, p.pubname, "
3406  "(%s p.pubowner) AS rolname, "
3407  "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete "
3408  "FROM pg_catalog.pg_publication p",
3409  username_subquery);
3410 
3411  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3412 
3413  ntups = PQntuples(res);
3414 
3415  i_tableoid = PQfnumber(res, "tableoid");
3416  i_oid = PQfnumber(res, "oid");
3417  i_pubname = PQfnumber(res, "pubname");
3418  i_rolname = PQfnumber(res, "rolname");
3419  i_puballtables = PQfnumber(res, "puballtables");
3420  i_pubinsert = PQfnumber(res, "pubinsert");
3421  i_pubupdate = PQfnumber(res, "pubupdate");
3422  i_pubdelete = PQfnumber(res, "pubdelete");
3423 
3424  pubinfo = pg_malloc(ntups * sizeof(PublicationInfo));
3425 
3426  for (i = 0; i < ntups; i++)
3427  {
3428  pubinfo[i].dobj.objType = DO_PUBLICATION;
3429  pubinfo[i].dobj.catId.tableoid =
3430  atooid(PQgetvalue(res, i, i_tableoid));
3431  pubinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
3432  AssignDumpId(&pubinfo[i].dobj);
3433  pubinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_pubname));
3434  pubinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
3435  pubinfo[i].puballtables =
3436  (strcmp(PQgetvalue(res, i, i_puballtables), "t") == 0);
3437  pubinfo[i].pubinsert =
3438  (strcmp(PQgetvalue(res, i, i_pubinsert), "t") == 0);
3439  pubinfo[i].pubupdate =
3440  (strcmp(PQgetvalue(res, i, i_pubupdate), "t") == 0);
3441  pubinfo[i].pubdelete =
3442  (strcmp(PQgetvalue(res, i, i_pubdelete), "t") == 0);
3443 
3444  if (strlen(pubinfo[i].rolname) == 0)
3445  write_msg(NULL, "WARNING: owner of publication \"%s\" appears to be invalid\n",
3446  pubinfo[i].dobj.name);
3447 
3448  /* Decide whether we want to dump it */
3449  selectDumpableObject(&(pubinfo[i].dobj), fout);
3450  }
3451  PQclear(res);
3452 
3453  destroyPQExpBuffer(query);
3454 }
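/*
 * Editor's sketch (not part of pg_dump.c): libpq returns boolean catalog
 * columns as the strings "t" and "f", which is why the loop above converts
 * puballtables and friends with strcmp().  A minimal standalone
 * illustration, using a hypothetical value, kept compiled out with #if 0:
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

int
main(void)
{
	const char *puballtables = "t";	/* hypothetical PQgetvalue() result */
	bool		flag = (strcmp(puballtables, "t") == 0);

	printf("puballtables = %s\n", flag ? "true" : "false");
	return 0;
}
#endif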
3455 
3456 /*
3457  * dumpPublication
3458  * dump the definition of the given publication
3459  */
3460 static void
3461 dumpPublication(Archive *fout, PublicationInfo *pubinfo)
3462 {
3463  PQExpBuffer delq;
3464  PQExpBuffer query;
3465  PQExpBuffer labelq;
3466  bool first = true;
3467 
3468  if (!(pubinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3469  return;
3470 
3471  delq = createPQExpBuffer();
3472  query = createPQExpBuffer();
3473  labelq = createPQExpBuffer();
3474 
3475  appendPQExpBuffer(delq, "DROP PUBLICATION %s;\n",
3476  fmtId(pubinfo->dobj.name));
3477 
3478  appendPQExpBuffer(query, "CREATE PUBLICATION %s",
3479  fmtId(pubinfo->dobj.name));
3480 
3481  appendPQExpBuffer(labelq, "PUBLICATION %s", fmtId(pubinfo->dobj.name));
3482 
3483  if (pubinfo->puballtables)
3484  appendPQExpBufferStr(query, " FOR ALL TABLES");
3485 
3486  appendPQExpBufferStr(query, " WITH (publish = '");
3487  if (pubinfo->pubinsert)
3488  {
3489  appendPQExpBufferStr(query, "insert");
3490  first = false;
3491  }
3492 
3493  if (pubinfo->pubupdate)
3494  {
3495  if (!first)
3496  appendPQExpBufferStr(query, ", ");
3497 
3498  appendPQExpBufferStr(query, "update");
3499  first = false;
3500  }
3501 
3502  if (pubinfo->pubdelete)
3503  {
3504  if (!first)
3505  appendPQExpBufferStr(query, ", ");
3506 
3507  appendPQExpBufferStr(query, "delete");
3508  first = false;
3509  }
3510 
3511  appendPQExpBufferStr(query, "');\n");
3512 
3513  ArchiveEntry(fout, pubinfo->dobj.catId, pubinfo->dobj.dumpId,
3514  pubinfo->dobj.name,
3515  NULL,
3516  NULL,
3517  pubinfo->rolname, false,
3518  "PUBLICATION", SECTION_POST_DATA,
3519  query->data, delq->data, NULL,
3520  NULL, 0,
3521  NULL, NULL);
3522 
3523  if (pubinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3524  dumpComment(fout, labelq->data,
3525  NULL, pubinfo->rolname,
3526  pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
3527 
3528  if (pubinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3529  dumpSecLabel(fout, labelq->data,
3530  NULL, pubinfo->rolname,
3531  pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
3532 
3533  destroyPQExpBuffer(delq);
3534  destroyPQExpBuffer(query);
3535 }
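/*
 * Editor's note (not part of pg_dump.c): for a publication that publishes
 * all three operations on all tables, the query buffer built above ends up
 * holding SQL of this shape (the publication name is hypothetical):
 *
 *     CREATE PUBLICATION pub1 FOR ALL TABLES
 *         WITH (publish = 'insert, update, delete');
 */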
3536 
3537 /*
3538  * getPublicationTables
3539  * get information about publication membership for dumpable tables.
3540  */
3541 void
3542 getPublicationTables(Archive *fout, TableInfo tblinfo[], int numTables)
3543 {
3544  PQExpBuffer query;
3545  PGresult *res;
3546  PublicationRelInfo *pubrinfo;
3547  int i_tableoid;
3548  int i_oid;
3549  int i_pubname;
3550  int i,
3551  j,
3552  ntups;
3553 
3554  if (fout->remoteVersion < 100000)
3555  return;
3556 
3557  query = createPQExpBuffer();
3558 
3559  for (i = 0; i < numTables; i++)
3560  {
3561  TableInfo *tbinfo = &tblinfo[i];
3562 
3563  /* Only plain tables can be added to publications. */
3564  if (tbinfo->relkind != RELKIND_RELATION)
3565  continue;
3566 
3567  /*
3568  * Ignore publication membership of tables whose definitions are not
3569  * to be dumped.
3570  */
3571  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3572  continue;
3573 
3574  if (g_verbose)
3575  write_msg(NULL, "reading publication membership for table \"%s.%s\"\n",
3576  tbinfo->dobj.namespace->dobj.name,
3577  tbinfo->dobj.name);
3578 
3579  resetPQExpBuffer(query);
3580 
3581  /* Get the publication membership for the table. */
3582  appendPQExpBuffer(query,
3583  "SELECT pr.tableoid, pr.oid, p.pubname "
3584  "FROM pg_catalog.pg_publication_rel pr,"
3585  " pg_catalog.pg_publication p "
3586  "WHERE pr.prrelid = '%u'"
3587  " AND p.oid = pr.prpubid",
3588  tbinfo->dobj.catId.oid);
3589  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3590 
3591  ntups = PQntuples(res);
3592 
3593  if (ntups == 0)
3594  {
3595  /*
3596  * Table is not a member of any publications. Clean up and continue.
3597  */
3598  PQclear(res);
3599  continue;
3600  }
3601 
3602  i_tableoid = PQfnumber(res, "tableoid");
3603  i_oid = PQfnumber(res, "oid");
3604  i_pubname = PQfnumber(res, "pubname");
3605 
3606  pubrinfo = pg_malloc(ntups * sizeof(PublicationRelInfo));
3607 
3608  for (j = 0; j < ntups; j++)
3609  {
3610  pubrinfo[j].dobj.objType = DO_PUBLICATION_REL;
3611  pubrinfo[j].dobj.catId.tableoid =
3612  atooid(PQgetvalue(res, j, i_tableoid));
3613  pubrinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
3614  AssignDumpId(&pubrinfo[j].dobj);
3615  pubrinfo[j].dobj.namespace = tbinfo->dobj.namespace;
3616  pubrinfo[j].dobj.name = tbinfo->dobj.name;
3617  pubrinfo[j].pubname = pg_strdup(PQgetvalue(res, j, i_pubname));
3618  pubrinfo[j].pubtable = tbinfo;
3619 
3620  /* Decide whether we want to dump it */
3621  selectDumpablePublicationTable(&(pubrinfo[j].dobj), fout);
3622  }
3623  PQclear(res);
3624  }
3625  destroyPQExpBuffer(query);
3626 }
3627 
3628 /*
3629  * dumpPublicationTable
3630  * dump the definition of the given publication table mapping
3631  */
3632 static void
3633 dumpPublicationTable(Archive *fout, PublicationRelInfo *pubrinfo)
3634 {
3635  TableInfo *tbinfo = pubrinfo->pubtable;
3636  PQExpBuffer query;
3637  char *tag;
3638 
3639  if (!(pubrinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3640  return;
3641 
3642  tag = psprintf("%s %s", pubrinfo->pubname, tbinfo->dobj.name);
3643 
3644  query = createPQExpBuffer();
3645 
3646  appendPQExpBuffer(query, "ALTER PUBLICATION %s ADD TABLE ONLY",
3647  fmtId(pubrinfo->pubname));
3648  appendPQExpBuffer(query, " %s;",
3649  fmtId(tbinfo->dobj.name));
3650 
3651  /*
3652  * There is no point in creating a drop query, as the drop is
3653  * done implicitly when the table itself is dropped.
3654  */
3655  ArchiveEntry(fout, pubrinfo->dobj.catId, pubrinfo->dobj.dumpId,
3656  tag,
3657  tbinfo->dobj.namespace->dobj.name,
3658  NULL,
3659  "", false,
3660  "PUBLICATION TABLE", SECTION_POST_DATA,
3661  query->data, "", NULL,
3662  NULL, 0,
3663  NULL, NULL);
3664 
3665  free(tag);
3666  destroyPQExpBuffer(query);
3667 }
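/*
 * Editor's note (not part of pg_dump.c): each publication membership entry
 * becomes one statement of this shape (names hypothetical):
 *
 *     ALTER PUBLICATION pub1 ADD TABLE ONLY t1;
 */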
3668 
3669 /*
3670  * Is the currently connected user a superuser?
3671  */
3672 static bool
3673 is_superuser(Archive *fout)
3674 {
3675  ArchiveHandle *AH = (ArchiveHandle *) fout;
3676  const char *val;
3677 
3678  val = PQparameterStatus(AH->connection, "is_superuser");
3679 
3680  if (val && strcmp(val, "on") == 0)
3681  return true;
3682 
3683  return false;
3684 }
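/*
 * Editor's sketch (not part of pg_dump.c): is_superuser() relies on the
 * server reporting "is_superuser" as a status parameter, so it can be read
 * client-side with PQparameterStatus() without issuing a query.  A minimal
 * standalone illustration (connection string hypothetical), kept compiled
 * out with #if 0:
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <libpq-fe.h>

int
main(void)
{
	PGconn	   *conn = PQconnectdb("dbname=postgres");	/* hypothetical */
	const char *val;

	if (PQstatus(conn) != CONNECTION_OK)
	{
		fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
		PQfinish(conn);
		return 1;
	}

	/* reported automatically by the backend; no query needed */
	val = PQparameterStatus(conn, "is_superuser");
	printf("superuser: %s\n", (val && strcmp(val, "on") == 0) ? "yes" : "no");

	PQfinish(conn);
	return 0;
}
#endif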
3685 
3686 /*
3687  * getSubscriptions
3688  * get information about subscriptions
3689  */
3690 void
3691 getSubscriptions(Archive *fout)
3692 {
3693  DumpOptions *dopt = fout->dopt;
3694  PQExpBuffer query;
3695  PGresult *res;
3696  SubscriptionInfo *subinfo;
3697  int i_tableoid;
3698  int i_oid;
3699  int i_subname;
3700  int i_rolname;
3701  int i_subconninfo;
3702  int i_subslotname;
3703  int i_subsynccommit;
3704  int i_subpublications;
3705  int i,
3706  ntups;
3707 
3708  if (dopt->no_subscriptions || fout->remoteVersion < 100000)
3709  return;
3710 
3711  if (!is_superuser(fout))
3712  {
3713  int n;
3714 
3715  res = ExecuteSqlQuery(fout,
3716  "SELECT count(*) FROM pg_subscription "
3717  "WHERE subdbid = (SELECT oid FROM pg_catalog.pg_database"
3718  " WHERE datname = current_database())",
3719  PGRES_TUPLES_OK);
3720  n = atoi(PQgetvalue(res, 0, 0));
3721  if (n > 0)
3722  write_msg(NULL, "WARNING: subscriptions not dumped because current user is not a superuser\n");
3723  PQclear(res);
3724  return;
3725  }
3726 
3727  query = createPQExpBuffer();
3728 
3729  resetPQExpBuffer(query);
3730 
3731  /* Get the subscriptions in the current database. */
3732  appendPQExpBuffer(query,
3733  "SELECT s.tableoid, s.oid, s.subname,"
3734  "(%s s.subowner) AS rolname, "
3735  " s.subconninfo, s.subslotname, s.subsynccommit, "
3736  " s.subpublications "
3737  "FROM pg_catalog.pg_subscription s "
3738  "WHERE s.subdbid = (SELECT oid FROM pg_catalog.pg_database"
3739  " WHERE datname = current_database())",
3741  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3742 
3743  ntups = PQntuples(res);
3744 
3745  i_tableoid = PQfnumber(res, "tableoid");
3746  i_oid = PQfnumber(res, "oid");
3747  i_subname = PQfnumber(res, "subname");
3748  i_rolname = PQfnumber(res, "rolname");
3749  i_subconninfo = PQfnumber(res, "subconninfo");
3750  i_subslotname = PQfnumber(res, "subslotname");
3751  i_subsynccommit = PQfnumber(res, "subsynccommit");
3752  i_subpublications = PQfnumber(res, "subpublications");
3753 
3754  subinfo = pg_malloc(ntups * sizeof(SubscriptionInfo));
3755 
3756  for (i = 0; i < ntups; i++)
3757  {
3758  subinfo[i].dobj.objType = DO_SUBSCRIPTION;
3759  subinfo[i].dobj.catId.tableoid =
3760  atooid(PQgetvalue(res, i, i_tableoid));
3761  subinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
3762  AssignDumpId(&subinfo[i].dobj);
3763  subinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_subname));
3764  subinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
3765  subinfo[i].subconninfo = pg_strdup(PQgetvalue(res, i, i_subconninfo));
3766  if (PQgetisnull(res, i, i_subslotname))
3767  subinfo[i].subslotname = NULL;
3768  else
3769  subinfo[i].subslotname = pg_strdup(PQgetvalue(res, i, i_subslotname));
3770  subinfo[i].subsynccommit =
3771  pg_strdup(PQgetvalue(res, i, i_subsynccommit));
3772  subinfo[i].subpublications =
3773  pg_strdup(PQgetvalue(res, i, i_subpublications));
3774 
3775  if (strlen(subinfo[i].rolname) == 0)
3776  write_msg(NULL, "WARNING: owner of subscription \"%s\" appears to be invalid\n",
3777  subinfo[i].dobj.name);
3778 
3779  /* Decide whether we want to dump it */
3780  selectDumpableObject(&(subinfo[i].dobj), fout);
3781  }
3782  PQclear(res);
3783 
3784  destroyPQExpBuffer(query);
3785 }
3786 
3787 /*
3788  * dumpSubscription
3789  * dump the definition of the given subscription
3790  */
3791 static void
3792 dumpSubscription(Archive *fout, SubscriptionInfo *subinfo)
3793 {
3794  PQExpBuffer delq;
3795  PQExpBuffer query;
3796  PQExpBuffer labelq;
3797  PQExpBuffer publications;
3798  char **pubnames = NULL;
3799  int npubnames = 0;
3800  int i;
3801 
3802  if (!(subinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3803  return;
3804 
3805  delq = createPQExpBuffer();
3806  query = createPQExpBuffer();
3807  labelq = createPQExpBuffer();
3808 
3809  appendPQExpBuffer(delq, "DROP SUBSCRIPTION %s;\n",
3810  fmtId(subinfo->dobj.name));
3811 
3812  appendPQExpBuffer(query, "CREATE SUBSCRIPTION %s CONNECTION ",
3813  fmtId(subinfo->dobj.name));
3814  appendStringLiteralAH(query, subinfo->subconninfo, fout);
3815 
3816  /* Build list of quoted publications and append them to query. */
3817  if (!parsePGArray(subinfo->subpublications, &pubnames, &npubnames))
3818  {
3819  write_msg(NULL,
3820  "WARNING: could not parse subpublications array\n");
3821  if (pubnames)
3822  free(pubnames);
3823  pubnames = NULL;
3824  npubnames = 0;
3825  }
3826 
3827  publications = createPQExpBuffer();
3828  for (i = 0; i < npubnames; i++)
3829  {
3830  if (i > 0)
3831  appendPQExpBufferStr(publications, ", ");
3832 
3833  appendPQExpBufferStr(publications, fmtId(pubnames[i]));
3834  }
3835 
3836  appendPQExpBuffer(query, " PUBLICATION %s WITH (connect = false, slot_name = ", publications->data);
3837  if (subinfo->subslotname)
3838  appendStringLiteralAH(query, subinfo->subslotname, fout);
3839  else
3840  appendPQExpBufferStr(query, "NONE");
3841 
3842  if (strcmp(subinfo->subsynccommit, "off") != 0)
3843  appendPQExpBuffer(query, ", synchronous_commit = %s", fmtId(subinfo->subsynccommit));
3844 
3845  appendPQExpBufferStr(query, ");\n");
3846 
3847  appendPQExpBuffer(labelq, "SUBSCRIPTION %s", fmtId(subinfo->dobj.name));
3848 
3849  ArchiveEntry(fout, subinfo->dobj.catId, subinfo->dobj.dumpId,
3850  subinfo->dobj.name,
3851  NULL,
3852  NULL,
3853  subinfo->rolname, false,
3854  "SUBSCRIPTION", SECTION_POST_DATA,
3855  query->data, delq->data, NULL,
3856  NULL, 0,
3857  NULL, NULL);
3858 
3859  if (subinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3860  dumpComment(fout, labelq->data,
3861  NULL, subinfo->rolname,
3862  subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
3863 
3864  if (subinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3865  dumpSecLabel(fout, labelq->data,
3866  NULL, subinfo->rolname,
3867  subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
3868 
3869  destroyPQExpBuffer(publications);
3870  if (pubnames)
3871  free(pubnames);
3872 
3873  destroyPQExpBuffer(delq);
3874  destroyPQExpBuffer(query);
3875 }
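/*
 * Editor's note (not part of pg_dump.c): subscriptions are dumped with
 * connect = false so that restoring the dump does not try to reach the
 * publisher, and the original slot name is carried along explicitly.  The
 * generated SQL has roughly this shape (connection string and names are
 * hypothetical):
 *
 *     CREATE SUBSCRIPTION sub1 CONNECTION 'host=pub.example dbname=src'
 *         PUBLICATION pub1 WITH (connect = false, slot_name = 'sub1');
 */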
3876 
3877 static void
3878 binary_upgrade_set_type_oids_by_type_oid(Archive *fout,
3879  PQExpBuffer upgrade_buffer,
3880  Oid pg_type_oid)
3881 {
3882  PQExpBuffer upgrade_query = createPQExpBuffer();
3883  PGresult *upgrade_res;
3884  Oid pg_type_array_oid;
3885 
3886  appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type oid\n");
3887  appendPQExpBuffer(upgrade_buffer,
3888  "SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('%u'::pg_catalog.oid);\n\n",
3889  pg_type_oid);
3890 
3891  /* we only support old >= 8.3 for binary upgrades */
3892  appendPQExpBuffer(upgrade_query,
3893  "SELECT typarray "
3894  "FROM pg_catalog.pg_type "
3895  "WHERE pg_type.oid = '%u'::pg_catalog.oid;",
3896  pg_type_oid);
3897 
3898  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
3899 
3900  pg_type_array_oid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "typarray")));
3901 
3902  if (OidIsValid(pg_type_array_oid))
3903  {
3904  appendPQExpBufferStr(upgrade_buffer,
3905  "\n-- For binary upgrade, must preserve pg_type array oid\n");
3906  appendPQExpBuffer(upgrade_buffer,
3907  "SELECT pg_catalog.binary_upgrade_set_next_array_pg_type_oid('%u'::pg_catalog.oid);\n\n",
3908  pg_type_array_oid);
3909  }
3910 
3911  PQclear(upgrade_res);
3912  destroyPQExpBuffer(upgrade_query);
3913 }
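/*
 * Editor's note (not part of pg_dump.c): the upgrade buffer built above
 * carries OID-preservation calls like the following ahead of the CREATE TYPE
 * command (the OIDs shown are hypothetical):
 *
 *     -- For binary upgrade, must preserve pg_type oid
 *     SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('16384'::pg_catalog.oid);
 *
 *     -- For binary upgrade, must preserve pg_type array oid
 *     SELECT pg_catalog.binary_upgrade_set_next_array_pg_type_oid('16385'::pg_catalog.oid);
 */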
3914 
3915 static bool
3916 binary_upgrade_set_type_oids_by_rel_oid(Archive *fout,
3917  PQExpBuffer upgrade_buffer,
3918  Oid pg_rel_oid)
3919 {
3920  PQExpBuffer upgrade_query = createPQExpBuffer();
3921  PGresult *upgrade_res;
3922  Oid pg_type_oid;
3923  bool toast_set = false;
3924 
3925  /* we only support old >= 8.3 for binary upgrades */
3926  appendPQExpBuffer(upgrade_query,
3927  "SELECT c.reltype AS crel, t.reltype AS trel "
3928  "FROM pg_catalog.pg_class c "
3929  "LEFT JOIN pg_catalog.pg_class t ON "
3930  " (c.reltoastrelid = t.oid) "
3931  "WHERE c.oid = '%u'::pg_catalog.oid;",
3932  pg_rel_oid);
3933 
3934  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
3935 
3936  pg_type_oid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "crel")));
3937 
3938  binary_upgrade_set_type_oids_by_type_oid(fout, upgrade_buffer,
3939  pg_type_oid);
3940 
3941  if (!PQgetisnull(upgrade_res, 0, PQfnumber(upgrade_res, "trel")))
3942  {
3943  /* Toast tables do not have pg_type array rows */
3944  Oid pg_type_toast_oid = atooid(PQgetvalue(upgrade_res, 0,
3945  PQfnumber(upgrade_res, "trel")));
3946 
3947  appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type toast oid\n");
3948  appendPQExpBuffer(upgrade_buffer,
3949  "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_type_oid('%u'::pg_catalog.oid);\n\n",
3950  pg_type_toast_oid);
3951 
3952  toast_set = true;
3953  }
3954 
3955  PQclear(upgrade_res);
3956  destroyPQExpBuffer(upgrade_query);
3957 
3958  return toast_set;
3959 }
3960 
3961 static void
3962 binary_upgrade_set_pg_class_oids(Archive *fout,
3963  PQExpBuffer upgrade_buffer, Oid pg_class_oid,
3964  bool is_index)
3965 {
3966  PQExpBuffer upgrade_query = createPQExpBuffer();
3967  PGresult *upgrade_res;
3968  Oid pg_class_reltoastrelid;
3969  Oid pg_index_indexrelid;
3970 
3971  appendPQExpBuffer(upgrade_query,
3972  "SELECT c.reltoastrelid, i.indexrelid "
3973  "FROM pg_catalog.pg_class c LEFT JOIN "
3974  "pg_catalog.pg_index i ON (c.reltoastrelid = i.indrelid AND i.indisvalid) "
3975  "WHERE c.oid = '%u'::pg_catalog.oid;",
3976  pg_class_oid);
3977 
3978  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
3979 
3980  pg_class_reltoastrelid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "reltoastrelid")));
3981  pg_index_indexrelid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "indexrelid")));
3982 
3983  appendPQExpBufferStr(upgrade_buffer,
3984  "\n-- For binary upgrade, must preserve pg_class oids\n");
3985 
3986  if (!is_index)
3987  {
3988  appendPQExpBuffer(upgrade_buffer,
3989  "SELECT pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('%u'::pg_catalog.oid);\n",
3990  pg_class_oid);
3991  /* only tables have toast tables, not indexes */
3992  if (OidIsValid(pg_class_reltoastrelid))
3993  {
3994  /*
3995  * One complexity is that the table definition might not require
3996  * the creation of a TOAST table, and the TOAST table might have
3997  * been created long after table creation, when the table was
3998  * loaded with wide data. By setting the TOAST oid we force
3999  * creation of the TOAST heap and TOAST index by the backend so we
4000  * can cleanly copy the files during binary upgrade.
4001  */
4002 
4003  appendPQExpBuffer(upgrade_buffer,
4004  "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('%u'::pg_catalog.oid);\n",
4005  pg_class_reltoastrelid);
4006 
4007  /* every toast table has an index */
4008  appendPQExpBuffer(upgrade_buffer,
4009  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
4010  pg_index_indexrelid);
4011  }
4012  }
4013  else
4014  appendPQExpBuffer(upgrade_buffer,
4015  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
4016  pg_class_oid);
4017 
4018  appendPQExpBufferChar(upgrade_buffer, '\n');
4019 
4020  PQclear(upgrade_res);
4021  destroyPQExpBuffer(upgrade_query);
4022 }
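/*
 * Editor's note (not part of pg_dump.c): for a plain table that has a TOAST
 * table, the three calls emitted above pin the pg_class OIDs of the heap,
 * the TOAST heap, and the TOAST index, e.g. (hypothetical OIDs):
 *
 *     SELECT pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('16400'::pg_catalog.oid);
 *     SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('16403'::pg_catalog.oid);
 *     SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('16405'::pg_catalog.oid);
 */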
4023 
4024 /*
4025  * If the DumpableObject is a member of an extension, add a suitable
4026  * ALTER EXTENSION ADD command to the creation commands in upgrade_buffer.
4027  */
4028 static void
4029 binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
4030  DumpableObject *dobj,
4031  const char *objlabel)
4032 {
4033  DumpableObject *extobj = NULL;
4034  int i;
4035 
4036  if (!dobj->ext_member)
4037  return;
4038 
4039  /*
4040  * Find the parent extension. We could avoid this search if we wanted to
4041  * add a link field to DumpableObject, but the space costs of that would
4042  * be considerable. We assume that member objects could only have a
4043  * direct dependency on their own extension, not any others.
4044  */
4045  for (i = 0; i < dobj->nDeps; i++)
4046  {
4047  extobj = findObjectByDumpId(dobj->dependencies[i]);
4048  if (extobj && extobj->objType == DO_EXTENSION)
4049  break;
4050  extobj = NULL;
4051  }
4052  if (extobj == NULL)
4053  exit_horribly(NULL, "could not find parent extension for %s\n", objlabel);
4054 
4055  appendPQExpBufferStr(upgrade_buffer,
4056  "\n-- For binary upgrade, handle extension membership the hard way\n");
4057  appendPQExpBuffer(upgrade_buffer, "ALTER EXTENSION %s ADD %s;\n",
4058  fmtId(extobj->name),
4059  objlabel);
4060 }
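/*
 * Editor's note (not part of pg_dump.c): in binary-upgrade mode, extension
 * member objects are created as free-standing objects and then re-attached,
 * so the buffer ends with a command of this shape (the extension and object
 * names are hypothetical):
 *
 *     ALTER EXTENSION hstore ADD FUNCTION public.hstore_in(cstring);
 */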
4061 
4062 /*
4063  * getNamespaces:
4064  * read all namespaces in the system catalogs and return them in the
4065  * NamespaceInfo* structure
4066  *
4067  * numNamespaces is set to the number of namespaces read in
4068  */
4069 NamespaceInfo *
4070 getNamespaces(Archive *fout, int *numNamespaces)
4071 {
4072  DumpOptions *dopt = fout->dopt;
4073  PGresult *res;
4074  int ntups;
4075  int i;
4076  PQExpBuffer query;
4077  NamespaceInfo *nsinfo;
4078  int i_tableoid;
4079  int i_oid;
4080  int i_nspname;
4081  int i_rolname;
4082  int i_nspacl;
4083  int i_rnspacl;
4084  int i_initnspacl;
4085  int i_initrnspacl;
4086 
4087  query = createPQExpBuffer();
4088 
4089  /* Make sure we are in proper schema */
4090  selectSourceSchema(fout, "pg_catalog");
4091 
4092  /*
4093  * we fetch all namespaces including system ones, so that every object we
4094  * read in can be linked to a containing namespace.
4095  */
4096  if (fout->remoteVersion >= 90600)
4097  {
4098  PQExpBuffer acl_subquery = createPQExpBuffer();
4099  PQExpBuffer racl_subquery = createPQExpBuffer();
4100  PQExpBuffer init_acl_subquery = createPQExpBuffer();
4101  PQExpBuffer init_racl_subquery = createPQExpBuffer();
4102 
4103  buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
4104  init_racl_subquery, "n.nspacl", "n.nspowner", "'n'",
4105  dopt->binary_upgrade);
4106 
4107  appendPQExpBuffer(query, "SELECT n.tableoid, n.oid, n.nspname, "
4108  "(%s nspowner) AS rolname, "
4109  "%s as nspacl, "
4110  "%s as rnspacl, "
4111  "%s as initnspacl, "
4112  "%s as initrnspacl "
4113  "FROM pg_namespace n "
4114  "LEFT JOIN pg_init_privs pip "
4115  "ON (n.oid = pip.objoid "
4116  "AND pip.classoid = 'pg_namespace'::regclass "
4117  "AND pip.objsubid = 0",
4119  acl_subquery->data,
4120  racl_subquery->data,
4121  init_acl_subquery->data,
4122  init_racl_subquery->data);
4123 
4124  /*
4125  * When we are doing a 'clean' run, we will be dropping and recreating
4126  * the 'public' schema (the only object which has that kind of
4127  * treatment in the backend and which has an entry in pg_init_privs)
4128  * and therefore we should not consider any initial privileges in
4129  * pg_init_privs in that case.
4130  *
4131  * See pg_backup_archiver.c:_printTocEntry() for the details on why
4132  * the public schema is special in this regard.
4133  *
4134  * Note that if the public schema is dropped and re-created, this is
4135  * essentially a no-op because the new public schema won't have an
4136  * entry in pg_init_privs anyway, as the entry will be removed when
4137  * the public schema is dropped.
4138  *
4139  * Further, we have to handle the case where the public schema does
4140  * not exist at all.
4141  */
4142  if (dopt->outputClean)
4143  appendPQExpBuffer(query, " AND pip.objoid <> "
4144  "coalesce((select oid from pg_namespace "
4145  "where nspname = 'public'),0)");
4146 
4147  appendPQExpBuffer(query, ") ");
4148 
4149  destroyPQExpBuffer(acl_subquery);
4150  destroyPQExpBuffer(racl_subquery);
4151  destroyPQExpBuffer(init_acl_subquery);
4152  destroyPQExpBuffer(init_racl_subquery);
4153  }
4154  else
4155  appendPQExpBuffer(query, "SELECT tableoid, oid, nspname, "
4156  "(%s nspowner) AS rolname, "
4157  "nspacl, NULL as rnspacl, "
4158  "NULL AS initnspacl, NULL as initrnspacl "
4159  "FROM pg_namespace",
4161 
4162  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4163 
4164  ntups = PQntuples(res);
4165 
4166  nsinfo = (NamespaceInfo *) pg_malloc(ntups * sizeof(NamespaceInfo));
4167 
4168  i_tableoid = PQfnumber(res, "tableoid");
4169  i_oid = PQfnumber(res, "oid");
4170  i_nspname = PQfnumber(res, "nspname");
4171  i_rolname = PQfnumber(res, "rolname");
4172  i_nspacl = PQfnumber(res, "nspacl");
4173  i_rnspacl = PQfnumber(res, "rnspacl");
4174  i_initnspacl = PQfnumber(res, "initnspacl");
4175  i_initrnspacl = PQfnumber(res, "initrnspacl");
4176 
4177  for (i = 0; i < ntups; i++)
4178  {
4179  nsinfo[i].dobj.objType = DO_NAMESPACE;
4180  nsinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4181  nsinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4182  AssignDumpId(&nsinfo[i].dobj);
4183  nsinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_nspname));
4184  nsinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4185  nsinfo[i].nspacl = pg_strdup(PQgetvalue(res, i, i_nspacl));
4186  nsinfo[i].rnspacl = pg_strdup(PQgetvalue(res, i, i_rnspacl));
4187  nsinfo[i].initnspacl = pg_strdup(PQgetvalue(res, i, i_initnspacl));
4188  nsinfo[i].initrnspacl = pg_strdup(PQgetvalue(res, i, i_initrnspacl));
4189 
4190  /* Decide whether to dump this namespace */
4191  selectDumpableNamespace(&nsinfo[i], fout);
4192 
4193  /*
4194  * Do not try to dump ACL if the ACL is empty or the default.
4195  *
4196  * This is useful because, for some schemas/objects, the only
4197  * component we are going to try and dump is the ACL and if we can
4198  * remove that then 'dump' goes to zero/false and we don't consider
4199  * this object for dumping at all later on.
4200  */
4201  if (PQgetisnull(res, i, i_nspacl) && PQgetisnull(res, i, i_rnspacl) &&
4202  PQgetisnull(res, i, i_initnspacl) &&
4203  PQgetisnull(res, i, i_initrnspacl))
4204  nsinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4205 
4206  if (strlen(nsinfo[i].rolname) == 0)
4207  write_msg(NULL, "WARNING: owner of schema \"%s\" appears to be invalid\n",
4208  nsinfo[i].dobj.name);
4209  }
4210 
4211  PQclear(res);
4212  destroyPQExpBuffer(query);
4213 
4214  *numNamespaces = ntups;
4215 
4216  return nsinfo;
4217 }
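/*
 * Editor's sketch (not part of pg_dump.c): the dump decision is a bitmask of
 * DUMP_COMPONENT_* flags, and clearing the ACL bit when every ACL column is
 * NULL (as done above) can drive the whole mask to zero so the object is
 * skipped entirely.  A minimal standalone illustration with made-up flag
 * values, kept compiled out with #if 0:
 */
#if 0
#include <stdio.h>

#define DUMP_COMPONENT_NONE	0
#define DUMP_COMPONENT_ACL	(1 << 4)	/* hypothetical bit position */

int
main(void)
{
	unsigned int dump = DUMP_COMPONENT_ACL;	/* only the ACL was of interest */

	dump &= ~DUMP_COMPONENT_ACL;	/* ACL turned out to be empty/default */
	printf(dump == DUMP_COMPONENT_NONE ? "object skipped\n" : "object dumped\n");
	return 0;
}
#endif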
4218 
4219 /*
4220  * findNamespace:
4221  * given a namespace OID, look up the info read by getNamespaces
4222  */
4223 static NamespaceInfo *
4224 findNamespace(Archive *fout, Oid nsoid)
4225 {
4226  NamespaceInfo *nsinfo;
4227 
4228  nsinfo = findNamespaceByOid(nsoid);
4229  if (nsinfo == NULL)
4230  exit_horribly(NULL, "schema with OID %u does not exist\n", nsoid);
4231  return nsinfo;
4232 }
4233 
4234 /*
4235  * getExtensions:
4236  * read all extensions in the system catalogs and return them in the
4237  * ExtensionInfo* structure
4238  *
4239  * numExtensions is set to the number of extensions read in
4240  */
4241 ExtensionInfo *
4242 getExtensions(Archive *fout, int *numExtensions)
4243 {
4244  DumpOptions *dopt = fout->dopt;
4245  PGresult *res;
4246  int ntups;
4247  int i;
4248  PQExpBuffer query;
4249  ExtensionInfo *extinfo;
4250  int i_tableoid;
4251  int i_oid;
4252  int i_extname;
4253  int i_nspname;
4254  int i_extrelocatable;
4255  int i_extversion;
4256  int i_extconfig;
4257  int i_extcondition;
4258 
4259  /*
4260  * Before 9.1, there are no extensions.
4261  */
4262  if (fout->remoteVersion < 90100)
4263  {
4264  *numExtensions = 0;
4265  return NULL;
4266  }
4267 
4268  query = createPQExpBuffer();
4269 
4270  /* Make sure we are in proper schema */
4271  selectSourceSchema(fout, "pg_catalog");
4272 
4273  appendPQExpBufferStr(query, "SELECT x.tableoid, x.oid, "
4274  "x.extname, n.nspname, x.extrelocatable, x.extversion, x.extconfig, x.extcondition "
4275  "FROM pg_extension x "
4276  "JOIN pg_namespace n ON n.oid = x.extnamespace");
4277 
4278  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4279 
4280  ntups = PQntuples(res);
4281 
4282  extinfo = (ExtensionInfo *) pg_malloc(ntups * sizeof(ExtensionInfo));
4283 
4284  i_tableoid = PQfnumber(res, "tableoid");
4285  i_oid = PQfnumber(res, "oid");
4286  i_extname = PQfnumber(res, "extname");
4287  i_nspname = PQfnumber(res, "nspname");
4288  i_extrelocatable = PQfnumber(res, "extrelocatable");
4289  i_extversion = PQfnumber(res, "extversion");
4290  i_extconfig = PQfnumber(res, "extconfig");
4291  i_extcondition = PQfnumber(res, "extcondition");
4292 
4293  for (i = 0; i < ntups; i++)
4294  {
4295  extinfo[i].dobj.objType = DO_EXTENSION;
4296  extinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4297  extinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4298  AssignDumpId(&extinfo[i].dobj);
4299  extinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_extname));
4300  extinfo[i].namespace = pg_strdup(PQgetvalue(res, i, i_nspname));
4301  extinfo[i].relocatable = *(PQgetvalue(res, i, i_extrelocatable)) == 't';
4302  extinfo[i].extversion = pg_strdup(PQgetvalue(res, i, i_extversion));
4303  extinfo[i].extconfig = pg_strdup(PQgetvalue(res, i, i_extconfig));
4304  extinfo[i].extcondition = pg_strdup(PQgetvalue(res, i, i_extcondition));
4305 
4306  /* Decide whether we want to dump it */
4307  selectDumpableExtension(&(extinfo[i]), dopt);
4308  }
4309 
4310  PQclear(res);
4311  destroyPQExpBuffer(query);
4312 
4313  *numExtensions = ntups;
4314 
4315  return extinfo;
4316 }
4317 
4318 /*
4319  * getTypes:
4320  * read all types in the system catalogs and return them in the
4321  * TypeInfo* structure
4322  *
4323  * numTypes is set to the number of types read in
4324  *
4325  * NB: this must run after getFuncs() because we assume we can do
4326  * findFuncByOid().
4327  */
4328 TypeInfo *
4329 getTypes(Archive *fout, int *numTypes)
4330 {
4331  DumpOptions *dopt = fout->dopt;
4332  PGresult *res;
4333  int ntups;
4334  int i;
4335  PQExpBuffer query = createPQExpBuffer();
4336  TypeInfo *tyinfo;
4337  ShellTypeInfo *stinfo;
4338  int i_tableoid;
4339  int i_oid;
4340  int i_typname;
4341  int i_typnamespace;
4342  int i_typacl;
4343  int i_rtypacl;
4344  int i_inittypacl;
4345  int i_initrtypacl;
4346  int i_rolname;
4347  int i_typelem;
4348  int i_typrelid;
4349  int i_typrelkind;
4350  int i_typtype;
4351  int i_typisdefined;
4352  int i_isarray;
4353 
4354  /*
4355  * we include even the built-in types because those may be used as array
4356  * elements by user-defined types
4357  *
4358  * we filter out the built-in types when we dump out the types
4359  *
4360  * same approach for undefined (shell) types and array types
4361  *
4362  * Note: as of 8.3 we can reliably detect whether a type is an
4363  * auto-generated array type by checking the element type's typarray.
4364  * (Before that the test is capable of generating false positives.) We
4365  * still check for name beginning with '_', though, so as to avoid the
4366  * cost of the subselect probe for all standard types. This would have to
4367  * be revisited if the backend ever allows renaming of array types.
4368  */
4369 
4370  /* Make sure we are in proper schema */
4371  selectSourceSchema(fout, "pg_catalog");
4372 
4373  if (fout->remoteVersion >= 90600)
4374  {
4375  PQExpBuffer acl_subquery = createPQExpBuffer();
4376  PQExpBuffer racl_subquery = createPQExpBuffer();
4377  PQExpBuffer initacl_subquery = createPQExpBuffer();
4378  PQExpBuffer initracl_subquery = createPQExpBuffer();
4379 
4380  buildACLQueries(acl_subquery, racl_subquery, initacl_subquery,
4381  initracl_subquery, "t.typacl", "t.typowner", "'T'",
4382  dopt->binary_upgrade);
4383 
4384  appendPQExpBuffer(query, "SELECT t.tableoid, t.oid, t.typname, "
4385  "t.typnamespace, "
4386  "%s AS typacl, "
4387  "%s AS rtypacl, "
4388  "%s AS inittypacl, "
4389  "%s AS initrtypacl, "
4390  "(%s t.typowner) AS rolname, "
4391  "t.typelem, t.typrelid, "
4392  "CASE WHEN t.typrelid = 0 THEN ' '::\"char\" "
4393  "ELSE (SELECT relkind FROM pg_class WHERE oid = t.typrelid) END AS typrelkind, "
4394  "t.typtype, t.typisdefined, "
4395  "t.typname[0] = '_' AND t.typelem != 0 AND "
4396  "(SELECT typarray FROM pg_type te WHERE oid = t.typelem) = t.oid AS isarray "
4397  "FROM pg_type t "
4398  "LEFT JOIN pg_init_privs pip ON "
4399  "(t.oid = pip.objoid "
4400  "AND pip.classoid = 'pg_type'::regclass "
4401  "AND pip.objsubid = 0) ",
4402  acl_subquery->data,
4403  racl_subquery->data,
4404  initacl_subquery->data,
4405  initracl_subquery->data,
4406  username_subquery);
4407 
4408  destroyPQExpBuffer(acl_subquery);
4409  destroyPQExpBuffer(racl_subquery);
4410  destroyPQExpBuffer(initacl_subquery);
4411  destroyPQExpBuffer(initracl_subquery);
4412  }
4413  else if (fout->remoteVersion >= 90200)
4414  {
4415  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4416  "typnamespace, typacl, NULL as rtypacl, "
4417  "NULL AS inittypacl, NULL AS initrtypacl, "
4418  "(%s typowner) AS rolname, "
4419  "typelem, typrelid, "
4420  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4421  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4422  "typtype, typisdefined, "
4423  "typname[0] = '_' AND typelem != 0 AND "
4424  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
4425  "FROM pg_type",
4427  }
4428  else if (fout->remoteVersion >= 80300)
4429  {
4430  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4431  "typnamespace, NULL AS typacl, NULL as rtypacl, "
4432  "NULL AS inittypacl, NULL AS initrtypacl, "
4433  "(%s typowner) AS rolname, "
4434  "typelem, typrelid, "
4435  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4436  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4437  "typtype, typisdefined, "
4438  "typname[0] = '_' AND typelem != 0 AND "
4439  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
4440  "FROM pg_type",
4442  }
4443  else
4444  {
4445  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4446  "typnamespace, NULL AS typacl, NULL as rtypacl, "
4447  "NULL AS inittypacl, NULL AS initrtypacl, "
4448  "(%s typowner) AS rolname, "
4449  "typelem, typrelid, "
4450  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4451  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4452  "typtype, typisdefined, "
4453  "typname[0] = '_' AND typelem != 0 AS isarray "
4454  "FROM pg_type",
4456  }
4457 
4458  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4459 
4460  ntups = PQntuples(res);
4461 
4462  tyinfo = (TypeInfo *) pg_malloc(ntups * sizeof(TypeInfo));
4463 
4464  i_tableoid = PQfnumber(res, "tableoid");
4465  i_oid = PQfnumber(res, "oid");
4466  i_typname = PQfnumber(res, "typname");
4467  i_typnamespace = PQfnumber(res, "typnamespace");
4468  i_typacl = PQfnumber(res, "typacl");
4469  i_rtypacl = PQfnumber(res, "rtypacl");
4470  i_inittypacl = PQfnumber(res, "inittypacl");
4471  i_initrtypacl = PQfnumber(res, "initrtypacl");
4472  i_rolname = PQfnumber(res, "rolname");
4473  i_typelem = PQfnumber(res, "typelem");
4474  i_typrelid = PQfnumber(res, "typrelid");
4475  i_typrelkind = PQfnumber(res, "typrelkind");
4476  i_typtype = PQfnumber(res, "typtype");
4477  i_typisdefined = PQfnumber(res, "typisdefined");
4478  i_isarray = PQfnumber(res, "isarray");
4479 
4480  for (i = 0; i < ntups; i++)
4481  {
4482  tyinfo[i].dobj.objType = DO_TYPE;
4483  tyinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4484  tyinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4485  AssignDumpId(&tyinfo[i].dobj);
4486  tyinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_typname));
4487  tyinfo[i].dobj.namespace =
4488  findNamespace(fout,
4489  atooid(PQgetvalue(res, i, i_typnamespace)));
4490  tyinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4491  tyinfo[i].typacl = pg_strdup(PQgetvalue(res, i, i_typacl));
4492  tyinfo[i].rtypacl = pg_strdup(PQgetvalue(res, i, i_rtypacl));
4493  tyinfo[i].inittypacl = pg_strdup(PQgetvalue(res, i, i_inittypacl));
4494  tyinfo[i].initrtypacl = pg_strdup(PQgetvalue(res, i, i_initrtypacl));
4495  tyinfo[i].typelem = atooid(PQgetvalue(res, i, i_typelem));
4496  tyinfo[i].typrelid = atooid(PQgetvalue(res, i, i_typrelid));
4497  tyinfo[i].typrelkind = *PQgetvalue(res, i, i_typrelkind);
4498  tyinfo[i].typtype = *PQgetvalue(res, i, i_typtype);
4499  tyinfo[i].shellType = NULL;
4500 
4501  if (strcmp(PQgetvalue(res, i, i_typisdefined), "t") == 0)
4502  tyinfo[i].isDefined = true;
4503  else
4504  tyinfo[i].isDefined = false;
4505 
4506  if (strcmp(PQgetvalue(res, i, i_isarray), "t") == 0)
4507  tyinfo[i].isArray = true;
4508  else
4509  tyinfo[i].isArray = false;
4510 
4511  /* Decide whether we want to dump it */
4512  selectDumpableType(&tyinfo[i], fout);
4513 
4514  /* Do not try to dump ACL if no ACL exists. */
4515  if (PQgetisnull(res, i, i_typacl) && PQgetisnull(res, i, i_rtypacl) &&
4516  PQgetisnull(res, i, i_inittypacl) &&
4517  PQgetisnull(res, i, i_initrtypacl))
4518  tyinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4519 
4520  /*
4521  * If it's a domain, fetch info about its constraints, if any
4522  */
4523  tyinfo[i].nDomChecks = 0;
4524  tyinfo[i].domChecks = NULL;
4525  if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
4526  tyinfo[i].typtype == TYPTYPE_DOMAIN)
4527  getDomainConstraints(fout, &(tyinfo[i]));
4528 
4529  /*
4530  * If it's a base type, make a DumpableObject representing a shell
4531  * definition of the type. We will need to dump that ahead of the I/O
4532  * functions for the type. Similarly, range types need a shell
4533  * definition in case they have a canonicalize function.
4534  *
4535  * Note: the shell type doesn't have a catId. You might think it
4536  * should copy the base type's catId, but then it might capture the
4537  * pg_depend entries for the type, which we don't want.
4538  */
4539  if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
4540  (tyinfo[i].typtype == TYPTYPE_BASE ||
4541  tyinfo[i].typtype == TYPTYPE_RANGE))
4542  {
4543  stinfo = (ShellTypeInfo *) pg_malloc(sizeof(ShellTypeInfo));
4544  stinfo->dobj.objType = DO_SHELL_TYPE;
4545  stinfo->dobj.catId = nilCatalogId;
4546  AssignDumpId(&stinfo->dobj);
4547  stinfo->dobj.name = pg_strdup(tyinfo[i].dobj.name);
4548  stinfo->dobj.namespace = tyinfo[i].dobj.namespace;
4549  stinfo->baseType = &(tyinfo[i]);
4550  tyinfo[i].shellType = stinfo;
4551 
4552  /*
4553  * Initially mark the shell type as not to be dumped. We'll only
4554  * dump it if the I/O or canonicalize functions need to be dumped;
4555  * this is taken care of while sorting dependencies.
4556  */
4557  stinfo->dobj.dump = DUMP_COMPONENT_NONE;
4558  }
4559 
4560  if (strlen(tyinfo[i].rolname) == 0)
4561  write_msg(NULL, "WARNING: owner of data type \"%s\" appears to be invalid\n",
4562  tyinfo[i].dobj.name);
4563  }
4564 
4565  *numTypes = ntups;
4566 
4567  PQclear(res);
4568 
4569  destroyPQExpBuffer(query);
4570 
4571  return tyinfo;
4572 }
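/*
 * Editor's note (not part of pg_dump.c): a shell type entry created above
 * dumps, when needed, as a bare forward declaration ahead of the type's I/O
 * functions, i.e. SQL of this shape (names hypothetical):
 *
 *     CREATE TYPE public.mytype;
 */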
4573 
4574 /*
4575  * getOperators:
4576  * read all operators in the system catalogs and return them in the
4577  * OprInfo* structure
4578  *
4579  * numOprs is set to the number of operators read in
4580  */
4581 OprInfo *
4582 getOperators(Archive *fout, int *numOprs)
4583 {
4584  PGresult *res;
4585  int ntups;
4586  int i;
4587  PQExpBuffer query = createPQExpBuffer();
4588  OprInfo *oprinfo;
4589  int i_tableoid;
4590  int i_oid;
4591  int i_oprname;
4592  int i_oprnamespace;
4593  int i_rolname;
4594  int i_oprkind;
4595  int i_oprcode;
4596 
4597  /*
4598  * find all operators, including builtin operators; we filter out
4599  * system-defined operators at dump-out time.
4600  */
4601 
4602  /* Make sure we are in proper schema */
4603  selectSourceSchema(fout, "pg_catalog");
4604 
4605  appendPQExpBuffer(query, "SELECT tableoid, oid, oprname, "
4606  "oprnamespace, "
4607  "(%s oprowner) AS rolname, "
4608  "oprkind, "
4609  "oprcode::oid AS oprcode "
4610  "FROM pg_operator",
4612 
4613  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4614 
4615  ntups = PQntuples(res);
4616  *numOprs = ntups;
4617 
4618  oprinfo = (OprInfo *) pg_malloc(ntups * sizeof(OprInfo));
4619 
4620  i_tableoid = PQfnumber(res, "tableoid");
4621  i_oid = PQfnumber(res, "oid");
4622  i_oprname = PQfnumber(res, "oprname");
4623  i_oprnamespace = PQfnumber(res, "oprnamespace");
4624  i_rolname = PQfnumber(res, "rolname");
4625  i_oprkind = PQfnumber(res, "oprkind");
4626  i_oprcode = PQfnumber(res, "oprcode");
4627 
4628  for (i = 0; i < ntups; i++)
4629  {
4630  oprinfo[i].dobj.objType = DO_OPERATOR;
4631  oprinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4632  oprinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4633  AssignDumpId(&oprinfo[i].dobj);
4634  oprinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oprname));
4635  oprinfo[i].dobj.namespace =
4636  findNamespace(fout,
4637  atooid(PQgetvalue(res, i, i_oprnamespace)));
4638  oprinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4639  oprinfo[i].oprkind = (PQgetvalue(res, i, i_oprkind))[0];
4640  oprinfo[i].oprcode = atooid(PQgetvalue(res, i, i_oprcode));
4641 
4642  /* Decide whether we want to dump it */
4643  selectDumpableObject(&(oprinfo[i].dobj), fout);
4644 
4645  /* Operators do not currently have ACLs. */
4646  oprinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4647 
4648  if (strlen(oprinfo[i].rolname) == 0)
4649  write_msg(NULL, "WARNING: owner of operator \"%s\" appears to be invalid\n",
4650  oprinfo[i].dobj.name);
4651  }
4652 
4653  PQclear(res);
4654 
4655  destroyPQExpBuffer(query);
4656 
4657  return oprinfo;
4658 }
4659 
4660 /*
4661  * getCollations:
4662  * read all collations in the system catalogs and return them in the
4663  * CollInfo* structure
4664  *
4665  * numCollations is set to the number of collations read in
4666  */
4667 CollInfo *
4668 getCollations(Archive *fout, int *numCollations)
4669 {
4670  PGresult *res;
4671  int ntups;
4672  int i;
4673  PQExpBuffer query;
4674  CollInfo *collinfo;
4675  int i_tableoid;
4676  int i_oid;
4677  int i_collname;
4678  int i_collnamespace;
4679  int i_rolname;
4680 
4681  /* Collations didn't exist pre-9.1 */
4682  if (fout->remoteVersion < 90100)
4683  {
4684  *numCollations = 0;
4685  return NULL;
4686  }
4687 
4688  query = createPQExpBuffer();
4689 
4690  /*
4691  * find all collations, including builtin collations; we filter out
4692  * system-defined collations at dump-out time.
4693  */
4694 
4695  /* Make sure we are in proper schema */
4696  selectSourceSchema(fout, "pg_catalog");
4697 
4698  appendPQExpBuffer(query, "SELECT tableoid, oid, collname, "
4699  "collnamespace, "
4700  "(%s collowner) AS rolname "
4701  "FROM pg_collation",
4703 
4704  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4705 
4706  ntups = PQntuples(res);
4707  *numCollations = ntups;
4708 
4709  collinfo = (CollInfo *) pg_malloc(ntups * sizeof(CollInfo));
4710 
4711  i_tableoid = PQfnumber(res, "tableoid");
4712  i_oid = PQfnumber(res, "oid");
4713  i_collname = PQfnumber(res, "collname");
4714  i_collnamespace = PQfnumber(res, "collnamespace");
4715  i_rolname = PQfnumber(res, "rolname");
4716 
4717  for (i = 0; i < ntups; i++)
4718  {
4719  collinfo[i].dobj.objType = DO_COLLATION;
4720  collinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4721  collinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4722  AssignDumpId(&collinfo[i].dobj);
4723  collinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_collname));
4724  collinfo[i].dobj.namespace =
4725  findNamespace(fout,
4726  atooid(PQgetvalue(res, i, i_collnamespace)));
4727  collinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4728 
4729  /* Decide whether we want to dump it */
4730  selectDumpableObject(&(collinfo[i].dobj), fout);
4731 
4732  /* Collations do not currently have ACLs. */
4733  collinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4734  }
4735 
4736  PQclear(res);
4737 
4738  destroyPQExpBuffer(query);
4739 
4740  return collinfo;
4741 }
4742 
4743 /*
4744  * getConversions:
4745  * read all conversions in the system catalogs and return them in the
4746  * ConvInfo* structure
4747  *
4748  * numConversions is set to the number of conversions read in
4749  */
4750 ConvInfo *
4751 getConversions(Archive *fout, int *numConversions)
4752 {
4753  PGresult *res;
4754  int ntups;
4755  int i;
4756  PQExpBuffer query;
4757  ConvInfo *convinfo;
4758  int i_tableoid;
4759  int i_oid;
4760  int i_conname;
4761  int i_connamespace;
4762  int i_rolname;
4763 
4764  query = createPQExpBuffer();
4765 
4766  /*
4767  * find all conversions, including builtin conversions; we filter out
4768  * system-defined conversions at dump-out time.
4769  */
4770 
4771  /* Make sure we are in proper schema */
4772  selectSourceSchema(fout, "pg_catalog");
4773 
4774  appendPQExpBuffer(query, "SELECT tableoid, oid, conname, "
4775  "connamespace, "
4776  "(%s conowner) AS rolname "
4777  "FROM pg_conversion",
4779 
4780  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4781 
4782  ntups = PQntuples(res);
4783  *numConversions = ntups;
4784 
4785  convinfo = (ConvInfo *) pg_malloc(ntups * sizeof(ConvInfo));
4786 
4787  i_tableoid = PQfnumber(res, "tableoid");
4788  i_oid = PQfnumber(res, "oid");
4789  i_conname = PQfnumber(res, "conname");
4790  i_connamespace = PQfnumber(res, "connamespace");
4791  i_rolname = PQfnumber(res, "rolname");
4792 
4793  for (i = 0; i < ntups; i++)
4794  {
4795  convinfo[i].dobj.objType = DO_CONVERSION;
4796  convinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4797  convinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4798  AssignDumpId(&convinfo[i].dobj);
4799  convinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_conname));
4800  convinfo[i].dobj.namespace =
4801  findNamespace(fout,
4802  atooid(PQgetvalue(res, i, i_connamespace)));
4803  convinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4804 
4805  /* Decide whether we want to dump it */
4806  selectDumpableObject(&(convinfo[i].dobj), fout);
4807 
4808  /* Conversions do not currently have ACLs. */
4809  convinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4810  }
4811 
4812  PQclear(res);
4813 
4814  destroyPQExpBuffer(query);
4815 
4816  return convinfo;
4817 }
4818 
4819 /*
4820  * getAccessMethods:
4821  * read all user-defined access methods in the system catalogs and return
4822  * them in the AccessMethodInfo* structure
4823  *
4824  * numAccessMethods is set to the number of access methods read in
4825  */
4826 AccessMethodInfo *
4827 getAccessMethods(Archive *fout, int *numAccessMethods)
4828 {
4829  PGresult *res;
4830  int ntups;
4831  int i;
4832  PQExpBuffer query;
4833  AccessMethodInfo *aminfo;
4834  int i_tableoid;
4835  int i_oid;
4836  int i_amname;
4837  int i_amhandler;
4838  int i_amtype;
4839 
4840  /* Before 9.6, there are no user-defined access methods */
4841  if (fout->remoteVersion < 90600)
4842  {
4843  *numAccessMethods = 0;
4844  return NULL;
4845  }
4846 
4847  query = createPQExpBuffer();
4848 
4849  /* Make sure we are in proper schema */
4850  selectSourceSchema(fout, "pg_catalog");
4851 
4852  /* Select all access methods from pg_am table */
4853  appendPQExpBuffer(query, "SELECT tableoid, oid, amname, amtype, "
4854  "amhandler::pg_catalog.regproc AS amhandler "
4855  "FROM pg_am");
4856 
4857  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4858 
4859  ntups = PQntuples(res);
4860  *numAccessMethods = ntups;
4861 
4862  aminfo = (AccessMethodInfo *) pg_malloc(ntups * sizeof(AccessMethodInfo));
4863 
4864  i_tableoid = PQfnumber(res, "tableoid");
4865  i_oid = PQfnumber(res, "oid");
4866  i_amname = PQfnumber(res, "amname");
4867  i_amhandler = PQfnumber(res, "amhandler");
4868  i_amtype = PQfnumber(res, "amtype");
4869 
4870  for (i = 0; i < ntups; i++)
4871  {
4872  aminfo[i].dobj.objType = DO_ACCESS_METHOD;
4873  aminfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4874  aminfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4875  AssignDumpId(&aminfo[i].dobj);
4876  aminfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_amname));
4877  aminfo[i].dobj.namespace = NULL;
4878  aminfo[i].amhandler = pg_strdup(PQgetvalue(res, i, i_amhandler));
4879  aminfo[i].amtype = *(PQgetvalue(res, i, i_amtype));
4880 
4881  /* Decide whether we want to dump it */
4882  selectDumpableAccessMethod(&(aminfo[i]), fout);
4883 
4884  /* Access methods do not currently have ACLs. */
4885  aminfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4886  }
4887 
4888  PQclear(res);
4889 
4890  destroyPQExpBuffer(query);
4891 
4892  return aminfo;
4893 }
4894 
4895 
4896 /*
4897  * getOpclasses:
4898  * read all opclasses in the system catalogs and return them in the
4899  * OpclassInfo* structure
4900  *
4901  * numOpclasses is set to the number of opclasses read in
4902  */
4903 OpclassInfo *
4904 getOpclasses(Archive *fout, int *numOpclasses)
4905 {
4906  PGresult *res;
4907  int ntups;
4908  int i;
4909  PQExpBuffer query = createPQExpBuffer();
4910  OpclassInfo *opcinfo;
4911  int i_tableoid;
4912  int i_oid;
4913  int i_opcname;
4914  int i_opcnamespace;
4915  int i_rolname;
4916 
4917  /*
4918  * find all opclasses, including builtin opclasses; we filter out
4919  * system-defined opclasses at dump-out time.
4920  */
4921 
4922  /* Make sure we are in proper schema */
4923  selectSourceSchema(fout, "pg_catalog");
4924 
4925  appendPQExpBuffer(query, "SELECT tableoid, oid, opcname, "
4926  "opcnamespace, "
4927  "(%s opcowner) AS rolname "
4928  "FROM pg_opclass",
4930 
4931  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4932 
4933  ntups = PQntuples(res);
4934  *numOpclasses = ntups;
4935 
4936  opcinfo = (OpclassInfo *) pg_malloc(ntups * sizeof(OpclassInfo));
4937 
4938  i_tableoid = PQfnumber(res, "tableoid");
4939  i_oid = PQfnumber(res, "oid");
4940  i_opcname = PQfnumber(res, "opcname");
4941  i_opcnamespace = PQfnumber(res, "opcnamespace");
4942  i_rolname = PQfnumber(res, "rolname");
4943 
4944  for (i = 0; i < ntups; i++)
4945  {
4946  opcinfo[i].dobj.objType = DO_OPCLASS;
4947  opcinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4948  opcinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4949  AssignDumpId(&opcinfo[i].dobj);
4950  opcinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opcname));
4951  opcinfo[i].dobj.namespace =
4952  findNamespace(fout,
4953  atooid(PQgetvalue(res, i, i_opcnamespace)));
4954  opcinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4955 
4956  /* Decide whether we want to dump it */
4957  selectDumpableObject(&(opcinfo[i].dobj), fout);
4958 
4959  /* Op Classes do not currently have ACLs. */
4960  opcinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4961 
4962  if (strlen(opcinfo[i].rolname) == 0)
4963  write_msg(NULL, "WARNING: owner of operator class \"%s\" appears to be invalid\n",
4964  opcinfo[i].dobj.name);
4965  }
4966 
4967  PQclear(res);
4968 
4969  destroyPQExpBuffer(query);
4970 
4971  return opcinfo;
4972 }
4973 
4974 /*
4975  * getOpfamilies:
4976  * read all opfamilies in the system catalogs and return them in the
4977  * OpfamilyInfo* structure
4978  *
4979  * numOpfamilies is set to the number of opfamilies read in
4980  */
4981 OpfamilyInfo *
4982 getOpfamilies(Archive *fout, int *numOpfamilies)
4983 {
4984  PGresult *res;
4985  int ntups;
4986  int i;
4987  PQExpBuffer query;
4988  OpfamilyInfo *opfinfo;
4989  int i_tableoid;
4990  int i_oid;
4991  int i_opfname;
4992  int i_opfnamespace;
4993  int i_rolname;
4994 
4995  /* Before 8.3, there is no separate concept of opfamilies */
4996  if (fout->remoteVersion < 80300)
4997  {
4998  *numOpfamilies = 0;
4999  return NULL;
5000  }
5001 
5002  query = createPQExpBuffer();
5003 
5004  /*
5005  * find all opfamilies, including builtin opfamilies; we filter out
5006  * system-defined opfamilies at dump-out time.
5007  */
5008 
5009  /* Make sure we are in proper schema */
5010  selectSourceSchema(fout, "pg_catalog");
5011 
5012  appendPQExpBuffer(query, "SELECT tableoid, oid, opfname, "
5013  "opfnamespace, "
5014  "(%s opfowner) AS rolname "
5015  "FROM pg_opfamily",
5017 
5018  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5019 
5020  ntups = PQntuples(res);
5021  *numOpfamilies = ntups;
5022 
5023  opfinfo = (OpfamilyInfo *) pg_malloc(ntups * sizeof(OpfamilyInfo));
5024 
5025  i_tableoid = PQfnumber(res, "tableoid");
5026  i_oid = PQfnumber(res, "oid");
5027  i_opfname = PQfnumber(res, "opfname");
5028  i_opfnamespace = PQfnumber(res, "opfnamespace");
5029  i_rolname = PQfnumber(res, "rolname");
5030 
5031  for (i = 0; i < ntups; i++)
5032  {
5033  opfinfo[i].dobj.objType = DO_OPFAMILY;
5034  opfinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5035  opfinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5036  AssignDumpId(&opfinfo[i].dobj);
5037  opfinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opfname));
5038  opfinfo[i].dobj.namespace =
5039  findNamespace(fout,
5040  atooid(PQgetvalue(res, i, i_opfnamespace)));
5041  opfinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5042 
5043  /* Decide whether we want to dump it */
5044  selectDumpableObject(&(opfinfo[i].dobj), fout);
5045 
5046  /* Operator families do not currently have ACLs. */
5047  opfinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5048 
5049  if (strlen(opfinfo[i].rolname) == 0)
5050  write_msg(NULL, "WARNING: owner of operator family \"%s\" appears to be invalid\n",
5051  opfinfo[i].dobj.name);
5052  }
5053 
5054  PQclear(res);
5055 
5056  destroyPQExpBuffer(query);
5057 
5058  return opfinfo;
5059 }
5060 
5061 /*
5062  * getAggregates:
5063  * read all the user-defined aggregates in the system catalogs and
5064  * return them in the AggInfo* structure
5065  *
5066  * numAggs is set to the number of aggregates read in
5067  */
5068 AggInfo *
5069 getAggregates(Archive *fout, int *numAggs)
5070 {
5071  DumpOptions *dopt = fout->dopt;
5072  PGresult *res;
5073  int ntups;
5074  int i;
5075  PQExpBuffer query = createPQExpBuffer();
5076  AggInfo *agginfo;
5077  int i_tableoid;
5078  int i_oid;
5079  int i_aggname;
5080  int i_aggnamespace;
5081  int i_pronargs;
5082  int i_proargtypes;
5083  int i_rolname;
5084  int i_aggacl;
5085  int i_raggacl;
5086  int i_initaggacl;
5087  int i_initraggacl;
5088 
5089  /* Make sure we are in proper schema */
5090  selectSourceSchema(fout, "pg_catalog");
5091 
5092  /*
5093  * Find all interesting aggregates. See comment in getFuncs() for the
5094  * rationale behind the filtering logic.
5095  */
5096  if (fout->remoteVersion >= 90600)
5097  {
5098  PQExpBuffer acl_subquery = createPQExpBuffer();
5099  PQExpBuffer racl_subquery = createPQExpBuffer();
5100  PQExpBuffer initacl_subquery = createPQExpBuffer();
5101  PQExpBuffer initracl_subquery = createPQExpBuffer();
5102 
5103  buildACLQueries(acl_subquery, racl_subquery, initacl_subquery,
5104  initracl_subquery, "p.proacl", "p.proowner", "'f'",
5105  dopt->binary_upgrade);
5106 
5107  appendPQExpBuffer(query, "SELECT p.tableoid, p.oid, "
5108  "p.proname AS aggname, "
5109  "p.pronamespace AS aggnamespace, "
5110  "p.pronargs, p.proargtypes, "
5111  "(%s p.proowner) AS rolname, "
5112  "%s AS aggacl, "
5113  "%s AS raggacl, "
5114  "%s AS initaggacl, "
5115  "%s AS initraggacl "
5116  "FROM pg_proc p "
5117  "LEFT JOIN pg_init_privs pip ON "
5118  "(p.oid = pip.objoid "
5119  "AND pip.classoid = 'pg_proc'::regclass "
5120  "AND pip.objsubid = 0) "
5121  "WHERE p.proisagg AND ("
5122  "p.pronamespace != "
5123  "(SELECT oid FROM pg_namespace "
5124  "WHERE nspname = 'pg_catalog') OR "
5125  "p.proacl IS DISTINCT FROM pip.initprivs",
5127  acl_subquery->data,
5128  racl_subquery->data,
5129  initacl_subquery->data,
5130  initracl_subquery->data);
5131  if (dopt->binary_upgrade)
5132  appendPQExpBufferStr(query,
5133  " OR EXISTS(SELECT 1 FROM pg_depend WHERE "
5134  "classid = 'pg_proc'::regclass AND "
5135  "objid = p.oid AND "
5136  "refclassid = 'pg_extension'::regclass AND "
5137  "deptype = 'e')");
5138  appendPQExpBufferChar(query, ')');
5139 
5140  destroyPQExpBuffer(acl_subquery);
5141  destroyPQExpBuffer(racl_subquery);
5142  destroyPQExpBuffer(initacl_subquery);
5143  destroyPQExpBuffer(initracl_subquery);
5144  }
5145