PostgreSQL Source Code (git master)
pg_dump.c
1 /*-------------------------------------------------------------------------
2  *
3  * pg_dump.c
4  * pg_dump is a utility for dumping out a postgres database
5  * into a script file.
6  *
7  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994, Regents of the University of California
9  *
10  * pg_dump will read the system catalogs in a database and dump out a
11  * script that reproduces the schema in terms of SQL that is understood
12  * by PostgreSQL
13  *
14  * Note that pg_dump runs in a transaction-snapshot mode transaction,
15  * so it sees a consistent snapshot of the database including system
16  * catalogs. However, it relies in part on various specialized backend
17  * functions like pg_get_indexdef(), and those things tend to look at
18  * the currently committed state. So it is possible to get a 'cache
19  * lookup failed' error if someone performs DDL changes while a dump is
20  * happening. The window for this sort of thing is from the acquisition
21  * of the transaction snapshot to getSchemaData() (when pg_dump acquires
22  * AccessShareLock on every table it intends to dump). It isn't very large,
23  * but it can happen.
24  *
25  * http://archives.postgresql.org/pgsql-bugs/2010-02/msg00187.php
26  *
27  * IDENTIFICATION
28  * src/bin/pg_dump/pg_dump.c
29  *
30  *-------------------------------------------------------------------------
31  */
32 #include "postgres_fe.h"
33 
34 #include <unistd.h>
35 #include <ctype.h>
36 #ifdef HAVE_TERMIOS_H
37 #include <termios.h>
38 #endif
39 
40 #include "getopt_long.h"
41 
42 #include "access/attnum.h"
43 #include "access/sysattr.h"
44 #include "access/transam.h"
45 #include "catalog/pg_am.h"
46 #include "catalog/pg_attribute.h"
47 #include "catalog/pg_cast.h"
48 #include "catalog/pg_class.h"
49 #include "catalog/pg_default_acl.h"
50 #include "catalog/pg_largeobject.h"
51 #include "catalog/pg_largeobject_metadata.h"
52 #include "catalog/pg_proc.h"
53 #include "catalog/pg_trigger.h"
54 #include "catalog/pg_type.h"
55 #include "libpq/libpq-fs.h"
56 
57 #include "dumputils.h"
58 #include "parallel.h"
59 #include "pg_backup_db.h"
60 #include "pg_backup_utils.h"
61 #include "pg_dump.h"
62 #include "fe_utils/string_utils.h"
63 
64 
65 typedef struct
66 {
67  const char *descr; /* comment for an object */
68  Oid classoid; /* object class (catalog OID) */
69  Oid objoid; /* object OID */
70  int objsubid; /* subobject (table column #) */
71 } CommentItem;
72 
73 typedef struct
74 {
75  const char *provider; /* label provider of this security label */
76  const char *label; /* security label for an object */
77  Oid classoid; /* object class (catalog OID) */
78  Oid objoid; /* object OID */
79  int objsubid; /* subobject (table column #) */
80 } SecLabelItem;
81 
82 typedef enum OidOptions
83 {
85  zeroAsAny = 2,
88 } OidOptions;
89 
90 /* global decls */
91 bool g_verbose; /* User wants verbose narration of our
92  * activities. */
93 static bool dosync = true; /* Issue fsync() to make dump durable
94  * on disk. */
95 
96 /* subquery used to convert user ID (eg, datdba) to user name */
97 static const char *username_subquery;
98 
99 /*
100  * For 8.0 and earlier servers this is pulled from pg_database; for 8.1
101  * and later we use FirstNormalObjectId - 1.
102  */
103 static Oid g_last_builtin_oid; /* value of the last builtin oid */
104 
105 /* The specified names/patterns should match at least one entity */
106 static int strict_names = 0;
107 
108 /*
109  * Object inclusion/exclusion lists
110  *
111  * The string lists record the patterns given by command-line switches,
112  * which we then convert to lists of OIDs of matching objects.
113  */
114 static SimpleStringList schema_include_patterns = {NULL, NULL};
115 static SimpleOidList schema_include_oids = {NULL, NULL};
116 static SimpleStringList schema_exclude_patterns = {NULL, NULL};
117 static SimpleOidList schema_exclude_oids = {NULL, NULL};
118 
119 static SimpleStringList table_include_patterns = {NULL, NULL};
120 static SimpleOidList table_include_oids = {NULL, NULL};
121 static SimpleStringList table_exclude_patterns = {NULL, NULL};
122 static SimpleOidList table_exclude_oids = {NULL, NULL};
123 static SimpleStringList tabledata_exclude_patterns = {NULL, NULL};
124 static SimpleOidList tabledata_exclude_oids = {NULL, NULL};
125 
126 
127 char g_opaque_type[10]; /* name for the opaque type */
128 
129 /* placeholders for the delimiters for comments */
130 char g_comment_start[10];
131 char g_comment_end[10];
132 
133 static const CatalogId nilCatalogId = {0, 0};
134 
135 static void help(const char *progname);
136 static void setup_connection(Archive *AH,
137  const char *dumpencoding, const char *dumpsnapshot,
138  char *use_role);
139 static ArchiveFormat parseArchiveFormat(const char *format, ArchiveMode *mode);
140 static void expand_schema_name_patterns(Archive *fout,
141  SimpleStringList *patterns,
142  SimpleOidList *oids,
143  bool strict_names);
144 static void expand_table_name_patterns(Archive *fout,
145  SimpleStringList *patterns,
146  SimpleOidList *oids,
147  bool strict_names);
148 static NamespaceInfo *findNamespace(Archive *fout, Oid nsoid);
149 static void dumpTableData(Archive *fout, TableDataInfo *tdinfo);
150 static void refreshMatViewData(Archive *fout, TableDataInfo *tdinfo);
151 static void guessConstraintInheritance(TableInfo *tblinfo, int numTables);
152 static void dumpComment(Archive *fout, const char *target,
153  const char *namespace, const char *owner,
154  CatalogId catalogId, int subid, DumpId dumpId);
155 static int findComments(Archive *fout, Oid classoid, Oid objoid,
156  CommentItem **items);
157 static int collectComments(Archive *fout, CommentItem **items);
158 static void dumpSecLabel(Archive *fout, const char *target,
159  const char *namespace, const char *owner,
160  CatalogId catalogId, int subid, DumpId dumpId);
161 static int findSecLabels(Archive *fout, Oid classoid, Oid objoid,
162  SecLabelItem **items);
163 static int collectSecLabels(Archive *fout, SecLabelItem **items);
164 static void dumpDumpableObject(Archive *fout, DumpableObject *dobj);
165 static void dumpNamespace(Archive *fout, NamespaceInfo *nspinfo);
166 static void dumpExtension(Archive *fout, ExtensionInfo *extinfo);
167 static void dumpType(Archive *fout, TypeInfo *tyinfo);
168 static void dumpBaseType(Archive *fout, TypeInfo *tyinfo);
169 static void dumpEnumType(Archive *fout, TypeInfo *tyinfo);
170 static void dumpRangeType(Archive *fout, TypeInfo *tyinfo);
171 static void dumpUndefinedType(Archive *fout, TypeInfo *tyinfo);
172 static void dumpDomain(Archive *fout, TypeInfo *tyinfo);
173 static void dumpCompositeType(Archive *fout, TypeInfo *tyinfo);
174 static void dumpCompositeTypeColComments(Archive *fout, TypeInfo *tyinfo);
175 static void dumpShellType(Archive *fout, ShellTypeInfo *stinfo);
176 static void dumpProcLang(Archive *fout, ProcLangInfo *plang);
177 static void dumpFunc(Archive *fout, FuncInfo *finfo);
178 static void dumpCast(Archive *fout, CastInfo *cast);
179 static void dumpTransform(Archive *fout, TransformInfo *transform);
180 static void dumpOpr(Archive *fout, OprInfo *oprinfo);
181 static void dumpAccessMethod(Archive *fout, AccessMethodInfo *oprinfo);
182 static void dumpOpclass(Archive *fout, OpclassInfo *opcinfo);
183 static void dumpOpfamily(Archive *fout, OpfamilyInfo *opfinfo);
184 static void dumpCollation(Archive *fout, CollInfo *collinfo);
185 static void dumpConversion(Archive *fout, ConvInfo *convinfo);
186 static void dumpRule(Archive *fout, RuleInfo *rinfo);
187 static void dumpAgg(Archive *fout, AggInfo *agginfo);
188 static void dumpTrigger(Archive *fout, TriggerInfo *tginfo);
189 static void dumpEventTrigger(Archive *fout, EventTriggerInfo *evtinfo);
190 static void dumpTable(Archive *fout, TableInfo *tbinfo);
191 static void dumpTableSchema(Archive *fout, TableInfo *tbinfo);
192 static void dumpAttrDef(Archive *fout, AttrDefInfo *adinfo);
193 static void dumpSequence(Archive *fout, TableInfo *tbinfo);
194 static void dumpSequenceData(Archive *fout, TableDataInfo *tdinfo);
195 static void dumpIndex(Archive *fout, IndxInfo *indxinfo);
196 static void dumpStatisticsExt(Archive *fout, StatsExtInfo *statsextinfo);
197 static void dumpConstraint(Archive *fout, ConstraintInfo *coninfo);
198 static void dumpTableConstraintComment(Archive *fout, ConstraintInfo *coninfo);
199 static void dumpTSParser(Archive *fout, TSParserInfo *prsinfo);
200 static void dumpTSDictionary(Archive *fout, TSDictInfo *dictinfo);
201 static void dumpTSTemplate(Archive *fout, TSTemplateInfo *tmplinfo);
202 static void dumpTSConfig(Archive *fout, TSConfigInfo *cfginfo);
203 static void dumpForeignDataWrapper(Archive *fout, FdwInfo *fdwinfo);
204 static void dumpForeignServer(Archive *fout, ForeignServerInfo *srvinfo);
205 static void dumpUserMappings(Archive *fout,
206  const char *servername, const char *namespace,
207  const char *owner, CatalogId catalogId, DumpId dumpId);
208 static void dumpDefaultACL(Archive *fout, DefaultACLInfo *daclinfo);
209 
210 static void dumpACL(Archive *fout, CatalogId objCatId, DumpId objDumpId,
211  const char *type, const char *name, const char *subname,
212  const char *tag, const char *nspname, const char *owner,
213  const char *acls, const char *racls,
214  const char *initacls, const char *initracls);
215 
216 static void getDependencies(Archive *fout);
217 static void BuildArchiveDependencies(Archive *fout);
219  DumpId **dependencies, int *nDeps, int *allocDeps);
220 
222 static void addBoundaryDependencies(DumpableObject **dobjs, int numObjs,
223  DumpableObject *boundaryObjs);
224 
225 static void getDomainConstraints(Archive *fout, TypeInfo *tyinfo);
226 static void getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, bool oids, char relkind);
227 static void makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo, bool oids);
228 static void buildMatViewRefreshDependencies(Archive *fout);
229 static void getTableDataFKConstraints(void);
230 static char *format_function_arguments(FuncInfo *finfo, char *funcargs,
231  bool is_agg);
232 static char *format_function_arguments_old(Archive *fout,
233  FuncInfo *finfo, int nallargs,
234  char **allargtypes,
235  char **argmodes,
236  char **argnames);
237 static char *format_function_signature(Archive *fout,
238  FuncInfo *finfo, bool honor_quotes);
239 static char *convertRegProcReference(Archive *fout,
240  const char *proc);
241 static char *convertOperatorReference(Archive *fout, const char *opr);
242 static char *convertTSFunction(Archive *fout, Oid funcOid);
243 static Oid findLastBuiltinOid_V71(Archive *fout, const char *);
244 static void selectSourceSchema(Archive *fout, const char *schemaName);
245 static char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts);
246 static void getBlobs(Archive *fout);
247 static void dumpBlob(Archive *fout, BlobInfo *binfo);
248 static int dumpBlobs(Archive *fout, void *arg);
249 static void dumpPolicy(Archive *fout, PolicyInfo *polinfo);
250 static void dumpPublication(Archive *fout, PublicationInfo *pubinfo);
251 static void dumpPublicationTable(Archive *fout, PublicationRelInfo *pubrinfo);
252 static void dumpSubscription(Archive *fout, SubscriptionInfo *subinfo);
253 static void dumpDatabase(Archive *AH);
254 static void dumpEncoding(Archive *AH);
255 static void dumpStdStrings(Archive *AH);
257  PQExpBuffer upgrade_buffer, Oid pg_type_oid);
259  PQExpBuffer upgrade_buffer, Oid pg_rel_oid);
260 static void binary_upgrade_set_pg_class_oids(Archive *fout,
261  PQExpBuffer upgrade_buffer,
262  Oid pg_class_oid, bool is_index);
263 static void binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
264  DumpableObject *dobj,
265  const char *objlabel);
266 static const char *getAttrName(int attrnum, TableInfo *tblInfo);
267 static const char *fmtCopyColumnList(const TableInfo *ti, PQExpBuffer buffer);
268 static bool nonemptyReloptions(const char *reloptions);
269 static void appendReloptionsArrayAH(PQExpBuffer buffer, const char *reloptions,
270  const char *prefix, Archive *fout);
271 static char *get_synchronized_snapshot(Archive *fout);
272 static void setupDumpWorker(Archive *AHX);
273 
274 
275 int
276 main(int argc, char **argv)
277 {
278  int c;
279  const char *filename = NULL;
280  const char *format = "p";
281  TableInfo *tblinfo;
282  int numTables;
283  DumpableObject **dobjs;
284  int numObjs;
285  DumpableObject *boundaryObjs;
286  int i;
287  int optindex;
288  RestoreOptions *ropt;
289  Archive *fout; /* the script file */
290  const char *dumpencoding = NULL;
291  const char *dumpsnapshot = NULL;
292  char *use_role = NULL;
293  int numWorkers = 1;
294  trivalue prompt_password = TRI_DEFAULT;
295  int compressLevel = -1;
296  int plainText = 0;
297  ArchiveFormat archiveFormat = archUnknown;
298  ArchiveMode archiveMode;
299 
300  static DumpOptions dopt;
301 
302  static struct option long_options[] = {
303  {"data-only", no_argument, NULL, 'a'},
304  {"blobs", no_argument, NULL, 'b'},
305  {"no-blobs", no_argument, NULL, 'B'},
306  {"clean", no_argument, NULL, 'c'},
307  {"create", no_argument, NULL, 'C'},
308  {"dbname", required_argument, NULL, 'd'},
309  {"file", required_argument, NULL, 'f'},
310  {"format", required_argument, NULL, 'F'},
311  {"host", required_argument, NULL, 'h'},
312  {"jobs", 1, NULL, 'j'},
313  {"no-reconnect", no_argument, NULL, 'R'},
314  {"oids", no_argument, NULL, 'o'},
315  {"no-owner", no_argument, NULL, 'O'},
316  {"port", required_argument, NULL, 'p'},
317  {"schema", required_argument, NULL, 'n'},
318  {"exclude-schema", required_argument, NULL, 'N'},
319  {"schema-only", no_argument, NULL, 's'},
320  {"superuser", required_argument, NULL, 'S'},
321  {"table", required_argument, NULL, 't'},
322  {"exclude-table", required_argument, NULL, 'T'},
323  {"no-password", no_argument, NULL, 'w'},
324  {"password", no_argument, NULL, 'W'},
325  {"username", required_argument, NULL, 'U'},
326  {"verbose", no_argument, NULL, 'v'},
327  {"no-privileges", no_argument, NULL, 'x'},
328  {"no-acl", no_argument, NULL, 'x'},
329  {"compress", required_argument, NULL, 'Z'},
330  {"encoding", required_argument, NULL, 'E'},
331  {"help", no_argument, NULL, '?'},
332  {"version", no_argument, NULL, 'V'},
333 
334  /*
335  * the following options don't have an equivalent short option letter
336  */
337  {"attribute-inserts", no_argument, &dopt.column_inserts, 1},
338  {"binary-upgrade", no_argument, &dopt.binary_upgrade, 1},
339  {"column-inserts", no_argument, &dopt.column_inserts, 1},
340  {"disable-dollar-quoting", no_argument, &dopt.disable_dollar_quoting, 1},
341  {"disable-triggers", no_argument, &dopt.disable_triggers, 1},
342  {"enable-row-security", no_argument, &dopt.enable_row_security, 1},
343  {"exclude-table-data", required_argument, NULL, 4},
344  {"if-exists", no_argument, &dopt.if_exists, 1},
345  {"inserts", no_argument, &dopt.dump_inserts, 1},
346  {"lock-wait-timeout", required_argument, NULL, 2},
347  {"no-tablespaces", no_argument, &dopt.outputNoTablespaces, 1},
348  {"quote-all-identifiers", no_argument, &quote_all_identifiers, 1},
349  {"role", required_argument, NULL, 3},
350  {"section", required_argument, NULL, 5},
351  {"serializable-deferrable", no_argument, &dopt.serializable_deferrable, 1},
352  {"snapshot", required_argument, NULL, 6},
353  {"strict-names", no_argument, &strict_names, 1},
354  {"use-set-session-authorization", no_argument, &dopt.use_setsessauth, 1},
355  {"no-security-labels", no_argument, &dopt.no_security_labels, 1},
356  {"no-synchronized-snapshots", no_argument, &dopt.no_synchronized_snapshots, 1},
357  {"no-unlogged-table-data", no_argument, &dopt.no_unlogged_table_data, 1},
358  {"no-sync", no_argument, NULL, 7},
359 
360  {NULL, 0, NULL, 0}
361  };
362 
363  set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_dump"));
364 
365  /*
366  * Initialize what we need for parallel execution, especially for thread
367  * support on Windows.
368  */
370 
371  g_verbose = false;
372 
373  strcpy(g_comment_start, "-- ");
374  g_comment_end[0] = '\0';
375  strcpy(g_opaque_type, "opaque");
376 
377  progname = get_progname(argv[0]);
378 
379  if (argc > 1)
380  {
381  if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
382  {
383  help(progname);
384  exit_nicely(0);
385  }
386  if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
387  {
388  puts("pg_dump (PostgreSQL) " PG_VERSION);
389  exit_nicely(0);
390  }
391  }
392 
393  InitDumpOptions(&dopt);
394 
395  while ((c = getopt_long(argc, argv, "abBcCd:E:f:F:h:j:n:N:oOp:RsS:t:T:U:vwWxZ:",
396  long_options, &optindex)) != -1)
397  {
398  switch (c)
399  {
400  case 'a': /* Dump data only */
401  dopt.dataOnly = true;
402  break;
403 
404  case 'b': /* Dump blobs */
405  dopt.outputBlobs = true;
406  break;
407 
408  case 'B': /* Don't dump blobs */
409  dopt.dontOutputBlobs = true;
410  break;
411 
412  case 'c': /* clean (i.e., drop) schema prior to create */
413  dopt.outputClean = 1;
414  break;
415 
416  case 'C': /* Create DB */
417  dopt.outputCreateDB = 1;
418  break;
419 
420  case 'd': /* database name */
421  dopt.dbname = pg_strdup(optarg);
422  break;
423 
424  case 'E': /* Dump encoding */
425  dumpencoding = pg_strdup(optarg);
426  break;
427 
428  case 'f':
429  filename = pg_strdup(optarg);
430  break;
431 
432  case 'F':
433  format = pg_strdup(optarg);
434  break;
435 
436  case 'h': /* server host */
437  dopt.pghost = pg_strdup(optarg);
438  break;
439 
440  case 'j': /* number of dump jobs */
441  numWorkers = atoi(optarg);
442  break;
443 
444  case 'n': /* include schema(s) */
445  simple_string_list_append(&schema_include_patterns, optarg);
446  dopt.include_everything = false;
447  break;
448 
449  case 'N': /* exclude schema(s) */
450  simple_string_list_append(&schema_exclude_patterns, optarg);
451  break;
452 
453  case 'o': /* Dump oids */
454  dopt.oids = true;
455  break;
456 
457  case 'O': /* Don't reconnect to match owner */
458  dopt.outputNoOwner = 1;
459  break;
460 
461  case 'p': /* server port */
462  dopt.pgport = pg_strdup(optarg);
463  break;
464 
465  case 'R':
466  /* no-op, still accepted for backwards compatibility */
467  break;
468 
469  case 's': /* dump schema only */
470  dopt.schemaOnly = true;
471  break;
472 
473  case 'S': /* Username for superuser in plain text output */
475  break;
476 
477  case 't': /* include table(s) */
478  simple_string_list_append(&table_include_patterns, optarg);
479  dopt.include_everything = false;
480  break;
481 
482  case 'T': /* exclude table(s) */
483  simple_string_list_append(&table_exclude_patterns, optarg);
484  break;
485 
486  case 'U':
487  dopt.username = pg_strdup(optarg);
488  break;
489 
490  case 'v': /* verbose */
491  g_verbose = true;
492  break;
493 
494  case 'w':
495  prompt_password = TRI_NO;
496  break;
497 
498  case 'W':
499  prompt_password = TRI_YES;
500  break;
501 
502  case 'x': /* skip ACL dump */
503  dopt.aclsSkip = true;
504  break;
505 
506  case 'Z': /* Compression Level */
507  compressLevel = atoi(optarg);
508  if (compressLevel < 0 || compressLevel > 9)
509  {
510  write_msg(NULL, "compression level must be in range 0..9\n");
511  exit_nicely(1);
512  }
513  break;
514 
515  case 0:
516  /* This covers the long options. */
517  break;
518 
519  case 2: /* lock-wait-timeout */
521  break;
522 
523  case 3: /* SET ROLE */
524  use_role = pg_strdup(optarg);
525  break;
526 
527  case 4: /* exclude table(s) data */
528  simple_string_list_append(&tabledata_exclude_patterns, optarg);
529  break;
530 
531  case 5: /* section */
533  break;
534 
535  case 6: /* snapshot */
536  dumpsnapshot = pg_strdup(optarg);
537  break;
538 
539  case 7: /* no-sync */
540  dosync = false;
541  break;
542 
543  default:
544  fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
545  exit_nicely(1);
546  }
547  }
548 
549  /*
550  * Non-option argument specifies database name as long as it wasn't
551  * already specified with -d / --dbname
552  */
553  if (optind < argc && dopt.dbname == NULL)
554  dopt.dbname = argv[optind++];
555 
556  /* Complain if any arguments remain */
557  if (optind < argc)
558  {
559  fprintf(stderr, _("%s: too many command-line arguments (first is \"%s\")\n"),
560  progname, argv[optind]);
561  fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
562  progname);
563  exit_nicely(1);
564  }
565 
566  /* --column-inserts implies --inserts */
567  if (dopt.column_inserts)
568  dopt.dump_inserts = 1;
569 
570  /*
571  * Binary upgrade mode implies dumping sequence data even in schema-only
572  * mode. This is not exposed as a separate option, but kept separate
573  * internally for clarity.
574  */
575  if (dopt.binary_upgrade)
576  dopt.sequence_data = 1;
577 
578  if (dopt.dataOnly && dopt.schemaOnly)
579  {
580  write_msg(NULL, "options -s/--schema-only and -a/--data-only cannot be used together\n");
581  exit_nicely(1);
582  }
583 
584  if (dopt.dataOnly && dopt.outputClean)
585  {
586  write_msg(NULL, "options -c/--clean and -a/--data-only cannot be used together\n");
587  exit_nicely(1);
588  }
589 
590  if (dopt.dump_inserts && dopt.oids)
591  {
592  write_msg(NULL, "options --inserts/--column-inserts and -o/--oids cannot be used together\n");
593  write_msg(NULL, "(The INSERT command cannot set OIDs.)\n");
594  exit_nicely(1);
595  }
596 
597  if (dopt.if_exists && !dopt.outputClean)
598  exit_horribly(NULL, "option --if-exists requires option -c/--clean\n");
599 
600  /* Identify archive format to emit */
601  archiveFormat = parseArchiveFormat(format, &archiveMode);
602 
603  /* archiveFormat specific setup */
604  if (archiveFormat == archNull)
605  plainText = 1;
606 
607  /* Custom and directory formats are compressed by default, others not */
608  if (compressLevel == -1)
609  {
610 #ifdef HAVE_LIBZ
611  if (archiveFormat == archCustom || archiveFormat == archDirectory)
612  compressLevel = Z_DEFAULT_COMPRESSION;
613  else
614 #endif
615  compressLevel = 0;
616  }
617 
618 #ifndef HAVE_LIBZ
619  if (compressLevel != 0)
620  write_msg(NULL, "WARNING: requested compression not available in this "
621  "installation -- archive will be uncompressed\n");
622  compressLevel = 0;
623 #endif
624 
625  /*
626  * On Windows we can only have at most MAXIMUM_WAIT_OBJECTS (= 64 usually)
627  * parallel jobs because that's the maximum limit for the
628  * WaitForMultipleObjects() call.
629  */
630  if (numWorkers <= 0
631 #ifdef WIN32
632  || numWorkers > MAXIMUM_WAIT_OBJECTS
633 #endif
634  )
635  exit_horribly(NULL, "invalid number of parallel jobs\n");
636 
637  /* Parallel backup only in the directory archive format so far */
638  if (archiveFormat != archDirectory && numWorkers > 1)
639  exit_horribly(NULL, "parallel backup only supported by the directory format\n");
640 
641  /* Open the output file */
642  fout = CreateArchive(filename, archiveFormat, compressLevel, dosync,
643  archiveMode, setupDumpWorker);
644 
645  /* Make dump options accessible right away */
646  SetArchiveOptions(fout, &dopt, NULL);
647 
648  /* Register the cleanup hook */
649  on_exit_close_archive(fout);
650 
651  /* Let the archiver know how noisy to be */
652  fout->verbose = g_verbose;
653 
654  /*
655  * We allow the server to be as far back as 8.0, and up to any minor release of
656  * our own major version. (See also version check in pg_dumpall.c.)
657  */
658  fout->minRemoteVersion = 80000;
659  fout->maxRemoteVersion = (PG_VERSION_NUM / 100) * 100 + 99;
660 
661  fout->numWorkers = numWorkers;
662 
663  /*
664  * Open the database using the Archiver, so it knows about it. Errors mean
665  * death.
666  */
667  ConnectDatabase(fout, dopt.dbname, dopt.pghost, dopt.pgport, dopt.username, prompt_password);
668  setup_connection(fout, dumpencoding, dumpsnapshot, use_role);
669 
670  /*
671  * Disable security label support if server version < v9.1.x (prevents
672  * access to nonexistent pg_seclabel catalog)
673  */
674  if (fout->remoteVersion < 90100)
675  dopt.no_security_labels = 1;
676 
677  /*
678  * On hot standby slaves, never try to dump unlogged table data, since it
679  * will just throw an error.
680  */
681  if (fout->isStandby)
682  dopt.no_unlogged_table_data = true;
683 
684  /* Select the appropriate subquery to convert user IDs to names */
685  if (fout->remoteVersion >= 80100)
686  username_subquery = "SELECT rolname FROM pg_catalog.pg_roles WHERE oid =";
687  else
688  username_subquery = "SELECT usename FROM pg_catalog.pg_user WHERE usesysid =";
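 /*
  * Illustrative aside (a sketch, not a line of the original file):
  * username_subquery is a SQL fragment that later catalog queries
  * interpolate to turn an owner-OID column into a role name.  Simplified
  * from the pattern used in dumpDatabase() further down, the usage looks
  * roughly like:
  *
  *     appendPQExpBuffer(dbQry,
  *                       "SELECT tableoid, oid, (%s datdba) AS dba, ... "
  *                       "FROM pg_database",
  *                       username_subquery);
  */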
689 
690  /* check the version for the synchronized snapshots feature */
691  if (numWorkers > 1 && fout->remoteVersion < 90200
692  && !dopt.no_synchronized_snapshots)
694  "Synchronized snapshots are not supported by this server version.\n"
695  "Run with --no-synchronized-snapshots instead if you do not need\n"
696  "synchronized snapshots.\n");
697 
698  /* check the version when a snapshot is explicitly specified by user */
699  if (dumpsnapshot && fout->remoteVersion < 90200)
701  "Exported snapshots are not supported by this server version.\n");
702 
703  /*
704  * Find the last built-in OID, if needed (prior to 8.1)
705  *
706  * With 8.1 and above, we can just use FirstNormalObjectId - 1.
707  */
708  if (fout->remoteVersion < 80100)
710  PQdb(GetConnection(fout)));
711  else
713 
714  if (g_verbose)
715  write_msg(NULL, "last built-in OID is %u\n", g_last_builtin_oid);
716 
717  /* Expand schema selection patterns into OID lists */
718  if (schema_include_patterns.head != NULL)
719  {
720  expand_schema_name_patterns(fout, &schema_include_patterns,
721  &schema_include_oids,
722  strict_names);
723  if (schema_include_oids.head == NULL)
724  exit_horribly(NULL, "no matching schemas were found\n");
725  }
726  expand_schema_name_patterns(fout, &schema_exclude_patterns,
727  &schema_exclude_oids,
728  false);
729  /* non-matching exclusion patterns aren't an error */
730 
731  /* Expand table selection patterns into OID lists */
732  if (table_include_patterns.head != NULL)
733  {
734  expand_table_name_patterns(fout, &table_include_patterns,
735  &table_include_oids,
736  strict_names);
737  if (table_include_oids.head == NULL)
738  exit_horribly(NULL, "no matching tables were found\n");
739  }
740  expand_table_name_patterns(fout, &table_exclude_patterns,
741  &table_exclude_oids,
742  false);
743 
744  expand_table_name_patterns(fout, &tabledata_exclude_patterns,
745  &tabledata_exclude_oids,
746  false);
747 
748  /* non-matching exclusion patterns aren't an error */
749 
750  /*
751  * Dumping blobs is the default for dumps where an inclusion switch is not
752  * used (an "include everything" dump). -B can be used to exclude blobs
753  * from those dumps. -b can be used to include blobs even when an
754  * inclusion switch is used.
755  *
756  * -s means "schema only" and blobs are data, not schema, so we never
757  * include blobs when -s is used.
758  */
759  if (dopt.include_everything && !dopt.schemaOnly && !dopt.dontOutputBlobs)
760  dopt.outputBlobs = true;
761 
762  /*
763  * Now scan the database and create DumpableObject structs for all the
764  * objects we intend to dump.
765  */
766  tblinfo = getSchemaData(fout, &numTables);
767 
768  if (fout->remoteVersion < 80400)
769  guessConstraintInheritance(tblinfo, numTables);
770 
771  if (!dopt.schemaOnly)
772  {
773  getTableData(&dopt, tblinfo, numTables, dopt.oids, 0);
775  if (dopt.dataOnly)
777  }
778 
779  if (dopt.schemaOnly && dopt.sequence_data)
780  getTableData(&dopt, tblinfo, numTables, dopt.oids, RELKIND_SEQUENCE);
781 
782  /*
783  * In binary-upgrade mode, we do not have to worry about the actual blob
784  * data or the associated metadata that resides in the pg_largeobject and
785  * pg_largeobject_metadata tables, respectively.
786  *
787  * However, we do need to collect blob information as there may be
788  * comments or other information on blobs that we do need to dump out.
789  */
790  if (dopt.outputBlobs || dopt.binary_upgrade)
791  getBlobs(fout);
792 
793  /*
794  * Collect dependency data to assist in ordering the objects.
795  */
796  getDependencies(fout);
797 
798  /* Lastly, create dummy objects to represent the section boundaries */
799  boundaryObjs = createBoundaryObjects();
800 
801  /* Get pointers to all the known DumpableObjects */
802  getDumpableObjects(&dobjs, &numObjs);
803 
804  /*
805  * Add dummy dependencies to enforce the dump section ordering.
806  */
807  addBoundaryDependencies(dobjs, numObjs, boundaryObjs);
808 
809  /*
810  * Sort the objects into a safe dump order (no forward references).
811  *
812  * We rely on dependency information to help us determine a safe order, so
813  * the initial sort is mostly for cosmetic purposes: we sort by name to
814  * ensure that logically identical schemas will dump identically.
815  */
816  sortDumpableObjectsByTypeName(dobjs, numObjs);
817 
818  /* If we do a parallel dump, we want the largest tables to go first */
819  if (archiveFormat == archDirectory && numWorkers > 1)
820  sortDataAndIndexObjectsBySize(dobjs, numObjs);
821 
822  sortDumpableObjects(dobjs, numObjs,
823  boundaryObjs[0].dumpId, boundaryObjs[1].dumpId);
824 
825  /*
826  * Create archive TOC entries for all the objects to be dumped, in a safe
827  * order.
828  */
829 
830  /* First the special ENCODING and STDSTRINGS entries. */
831  dumpEncoding(fout);
832  dumpStdStrings(fout);
833 
834  /* The database item is always next, unless we don't want it at all */
835  if (dopt.include_everything && !dopt.dataOnly)
836  dumpDatabase(fout);
837 
838  /* Now the rearrangeable objects. */
839  for (i = 0; i < numObjs; i++)
840  dumpDumpableObject(fout, dobjs[i]);
841 
842  /*
843  * Set up options info to ensure we dump what we want.
844  */
845  ropt = NewRestoreOptions();
846  ropt->filename = filename;
847 
848  /* if you change this list, see dumpOptionsFromRestoreOptions */
849  ropt->dropSchema = dopt.outputClean;
850  ropt->dataOnly = dopt.dataOnly;
851  ropt->schemaOnly = dopt.schemaOnly;
852  ropt->if_exists = dopt.if_exists;
853  ropt->column_inserts = dopt.column_inserts;
854  ropt->dumpSections = dopt.dumpSections;
855  ropt->aclsSkip = dopt.aclsSkip;
856  ropt->superuser = dopt.outputSuperuser;
857  ropt->createDB = dopt.outputCreateDB;
858  ropt->noOwner = dopt.outputNoOwner;
859  ropt->noTablespace = dopt.outputNoTablespaces;
860  ropt->disable_triggers = dopt.disable_triggers;
861  ropt->use_setsessauth = dopt.use_setsessauth;
863  ropt->dump_inserts = dopt.dump_inserts;
865  ropt->lockWaitTimeout = dopt.lockWaitTimeout;
868  ropt->sequence_data = dopt.sequence_data;
869  ropt->binary_upgrade = dopt.binary_upgrade;
870 
871  if (compressLevel == -1)
872  ropt->compression = 0;
873  else
874  ropt->compression = compressLevel;
875 
876  ropt->suppressDumpWarnings = true; /* We've already shown them */
877 
878  SetArchiveOptions(fout, &dopt, ropt);
879 
880  /* Mark which entries should be output */
882 
883  /*
884  * The archive's TOC entries are now marked as to which ones will actually
885  * be output, so we can set up their dependency lists properly. This isn't
886  * necessary for plain-text output, though.
887  */
888  if (!plainText)
890 
891  /*
892  * And finally we can do the actual output.
893  *
894  * Note: for non-plain-text output formats, the output file is written
895  * inside CloseArchive(). This is, um, bizarre; but not worth changing
896  * right now.
897  */
898  if (plainText)
899  RestoreArchive(fout);
900 
901  CloseArchive(fout);
902 
903  exit_nicely(0);
904 }
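For orientation, the dump pipeline that main() drives condenses to roughly the sketch below. Every call shown appears in the function above and reuses its local variables; option handling, error paths, and the RestoreOptions bookkeeping are stripped, so treat it as an outline rather than compilable code on its own.

    /* Condensed sketch of main()'s dump pipeline (see the full code above) */
    fout = CreateArchive(filename, archiveFormat, compressLevel, dosync,
                         archiveMode, setupDumpWorker);
    SetArchiveOptions(fout, &dopt, NULL);
    ConnectDatabase(fout, dopt.dbname, dopt.pghost, dopt.pgport,
                    dopt.username, prompt_password);
    setup_connection(fout, dumpencoding, dumpsnapshot, use_role);

    tblinfo = getSchemaData(fout, &numTables);      /* collect dumpable objects */
    getTableData(&dopt, tblinfo, numTables, dopt.oids, 0);
    getDependencies(fout);                          /* ordering information */

    boundaryObjs = createBoundaryObjects();
    getDumpableObjects(&dobjs, &numObjs);
    addBoundaryDependencies(dobjs, numObjs, boundaryObjs);

    sortDumpableObjectsByTypeName(dobjs, numObjs);
    sortDumpableObjects(dobjs, numObjs,
                        boundaryObjs[0].dumpId, boundaryObjs[1].dumpId);

    dumpEncoding(fout);                             /* special TOC entries */
    dumpStdStrings(fout);
    for (i = 0; i < numObjs; i++)
        dumpDumpableObject(fout, dobjs[i]);         /* everything else */

    if (plainText)
        RestoreArchive(fout);                       /* emit the plain SQL script */
    CloseArchive(fout);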
905 
906 
907 static void
908 help(const char *progname)
909 {
910  printf(_("%s dumps a database as a text file or to other formats.\n\n"), progname);
911  printf(_("Usage:\n"));
912  printf(_(" %s [OPTION]... [DBNAME]\n"), progname);
913 
914  printf(_("\nGeneral options:\n"));
915  printf(_(" -f, --file=FILENAME output file or directory name\n"));
916  printf(_(" -F, --format=c|d|t|p output file format (custom, directory, tar,\n"
917  " plain text (default))\n"));
918  printf(_(" -j, --jobs=NUM use this many parallel jobs to dump\n"));
919  printf(_(" -v, --verbose verbose mode\n"));
920  printf(_(" -V, --version output version information, then exit\n"));
921  printf(_(" -Z, --compress=0-9 compression level for compressed formats\n"));
922  printf(_(" --lock-wait-timeout=TIMEOUT fail after waiting TIMEOUT for a table lock\n"));
923  printf(_(" --no-sync do not wait for changes to be written safely to disk\n"));
924  printf(_(" -?, --help show this help, then exit\n"));
925 
926  printf(_("\nOptions controlling the output content:\n"));
927  printf(_(" -a, --data-only dump only the data, not the schema\n"));
928  printf(_(" -b, --blobs include large objects in dump\n"));
929  printf(_(" -B, --no-blobs exclude large objects in dump\n"));
930  printf(_(" -c, --clean clean (drop) database objects before recreating\n"));
931  printf(_(" -C, --create include commands to create database in dump\n"));
932  printf(_(" -E, --encoding=ENCODING dump the data in encoding ENCODING\n"));
933  printf(_(" -n, --schema=SCHEMA dump the named schema(s) only\n"));
934  printf(_(" -N, --exclude-schema=SCHEMA do NOT dump the named schema(s)\n"));
935  printf(_(" -o, --oids include OIDs in dump\n"));
936  printf(_(" -O, --no-owner skip restoration of object ownership in\n"
937  " plain-text format\n"));
938  printf(_(" -s, --schema-only dump only the schema, no data\n"));
939  printf(_(" -S, --superuser=NAME superuser user name to use in plain-text format\n"));
940  printf(_(" -t, --table=TABLE dump the named table(s) only\n"));
941  printf(_(" -T, --exclude-table=TABLE do NOT dump the named table(s)\n"));
942  printf(_(" -x, --no-privileges do not dump privileges (grant/revoke)\n"));
943  printf(_(" --binary-upgrade for use by upgrade utilities only\n"));
944  printf(_(" --column-inserts dump data as INSERT commands with column names\n"));
945  printf(_(" --disable-dollar-quoting disable dollar quoting, use SQL standard quoting\n"));
946  printf(_(" --disable-triggers disable triggers during data-only restore\n"));
947  printf(_(" --enable-row-security enable row security (dump only content user has\n"
948  " access to)\n"));
949  printf(_(" --exclude-table-data=TABLE do NOT dump data for the named table(s)\n"));
950  printf(_(" --if-exists use IF EXISTS when dropping objects\n"));
951  printf(_(" --inserts dump data as INSERT commands, rather than COPY\n"));
952  printf(_(" --no-security-labels do not dump security label assignments\n"));
953  printf(_(" --no-synchronized-snapshots do not use synchronized snapshots in parallel jobs\n"));
954  printf(_(" --no-tablespaces do not dump tablespace assignments\n"));
955  printf(_(" --no-unlogged-table-data do not dump unlogged table data\n"));
956  printf(_(" --quote-all-identifiers quote all identifiers, even if not key words\n"));
957  printf(_(" --section=SECTION dump named section (pre-data, data, or post-data)\n"));
958  printf(_(" --serializable-deferrable wait until the dump can run without anomalies\n"));
959  printf(_(" --snapshot=SNAPSHOT use given snapshot for the dump\n"));
960  printf(_(" --strict-names require table and/or schema include patterns to\n"
961  " match at least one entity each\n"));
962  printf(_(" --use-set-session-authorization\n"
963  " use SET SESSION AUTHORIZATION commands instead of\n"
964  " ALTER OWNER commands to set ownership\n"));
965 
966  printf(_("\nConnection options:\n"));
967  printf(_(" -d, --dbname=DBNAME database to dump\n"));
968  printf(_(" -h, --host=HOSTNAME database server host or socket directory\n"));
969  printf(_(" -p, --port=PORT database server port number\n"));
970  printf(_(" -U, --username=NAME connect as specified database user\n"));
971  printf(_(" -w, --no-password never prompt for password\n"));
972  printf(_(" -W, --password force password prompt (should happen automatically)\n"));
973  printf(_(" --role=ROLENAME do SET ROLE before dump\n"));
974 
975  printf(_("\nIf no database name is supplied, then the PGDATABASE environment\n"
976  "variable value is used.\n\n"));
977  printf(_("Report bugs to <pgsql-bugs@postgresql.org>.\n"));
978 }
979 
980 static void
981 setup_connection(Archive *AH, const char *dumpencoding,
982  const char *dumpsnapshot, char *use_role)
983 {
984  DumpOptions *dopt = AH->dopt;
985  PGconn *conn = GetConnection(AH);
986  const char *std_strings;
987 
988  /*
989  * Set the client encoding if requested.
990  */
991  if (dumpencoding)
992  {
993  if (PQsetClientEncoding(conn, dumpencoding) < 0)
994  exit_horribly(NULL, "invalid client encoding \"%s\" specified\n",
995  dumpencoding);
996  }
997 
998  /*
999  * Get the active encoding and the standard_conforming_strings setting, so
1000  * we know how to escape strings.
1001  */
1002  AH->encoding = PQclientEncoding(conn);
1003 
1004  std_strings = PQparameterStatus(conn, "standard_conforming_strings");
1005  AH->std_strings = (std_strings && strcmp(std_strings, "on") == 0);
1006 
1007  /*
1008  * Set the role if requested. In a parallel dump worker, we'll be passed
1009  * use_role == NULL, but AH->use_role is already set (if user specified it
1010  * originally) and we should use that.
1011  */
1012  if (!use_role && AH->use_role)
1013  use_role = AH->use_role;
1014 
1015  /* Set the role if requested */
1016  if (use_role && AH->remoteVersion >= 80100)
1017  {
1018  PQExpBuffer query = createPQExpBuffer();
1019 
1020  appendPQExpBuffer(query, "SET ROLE %s", fmtId(use_role));
1021  ExecuteSqlStatement(AH, query->data);
1022  destroyPQExpBuffer(query);
1023 
1024  /* save it for possible later use by parallel workers */
1025  if (!AH->use_role)
1026  AH->use_role = pg_strdup(use_role);
1027  }
1028 
1029  /* Set the datestyle to ISO to ensure the dump's portability */
1030  ExecuteSqlStatement(AH, "SET DATESTYLE = ISO");
1031 
1032  /* Likewise, avoid using sql_standard intervalstyle */
1033  if (AH->remoteVersion >= 80400)
1034  ExecuteSqlStatement(AH, "SET INTERVALSTYLE = POSTGRES");
1035 
1036  /*
1037  * Set extra_float_digits so that we can dump float data exactly (given
1038  * correctly implemented float I/O code, anyway)
1039  */
1040  if (AH->remoteVersion >= 90000)
1041  ExecuteSqlStatement(AH, "SET extra_float_digits TO 3");
1042  else
1043  ExecuteSqlStatement(AH, "SET extra_float_digits TO 2");
1044 
1045  /*
1046  * If synchronized scanning is supported, disable it, to prevent
1047  * unpredictable changes in row ordering across a dump and reload.
1048  */
1049  if (AH->remoteVersion >= 80300)
1050  ExecuteSqlStatement(AH, "SET synchronize_seqscans TO off");
1051 
1052  /*
1053  * Disable timeouts if supported.
1054  */
1055  ExecuteSqlStatement(AH, "SET statement_timeout = 0");
1056  if (AH->remoteVersion >= 90300)
1057  ExecuteSqlStatement(AH, "SET lock_timeout = 0");
1058  if (AH->remoteVersion >= 90600)
1059  ExecuteSqlStatement(AH, "SET idle_in_transaction_session_timeout = 0");
1060 
1061  /*
1062  * Quote all identifiers, if requested.
1063  */
1064  if (quote_all_identifiers && AH->remoteVersion >= 90100)
1065  ExecuteSqlStatement(AH, "SET quote_all_identifiers = true");
1066 
1067  /*
1068  * Adjust row-security mode, if supported.
1069  */
1070  if (AH->remoteVersion >= 90500)
1071  {
1072  if (dopt->enable_row_security)
1073  ExecuteSqlStatement(AH, "SET row_security = on");
1074  else
1075  ExecuteSqlStatement(AH, "SET row_security = off");
1076  }
1077 
1078  /*
1079  * Start transaction-snapshot mode transaction to dump consistent data.
1080  */
1081  ExecuteSqlStatement(AH, "BEGIN");
1082  if (AH->remoteVersion >= 90100)
1083  {
1084  /*
1085  * To support the combination of serializable_deferrable with the jobs
1086  * option we use REPEATABLE READ for the worker connections that are
1087  * passed a snapshot. As long as the snapshot is acquired in a
1088  * SERIALIZABLE, READ ONLY, DEFERRABLE transaction, its use within a
1089  * REPEATABLE READ transaction provides the appropriate integrity
1090  * guarantees. This is a kluge, but safe for back-patching.
1091  */
1092  if (dopt->serializable_deferrable && AH->sync_snapshot_id == NULL)
1094  "SET TRANSACTION ISOLATION LEVEL "
1095  "SERIALIZABLE, READ ONLY, DEFERRABLE");
1096  else
1098  "SET TRANSACTION ISOLATION LEVEL "
1099  "REPEATABLE READ, READ ONLY");
1100  }
1101  else
1102  {
1104  "SET TRANSACTION ISOLATION LEVEL "
1105  "SERIALIZABLE, READ ONLY");
1106  }
1107 
1108  /*
1109  * If user specified a snapshot to use, select that. In a parallel dump
1110  * worker, we'll be passed dumpsnapshot == NULL, but AH->sync_snapshot_id
1111  * is already set (if the server can handle it) and we should use that.
1112  */
1113  if (dumpsnapshot)
1114  AH->sync_snapshot_id = pg_strdup(dumpsnapshot);
1115 
1116  if (AH->sync_snapshot_id)
1117  {
1118  PQExpBuffer query = createPQExpBuffer();
1119 
1120  appendPQExpBuffer(query, "SET TRANSACTION SNAPSHOT ");
1121  appendStringLiteralConn(query, AH->sync_snapshot_id, conn);
1122  ExecuteSqlStatement(AH, query->data);
1123  destroyPQExpBuffer(query);
1124  }
1125  else if (AH->numWorkers > 1 &&
1126  AH->remoteVersion >= 90200 &&
1128  {
1129  if (AH->isStandby)
1131  "Synchronized snapshots are not supported on standby servers.\n"
1132  "Run with --no-synchronized-snapshots instead if you do not need\n"
1133  "synchronized snapshots.\n");
1134 
1135 
1137  }
1138 }
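Taken together, on a 9.6-or-later primary server with default options, setup_connection() amounts to roughly the session setup below; each statement appears verbatim in the code above, and the list simply omits the optional SET ROLE, client-encoding, quote_all_identifiers, and snapshot-import branches.

    /*
     * Effective session setup on a >= 9.6 primary with default options:
     *
     *   SET DATESTYLE = ISO;
     *   SET INTERVALSTYLE = POSTGRES;
     *   SET extra_float_digits TO 3;
     *   SET synchronize_seqscans TO off;
     *   SET statement_timeout = 0;
     *   SET lock_timeout = 0;
     *   SET idle_in_transaction_session_timeout = 0;
     *   SET row_security = off;
     *   BEGIN;
     *   SET TRANSACTION ISOLATION LEVEL REPEATABLE READ, READ ONLY;
     */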
1139 
1140 /* Set up connection for a parallel worker process */
1141 static void
1143 {
1144  /*
1145  * We want to re-select all the same values the master connection is
1146  * using. We'll have inherited directly-usable values in
1147  * AH->sync_snapshot_id and AH->use_role, but we need to translate the
1148  * inherited encoding value back to a string to pass to setup_connection.
1149  */
1150  setup_connection(AH,
1152  NULL,
1153  NULL);
1154 }
1155 
1156 static char *
1158 {
1159  char *query = "SELECT pg_catalog.pg_export_snapshot()";
1160  char *result;
1161  PGresult *res;
1162 
1163  res = ExecuteSqlQueryForSingleRow(fout, query);
1164  result = pg_strdup(PQgetvalue(res, 0, 0));
1165  PQclear(res);
1166 
1167  return result;
1168 }
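A brief sketch of how this function ties a parallel dump together: the leader exports a snapshot and the workers import it, so all connections see the same data. Both halves live in setup_connection() above; the leader/worker split drawn here is schematic, not a separate function in this file.

    /* Schematic only: leader exports a snapshot, each worker imports it. */

    /* Leader, after BEGIN and SET TRANSACTION ISOLATION LEVEL ...: */
    AH->sync_snapshot_id = get_synchronized_snapshot(AH);

    /* Worker, in its own connection and transaction: */
    PQExpBuffer query = createPQExpBuffer();

    appendPQExpBuffer(query, "SET TRANSACTION SNAPSHOT ");
    appendStringLiteralConn(query, AH->sync_snapshot_id, GetConnection(AH));
    ExecuteSqlStatement(AH, query->data);
    destroyPQExpBuffer(query);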
1169 
1170 static ArchiveFormat
1172 {
1173  ArchiveFormat archiveFormat;
1174 
1175  *mode = archModeWrite;
1176 
1177  if (pg_strcasecmp(format, "a") == 0 || pg_strcasecmp(format, "append") == 0)
1178  {
1179  /* This is used by pg_dumpall, and is not documented */
1180  archiveFormat = archNull;
1181  *mode = archModeAppend;
1182  }
1183  else if (pg_strcasecmp(format, "c") == 0)
1184  archiveFormat = archCustom;
1185  else if (pg_strcasecmp(format, "custom") == 0)
1186  archiveFormat = archCustom;
1187  else if (pg_strcasecmp(format, "d") == 0)
1188  archiveFormat = archDirectory;
1189  else if (pg_strcasecmp(format, "directory") == 0)
1190  archiveFormat = archDirectory;
1191  else if (pg_strcasecmp(format, "p") == 0)
1192  archiveFormat = archNull;
1193  else if (pg_strcasecmp(format, "plain") == 0)
1194  archiveFormat = archNull;
1195  else if (pg_strcasecmp(format, "t") == 0)
1196  archiveFormat = archTar;
1197  else if (pg_strcasecmp(format, "tar") == 0)
1198  archiveFormat = archTar;
1199  else
1200  exit_horribly(NULL, "invalid output format \"%s\" specified\n", format);
1201  return archiveFormat;
1202 }
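In summary, the accepted -F/--format values map as follows; the compression and parallelism notes restate checks made earlier in main().

    /*
     *   -F value        ArchiveFormat    notes
     *   p / plain       archNull         plain SQL script; the default
     *   c / custom      archCustom       compressed by default when built with zlib
     *   d / directory   archDirectory    compressed by default; only format allowing -j
     *   t / tar         archTar
     *   a / append      archNull         archModeAppend; internal, used by pg_dumpall
     */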
1203 
1204 /*
1205  * Find the OIDs of all schemas matching the given list of patterns,
1206  * and append them to the given OID list.
1207  */
1208 static void
1210  SimpleStringList *patterns,
1211  SimpleOidList *oids,
1212  bool strict_names)
1213 {
1214  PQExpBuffer query;
1215  PGresult *res;
1216  SimpleStringListCell *cell;
1217  int i;
1218 
1219  if (patterns->head == NULL)
1220  return; /* nothing to do */
1221 
1222  query = createPQExpBuffer();
1223 
1224  /*
1225  * The loop below runs multiple SELECTs, which might sometimes result in
1226  * duplicate entries in the OID list, but we don't care.
1227  */
1228 
1229  for (cell = patterns->head; cell; cell = cell->next)
1230  {
1231  appendPQExpBuffer(query,
1232  "SELECT oid FROM pg_catalog.pg_namespace n\n");
1233  processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1234  false, NULL, "n.nspname", NULL, NULL);
1235 
1236  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1237  if (strict_names && PQntuples(res) == 0)
1238  exit_horribly(NULL, "no matching schemas were found for pattern \"%s\"\n", cell->val);
1239 
1240  for (i = 0; i < PQntuples(res); i++)
1241  {
1242  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1243  }
1244 
1245  PQclear(res);
1246  resetPQExpBuffer(query);
1247  }
1248 
1249  destroyPQExpBuffer(query);
1250 }
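As a concrete illustration (assuming the usual behavior of processSQLNamePattern(), which translates a shell-style pattern into an anchored regular expression), a switch such as -n 'foo*' makes the loop above issue approximately the query sketched here.

    /*
     * Approximate query built for the pattern "foo*":
     *
     *   SELECT oid FROM pg_catalog.pg_namespace n
     *   WHERE n.nspname ~ '^(foo.*)$'
     *
     * Each resulting OID is appended to the caller's list, e.g.
     * schema_include_oids for -n patterns.
     */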
1251 
1252 /*
1253  * Find the OIDs of all tables matching the given list of patterns,
1254  * and append them to the given OID list.
1255  */
1256 static void
1258  SimpleStringList *patterns, SimpleOidList *oids,
1259  bool strict_names)
1260 {
1261  PQExpBuffer query;
1262  PGresult *res;
1263  SimpleStringListCell *cell;
1264  int i;
1265 
1266  if (patterns->head == NULL)
1267  return; /* nothing to do */
1268 
1269  query = createPQExpBuffer();
1270 
1271  /*
1272  * this might sometimes result in duplicate entries in the OID list, but
1273  * we don't care.
1274  */
1275 
1276  for (cell = patterns->head; cell; cell = cell->next)
1277  {
1278  appendPQExpBuffer(query,
1279  "SELECT c.oid"
1280  "\nFROM pg_catalog.pg_class c"
1281  "\n LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace"
1282  "\nWHERE c.relkind in ('%c', '%c', '%c', '%c', '%c', '%c')\n",
1286  processSQLNamePattern(GetConnection(fout), query, cell->val, true,
1287  false, "n.nspname", "c.relname", NULL,
1288  "pg_catalog.pg_table_is_visible(c.oid)");
1289 
1290  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1291  if (strict_names && PQntuples(res) == 0)
1292  exit_horribly(NULL, "no matching tables were found for pattern \"%s\"\n", cell->val);
1293 
1294  for (i = 0; i < PQntuples(res); i++)
1295  {
1296  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1297  }
1298 
1299  PQclear(res);
1300  resetPQExpBuffer(query);
1301  }
1302 
1303  destroyPQExpBuffer(query);
1304 }
1305 
1306 /*
1307  * checkExtensionMembership
1308  * Determine whether object is an extension member, and if so,
1309  * record an appropriate dependency and set the object's dump flag.
1310  *
1311  * It's important to call this for each object that could be an extension
1312  * member. Generally, we integrate this with determining the object's
1313  * to-be-dumped-ness, since extension membership overrides other rules for that.
1314  *
1315  * Returns true if object is an extension member, else false.
1316  */
1317 static bool
1319 {
1320  ExtensionInfo *ext = findOwningExtension(dobj->catId);
1321 
1322  if (ext == NULL)
1323  return false;
1324 
1325  dobj->ext_member = true;
1326 
1327  /* Record dependency so that getDependencies needn't deal with that */
1328  addObjectDependency(dobj, ext->dobj.dumpId);
1329 
1330  /*
1331  * In 9.6 and above, mark the member object to have any non-initial ACL,
1332  * policies, and security labels dumped.
1333  *
1334  * Note that any initial ACLs (see pg_init_privs) will be removed when we
1335  * extract the information about the object. We don't provide support for
1336  * initial policies and security labels and it seems unlikely for those to
1337  * ever exist, but we may have to revisit this later.
1338  *
1339  * Prior to 9.6, we do not include any extension member components.
1340  *
1341  * In binary upgrades, we still dump all components of the members
1342  * individually, since the idea is to exactly reproduce the database
1343  * contents rather than replace the extension contents with something
1344  * different.
1345  */
1346  if (fout->dopt->binary_upgrade)
1347  dobj->dump = ext->dobj.dump;
1348  else
1349  {
1350  if (fout->remoteVersion < 90600)
1351  dobj->dump = DUMP_COMPONENT_NONE;
1352  else
1353  dobj->dump = ext->dobj.dump_contains & (DUMP_COMPONENT_ACL |
1356  }
1357 
1358  return true;
1359 }
1360 
1361 /*
1362  * selectDumpableNamespace: policy-setting subroutine
1363  * Mark a namespace as to be dumped or not
1364  */
1365 static void
1367 {
1368  /*
1369  * If specific tables are being dumped, do not dump any complete
1370  * namespaces. If specific namespaces are being dumped, dump just those
1371  * namespaces. Otherwise, dump all non-system namespaces.
1372  */
1373  if (table_include_oids.head != NULL)
1374  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1375  else if (schema_include_oids.head != NULL)
1376  nsinfo->dobj.dump_contains = nsinfo->dobj.dump =
1377  simple_oid_list_member(&schema_include_oids,
1378  nsinfo->dobj.catId.oid) ?
1380  else if (fout->remoteVersion >= 90600 &&
1381  strcmp(nsinfo->dobj.name, "pg_catalog") == 0)
1382  {
1383  /*
1384  * In 9.6 and above, we dump out any ACLs defined in pg_catalog, if
1385  * they are interesting (and not the original ACLs which were set at
1386  * initdb time, see pg_init_privs).
1387  */
1388  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
1389  }
1390  else if (strncmp(nsinfo->dobj.name, "pg_", 3) == 0 ||
1391  strcmp(nsinfo->dobj.name, "information_schema") == 0)
1392  {
1393  /* Other system schemas don't get dumped */
1394  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1395  }
1396  else
1397  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ALL;
1398 
1399  /*
1400  * In any case, a namespace can be excluded by an exclusion switch
1401  */
1402  if (nsinfo->dobj.dump_contains &&
1403  simple_oid_list_member(&schema_exclude_oids,
1404  nsinfo->dobj.catId.oid))
1405  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1406 
1407  /*
1408  * If the schema belongs to an extension, allow extension membership to
1409  * override the dump decision for the schema itself. However, this does
1410  * not change dump_contains, so this won't change what we do with objects
1411  * within the schema. (If they belong to the extension, they'll get
1412  * suppressed by it, otherwise not.)
1413  */
1414  (void) checkExtensionMembership(&nsinfo->dobj, fout);
1415 }
1416 
1417 /*
1418  * selectDumpableTable: policy-setting subroutine
1419  * Mark a table as to be dumped or not
1420  */
1421 static void
1423 {
1424  if (checkExtensionMembership(&tbinfo->dobj, fout))
1425  return; /* extension membership overrides all else */
1426 
1427  /*
1428  * If specific tables are being dumped, dump just those tables; else, dump
1429  * according to the parent namespace's dump flag.
1430  */
1431  if (table_include_oids.head != NULL)
1432  tbinfo->dobj.dump = simple_oid_list_member(&table_include_oids,
1433  tbinfo->dobj.catId.oid) ?
1435  else
1436  tbinfo->dobj.dump = tbinfo->dobj.namespace->dobj.dump_contains;
1437 
1438  /*
1439  * In any case, a table can be excluded by an exclusion switch
1440  */
1441  if (tbinfo->dobj.dump &&
1442  simple_oid_list_member(&table_exclude_oids,
1443  tbinfo->dobj.catId.oid))
1444  tbinfo->dobj.dump = DUMP_COMPONENT_NONE;
1445 }
1446 
1447 /*
1448  * selectDumpableType: policy-setting subroutine
1449  * Mark a type as to be dumped or not
1450  *
1451  * If it's a table's rowtype or an autogenerated array type, we also apply a
1452  * special type code to facilitate sorting into the desired order. (We don't
1453  * want to consider those to be ordinary types because that would bring tables
1454  * up into the datatype part of the dump order.) We still set the object's
1455  * dump flag; that's not going to cause the dummy type to be dumped, but we
1456  * need it so that casts involving such types will be dumped correctly -- see
1457  * dumpCast. This means the flag should be set the same as for the underlying
1458  * object (the table or base type).
1459  */
1460 static void
1462 {
1463  /* skip complex types, except for standalone composite types */
1464  if (OidIsValid(tyinfo->typrelid) &&
1466  {
1467  TableInfo *tytable = findTableByOid(tyinfo->typrelid);
1468 
1469  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1470  if (tytable != NULL)
1471  tyinfo->dobj.dump = tytable->dobj.dump;
1472  else
1473  tyinfo->dobj.dump = DUMP_COMPONENT_NONE;
1474  return;
1475  }
1476 
1477  /* skip auto-generated array types */
1478  if (tyinfo->isArray)
1479  {
1480  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1481 
1482  /*
1483  * Fall through to set the dump flag; we assume that the subsequent
1484  * rules will do the same thing as they would for the array's base
1485  * type. (We cannot reliably look up the base type here, since
1486  * getTypes may not have processed it yet.)
1487  */
1488  }
1489 
1490  if (checkExtensionMembership(&tyinfo->dobj, fout))
1491  return; /* extension membership overrides all else */
1492 
1493  /* Dump based on if the contents of the namespace are being dumped */
1494  tyinfo->dobj.dump = tyinfo->dobj.namespace->dobj.dump_contains;
1495 }
1496 
1497 /*
1498  * selectDumpableDefaultACL: policy-setting subroutine
1499  * Mark a default ACL as to be dumped or not
1500  *
1501  * For per-schema default ACLs, dump if the schema is to be dumped.
1502  * Otherwise dump if we are dumping "everything". Note that dataOnly
1503  * and aclsSkip are checked separately.
1504  */
1505 static void
1507 {
1508  /* Default ACLs can't be extension members */
1509 
1510  if (dinfo->dobj.namespace)
1511  /* default ACLs are considered part of the namespace */
1512  dinfo->dobj.dump = dinfo->dobj.namespace->dobj.dump_contains;
1513  else
1514  dinfo->dobj.dump = dopt->include_everything ?
1516 }
1517 
1518 /*
1519  * selectDumpableCast: policy-setting subroutine
1520  * Mark a cast as to be dumped or not
1521  *
1522  * Casts do not belong to any particular namespace (since they haven't got
1523  * names), nor do they have identifiable owners. To distinguish user-defined
1524  * casts from built-in ones, we must resort to checking whether the cast's
1525  * OID is in the range reserved for initdb.
1526  */
1527 static void
1529 {
1530  if (checkExtensionMembership(&cast->dobj, fout))
1531  return; /* extension membership overrides all else */
1532 
1533  /*
1534  * This would be DUMP_COMPONENT_ACL for from-initdb casts, but they do not
1535  * support ACLs currently.
1536  */
1537  if (cast->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1538  cast->dobj.dump = DUMP_COMPONENT_NONE;
1539  else
1540  cast->dobj.dump = fout->dopt->include_everything ?
1542 }
1543 
1544 /*
1545  * selectDumpableProcLang: policy-setting subroutine
1546  * Mark a procedural language as to be dumped or not
1547  *
1548  * Procedural languages do not belong to any particular namespace. To
1549  * identify built-in languages, we must resort to checking whether the
1550  * language's OID is in the range reserved for initdb.
1551  */
1552 static void
1554 {
1555  if (checkExtensionMembership(&plang->dobj, fout))
1556  return; /* extension membership overrides all else */
1557 
1558  /*
1559  * Only include procedural languages when we are dumping everything.
1560  *
1561  * For from-initdb procedural languages, only include ACLs, as we do for
1562  * the pg_catalog namespace. We need this because procedural languages do
1563  * not live in any namespace.
1564  */
1565  if (!fout->dopt->include_everything)
1566  plang->dobj.dump = DUMP_COMPONENT_NONE;
1567  else
1568  {
1569  if (plang->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1570  plang->dobj.dump = fout->remoteVersion < 90600 ?
1572  else
1573  plang->dobj.dump = DUMP_COMPONENT_ALL;
1574  }
1575 }
1576 
1577 /*
1578  * selectDumpableAccessMethod: policy-setting subroutine
1579  * Mark an access method as to be dumped or not
1580  *
1581  * Access methods do not belong to any particular namespace. To identify
1582  * built-in access methods, we must resort to checking whether the
1583  * method's OID is in the range reserved for initdb.
1584  */
1585 static void
1587 {
1588  if (checkExtensionMembership(&method->dobj, fout))
1589  return; /* extension membership overrides all else */
1590 
1591  /*
1592  * This would be DUMP_COMPONENT_ACL for from-initdb access methods, but
1593  * they do not support ACLs currently.
1594  */
1595  if (method->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1596  method->dobj.dump = DUMP_COMPONENT_NONE;
1597  else
1598  method->dobj.dump = fout->dopt->include_everything ?
1600 }
1601 
1602 /*
1603  * selectDumpableExtension: policy-setting subroutine
1604  * Mark an extension as to be dumped or not
1605  *
1606  * Normally, we dump all extensions, or none of them if include_everything
1607  * is false (i.e., a --schema or --table switch was given). However, in
1608  * binary-upgrade mode it's necessary to skip built-in extensions, since we
1609  * assume those will already be installed in the target database. We identify
1610  * such extensions by their having OIDs in the range reserved for initdb.
1611  */
1612 static void
1614 {
1615  /*
1616  * Use DUMP_COMPONENT_ACL for from-initdb extensions, to allow users to
1617  * change permissions on those objects, if they wish to, and have those
1618  * changes preserved.
1619  */
1620  if (dopt->binary_upgrade && extinfo->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1621  extinfo->dobj.dump = extinfo->dobj.dump_contains = DUMP_COMPONENT_ACL;
1622  else
1623  extinfo->dobj.dump = extinfo->dobj.dump_contains =
1624  dopt->include_everything ? DUMP_COMPONENT_ALL :
1625  DUMP_COMPONENT_NONE;
1626 }
1627 
1628 /*
1629  * selectDumpablePublicationTable: policy-setting subroutine
1630  * Mark a publication table as to be dumped or not
1631  *
1632  * Publication tables have schemas, but those are ignored in decision making,
1633  * because publications are only dumped when we are dumping everything.
1634  */
1635 static void
1636 selectDumpablePublicationTable(DumpableObject *dobj, Archive *fout)
1637 {
1638  if (checkExtensionMembership(dobj, fout))
1639  return; /* extension membership overrides all else */
1640 
1641  dobj->dump = fout->dopt->include_everything ?
1642  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1643 }
1644 
1645 /*
1646  * selectDumpableObject: policy-setting subroutine
1647  * Mark a generic dumpable object as to be dumped or not
1648  *
1649  * Use this only for object types without a special-case routine above.
1650  */
1651 static void
1652 selectDumpableObject(DumpableObject *dobj, Archive *fout)
1653 {
1654  if (checkExtensionMembership(dobj, fout))
1655  return; /* extension membership overrides all else */
1656 
1657  /*
1658  * Default policy is to dump if parent namespace is dumpable, or for
1659  * non-namespace-associated items, dump if we're dumping "everything".
1660  */
1661  if (dobj->namespace)
1662  dobj->dump = dobj->namespace->dobj.dump_contains;
1663  else
1664  dobj->dump = fout->dopt->include_everything ?
1665  DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1666 }
1667 
1668 /*
1669  * Dump a table's contents for loading using the COPY command
1670  * - this routine is called by the Archiver when it wants the table
1671  * to be dumped.
1672  */
1673 
1674 static int
1675 dumpTableData_copy(Archive *fout, void *dcontext)
1676 {
1677  TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
1678  TableInfo *tbinfo = tdinfo->tdtable;
1679  const char *classname = tbinfo->dobj.name;
1680  const bool hasoids = tbinfo->hasoids;
1681  const bool oids = tdinfo->oids;
1682  PQExpBuffer q = createPQExpBuffer();
1683 
1684  /*
1685  * Note: can't use getThreadLocalPQExpBuffer() here, we're calling fmtId
1686  * which uses it already.
1687  */
1688  PQExpBuffer clistBuf = createPQExpBuffer();
1689  PGconn *conn = GetConnection(fout);
1690  PGresult *res;
1691  int ret;
1692  char *copybuf;
1693  const char *column_list;
1694 
1695  if (g_verbose)
1696  write_msg(NULL, "dumping contents of table \"%s.%s\"\n",
1697  tbinfo->dobj.namespace->dobj.name, classname);
1698 
1699  /*
1700  * Make sure we are in proper schema. We will qualify the table name
1701  * below anyway (in case its name conflicts with a pg_catalog table); but
1702  * this ensures reproducible results in case the table contains regproc,
1703  * regclass, etc columns.
1704  */
1705  selectSourceSchema(fout, tbinfo->dobj.namespace->dobj.name);
1706 
1707  /*
1708  * Specify the column list explicitly so that we have no possibility of
1709  * retrieving data in the wrong column order. (The default column
1710  * ordering of COPY will not be what we want in certain corner cases
1711  * involving ADD COLUMN and inheritance.)
1712  */
1713  column_list = fmtCopyColumnList(tbinfo, clistBuf);
1714 
1715  if (oids && hasoids)
1716  {
1717  appendPQExpBuffer(q, "COPY %s %s WITH OIDS TO stdout;",
1718  fmtQualifiedId(fout->remoteVersion,
1719  tbinfo->dobj.namespace->dobj.name,
1720  classname),
1721  column_list);
1722  }
1723  else if (tdinfo->filtercond)
1724  {
1725  /* Note: this syntax is only supported in 8.2 and up */
1726  appendPQExpBufferStr(q, "COPY (SELECT ");
1727  /* klugery to get rid of parens in column list */
1728  if (strlen(column_list) > 2)
1729  {
1730  appendPQExpBufferStr(q, column_list + 1);
1731  q->data[q->len - 1] = ' ';
1732  }
1733  else
1734  appendPQExpBufferStr(q, "* ");
1735  appendPQExpBuffer(q, "FROM %s %s) TO stdout;",
1736  fmtQualifiedId(fout->remoteVersion,
1737  tbinfo->dobj.namespace->dobj.name,
1738  classname),
1739  tdinfo->filtercond);
1740  }
1741  else
1742  {
1743  appendPQExpBuffer(q, "COPY %s %s TO stdout;",
1744  fmtQualifiedId(fout->remoteVersion,
1745  tbinfo->dobj.namespace->dobj.name,
1746  classname),
1747  column_list);
1748  }
1749  res = ExecuteSqlQuery(fout, q->data, PGRES_COPY_OUT);
1750  PQclear(res);
1751  destroyPQExpBuffer(clistBuf);
1752 
1753  for (;;)
1754  {
1755  ret = PQgetCopyData(conn, &copybuf, 0);
1756 
1757  if (ret < 0)
1758  break; /* done or error */
1759 
1760  if (copybuf)
1761  {
1762  WriteData(fout, copybuf, ret);
1763  PQfreemem(copybuf);
1764  }
1765 
1766  /* ----------
1767  * THROTTLE:
1768  *
1769  * There was considerable discussion in late July, 2000 regarding
1770  * slowing down pg_dump when backing up large tables. Users with both
1771  * slow & fast (multi-processor) machines experienced performance
1772  * degradation when doing a backup.
1773  *
1774  * Initial attempts based on sleeping for a number of ms for each ms
1775  * of work were deemed too complex, then a simple 'sleep in each loop'
1776  * implementation was suggested. The latter failed because the loop
1777  * was too tight. Finally, the following was implemented:
1778  *
1779  * If throttle is non-zero, then
1780  * See how long since the last sleep.
1781  * Work out how long to sleep (based on ratio).
1782  * If sleep is more than 100ms, then
1783  * sleep
1784  * reset timer
1785  * EndIf
1786  * EndIf
1787  *
1788  * where the throttle value was the number of ms to sleep per ms of
1789  * work. The calculation was done in each loop.
1790  *
1791  * Most of the hard work is done in the backend, and this solution
1792  * still did not work particularly well: on slow machines, the ratio
1793  * was 50:1, and on medium paced machines, 1:1, and on fast
1794  * multi-processor machines, it had little or no effect, for reasons
1795  * that were unclear.
1796  *
1797  * Further discussion ensued, and the proposal was dropped.
1798  *
1799  * For those people who want this feature, it can be implemented using
1800  * gettimeofday in each loop, calculating the time since last sleep,
1801  * multiplying that by the sleep ratio, then if the result is more
1802  * than a preset 'minimum sleep time' (say 100ms), call the 'select'
1803  * function to sleep for a subsecond period ie.
1804  *
1805  * select(0, NULL, NULL, NULL, &tvi);
1806  *
1807  * This will return after the interval specified in the structure tvi.
1808  * Finally, call gettimeofday again to save the 'last sleep time'.
1809  * ----------
1810  */
1811  }
1812  archprintf(fout, "\\.\n\n\n");
1813 
1814  if (ret == -2)
1815  {
1816  /* copy data transfer failed */
1817  write_msg(NULL, "Dumping the contents of table \"%s\" failed: PQgetCopyData() failed.\n", classname);
1818  write_msg(NULL, "Error message from server: %s", PQerrorMessage(conn));
1819  write_msg(NULL, "The command was: %s\n", q->data);
1820  exit_nicely(1);
1821  }
1822 
1823  /* Check command status and return to normal libpq state */
1824  res = PQgetResult(conn);
1825  if (PQresultStatus(res) != PGRES_COMMAND_OK)
1826  {
1827  write_msg(NULL, "Dumping the contents of table \"%s\" failed: PQgetResult() failed.\n", classname);
1828  write_msg(NULL, "Error message from server: %s", PQerrorMessage(conn));
1829  write_msg(NULL, "The command was: %s\n", q->data);
1830  exit_nicely(1);
1831  }
1832  PQclear(res);
1833 
1834  /* Do this to ensure we've pumped libpq back to idle state */
1835  if (PQgetResult(conn) != NULL)
1836  write_msg(NULL, "WARNING: unexpected extra results during COPY of table \"%s\"\n",
1837  classname);
1838 
1839  destroyPQExpBuffer(q);
1840  return 1;
1841 }
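/*
 * For illustration (assuming a hypothetical table public.t with columns a
 * and b, no OIDs, and no filter condition), the query built and sent above
 * would be:
 *
 *     COPY public.t (a, b) TO stdout;
 *
 * The rows returned by the server are written to the archive verbatim,
 * followed by the end-of-data marker "\." emitted by archprintf() above.
 */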
1842 
1843 /*
1844  * Dump table data using INSERT commands.
1845  *
1846  * Caution: when we restore from an archive file direct to database, the
1847  * INSERT commands emitted by this function have to be parsed by
1848  * pg_backup_db.c's ExecuteSimpleCommands(), which will not handle comments,
1849  * E'' strings, or dollar-quoted strings. So don't emit anything like that.
1850  */
1851 static int
1852 dumpTableData_insert(Archive *fout, void *dcontext)
1853 {
1854  TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
1855  TableInfo *tbinfo = tdinfo->tdtable;
1856  const char *classname = tbinfo->dobj.name;
1857  DumpOptions *dopt = fout->dopt;
1858  PQExpBuffer q = createPQExpBuffer();
1859  PQExpBuffer insertStmt = NULL;
1860  PGresult *res;
1861  int tuple;
1862  int nfields;
1863  int field;
1864 
1865  /*
1866  * Make sure we are in proper schema. We will qualify the table name
1867  * below anyway (in case its name conflicts with a pg_catalog table); but
1868  * this ensures reproducible results in case the table contains regproc,
1869  * regclass, etc columns.
1870  */
1871  selectSourceSchema(fout, tbinfo->dobj.namespace->dobj.name);
1872 
1873  appendPQExpBuffer(q, "DECLARE _pg_dump_cursor CURSOR FOR "
1874  "SELECT * FROM ONLY %s",
1875  fmtQualifiedId(fout->remoteVersion,
1876  tbinfo->dobj.namespace->dobj.name,
1877  classname));
1878  if (tdinfo->filtercond)
1879  appendPQExpBuffer(q, " %s", tdinfo->filtercond);
1880 
1881  ExecuteSqlStatement(fout, q->data);
1882 
1883  while (1)
1884  {
1885  res = ExecuteSqlQuery(fout, "FETCH 100 FROM _pg_dump_cursor",
1886  PGRES_TUPLES_OK);
1887  nfields = PQnfields(res);
1888  for (tuple = 0; tuple < PQntuples(res); tuple++)
1889  {
1890  /*
1891  * First time through, we build as much of the INSERT statement as
1892  * possible in "insertStmt", which we can then just print for each
1893  * line. If the table happens to have zero columns then this will
1894  * be a complete statement, otherwise it will end in "VALUES(" and
1895  * be ready to have the row's column values appended.
1896  */
1897  if (insertStmt == NULL)
1898  {
1899  insertStmt = createPQExpBuffer();
1900  appendPQExpBuffer(insertStmt, "INSERT INTO %s ",
1901  fmtId(classname));
1902 
1903  /* corner case for zero-column table */
1904  if (nfields == 0)
1905  {
1906  appendPQExpBufferStr(insertStmt, "DEFAULT VALUES;\n");
1907  }
1908  else
1909  {
1910  /* append the list of column names if required */
1911  if (dopt->column_inserts)
1912  {
1913  appendPQExpBufferChar(insertStmt, '(');
1914  for (field = 0; field < nfields; field++)
1915  {
1916  if (field > 0)
1917  appendPQExpBufferStr(insertStmt, ", ");
1918  appendPQExpBufferStr(insertStmt,
1919  fmtId(PQfname(res, field)));
1920  }
1921  appendPQExpBufferStr(insertStmt, ") ");
1922  }
1923 
1924  if (tbinfo->needs_override)
1925  appendPQExpBufferStr(insertStmt, "OVERRIDING SYSTEM VALUE ");
1926 
1927  appendPQExpBufferStr(insertStmt, "VALUES (");
1928  }
1929  }
1930 
1931  archputs(insertStmt->data, fout);
1932 
1933  /* if it is zero-column table then we're done */
1934  if (nfields == 0)
1935  continue;
1936 
1937  for (field = 0; field < nfields; field++)
1938  {
1939  if (field > 0)
1940  archputs(", ", fout);
1941  if (PQgetisnull(res, tuple, field))
1942  {
1943  archputs("NULL", fout);
1944  continue;
1945  }
1946 
1947  /* XXX This code is partially duplicated in ruleutils.c */
1948  switch (PQftype(res, field))
1949  {
1950  case INT2OID:
1951  case INT4OID:
1952  case INT8OID:
1953  case OIDOID:
1954  case FLOAT4OID:
1955  case FLOAT8OID:
1956  case NUMERICOID:
1957  {
1958  /*
1959  * These types are printed without quotes unless
1960  * they contain values that aren't accepted by the
1961  * scanner unquoted (e.g., 'NaN'). Note that
1962  * strtod() and friends might accept NaN, so we
1963  * can't use that to test.
1964  *
1965  * In reality we only need to defend against
1966  * infinity and NaN, so we need not get too crazy
1967  * about pattern matching here.
1968  */
1969  const char *s = PQgetvalue(res, tuple, field);
1970 
1971  if (strspn(s, "0123456789 +-eE.") == strlen(s))
1972  archputs(s, fout);
1973  else
1974  archprintf(fout, "'%s'", s);
1975  }
1976  break;
1977 
1978  case BITOID:
1979  case VARBITOID:
1980  archprintf(fout, "B'%s'",
1981  PQgetvalue(res, tuple, field));
1982  break;
1983 
1984  case BOOLOID:
1985  if (strcmp(PQgetvalue(res, tuple, field), "t") == 0)
1986  archputs("true", fout);
1987  else
1988  archputs("false", fout);
1989  break;
1990 
1991  default:
1992  /* All other types are printed as string literals. */
1993  resetPQExpBuffer(q);
1994  appendStringLiteralAH(q,
1995  PQgetvalue(res, tuple, field),
1996  fout);
1997  archputs(q->data, fout);
1998  break;
1999  }
2000  }
2001  archputs(");\n", fout);
2002  }
2003 
2004  if (PQntuples(res) <= 0)
2005  {
2006  PQclear(res);
2007  break;
2008  }
2009  PQclear(res);
2010  }
2011 
2012  archputs("\n\n", fout);
2013 
2014  ExecuteSqlStatement(fout, "CLOSE _pg_dump_cursor");
2015 
2016  destroyPQExpBuffer(q);
2017  if (insertStmt != NULL)
2018  destroyPQExpBuffer(insertStmt);
2019 
2020  return 1;
2021 }
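/*
 * For illustration (assuming the same hypothetical table t with columns
 * a integer and b text, and --column-inserts in effect), a row (1, 'x')
 * would be emitted by the code above as:
 *
 *     INSERT INTO t (a, b) VALUES (1, 'x');
 *
 * Numeric values pass through unquoted when they contain only digits,
 * sign, exponent and decimal-point characters; booleans become true/false;
 * NULLs become the keyword NULL; everything else becomes a quoted string
 * literal.
 */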
2022 
2023 
2024 /*
2025  * dumpTableData -
2026  * dump the contents of a single table
2027  *
2028  * Actually, this just makes an ArchiveEntry for the table contents.
2029  */
2030 static void
2031 dumpTableData(Archive *fout, TableDataInfo *tdinfo)
2032 {
2033  DumpOptions *dopt = fout->dopt;
2034  TableInfo *tbinfo = tdinfo->tdtable;
2035  PQExpBuffer copyBuf = createPQExpBuffer();
2036  PQExpBuffer clistBuf = createPQExpBuffer();
2037  DataDumperPtr dumpFn;
2038  char *copyStmt;
2039 
2040  if (!dopt->dump_inserts)
2041  {
2042  /* Dump/restore using COPY */
2043  dumpFn = dumpTableData_copy;
2044  /* must use 2 steps here 'cause fmtId is nonreentrant */
2045  appendPQExpBuffer(copyBuf, "COPY %s ",
2046  fmtId(tbinfo->dobj.name));
2047  appendPQExpBuffer(copyBuf, "%s %sFROM stdin;\n",
2048  fmtCopyColumnList(tbinfo, clistBuf),
2049  (tdinfo->oids && tbinfo->hasoids) ? "WITH OIDS " : "");
2050  copyStmt = copyBuf->data;
2051  }
2052  else
2053  {
2054  /* Restore using INSERT */
2055  dumpFn = dumpTableData_insert;
2056  copyStmt = NULL;
2057  }
2058 
2059  /*
2060  * Note: although the TableDataInfo is a full DumpableObject, we treat its
2061  * dependency on its table as "special" and pass it to ArchiveEntry now.
2062  * See comments for BuildArchiveDependencies.
2063  */
2064  if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2065  ArchiveEntry(fout, tdinfo->dobj.catId, tdinfo->dobj.dumpId,
2066  tbinfo->dobj.name, tbinfo->dobj.namespace->dobj.name,
2067  NULL, tbinfo->rolname,
2068  false, "TABLE DATA", SECTION_DATA,
2069  "", "", copyStmt,
2070  &(tbinfo->dobj.dumpId), 1,
2071  dumpFn, tdinfo);
2072 
2073  destroyPQExpBuffer(copyBuf);
2074  destroyPQExpBuffer(clistBuf);
2075 }
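/*
 * For illustration (hypothetical table t with columns a and b, COPY mode,
 * no OIDs), the copy statement stored in the archive entry above, and later
 * replayed at restore time, would be:
 *
 *     COPY t (a, b) FROM stdin;
 */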
2076 
2077 /*
2078  * refreshMatViewData -
2079  * load or refresh the contents of a single materialized view
2080  *
2081  * Actually, this just makes an ArchiveEntry for the REFRESH MATERIALIZED VIEW
2082  * statement.
2083  */
2084 static void
2085 refreshMatViewData(Archive *fout, TableDataInfo *tdinfo)
2086 {
2087  TableInfo *tbinfo = tdinfo->tdtable;
2088  PQExpBuffer q;
2089 
2090  /* If the materialized view is not flagged as populated, skip this. */
2091  if (!tbinfo->relispopulated)
2092  return;
2093 
2094  q = createPQExpBuffer();
2095 
2096  appendPQExpBuffer(q, "REFRESH MATERIALIZED VIEW %s;\n",
2097  fmtId(tbinfo->dobj.name));
2098 
2099  if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2100  ArchiveEntry(fout,
2101  tdinfo->dobj.catId, /* catalog ID */
2102  tdinfo->dobj.dumpId, /* dump ID */
2103  tbinfo->dobj.name, /* Name */
2104  tbinfo->dobj.namespace->dobj.name, /* Namespace */
2105  NULL, /* Tablespace */
2106  tbinfo->rolname, /* Owner */
2107  false, /* with oids */
2108  "MATERIALIZED VIEW DATA", /* Desc */
2109  SECTION_POST_DATA, /* Section */
2110  q->data, /* Create */
2111  "", /* Del */
2112  NULL, /* Copy */
2113  tdinfo->dobj.dependencies, /* Deps */
2114  tdinfo->dobj.nDeps, /* # Deps */
2115  NULL, /* Dumper */
2116  NULL); /* Dumper Arg */
2117 
2118  destroyPQExpBuffer(q);
2119 }
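/*
 * For illustration (hypothetical materialized view mv), the post-data
 * archive entry created above carries simply:
 *
 *     REFRESH MATERIALIZED VIEW mv;
 */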
2120 
2121 /*
2122  * getTableData -
2123  * set up dumpable objects representing the contents of tables
2124  */
2125 static void
2126 getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, bool oids, char relkind)
2127 {
2128  int i;
2129 
2130  for (i = 0; i < numTables; i++)
2131  {
2132  if (tblinfo[i].dobj.dump & DUMP_COMPONENT_DATA &&
2133  (!relkind || tblinfo[i].relkind == relkind))
2134  makeTableDataInfo(dopt, &(tblinfo[i]), oids);
2135  }
2136 }
2137 
2138 /*
2139  * Make a dumpable object for the data of this specific table
2140  *
2141  * Note: we make a TableDataInfo if and only if we are going to dump the
2142  * table data; the "dump" flag in such objects isn't used.
2143  */
2144 static void
2145 makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo, bool oids)
2146 {
2147  TableDataInfo *tdinfo;
2148 
2149  /*
2150  * Nothing to do if we already decided to dump the table. This will
2151  * happen for "config" tables.
2152  */
2153  if (tbinfo->dataObj != NULL)
2154  return;
2155 
2156  /* Skip VIEWs (no data to dump) */
2157  if (tbinfo->relkind == RELKIND_VIEW)
2158  return;
2159  /* Skip FOREIGN TABLEs (no data to dump) */
2160  if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2161  return;
2162  /* Skip partitioned tables (data in partitions) */
2163  if (tbinfo->relkind == RELKIND_PARTITIONED_TABLE)
2164  return;
2165 
2166  /* Don't dump data in unlogged tables, if so requested */
2167  if (tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED &&
2168  dopt->no_unlogged_table_data)
2169  return;
2170 
2171  /* Check that the data is not explicitly excluded */
2172  if (simple_oid_list_member(&tabledata_exclude_oids,
2173  tbinfo->dobj.catId.oid))
2174  return;
2175 
2176  /* OK, let's dump it */
2177  tdinfo = (TableDataInfo *) pg_malloc(sizeof(TableDataInfo));
2178 
2179  if (tbinfo->relkind == RELKIND_MATVIEW)
2180  tdinfo->dobj.objType = DO_REFRESH_MATVIEW;
2181  else if (tbinfo->relkind == RELKIND_SEQUENCE)
2182  tdinfo->dobj.objType = DO_SEQUENCE_SET;
2183  else
2184  tdinfo->dobj.objType = DO_TABLE_DATA;
2185 
2186  /*
2187  * Note: use tableoid 0 so that this object won't be mistaken for
2188  * something that pg_depend entries apply to.
2189  */
2190  tdinfo->dobj.catId.tableoid = 0;
2191  tdinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
2192  AssignDumpId(&tdinfo->dobj);
2193  tdinfo->dobj.name = tbinfo->dobj.name;
2194  tdinfo->dobj.namespace = tbinfo->dobj.namespace;
2195  tdinfo->tdtable = tbinfo;
2196  tdinfo->oids = oids;
2197  tdinfo->filtercond = NULL; /* might get set later */
2198  addObjectDependency(&tdinfo->dobj, tbinfo->dobj.dumpId);
2199 
2200  tbinfo->dataObj = tdinfo;
2201 }
2202 
2203 /*
2204  * The refresh for a materialized view must be dependent on the refresh for
2205  * any materialized view that this one is dependent on.
2206  *
2207  * This must be called after all the objects are created, but before they are
2208  * sorted.
2209  */
2210 static void
2211 buildMatViewRefreshDependencies(Archive *fout)
2212 {
2213  PQExpBuffer query;
2214  PGresult *res;
2215  int ntups,
2216  i;
2217  int i_classid,
2218  i_objid,
2219  i_refobjid;
2220 
2221  /* No Mat Views before 9.3. */
2222  if (fout->remoteVersion < 90300)
2223  return;
2224 
2225  /* Make sure we are in proper schema */
2226  selectSourceSchema(fout, "pg_catalog");
2227 
2228  query = createPQExpBuffer();
2229 
2230  appendPQExpBufferStr(query, "WITH RECURSIVE w AS "
2231  "( "
2232  "SELECT d1.objid, d2.refobjid, c2.relkind AS refrelkind "
2233  "FROM pg_depend d1 "
2234  "JOIN pg_class c1 ON c1.oid = d1.objid "
2235  "AND c1.relkind = " CppAsString2(RELKIND_MATVIEW)
2236  " JOIN pg_rewrite r1 ON r1.ev_class = d1.objid "
2237  "JOIN pg_depend d2 ON d2.classid = 'pg_rewrite'::regclass "
2238  "AND d2.objid = r1.oid "
2239  "AND d2.refobjid <> d1.objid "
2240  "JOIN pg_class c2 ON c2.oid = d2.refobjid "
2241  "AND c2.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
2242  CppAsString2(RELKIND_VIEW) ") "
2243  "WHERE d1.classid = 'pg_class'::regclass "
2244  "UNION "
2245  "SELECT w.objid, d3.refobjid, c3.relkind "
2246  "FROM w "
2247  "JOIN pg_rewrite r3 ON r3.ev_class = w.refobjid "
2248  "JOIN pg_depend d3 ON d3.classid = 'pg_rewrite'::regclass "
2249  "AND d3.objid = r3.oid "
2250  "AND d3.refobjid <> w.refobjid "
2251  "JOIN pg_class c3 ON c3.oid = d3.refobjid "
2252  "AND c3.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
2253  CppAsString2(RELKIND_VIEW) ") "
2254  ") "
2255  "SELECT 'pg_class'::regclass::oid AS classid, objid, refobjid "
2256  "FROM w "
2257  "WHERE refrelkind = " CppAsString2(RELKIND_MATVIEW));
2258 
2259  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
2260 
2261  ntups = PQntuples(res);
2262 
2263  i_classid = PQfnumber(res, "classid");
2264  i_objid = PQfnumber(res, "objid");
2265  i_refobjid = PQfnumber(res, "refobjid");
2266 
2267  for (i = 0; i < ntups; i++)
2268  {
2269  CatalogId objId;
2270  CatalogId refobjId;
2271  DumpableObject *dobj;
2272  DumpableObject *refdobj;
2273  TableInfo *tbinfo;
2274  TableInfo *reftbinfo;
2275 
2276  objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
2277  objId.oid = atooid(PQgetvalue(res, i, i_objid));
2278  refobjId.tableoid = objId.tableoid;
2279  refobjId.oid = atooid(PQgetvalue(res, i, i_refobjid));
2280 
2281  dobj = findObjectByCatalogId(objId);
2282  if (dobj == NULL)
2283  continue;
2284 
2285  Assert(dobj->objType == DO_TABLE);
2286  tbinfo = (TableInfo *) dobj;
2287  Assert(tbinfo->relkind == RELKIND_MATVIEW);
2288  dobj = (DumpableObject *) tbinfo->dataObj;
2289  if (dobj == NULL)
2290  continue;
2291  Assert(dobj->objType == DO_REFRESH_MATVIEW);
2292 
2293  refdobj = findObjectByCatalogId(refobjId);
2294  if (refdobj == NULL)
2295  continue;
2296 
2297  Assert(refdobj->objType == DO_TABLE);
2298  reftbinfo = (TableInfo *) refdobj;
2299  Assert(reftbinfo->relkind == RELKIND_MATVIEW);
2300  refdobj = (DumpableObject *) reftbinfo->dataObj;
2301  if (refdobj == NULL)
2302  continue;
2303  Assert(refdobj->objType == DO_REFRESH_MATVIEW);
2304 
2305  addObjectDependency(dobj, refdobj->dumpId);
2306 
2307  if (!reftbinfo->relispopulated)
2308  tbinfo->relispopulated = false;
2309  }
2310 
2311  PQclear(res);
2312 
2313  destroyPQExpBuffer(query);
2314 }
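/*
 * For illustration (hypothetical objects): given
 *
 *     CREATE MATERIALIZED VIEW mv1 AS SELECT 1 AS x;
 *     CREATE MATERIALIZED VIEW mv2 AS SELECT x FROM mv1;
 *
 * the recursive query above returns a row linking mv2 to mv1, so mv2's
 * REFRESH entry is made to depend on mv1's and will be restored after it.
 * If mv1 is not populated, mv2 is marked unpopulated as well and its
 * refresh is skipped.
 */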
2315 
2316 /*
2317  * getTableDataFKConstraints -
2318  * add dump-order dependencies reflecting foreign key constraints
2319  *
2320  * This code is executed only in a data-only dump --- in schema+data dumps
2321  * we handle foreign key issues by not creating the FK constraints until
2322  * after the data is loaded. In a data-only dump, however, we want to
2323  * order the table data objects in such a way that a table's referenced
2324  * tables are restored first. (In the presence of circular references or
2325  * self-references this may be impossible; we'll detect and complain about
2326  * that during the dependency sorting step.)
2327  */
2328 static void
2329 getTableDataFKConstraints(void)
2330 {
2331  DumpableObject **dobjs;
2332  int numObjs;
2333  int i;
2334 
2335  /* Search through all the dumpable objects for FK constraints */
2336  getDumpableObjects(&dobjs, &numObjs);
2337  for (i = 0; i < numObjs; i++)
2338  {
2339  if (dobjs[i]->objType == DO_FK_CONSTRAINT)
2340  {
2341  ConstraintInfo *cinfo = (ConstraintInfo *) dobjs[i];
2342  TableInfo *ftable;
2343 
2344  /* Not interesting unless both tables are to be dumped */
2345  if (cinfo->contable == NULL ||
2346  cinfo->contable->dataObj == NULL)
2347  continue;
2348  ftable = findTableByOid(cinfo->confrelid);
2349  if (ftable == NULL ||
2350  ftable->dataObj == NULL)
2351  continue;
2352 
2353  /*
2354  * Okay, make referencing table's TABLE_DATA object depend on the
2355  * referenced table's TABLE_DATA object.
2356  */
2357  addObjectDependency(&cinfo->contable->dataObj->dobj,
2358  ftable->dataObj->dobj.dumpId);
2359  }
2360  }
2361  free(dobjs);
2362 }
2363 
2364 
2365 /*
2366  * guessConstraintInheritance:
2367  * In pre-8.4 databases, we can't tell for certain which constraints
2368  * are inherited. We assume a CHECK constraint is inherited if its name
2369  * matches the name of any constraint in the parent. Originally this code
2370  * tried to compare the expression texts, but that can fail for various
2371  * reasons --- for example, if the parent and child tables are in different
2372  * schemas, reverse-listing of function calls may produce different text
2373  * (schema-qualified or not) depending on search path.
2374  *
2375  * In 8.4 and up we can rely on the conislocal field to decide which
2376  * constraints must be dumped; much safer.
2377  *
2378  * This function assumes all conislocal flags were initialized to TRUE.
2379  * It clears the flag on anything that seems to be inherited.
2380  */
2381 static void
2382 guessConstraintInheritance(TableInfo *tblinfo, int numTables)
2383 {
2384  int i,
2385  j,
2386  k;
2387 
2388  for (i = 0; i < numTables; i++)
2389  {
2390  TableInfo *tbinfo = &(tblinfo[i]);
2391  int numParents;
2392  TableInfo **parents;
2393  TableInfo *parent;
2394 
2395  /* Sequences and views never have parents */
2396  if (tbinfo->relkind == RELKIND_SEQUENCE ||
2397  tbinfo->relkind == RELKIND_VIEW)
2398  continue;
2399 
2400  /* Don't bother computing anything for non-target tables, either */
2401  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
2402  continue;
2403 
2404  numParents = tbinfo->numParents;
2405  parents = tbinfo->parents;
2406 
2407  if (numParents == 0)
2408  continue; /* nothing to see here, move along */
2409 
2410  /* scan for inherited CHECK constraints */
2411  for (j = 0; j < tbinfo->ncheck; j++)
2412  {
2413  ConstraintInfo *constr;
2414 
2415  constr = &(tbinfo->checkexprs[j]);
2416 
2417  for (k = 0; k < numParents; k++)
2418  {
2419  int l;
2420 
2421  parent = parents[k];
2422  for (l = 0; l < parent->ncheck; l++)
2423  {
2424  ConstraintInfo *pconstr = &(parent->checkexprs[l]);
2425 
2426  if (strcmp(pconstr->dobj.name, constr->dobj.name) == 0)
2427  {
2428  constr->conislocal = false;
2429  break;
2430  }
2431  }
2432  if (!constr->conislocal)
2433  break;
2434  }
2435  }
2436  }
2437 }
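/*
 * For illustration (hypothetical tables): with
 *
 *     CREATE TABLE parent (qty int, CONSTRAINT qty_pos CHECK (qty > 0));
 *     CREATE TABLE child () INHERITS (parent);
 *
 * a pre-8.4 server offers no conislocal flag, so the child's copy of
 * qty_pos (same name as a parent constraint) is assumed to be inherited
 * and its conislocal flag is cleared above, keeping it out of the dump.
 */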
2438 
2439 
2440 /*
2441  * dumpDatabase:
2442  * dump the database definition
2443  */
2444 static void
2445 dumpDatabase(Archive *fout)
2446 {
2447  DumpOptions *dopt = fout->dopt;
2448  PQExpBuffer dbQry = createPQExpBuffer();
2449  PQExpBuffer delQry = createPQExpBuffer();
2450  PQExpBuffer creaQry = createPQExpBuffer();
2451  PGconn *conn = GetConnection(fout);
2452  PGresult *res;
2453  int i_tableoid,
2454  i_oid,
2455  i_dba,
2456  i_encoding,
2457  i_collate,
2458  i_ctype,
2459  i_frozenxid,
2460  i_minmxid,
2461  i_tablespace;
2462  CatalogId dbCatId;
2463  DumpId dbDumpId;
2464  const char *datname,
2465  *dba,
2466  *encoding,
2467  *collate,
2468  *ctype,
2469  *tablespace;
2470  uint32 frozenxid,
2471  minmxid;
2472 
2473  datname = PQdb(conn);
2474 
2475  if (g_verbose)
2476  write_msg(NULL, "saving database definition\n");
2477 
2478  /* Make sure we are in proper schema */
2479  selectSourceSchema(fout, "pg_catalog");
2480 
2481  /* Get the database owner and parameters from pg_database */
2482  if (fout->remoteVersion >= 90300)
2483  {
2484  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
2485  "(%s datdba) AS dba, "
2486  "pg_encoding_to_char(encoding) AS encoding, "
2487  "datcollate, datctype, datfrozenxid, datminmxid, "
2488  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2489  "shobj_description(oid, 'pg_database') AS description "
2490 
2491  "FROM pg_database "
2492  "WHERE datname = ",
2493  username_subquery);
2494  appendStringLiteralAH(dbQry, datname, fout);
2495  }
2496  else if (fout->remoteVersion >= 80400)
2497  {
2498  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
2499  "(%s datdba) AS dba, "
2500  "pg_encoding_to_char(encoding) AS encoding, "
2501  "datcollate, datctype, datfrozenxid, 0 AS datminmxid, "
2502  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2503  "shobj_description(oid, 'pg_database') AS description "
2504 
2505  "FROM pg_database "
2506  "WHERE datname = ",
2507  username_subquery);
2508  appendStringLiteralAH(dbQry, datname, fout);
2509  }
2510  else if (fout->remoteVersion >= 80200)
2511  {
2512  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
2513  "(%s datdba) AS dba, "
2514  "pg_encoding_to_char(encoding) AS encoding, "
2515  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2516  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2517  "shobj_description(oid, 'pg_database') AS description "
2518 
2519  "FROM pg_database "
2520  "WHERE datname = ",
2521  username_subquery);
2522  appendStringLiteralAH(dbQry, datname, fout);
2523  }
2524  else
2525  {
2526  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
2527  "(%s datdba) AS dba, "
2528  "pg_encoding_to_char(encoding) AS encoding, "
2529  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2530  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace "
2531  "FROM pg_database "
2532  "WHERE datname = ",
2533  username_subquery);
2534  appendStringLiteralAH(dbQry, datname, fout);
2535  }
2536 
2537  res = ExecuteSqlQueryForSingleRow(fout, dbQry->data);
2538 
2539  i_tableoid = PQfnumber(res, "tableoid");
2540  i_oid = PQfnumber(res, "oid");
2541  i_dba = PQfnumber(res, "dba");
2542  i_encoding = PQfnumber(res, "encoding");
2543  i_collate = PQfnumber(res, "datcollate");
2544  i_ctype = PQfnumber(res, "datctype");
2545  i_frozenxid = PQfnumber(res, "datfrozenxid");
2546  i_minmxid = PQfnumber(res, "datminmxid");
2547  i_tablespace = PQfnumber(res, "tablespace");
2548 
2549  dbCatId.tableoid = atooid(PQgetvalue(res, 0, i_tableoid));
2550  dbCatId.oid = atooid(PQgetvalue(res, 0, i_oid));
2551  dba = PQgetvalue(res, 0, i_dba);
2552  encoding = PQgetvalue(res, 0, i_encoding);
2553  collate = PQgetvalue(res, 0, i_collate);
2554  ctype = PQgetvalue(res, 0, i_ctype);
2555  frozenxid = atooid(PQgetvalue(res, 0, i_frozenxid));
2556  minmxid = atooid(PQgetvalue(res, 0, i_minmxid));
2557  tablespace = PQgetvalue(res, 0, i_tablespace);
2558 
2559  appendPQExpBuffer(creaQry, "CREATE DATABASE %s WITH TEMPLATE = template0",
2560  fmtId(datname));
2561  if (strlen(encoding) > 0)
2562  {
2563  appendPQExpBufferStr(creaQry, " ENCODING = ");
2564  appendStringLiteralAH(creaQry, encoding, fout);
2565  }
2566  if (strlen(collate) > 0)
2567  {
2568  appendPQExpBufferStr(creaQry, " LC_COLLATE = ");
2569  appendStringLiteralAH(creaQry, collate, fout);
2570  }
2571  if (strlen(ctype) > 0)
2572  {
2573  appendPQExpBufferStr(creaQry, " LC_CTYPE = ");
2574  appendStringLiteralAH(creaQry, ctype, fout);
2575  }
2576  if (strlen(tablespace) > 0 && strcmp(tablespace, "pg_default") != 0 &&
2577  !dopt->outputNoTablespaces)
2578  appendPQExpBuffer(creaQry, " TABLESPACE = %s",
2579  fmtId(tablespace));
2580  appendPQExpBufferStr(creaQry, ";\n");
2581 
2582  if (dopt->binary_upgrade)
2583  {
2584  appendPQExpBufferStr(creaQry, "\n-- For binary upgrade, set datfrozenxid and datminmxid.\n");
2585  appendPQExpBuffer(creaQry, "UPDATE pg_catalog.pg_database\n"
2586  "SET datfrozenxid = '%u', datminmxid = '%u'\n"
2587  "WHERE datname = ",
2588  frozenxid, minmxid);
2589  appendStringLiteralAH(creaQry, datname, fout);
2590  appendPQExpBufferStr(creaQry, ";\n");
2591 
2592  }
2593 
2594  appendPQExpBuffer(delQry, "DROP DATABASE %s;\n",
2595  fmtId(datname));
2596 
2597  dbDumpId = createDumpId();
2598 
2599  ArchiveEntry(fout,
2600  dbCatId, /* catalog ID */
2601  dbDumpId, /* dump ID */
2602  datname, /* Name */
2603  NULL, /* Namespace */
2604  NULL, /* Tablespace */
2605  dba, /* Owner */
2606  false, /* with oids */
2607  "DATABASE", /* Desc */
2608  SECTION_PRE_DATA, /* Section */
2609  creaQry->data, /* Create */
2610  delQry->data, /* Del */
2611  NULL, /* Copy */
2612  NULL, /* Deps */
2613  0, /* # Deps */
2614  NULL, /* Dumper */
2615  NULL); /* Dumper Arg */
2616 
2617  /*
2618  * pg_largeobject and pg_largeobject_metadata come from the old system
2619  * intact, so set their relfrozenxids and relminmxids.
2620  */
2621  if (dopt->binary_upgrade)
2622  {
2623  PGresult *lo_res;
2624  PQExpBuffer loFrozenQry = createPQExpBuffer();
2625  PQExpBuffer loOutQry = createPQExpBuffer();
2626  int i_relfrozenxid,
2627  i_relminmxid;
2628 
2629  /*
2630  * pg_largeobject
2631  */
2632  if (fout->remoteVersion >= 90300)
2633  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid\n"
2634  "FROM pg_catalog.pg_class\n"
2635  "WHERE oid = %u;\n",
2636  LargeObjectRelationId);
2637  else
2638  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid\n"
2639  "FROM pg_catalog.pg_class\n"
2640  "WHERE oid = %u;\n",
2641  LargeObjectRelationId);
2642 
2643  lo_res = ExecuteSqlQueryForSingleRow(fout, loFrozenQry->data);
2644 
2645  i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
2646  i_relminmxid = PQfnumber(lo_res, "relminmxid");
2647 
2648  appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, set pg_largeobject relfrozenxid and relminmxid\n");
2649  appendPQExpBuffer(loOutQry, "UPDATE pg_catalog.pg_class\n"
2650  "SET relfrozenxid = '%u', relminmxid = '%u'\n"
2651  "WHERE oid = %u;\n",
2652  atoi(PQgetvalue(lo_res, 0, i_relfrozenxid)),
2653  atoi(PQgetvalue(lo_res, 0, i_relminmxid)),
2654  LargeObjectRelationId);
2655  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2656  "pg_largeobject", NULL, NULL, "",
2657  false, "pg_largeobject", SECTION_PRE_DATA,
2658  loOutQry->data, "", NULL,
2659  NULL, 0,
2660  NULL, NULL);
2661 
2662  PQclear(lo_res);
2663 
2664  /*
2665  * pg_largeobject_metadata
2666  */
2667  if (fout->remoteVersion >= 90000)
2668  {
2669  resetPQExpBuffer(loFrozenQry);
2670  resetPQExpBuffer(loOutQry);
2671 
2672  if (fout->remoteVersion >= 90300)
2673  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid\n"
2674  "FROM pg_catalog.pg_class\n"
2675  "WHERE oid = %u;\n",
2676  LargeObjectMetadataRelationId);
2677  else
2678  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid\n"
2679  "FROM pg_catalog.pg_class\n"
2680  "WHERE oid = %u;\n",
2681  LargeObjectMetadataRelationId);
2682 
2683  lo_res = ExecuteSqlQueryForSingleRow(fout, loFrozenQry->data);
2684 
2685  i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
2686  i_relminmxid = PQfnumber(lo_res, "relminmxid");
2687 
2688  appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, set pg_largeobject_metadata relfrozenxid and relminmxid\n");
2689  appendPQExpBuffer(loOutQry, "UPDATE pg_catalog.pg_class\n"
2690  "SET relfrozenxid = '%u', relminmxid = '%u'\n"
2691  "WHERE oid = %u;\n",
2692  atoi(PQgetvalue(lo_res, 0, i_relfrozenxid)),
2693  atoi(PQgetvalue(lo_res, 0, i_relminmxid)),
2694  LargeObjectMetadataRelationId);
2695  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2696  "pg_largeobject_metadata", NULL, NULL, "",
2697  false, "pg_largeobject_metadata", SECTION_PRE_DATA,
2698  loOutQry->data, "", NULL,
2699  NULL, 0,
2700  NULL, NULL);
2701 
2702  PQclear(lo_res);
2703  }
2704 
2705  destroyPQExpBuffer(loFrozenQry);
2706  destroyPQExpBuffer(loOutQry);
2707  }
2708 
2709  /* Dump DB comment if any */
2710  if (fout->remoteVersion >= 80200)
2711  {
2712  /*
2713  * 8.2 keeps comments on shared objects in a shared table, so we
2714  * cannot use the dumpComment used for other database objects.
2715  */
2716  char *comment = PQgetvalue(res, 0, PQfnumber(res, "description"));
2717 
2718  if (comment && strlen(comment))
2719  {
2720  resetPQExpBuffer(dbQry);
2721 
2722  /*
2723  * Generates warning when loaded into a differently-named
2724  * database.
2725  */
2726  appendPQExpBuffer(dbQry, "COMMENT ON DATABASE %s IS ", fmtId(datname));
2727  appendStringLiteralAH(dbQry, comment, fout);
2728  appendPQExpBufferStr(dbQry, ";\n");
2729 
2730  ArchiveEntry(fout, dbCatId, createDumpId(), datname, NULL, NULL,
2731  dba, false, "COMMENT", SECTION_NONE,
2732  dbQry->data, "", NULL,
2733  &dbDumpId, 1, NULL, NULL);
2734  }
2735  }
2736  else
2737  {
2738  resetPQExpBuffer(dbQry);
2739  appendPQExpBuffer(dbQry, "DATABASE %s", fmtId(datname));
2740  dumpComment(fout, dbQry->data, NULL, "",
2741  dbCatId, 0, dbDumpId);
2742  }
2743 
2744  /* Dump shared security label. */
2745  if (!dopt->no_security_labels && fout->remoteVersion >= 90200)
2746  {
2747  PGresult *shres;
2748  PQExpBuffer seclabelQry;
2749 
2750  seclabelQry = createPQExpBuffer();
2751 
2752  buildShSecLabelQuery(conn, "pg_database", dbCatId.oid, seclabelQry);
2753  shres = ExecuteSqlQuery(fout, seclabelQry->data, PGRES_TUPLES_OK);
2754  resetPQExpBuffer(seclabelQry);
2755  emitShSecLabels(conn, shres, seclabelQry, "DATABASE", datname);
2756  if (strlen(seclabelQry->data))
2757  ArchiveEntry(fout, dbCatId, createDumpId(), datname, NULL, NULL,
2758  dba, false, "SECURITY LABEL", SECTION_NONE,
2759  seclabelQry->data, "", NULL,
2760  &dbDumpId, 1, NULL, NULL);
2761  destroyPQExpBuffer(seclabelQry);
2762  PQclear(shres);
2763  }
2764 
2765  PQclear(res);
2766 
2767  destroyPQExpBuffer(dbQry);
2768  destroyPQExpBuffer(delQry);
2769  destroyPQExpBuffer(creaQry);
2770 }
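/*
 * For illustration (hypothetical database mydb with UTF8 encoding, C
 * locale, and the default tablespace), the CREATE and DROP commands
 * assembled above would read:
 *
 *     CREATE DATABASE mydb WITH TEMPLATE = template0 ENCODING = 'UTF8' LC_COLLATE = 'C' LC_CTYPE = 'C';
 *
 *     DROP DATABASE mydb;
 */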
2771 
2772 /*
2773  * dumpEncoding: put the correct encoding into the archive
2774  */
2775 static void
2776 dumpEncoding(Archive *AH)
2777 {
2778  const char *encname = pg_encoding_to_char(AH->encoding);
2779  PQExpBuffer qry = createPQExpBuffer();
2780 
2781  if (g_verbose)
2782  write_msg(NULL, "saving encoding = %s\n", encname);
2783 
2784  appendPQExpBufferStr(qry, "SET client_encoding = ");
2785  appendStringLiteralAH(qry, encname, AH);
2786  appendPQExpBufferStr(qry, ";\n");
2787 
2788  ArchiveEntry(AH, nilCatalogId, createDumpId(),
2789  "ENCODING", NULL, NULL, "",
2790  false, "ENCODING", SECTION_PRE_DATA,
2791  qry->data, "", NULL,
2792  NULL, 0,
2793  NULL, NULL);
2794 
2795  destroyPQExpBuffer(qry);
2796 }
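/*
 * For illustration: for a dump taken with client_encoding set to UTF8,
 * the archive entry created above contains simply
 *
 *     SET client_encoding = 'UTF8';
 */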
2797 
2798 
2799 /*
2800  * dumpStdStrings: put the correct escape string behavior into the archive
2801  */
2802 static void
2803 dumpStdStrings(Archive *AH)
2804 {
2805  const char *stdstrings = AH->std_strings ? "on" : "off";
2806  PQExpBuffer qry = createPQExpBuffer();
2807 
2808  if (g_verbose)
2809  write_msg(NULL, "saving standard_conforming_strings = %s\n",
2810  stdstrings);
2811 
2812  appendPQExpBuffer(qry, "SET standard_conforming_strings = '%s';\n",
2813  stdstrings);
2814 
2815  ArchiveEntry(AH, nilCatalogId, createDumpId(),
2816  "STDSTRINGS", NULL, NULL, "",
2817  false, "STDSTRINGS", SECTION_PRE_DATA,
2818  qry->data, "", NULL,
2819  NULL, 0,
2820  NULL, NULL);
2821 
2822  destroyPQExpBuffer(qry);
2823 }
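/*
 * For illustration: when the source server has standard_conforming_strings
 * enabled, the archive entry created above contains
 *
 *     SET standard_conforming_strings = 'on';
 */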
2824 
2825 
2826 /*
2827  * getBlobs:
2828  * Collect schema-level data about large objects
2829  */
2830 static void
2831 getBlobs(Archive *fout)
2832 {
2833  DumpOptions *dopt = fout->dopt;
2834  PQExpBuffer blobQry = createPQExpBuffer();
2835  BlobInfo *binfo;
2836  DumpableObject *bdata;
2837  PGresult *res;
2838  int ntups;
2839  int i;
2840  int i_oid;
2841  int i_lomowner;
2842  int i_lomacl;
2843  int i_rlomacl;
2844  int i_initlomacl;
2845  int i_initrlomacl;
2846 
2847  /* Verbose message */
2848  if (g_verbose)
2849  write_msg(NULL, "reading large objects\n");
2850 
2851  /* Make sure we are in proper schema */
2852  selectSourceSchema(fout, "pg_catalog");
2853 
2854  /* Fetch BLOB OIDs, and owner/ACL data if >= 9.0 */
2855  if (fout->remoteVersion >= 90600)
2856  {
2857  PQExpBuffer acl_subquery = createPQExpBuffer();
2858  PQExpBuffer racl_subquery = createPQExpBuffer();
2859  PQExpBuffer init_acl_subquery = createPQExpBuffer();
2860  PQExpBuffer init_racl_subquery = createPQExpBuffer();
2861 
2862  buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
2863  init_racl_subquery, "l.lomacl", "l.lomowner", "'L'",
2864  dopt->binary_upgrade);
2865 
2866  appendPQExpBuffer(blobQry,
2867  "SELECT l.oid, (%s l.lomowner) AS rolname, "
2868  "%s AS lomacl, "
2869  "%s AS rlomacl, "
2870  "%s AS initlomacl, "
2871  "%s AS initrlomacl "
2872  "FROM pg_largeobject_metadata l "
2873  "LEFT JOIN pg_init_privs pip ON "
2874  "(l.oid = pip.objoid "
2875  "AND pip.classoid = 'pg_largeobject'::regclass "
2876  "AND pip.objsubid = 0) ",
2877  username_subquery,
2878  acl_subquery->data,
2879  racl_subquery->data,
2880  init_acl_subquery->data,
2881  init_racl_subquery->data);
2882 
2883  destroyPQExpBuffer(acl_subquery);
2884  destroyPQExpBuffer(racl_subquery);
2885  destroyPQExpBuffer(init_acl_subquery);
2886  destroyPQExpBuffer(init_racl_subquery);
2887  }
2888  else if (fout->remoteVersion >= 90000)
2889  appendPQExpBuffer(blobQry,
2890  "SELECT oid, (%s lomowner) AS rolname, lomacl, "
2891  "NULL AS rlomacl, NULL AS initlomacl, "
2892  "NULL AS initrlomacl "
2893  " FROM pg_largeobject_metadata",
2894  username_subquery);
2895  else
2896  appendPQExpBufferStr(blobQry,
2897  "SELECT DISTINCT loid AS oid, "
2898  "NULL::name AS rolname, NULL::oid AS lomacl, "
2899  "NULL::oid AS rlomacl, NULL::oid AS initlomacl, "
2900  "NULL::oid AS initrlomacl "
2901  " FROM pg_largeobject");
2902 
2903  res = ExecuteSqlQuery(fout, blobQry->data, PGRES_TUPLES_OK);
2904 
2905  i_oid = PQfnumber(res, "oid");
2906  i_lomowner = PQfnumber(res, "rolname");
2907  i_lomacl = PQfnumber(res, "lomacl");
2908  i_rlomacl = PQfnumber(res, "rlomacl");
2909  i_initlomacl = PQfnumber(res, "initlomacl");
2910  i_initrlomacl = PQfnumber(res, "initrlomacl");
2911 
2912  ntups = PQntuples(res);
2913 
2914  /*
2915  * Each large object has its own BLOB archive entry.
2916  */
2917  binfo = (BlobInfo *) pg_malloc(ntups * sizeof(BlobInfo));
2918 
2919  for (i = 0; i < ntups; i++)
2920  {
2921  binfo[i].dobj.objType = DO_BLOB;
2922  binfo[i].dobj.catId.tableoid = LargeObjectRelationId;
2923  binfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
2924  AssignDumpId(&binfo[i].dobj);
2925 
2926  binfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oid));
2927  binfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_lomowner));
2928  binfo[i].blobacl = pg_strdup(PQgetvalue(res, i, i_lomacl));
2929  binfo[i].rblobacl = pg_strdup(PQgetvalue(res, i, i_rlomacl));
2930  binfo[i].initblobacl = pg_strdup(PQgetvalue(res, i, i_initlomacl));
2931  binfo[i].initrblobacl = pg_strdup(PQgetvalue(res, i, i_initrlomacl));
2932 
2933  if (PQgetisnull(res, i, i_lomacl) &&
2934  PQgetisnull(res, i, i_rlomacl) &&
2935  PQgetisnull(res, i, i_initlomacl) &&
2936  PQgetisnull(res, i, i_initrlomacl))
2937  binfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
2938 
2939  /*
2940  * In binary-upgrade mode for blobs, we do *not* dump out the data or
2941  * the ACLs, should any exist. The data and ACL (if any) will be
2942  * copied by pg_upgrade, which simply copies the pg_largeobject and
2943  * pg_largeobject_metadata tables.
2944  *
2945  * We *do* dump out the definition of the blob because we need that to
2946  * make the restoration of the comments, and anything else, work since
2947  * pg_upgrade copies the files behind pg_largeobject and
2948  * pg_largeobject_metadata after the dump is restored.
2949  */
2950  if (dopt->binary_upgrade)
2951  binfo[i].dobj.dump &= ~(DUMP_COMPONENT_DATA | DUMP_COMPONENT_ACL);
2952  }
2953 
2954  /*
2955  * If we have any large objects, a "BLOBS" archive entry is needed. This
2956  * is just a placeholder for sorting; it carries no data now.
2957  */
2958  if (ntups > 0)
2959  {
2960  bdata = (DumpableObject *) pg_malloc(sizeof(DumpableObject));
2961  bdata->objType = DO_BLOB_DATA;
2962  bdata->catId = nilCatalogId;
2963  AssignDumpId(bdata);
2964  bdata->name = pg_strdup("BLOBS");
2965  }
2966 
2967  PQclear(res);
2968  destroyPQExpBuffer(blobQry);
2969 }
2970 
2971 /*
2972  * dumpBlob
2973  *
2974  * dump the definition (metadata) of the given large object
2975  */
2976 static void
2977 dumpBlob(Archive *fout, BlobInfo *binfo)
2978 {
2979  PQExpBuffer cquery = createPQExpBuffer();
2980  PQExpBuffer dquery = createPQExpBuffer();
2981 
2982  appendPQExpBuffer(cquery,
2983  "SELECT pg_catalog.lo_create('%s');\n",
2984  binfo->dobj.name);
2985 
2986  appendPQExpBuffer(dquery,
2987  "SELECT pg_catalog.lo_unlink('%s');\n",
2988  binfo->dobj.name);
2989 
2990  if (binfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
2991  ArchiveEntry(fout, binfo->dobj.catId, binfo->dobj.dumpId,
2992  binfo->dobj.name,
2993  NULL, NULL,
2994  binfo->rolname, false,
2995  "BLOB", SECTION_PRE_DATA,
2996  cquery->data, dquery->data, NULL,
2997  NULL, 0,
2998  NULL, NULL);
2999 
3000  /* set up tag for comment and/or ACL */
3001  resetPQExpBuffer(cquery);
3002  appendPQExpBuffer(cquery, "LARGE OBJECT %s", binfo->dobj.name);
3003 
3004  /* Dump comment if any */
3005  if (binfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3006  dumpComment(fout, cquery->data,
3007  NULL, binfo->rolname,
3008  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3009 
3010  /* Dump security label if any */
3011  if (binfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3012  dumpSecLabel(fout, cquery->data,
3013  NULL, binfo->rolname,
3014  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3015 
3016  /* Dump ACL if any */
3017  if (binfo->blobacl && (binfo->dobj.dump & DUMP_COMPONENT_ACL))
3018  dumpACL(fout, binfo->dobj.catId, binfo->dobj.dumpId, "LARGE OBJECT",
3019  binfo->dobj.name, NULL, cquery->data,
3020  NULL, binfo->rolname, binfo->blobacl, binfo->rblobacl,
3021  binfo->initblobacl, binfo->initrblobacl);
3022 
3023  destroyPQExpBuffer(cquery);
3024  destroyPQExpBuffer(dquery);
3025 }
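/*
 * For illustration (hypothetical large object with OID 16423), the create
 * and drop commands built above would be
 *
 *     SELECT pg_catalog.lo_create('16423');
 *     SELECT pg_catalog.lo_unlink('16423');
 *
 * with "LARGE OBJECT 16423" used as the tag for any comment, security
 * label, or ACL entries that follow.
 */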
3026 
3027 /*
3028  * dumpBlobs:
3029  * dump the data contents of all large objects
3030  */
3031 static int
3032 dumpBlobs(Archive *fout, void *arg)
3033 {
3034  const char *blobQry;
3035  const char *blobFetchQry;
3036  PGconn *conn = GetConnection(fout);
3037  PGresult *res;
3038  char buf[LOBBUFSIZE];
3039  int ntups;
3040  int i;
3041  int cnt;
3042 
3043  if (g_verbose)
3044  write_msg(NULL, "saving large objects\n");
3045 
3046  /* Make sure we are in proper schema */
3047  selectSourceSchema(fout, "pg_catalog");
3048 
3049  /*
3050  * Currently, we re-fetch all BLOB OIDs using a cursor. Consider scanning
3051  * the already-in-memory dumpable objects instead...
3052  */
3053  if (fout->remoteVersion >= 90000)
3054  blobQry = "DECLARE bloboid CURSOR FOR SELECT oid FROM pg_largeobject_metadata";
3055  else
3056  blobQry = "DECLARE bloboid CURSOR FOR SELECT DISTINCT loid FROM pg_largeobject";
3057 
3058  ExecuteSqlStatement(fout, blobQry);
3059 
3060  /* Command to fetch from cursor */
3061  blobFetchQry = "FETCH 1000 IN bloboid";
3062 
3063  do
3064  {
3065  /* Do a fetch */
3066  res = ExecuteSqlQuery(fout, blobFetchQry, PGRES_TUPLES_OK);
3067 
3068  /* Process the tuples, if any */
3069  ntups = PQntuples(res);
3070  for (i = 0; i < ntups; i++)
3071  {
3072  Oid blobOid;
3073  int loFd;
3074 
3075  blobOid = atooid(PQgetvalue(res, i, 0));
3076  /* Open the BLOB */
3077  loFd = lo_open(conn, blobOid, INV_READ);
3078  if (loFd == -1)
3079  exit_horribly(NULL, "could not open large object %u: %s",
3080  blobOid, PQerrorMessage(conn));
3081 
3082  StartBlob(fout, blobOid);
3083 
3084  /* Now read it in chunks, sending data to archive */
3085  do
3086  {
3087  cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
3088  if (cnt < 0)
3089  exit_horribly(NULL, "error reading large object %u: %s",
3090  blobOid, PQerrorMessage(conn));
3091 
3092  WriteData(fout, buf, cnt);
3093  } while (cnt > 0);
3094 
3095  lo_close(conn, loFd);
3096 
3097  EndBlob(fout, blobOid);
3098  }
3099 
3100  PQclear(res);
3101  } while (ntups > 0);
3102 
3103  return 1;
3104 }
3105 
3106 /*
3107  * getPolicies
3108  * get information about policies on a dumpable table.
3109  */
3110 void
3111 getPolicies(Archive *fout, TableInfo tblinfo[], int numTables)
3112 {
3113  PQExpBuffer query;
3114  PGresult *res;
3115  PolicyInfo *polinfo;
3116  int i_oid;
3117  int i_tableoid;
3118  int i_polname;
3119  int i_polcmd;
3120  int i_polpermissive;
3121  int i_polroles;
3122  int i_polqual;
3123  int i_polwithcheck;
3124  int i,
3125  j,
3126  ntups;
3127 
3128  if (fout->remoteVersion < 90500)
3129  return;
3130 
3131  query = createPQExpBuffer();
3132 
3133  for (i = 0; i < numTables; i++)
3134  {
3135  TableInfo *tbinfo = &tblinfo[i];
3136 
3137  /* Ignore row security on tables not to be dumped */
3138  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_POLICY))
3139  continue;
3140 
3141  if (g_verbose)
3142  write_msg(NULL, "reading row security enabled for table \"%s.%s\"\n",
3143  tbinfo->dobj.namespace->dobj.name,
3144  tbinfo->dobj.name);
3145 
3146  /*
3147  * Get row security enabled information for the table. We represent
3148  * RLS enabled on a table by creating a PolicyInfo object with an
3149  * empty policy.
3150  */
3151  if (tbinfo->rowsec)
3152  {
3153  /*
3154  * Note: use tableoid 0 so that this object won't be mistaken for
3155  * something that pg_depend entries apply to.
3156  */
3157  polinfo = pg_malloc(sizeof(PolicyInfo));
3158  polinfo->dobj.objType = DO_POLICY;
3159  polinfo->dobj.catId.tableoid = 0;
3160  polinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
3161  AssignDumpId(&polinfo->dobj);
3162  polinfo->dobj.namespace = tbinfo->dobj.namespace;
3163  polinfo->dobj.name = pg_strdup(tbinfo->dobj.name);
3164  polinfo->poltable = tbinfo;
3165  polinfo->polname = NULL;
3166  polinfo->polcmd = '\0';
3167  polinfo->polpermissive = 0;
3168  polinfo->polroles = NULL;
3169  polinfo->polqual = NULL;
3170  polinfo->polwithcheck = NULL;
3171  }
3172 
3173  if (g_verbose)
3174  write_msg(NULL, "reading policies for table \"%s.%s\"\n",
3175  tbinfo->dobj.namespace->dobj.name,
3176  tbinfo->dobj.name);
3177 
3178  /*
3179  * select table schema to ensure regproc name is qualified if needed
3180  */
3181  selectSourceSchema(fout, tbinfo->dobj.namespace->dobj.name);
3182 
3183  resetPQExpBuffer(query);
3184 
3185  /* Get the policies for the table. */
3186  if (fout->remoteVersion >= 100000)
3187  appendPQExpBuffer(query,
3188  "SELECT oid, tableoid, pol.polname, pol.polcmd, pol.polpermissive, "
3189  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
3190  " pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
3191  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
3192  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
3193  "FROM pg_catalog.pg_policy pol "
3194  "WHERE polrelid = '%u'",
3195  tbinfo->dobj.catId.oid);
3196  else
3197  appendPQExpBuffer(query,
3198  "SELECT oid, tableoid, pol.polname, pol.polcmd, 't' as polpermissive, "
3199  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
3200  " pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
3201  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
3202  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
3203  "FROM pg_catalog.pg_policy pol "
3204  "WHERE polrelid = '%u'",
3205  tbinfo->dobj.catId.oid);
3206  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3207 
3208  ntups = PQntuples(res);
3209 
3210  if (ntups == 0)
3211  {
3212  /*
3213  * No explicit policies to handle (only the default-deny policy,
3214  * which is handled as part of the table definition). Clean up
3215  * and return.
3216  */
3217  PQclear(res);
3218  continue;
3219  }
3220 
3221  i_oid = PQfnumber(res, "oid");
3222  i_tableoid = PQfnumber(res, "tableoid");
3223  i_polname = PQfnumber(res, "polname");
3224  i_polcmd = PQfnumber(res, "polcmd");
3225  i_polpermissive = PQfnumber(res, "polpermissive");
3226  i_polroles = PQfnumber(res, "polroles");
3227  i_polqual = PQfnumber(res, "polqual");
3228  i_polwithcheck = PQfnumber(res, "polwithcheck");
3229 
3230  polinfo = pg_malloc(ntups * sizeof(PolicyInfo));
3231 
3232  for (j = 0; j < ntups; j++)
3233  {
3234  polinfo[j].dobj.objType = DO_POLICY;
3235  polinfo[j].dobj.catId.tableoid =
3236  atooid(PQgetvalue(res, j, i_tableoid));
3237  polinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
3238  AssignDumpId(&polinfo[j].dobj);
3239  polinfo[j].dobj.namespace = tbinfo->dobj.namespace;
3240  polinfo[j].poltable = tbinfo;
3241  polinfo[j].polname = pg_strdup(PQgetvalue(res, j, i_polname));
3242  polinfo[j].dobj.name = pg_strdup(polinfo[j].polname);
3243 
3244  polinfo[j].polcmd = *(PQgetvalue(res, j, i_polcmd));
3245  polinfo[j].polpermissive = *(PQgetvalue(res, j, i_polpermissive)) == 't';
3246 
3247  if (PQgetisnull(res, j, i_polroles))
3248  polinfo[j].polroles = NULL;
3249  else
3250  polinfo[j].polroles = pg_strdup(PQgetvalue(res, j, i_polroles));
3251 
3252  if (PQgetisnull(res, j, i_polqual))
3253  polinfo[j].polqual = NULL;
3254  else
3255  polinfo[j].polqual = pg_strdup(PQgetvalue(res, j, i_polqual));
3256 
3257  if (PQgetisnull(res, j, i_polwithcheck))
3258  polinfo[j].polwithcheck = NULL;
3259  else
3260  polinfo[j].polwithcheck
3261  = pg_strdup(PQgetvalue(res, j, i_polwithcheck));
3262  }
3263  PQclear(res);
3264  }
3265  destroyPQExpBuffer(query);
3266 }
3267 
3268 /*
3269  * dumpPolicy
3270  * dump the definition of the given policy
3271  */
3272 static void
3273 dumpPolicy(Archive *fout, PolicyInfo *polinfo)
3274 {
3275  DumpOptions *dopt = fout->dopt;
3276  TableInfo *tbinfo = polinfo->poltable;
3277  PQExpBuffer query;
3278  PQExpBuffer delqry;
3279  const char *cmd;
3280  char *tag;
3281 
3282  if (dopt->dataOnly)
3283  return;
3284 
3285  /*
3286  * If polname is NULL, then this record is just indicating that ROW LEVEL
3287  * SECURITY is enabled for the table. Dump as ALTER TABLE <table> ENABLE
3288  * ROW LEVEL SECURITY.
3289  */
3290  if (polinfo->polname == NULL)
3291  {
3292  query = createPQExpBuffer();
3293 
3294  appendPQExpBuffer(query, "ALTER TABLE %s ENABLE ROW LEVEL SECURITY;",
3295  fmtId(polinfo->dobj.name));
3296 
3297  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3298  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3299  polinfo->dobj.name,
3300  polinfo->dobj.namespace->dobj.name,
3301  NULL,
3302  tbinfo->rolname, false,
3303  "ROW SECURITY", SECTION_POST_DATA,
3304  query->data, "", NULL,
3305  NULL, 0,
3306  NULL, NULL);
3307 
3308  destroyPQExpBuffer(query);
3309  return;
3310  }
3311 
3312  if (polinfo->polcmd == '*')
3313  cmd = "";
3314  else if (polinfo->polcmd == 'r')
3315  cmd = " FOR SELECT";
3316  else if (polinfo->polcmd == 'a')
3317  cmd = " FOR INSERT";
3318  else if (polinfo->polcmd == 'w')
3319  cmd = " FOR UPDATE";
3320  else if (polinfo->polcmd == 'd')
3321  cmd = " FOR DELETE";
3322  else
3323  {
3324  write_msg(NULL, "unexpected policy command type: %c\n",
3325  polinfo->polcmd);
3326  exit_nicely(1);
3327  }
3328 
3329  query = createPQExpBuffer();
3330  delqry = createPQExpBuffer();
3331 
3332  appendPQExpBuffer(query, "CREATE POLICY %s", fmtId(polinfo->polname));
3333 
3334  appendPQExpBuffer(query, " ON %s%s%s", fmtId(tbinfo->dobj.name),
3335  !polinfo->polpermissive ? " AS RESTRICTIVE" : "", cmd);
3336 
3337  if (polinfo->polroles != NULL)
3338  appendPQExpBuffer(query, " TO %s", polinfo->polroles);
3339 
3340  if (polinfo->polqual != NULL)
3341  appendPQExpBuffer(query, " USING (%s)", polinfo->polqual);
3342 
3343  if (polinfo->polwithcheck != NULL)
3344  appendPQExpBuffer(query, " WITH CHECK (%s)", polinfo->polwithcheck);
3345 
3346  appendPQExpBuffer(query, ";\n");
3347 
3348  appendPQExpBuffer(delqry, "DROP POLICY %s", fmtId(polinfo->polname));
3349  appendPQExpBuffer(delqry, " ON %s;\n", fmtId(tbinfo->dobj.name));
3350 
3351  tag = psprintf("%s %s", tbinfo->dobj.name, polinfo->dobj.name);
3352 
3353  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3354  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3355  tag,
3356  polinfo->dobj.namespace->dobj.name,
3357  NULL,
3358  tbinfo->rolname, false,
3359  "POLICY", SECTION_POST_DATA,
3360  query->data, delqry->data, NULL,
3361  NULL, 0,
3362  NULL, NULL);
3363 
3364  free(tag);
3365  destroyPQExpBuffer(query);
3366  destroyPQExpBuffer(delqry);
3367 }
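/*
 * For illustration (hypothetical permissive policy p_sel on table t,
 * granted to role staff), the commands built above would be
 *
 *     CREATE POLICY p_sel ON t FOR SELECT TO staff USING (owner = CURRENT_USER);
 *
 *     DROP POLICY p_sel ON t;
 *
 * while the polname == NULL placeholder case instead emits
 *
 *     ALTER TABLE t ENABLE ROW LEVEL SECURITY;
 */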
3368 
3369 /*
3370  * getPublications
3371  * get information about publications
3372  */
3373 void
3374 getPublications(Archive *fout)
3375 {
3376  PQExpBuffer query;
3377  PGresult *res;
3378  PublicationInfo *pubinfo;
3379  int i_tableoid;
3380  int i_oid;
3381  int i_pubname;
3382  int i_rolname;
3383  int i_puballtables;
3384  int i_pubinsert;
3385  int i_pubupdate;
3386  int i_pubdelete;
3387  int i,
3388  ntups;
3389 
3390  if (fout->remoteVersion < 100000)
3391  return;
3392 
3393  query = createPQExpBuffer();
3394 
3395  resetPQExpBuffer(query);
3396 
3397  /* Get the publications. */
3398  appendPQExpBuffer(query,
3399  "SELECT p.tableoid, p.oid, p.pubname, "
3400  "(%s p.pubowner) AS rolname, "
3401  "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete "
3402  "FROM pg_catalog.pg_publication p",
3403  username_subquery);
3404 
3405  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3406 
3407  ntups = PQntuples(res);
3408 
3409  i_tableoid = PQfnumber(res, "tableoid");
3410  i_oid = PQfnumber(res, "oid");
3411  i_pubname = PQfnumber(res, "pubname");
3412  i_rolname = PQfnumber(res, "rolname");
3413  i_puballtables = PQfnumber(res, "puballtables");
3414  i_pubinsert = PQfnumber(res, "pubinsert");
3415  i_pubupdate = PQfnumber(res, "pubupdate");
3416  i_pubdelete = PQfnumber(res, "pubdelete");
3417 
3418  pubinfo = pg_malloc(ntups * sizeof(PublicationInfo));
3419 
3420  for (i = 0; i < ntups; i++)
3421  {
3422  pubinfo[i].dobj.objType = DO_PUBLICATION;
3423  pubinfo[i].dobj.catId.tableoid =
3424  atooid(PQgetvalue(res, i, i_tableoid));
3425  pubinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
3426  AssignDumpId(&pubinfo[i].dobj);
3427  pubinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_pubname));
3428  pubinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
3429  pubinfo[i].puballtables =
3430  (strcmp(PQgetvalue(res, i, i_puballtables), "t") == 0);
3431  pubinfo[i].pubinsert =
3432  (strcmp(PQgetvalue(res, i, i_pubinsert), "t") == 0);
3433  pubinfo[i].pubupdate =
3434  (strcmp(PQgetvalue(res, i, i_pubupdate), "t") == 0);
3435  pubinfo[i].pubdelete =
3436  (strcmp(PQgetvalue(res, i, i_pubdelete), "t") == 0);
3437 
3438  if (strlen(pubinfo[i].rolname) == 0)
3439  write_msg(NULL, "WARNING: owner of publication \"%s\" appears to be invalid\n",
3440  pubinfo[i].dobj.name);
3441 
3442  /* Decide whether we want to dump it */
3443  selectDumpableObject(&(pubinfo[i].dobj), fout);
3444  }
3445  PQclear(res);
3446 
3447  destroyPQExpBuffer(query);
3448 }
3449 
3450 /*
3451  * dumpPublication
3452  * dump the definition of the given publication
3453  */
3454 static void
3455 dumpPublication(Archive *fout, PublicationInfo *pubinfo)
3456 {
3457  PQExpBuffer delq;
3458  PQExpBuffer query;
3459  PQExpBuffer labelq;
3460 
3461  if (!(pubinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3462  return;
3463 
3464  delq = createPQExpBuffer();
3465  query = createPQExpBuffer();
3466  labelq = createPQExpBuffer();
3467 
3468  appendPQExpBuffer(delq, "DROP PUBLICATION %s;\n",
3469  fmtId(pubinfo->dobj.name));
3470 
3471  appendPQExpBuffer(query, "CREATE PUBLICATION %s",
3472  fmtId(pubinfo->dobj.name));
3473 
3474  appendPQExpBuffer(labelq, "PUBLICATION %s", fmtId(pubinfo->dobj.name));
3475 
3476  if (pubinfo->puballtables)
3477  appendPQExpBufferStr(query, " FOR ALL TABLES");
3478 
3479  appendPQExpBufferStr(query, " WITH (");
3480  if (pubinfo->pubinsert)
3481  appendPQExpBufferStr(query, "PUBLISH INSERT");
3482  else
3483  appendPQExpBufferStr(query, "NOPUBLISH INSERT");
3484 
3485  if (pubinfo->pubupdate)
3486  appendPQExpBufferStr(query, ", PUBLISH UPDATE");
3487  else
3488  appendPQExpBufferStr(query, ", NOPUBLISH UPDATE");
3489 
3490  if (pubinfo->pubdelete)
3491  appendPQExpBufferStr(query, ", PUBLISH DELETE");
3492  else
3493  appendPQExpBufferStr(query, ", NOPUBLISH DELETE");
3494 
3495  appendPQExpBufferStr(query, ");\n");
3496 
3497  ArchiveEntry(fout, pubinfo->dobj.catId, pubinfo->dobj.dumpId,
3498  pubinfo->dobj.name,
3499  NULL,
3500  NULL,
3501  pubinfo->rolname, false,
3502  "PUBLICATION", SECTION_POST_DATA,
3503  query->data, delq->data, NULL,
3504  NULL, 0,
3505  NULL, NULL);
3506 
3507  if (pubinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3508  dumpComment(fout, labelq->data,
3509  NULL, pubinfo->rolname,
3510  pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
3511 
3512  if (pubinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3513  dumpSecLabel(fout, labelq->data,
3514  NULL, pubinfo->rolname,
3515  pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
3516 
3517  destroyPQExpBuffer(delq);
3518  destroyPQExpBuffer(query);
3519 }
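/*
 * Illustrative aside (not part of pg_dump.c): an example rendering of what
 * dumpPublication() emits for a hypothetical publication pub1 that publishes
 * only inserts, in the WITH (PUBLISH ...) syntax generated by this version
 * of the code:
 *
 *   CREATE PUBLICATION pub1 FOR ALL TABLES WITH (PUBLISH INSERT, NOPUBLISH UPDATE, NOPUBLISH DELETE);
 *
 * and the corresponding drop command:
 *
 *   DROP PUBLICATION pub1;
 */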
3520 
3521 /*
3522  * getPublicationTables
3523  * get information about publication membership for dumpable tables.
3524  */
3525 void
3526 getPublicationTables(Archive *fout, TableInfo tblinfo[], int numTables)
3527 {
3528  PQExpBuffer query;
3529  PGresult *res;
3530  PublicationRelInfo *pubrinfo;
3531  int i_tableoid;
3532  int i_oid;
3533  int i_pubname;
3534  int i,
3535  j,
3536  ntups;
3537 
3538  if (fout->remoteVersion < 100000)
3539  return;
3540 
3541  query = createPQExpBuffer();
3542 
3543  for (i = 0; i < numTables; i++)
3544  {
3545  TableInfo *tbinfo = &tblinfo[i];
3546 
3547  /* Only plain tables can be added to publications. */
3548  if (tbinfo->relkind != RELKIND_RELATION)
3549  continue;
3550 
3551  /*
3552  * Ignore publication membership of tables whose definitions are
3553  * not to be dumped.
3554  */
3555  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3556  continue;
3557 
3558  if (g_verbose)
3559  write_msg(NULL, "reading publication membership for table \"%s.%s\"\n",
3560  tbinfo->dobj.namespace->dobj.name,
3561  tbinfo->dobj.name);
3562 
3563  resetPQExpBuffer(query);
3564 
3565  /* Get the publication membership for the table. */
3566  appendPQExpBuffer(query,
3567  "SELECT pr.tableoid, pr.oid, p.pubname "
3568  "FROM pg_catalog.pg_publication_rel pr,"
3569  " pg_catalog.pg_publication p "
3570  "WHERE pr.prrelid = '%u'"
3571  " AND p.oid = pr.prpubid",
3572  tbinfo->dobj.catId.oid);
3573  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3574 
3575  ntups = PQntuples(res);
3576 
3577  if (ntups == 0)
3578  {
3579  /*
3580  * Table is not a member of any publications.  Clean up and continue.
3581  */
3582  PQclear(res);
3583  continue;
3584  }
3585 
3586  i_tableoid = PQfnumber(res, "tableoid");
3587  i_oid = PQfnumber(res, "oid");
3588  i_pubname = PQfnumber(res, "pubname");
3589 
3590  pubrinfo = pg_malloc(ntups * sizeof(PublicationRelInfo));
3591 
3592  for (j = 0; j < ntups; j++)
3593  {
3594  pubrinfo[j].dobj.objType = DO_PUBLICATION_REL;
3595  pubrinfo[j].dobj.catId.tableoid =
3596  atooid(PQgetvalue(res, j, i_tableoid));
3597  pubrinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
3598  AssignDumpId(&pubrinfo[j].dobj);
3599  pubrinfo[j].dobj.namespace = tbinfo->dobj.namespace;
3600  pubrinfo[j].dobj.name = tbinfo->dobj.name;
3601  pubrinfo[j].pubname = pg_strdup(PQgetvalue(res, j, i_pubname));
3602  pubrinfo[j].pubtable = tbinfo;
3603 
3604  /* Decide whether we want to dump it */
3605  selectDumpablePublicationTable(&(pubrinfo[j].dobj), fout);
3606  }
3607  PQclear(res);
3608  }
3609  destroyPQExpBuffer(query);
3610 }
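/*
 * Illustrative aside (not part of pg_dump.c): a minimal libpq sketch of the
 * same per-table membership lookup done above, for a single relation; the
 * connection string and the table OID are placeholders.
 */
#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
	PGconn	   *conn = PQconnectdb("dbname=postgres");	/* hypothetical DSN */
	const char *paramValues[1] = {"16384"};				/* hypothetical table OID */
	PGresult   *res;
	int			i;

	if (PQstatus(conn) != CONNECTION_OK)
	{
		fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
		PQfinish(conn);
		return 1;
	}

	/* same join as getPublicationTables(), restricted to one relation */
	res = PQexecParams(conn,
					   "SELECT p.pubname "
					   "FROM pg_catalog.pg_publication_rel pr, "
					   "     pg_catalog.pg_publication p "
					   "WHERE pr.prrelid = $1::pg_catalog.oid "
					   "  AND p.oid = pr.prpubid",
					   1, NULL, paramValues, NULL, NULL, 0);

	if (PQresultStatus(res) == PGRES_TUPLES_OK)
	{
		for (i = 0; i < PQntuples(res); i++)
			printf("member of publication: %s\n", PQgetvalue(res, i, 0));
	}

	PQclear(res);
	PQfinish(conn);
	return 0;
}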
3611 
3612 /*
3613  * dumpPublicationTable
3614  * dump the definition of the given publication table mapping
3615  */
3616 static void
3617 dumpPublicationTable(Archive *fout, PublicationRelInfo *pubrinfo)
3618 {
3619  TableInfo *tbinfo = pubrinfo->pubtable;
3620  PQExpBuffer query;
3621  char *tag;
3622 
3623  if (!(pubrinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3624  return;
3625 
3626  tag = psprintf("%s %s", pubrinfo->pubname, tbinfo->dobj.name);
3627 
3628  query = createPQExpBuffer();
3629 
3630  appendPQExpBuffer(query, "ALTER PUBLICATION %s ADD TABLE ONLY",
3631  fmtId(pubrinfo->pubname));
3632  appendPQExpBuffer(query, " %s;",
3633  fmtId(tbinfo->dobj.name));
3634 
3635  /*
3636  * There is no point in creating a separate drop query; the drop is
3637  * handled implicitly when the table itself is dropped.
3638  */
3639  ArchiveEntry(fout, pubrinfo->dobj.catId, pubrinfo->dobj.dumpId,
3640  tag,
3641  tbinfo->dobj.namespace->dobj.name,
3642  NULL,
3643  "", false,
3644  "PUBLICATION TABLE", SECTION_POST_DATA,
3645  query->data, "", NULL,
3646  NULL, 0,
3647  NULL, NULL);
3648 
3649  free(tag);
3650  destroyPQExpBuffer(query);
3651 }
3652 
3653 /*
3654  * Is the currently connected user a superuser?
3655  */
3656 static bool
3657 is_superuser(Archive *fout)
3658 {
3659  ArchiveHandle *AH = (ArchiveHandle *) fout;
3660  const char *val;
3661 
3662  val = PQparameterStatus(AH->connection, "is_superuser");
3663 
3664  if (val && strcmp(val, "on") == 0)
3665  return true;
3666 
3667  return false;
3668 }
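/*
 * Illustrative aside (not part of pg_dump.c): PQparameterStatus() returns a
 * value the server reported at session start, so no extra query is needed.
 * A minimal standalone check, with a placeholder connection string:
 */
#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
	PGconn	   *conn = PQconnectdb("dbname=postgres");	/* hypothetical DSN */

	if (PQstatus(conn) == CONNECTION_OK)
	{
		const char *val = PQparameterStatus(conn, "is_superuser");

		printf("is_superuser = %s\n", val ? val : "(not reported)");
	}
	PQfinish(conn);
	return 0;
}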
3669 
3670 /*
3671  * getSubscriptions
3672  * get information about subscriptions
3673  */
3674 void
3675 getSubscriptions(Archive *fout)
3676 {
3677  PQExpBuffer query;
3678  PGresult *res;
3679  SubscriptionInfo *subinfo;
3680  int i_tableoid;
3681  int i_oid;
3682  int i_subname;
3683  int i_rolname;
3684  int i_subconninfo;
3685  int i_subslotname;
3686  int i_subsynccommit;
3687  int i_subpublications;
3688  int i,
3689  ntups;
3690 
3691  if (fout->remoteVersion < 100000)
3692  return;
3693 
3694  if (!is_superuser(fout))
3695  {
3696  int n;
3697 
3698  res = ExecuteSqlQuery(fout,
3699  "SELECT count(*) FROM pg_subscription "
3700  "WHERE subdbid = (SELECT oid FROM pg_catalog.pg_database"
3701  " WHERE datname = current_database())",
3702  PGRES_TUPLES_OK);
3703  n = atoi(PQgetvalue(res, 0, 0));
3704  if (n > 0)
3705  write_msg(NULL, "WARNING: subscriptions not dumped because current user is not a superuser\n");
3706  PQclear(res);
3707  return;
3708  }
3709 
3710  query = createPQExpBuffer();
3711 
3712  resetPQExpBuffer(query);
3713 
3714  /* Get the subscriptions in the current database. */
3715  appendPQExpBuffer(query,
3716  "SELECT s.tableoid, s.oid, s.subname,"
3717  "(%s s.subowner) AS rolname, "
3718  " s.subconninfo, s.subslotname, s.subsynccommit, "
3719  " s.subpublications "
3720  "FROM pg_catalog.pg_subscription s "
3721  "WHERE s.subdbid = (SELECT oid FROM pg_catalog.pg_database"
3722  " WHERE datname = current_database())",
3723  username_subquery);
3724  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3725 
3726  ntups = PQntuples(res);
3727 
3728  i_tableoid = PQfnumber(res, "tableoid");
3729  i_oid = PQfnumber(res, "oid");
3730  i_subname = PQfnumber(res, "subname");
3731  i_rolname = PQfnumber(res, "rolname");
3732  i_subconninfo = PQfnumber(res, "subconninfo");
3733  i_subslotname = PQfnumber(res, "subslotname");
3734  i_subsynccommit = PQfnumber(res, "subsynccommit");
3735  i_subpublications = PQfnumber(res, "subpublications");
3736 
3737  subinfo = pg_malloc(ntups * sizeof(SubscriptionInfo));
3738 
3739  for (i = 0; i < ntups; i++)
3740  {
3741  subinfo[i].dobj.objType = DO_SUBSCRIPTION;
3742  subinfo[i].dobj.catId.tableoid =
3743  atooid(PQgetvalue(res, i, i_tableoid));
3744  subinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
3745  AssignDumpId(&subinfo[i].dobj);
3746  subinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_subname));
3747  subinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
3748  subinfo[i].subconninfo = pg_strdup(PQgetvalue(res, i, i_subconninfo));
3749  subinfo[i].subslotname = pg_strdup(PQgetvalue(res, i, i_subslotname));
3750  subinfo[i].subsynccommit =
3751  pg_strdup(PQgetvalue(res, i, i_subsynccommit));
3752  subinfo[i].subpublications =
3753  pg_strdup(PQgetvalue(res, i, i_subpublications));
3754 
3755  if (strlen(subinfo[i].rolname) == 0)
3756  write_msg(NULL, "WARNING: owner of subscription \"%s\" appears to be invalid\n",
3757  subinfo[i].dobj.name);
3758 
3759  /* Decide whether we want to dump it */
3760  selectDumpableObject(&(subinfo[i].dobj), fout);
3761  }
3762  PQclear(res);
3763 
3764  destroyPQExpBuffer(query);
3765 }
3766 
3767 /*
3768  * dumpSubscription
3769  * dump the definition of the given subscription
3770  */
3771 static void
3772 dumpSubscription(Archive *fout, SubscriptionInfo *subinfo)
3773 {
3774  PQExpBuffer delq;
3775  PQExpBuffer query;
3776  PQExpBuffer labelq;
3777  PQExpBuffer publications;
3778  char **pubnames = NULL;
3779  int npubnames = 0;
3780  int i;
3781 
3782  if (!(subinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3783  return;
3784 
3785  delq = createPQExpBuffer();
3786  query = createPQExpBuffer();
3787  labelq = createPQExpBuffer();
3788 
3789  appendPQExpBuffer(delq, "DROP SUBSCRIPTION %s;\n",
3790  fmtId(subinfo->dobj.name));
3791 
3792  appendPQExpBuffer(query, "CREATE SUBSCRIPTION %s CONNECTION ",
3793  fmtId(subinfo->dobj.name));
3794  appendStringLiteralAH(query, subinfo->subconninfo, fout);
3795 
3796  /* Build list of quoted publications and append them to query. */
3797  if (!parsePGArray(subinfo->subpublications, &pubnames, &npubnames))
3798  {
3799  write_msg(NULL,
3800  "WARNING: could not parse subpublications array\n");
3801  if (pubnames)
3802  free(pubnames);
3803  pubnames = NULL;
3804  npubnames = 0;
3805  }
3806 
3807  publications = createPQExpBuffer();
3808  for (i = 0; i < npubnames; i++)
3809  {
3810  if (i > 0)
3811  appendPQExpBufferStr(publications, ", ");
3812 
3813  appendPQExpBufferStr(publications, fmtId(pubnames[i]));
3814  }
3815 
3816  appendPQExpBuffer(query, " PUBLICATION %s WITH (NOCONNECT, SLOT NAME = ", publications->data);
3817  appendStringLiteralAH(query, subinfo->subslotname, fout);
3818 
3819  if (strcmp(subinfo->subsynccommit, "off") != 0)
3820  appendPQExpBuffer(query, ", SYNCHRONOUS_COMMIT = %s", fmtId(subinfo->subsynccommit));
3821 
3822  appendPQExpBufferStr(query, ");\n");
3823 
3824  appendPQExpBuffer(labelq, "SUBSCRIPTION %s", fmtId(subinfo->dobj.name));
3825 
3826  ArchiveEntry(fout, subinfo->dobj.catId, subinfo->dobj.dumpId,
3827  subinfo->dobj.name,
3828  NULL,
3829  NULL,
3830  subinfo->rolname, false,
3831  "SUBSCRIPTION", SECTION_POST_DATA,
3832  query->data, delq->data, NULL,
3833  NULL, 0,
3834  NULL, NULL);
3835 
3836  if (subinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3837  dumpComment(fout, labelq->data,
3838  NULL, subinfo->rolname,
3839  subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
3840 
3841  if (subinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3842  dumpSecLabel(fout, labelq->data,
3843  NULL, subinfo->rolname,
3844  subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
3845 
3846  destroyPQExpBuffer(publications);
3847  if (pubnames)
3848  free(pubnames);
3849 
3850  destroyPQExpBuffer(delq);
3851  destroyPQExpBuffer(query);
3852 }
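/*
 * Illustrative aside (not part of pg_dump.c): a sketch of the array-parsing
 * and identifier-quoting step used above, assuming a build inside the
 * PostgreSQL source tree where the dumputils.h and fe_utils/string_utils.h
 * headers included by this file (one of which declares parsePGArray() and
 * fmtId() in this version) are available.  The subpublications value is
 * hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>
#include "postgres_fe.h"
#include "dumputils.h"
#include "fe_utils/string_utils.h"

int
main(void)
{
	const char *subpublications = "{pub1,\"Pub Two\"}";	/* hypothetical catalog value */
	char	  **pubnames = NULL;
	int			npubnames = 0;
	int			i;

	if (!parsePGArray(subpublications, &pubnames, &npubnames))
	{
		fprintf(stderr, "could not parse array\n");
		return 1;
	}

	/* prints: pub1, "Pub Two" */
	for (i = 0; i < npubnames; i++)
		printf("%s%s", i > 0 ? ", " : "", fmtId(pubnames[i]));
	putchar('\n');

	free(pubnames);
	return 0;
}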
3853 
3854 static void
3855 binary_upgrade_set_type_oids_by_type_oid(Archive *fout,
3856  PQExpBuffer upgrade_buffer,
3857  Oid pg_type_oid)
3858 {
3859  PQExpBuffer upgrade_query = createPQExpBuffer();
3860  PGresult *upgrade_res;
3861  Oid pg_type_array_oid;
3862 
3863  appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type oid\n");
3864  appendPQExpBuffer(upgrade_buffer,
3865  "SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('%u'::pg_catalog.oid);\n\n",
3866  pg_type_oid);
3867 
3868  /* we only support old >= 8.3 for binary upgrades */
3869  appendPQExpBuffer(upgrade_query,
3870  "SELECT typarray "
3871  "FROM pg_catalog.pg_type "
3872  "WHERE pg_type.oid = '%u'::pg_catalog.oid;",
3873  pg_type_oid);
3874 
3875  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
3876 
3877  pg_type_array_oid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "typarray")));
3878 
3879  if (OidIsValid(pg_type_array_oid))
3880  {
3881  appendPQExpBufferStr(upgrade_buffer,
3882  "\n-- For binary upgrade, must preserve pg_type array oid\n");
3883  appendPQExpBuffer(upgrade_buffer,
3884  "SELECT pg_catalog.binary_upgrade_set_next_array_pg_type_oid('%u'::pg_catalog.oid);\n\n",
3885  pg_type_array_oid);
3886  }
3887 
3888  PQclear(upgrade_res);
3889  destroyPQExpBuffer(upgrade_query);
3890 }
3891 
3892 static bool
3893 binary_upgrade_set_type_oids_by_rel_oid(Archive *fout,
3894  PQExpBuffer upgrade_buffer,
3895  Oid pg_rel_oid)
3896 {
3897  PQExpBuffer upgrade_query = createPQExpBuffer();
3898  PGresult *upgrade_res;
3899  Oid pg_type_oid;
3900  bool toast_set = false;
3901 
3902  /* we only support old >= 8.3 for binary upgrades */
3903  appendPQExpBuffer(upgrade_query,
3904  "SELECT c.reltype AS crel, t.reltype AS trel "
3905  "FROM pg_catalog.pg_class c "
3906  "LEFT JOIN pg_catalog.pg_class t ON "
3907  " (c.reltoastrelid = t.oid) "
3908  "WHERE c.oid = '%u'::pg_catalog.oid;",
3909  pg_rel_oid);
3910 
3911  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
3912 
3913  pg_type_oid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "crel")));
3914 
3915  binary_upgrade_set_type_oids_by_type_oid(fout, upgrade_buffer,
3916  pg_type_oid);
3917 
3918  if (!PQgetisnull(upgrade_res, 0, PQfnumber(upgrade_res, "trel")))
3919  {
3920  /* Toast tables do not have pg_type array rows */
3921  Oid pg_type_toast_oid = atooid(PQgetvalue(upgrade_res, 0,
3922  PQfnumber(upgrade_res, "trel")));
3923 
3924  appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type toast oid\n");
3925  appendPQExpBuffer(upgrade_buffer,
3926  "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_type_oid('%u'::pg_catalog.oid);\n\n",
3927  pg_type_toast_oid);
3928 
3929  toast_set = true;
3930  }
3931 
3932  PQclear(upgrade_res);
3933  destroyPQExpBuffer(upgrade_query);
3934 
3935  return toast_set;
3936 }
3937 
3938 static void
3939 binary_upgrade_set_pg_class_oids(Archive *fout,
3940  PQExpBuffer upgrade_buffer, Oid pg_class_oid,
3941  bool is_index)
3942 {
3943  PQExpBuffer upgrade_query = createPQExpBuffer();
3944  PGresult *upgrade_res;
3945  Oid pg_class_reltoastrelid;
3946  Oid pg_index_indexrelid;
3947 
3948  appendPQExpBuffer(upgrade_query,
3949  "SELECT c.reltoastrelid, i.indexrelid "
3950  "FROM pg_catalog.pg_class c LEFT JOIN "
3951  "pg_catalog.pg_index i ON (c.reltoastrelid = i.indrelid AND i.indisvalid) "
3952  "WHERE c.oid = '%u'::pg_catalog.oid;",
3953  pg_class_oid);
3954 
3955  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
3956 
3957  pg_class_reltoastrelid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "reltoastrelid")));
3958  pg_index_indexrelid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "indexrelid")));
3959 
3960  appendPQExpBufferStr(upgrade_buffer,
3961  "\n-- For binary upgrade, must preserve pg_class oids\n");
3962 
3963  if (!is_index)
3964  {
3965  appendPQExpBuffer(upgrade_buffer,
3966  "SELECT pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('%u'::pg_catalog.oid);\n",
3967  pg_class_oid);
3968  /* only tables have toast tables, not indexes */
3969  if (OidIsValid(pg_class_reltoastrelid))
3970  {
3971  /*
3972  * One complexity is that the table definition might not require
3973  * the creation of a TOAST table, and the TOAST table might have
3974  * been created long after table creation, when the table was
3975  * loaded with wide data. By setting the TOAST oid we force
3976  * creation of the TOAST heap and TOAST index by the backend so we
3977  * can cleanly copy the files during binary upgrade.
3978  */
3979 
3980  appendPQExpBuffer(upgrade_buffer,
3981  "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('%u'::pg_catalog.oid);\n",
3982  pg_class_reltoastrelid);
3983 
3984  /* every toast table has an index */
3985  appendPQExpBuffer(upgrade_buffer,
3986  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
3987  pg_index_indexrelid);
3988  }
3989  }
3990  else
3991  appendPQExpBuffer(upgrade_buffer,
3992  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
3993  pg_class_oid);
3994 
3995  appendPQExpBufferChar(upgrade_buffer, '\n');
3996 
3997  PQclear(upgrade_res);
3998  destroyPQExpBuffer(upgrade_query);
3999 }
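/*
 * Illustrative aside (not part of pg_dump.c): for a heap relation that
 * already has a TOAST table, the function above emits a block like the
 * following (OIDs hypothetical):
 *
 *   -- For binary upgrade, must preserve pg_class oids
 *   SELECT pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('16384'::pg_catalog.oid);
 *   SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('16387'::pg_catalog.oid);
 *   SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('16389'::pg_catalog.oid);
 *
 * For an index (is_index == true), only the set_next_index_pg_class_oid()
 * call is emitted.
 */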
4000 
4001 /*
4002  * If the DumpableObject is a member of an extension, add a suitable
4003  * ALTER EXTENSION ADD command to the creation commands in upgrade_buffer.
4004  */
4005 static void
4006 binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
4007  DumpableObject *dobj,
4008  const char *objlabel)
4009 {
4010  DumpableObject *extobj = NULL;
4011  int i;
4012 
4013  if (!dobj->ext_member)
4014  return;
4015 
4016  /*
4017  * Find the parent extension. We could avoid this search if we wanted to
4018  * add a link field to DumpableObject, but the space costs of that would
4019  * be considerable. We assume that member objects could only have a
4020  * direct dependency on their own extension, not any others.
4021  */
4022  for (i = 0; i < dobj->nDeps; i++)
4023  {
4024  extobj = findObjectByDumpId(dobj->dependencies[i]);
4025  if (extobj && extobj->objType == DO_EXTENSION)
4026  break;
4027  extobj = NULL;
4028  }
4029  if (extobj == NULL)
4030  exit_horribly(NULL, "could not find parent extension for %s\n", objlabel);
4031 
4032  appendPQExpBufferStr(upgrade_buffer,
4033  "\n-- For binary upgrade, handle extension membership the hard way\n");
4034  appendPQExpBuffer(upgrade_buffer, "ALTER EXTENSION %s ADD %s;\n",
4035  fmtId(extobj->name),
4036  objlabel);
4037 }
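/*
 * Illustrative aside (not part of pg_dump.c): an example of the command
 * emitted above for a hypothetical member function of the hstore extension;
 * everything after ADD comes verbatim from the objlabel argument:
 *
 *   -- For binary upgrade, handle extension membership the hard way
 *   ALTER EXTENSION hstore ADD FUNCTION public.hstore_eq(public.hstore, public.hstore);
 */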
4038 
4039 /*
4040  * getNamespaces:
4041  * read all namespaces in the system catalogs and return them in the
4042  * NamespaceInfo* structure
4043  *
4044  * numNamespaces is set to the number of namespaces read in
4045  */
4046 NamespaceInfo *
4047 getNamespaces(Archive *fout, int *numNamespaces)
4048 {
4049  DumpOptions *dopt = fout->dopt;
4050  PGresult *res;
4051  int ntups;
4052  int i;
4053  PQExpBuffer query;
4054  NamespaceInfo *nsinfo;
4055  int i_tableoid;
4056  int i_oid;
4057  int i_nspname;
4058  int i_rolname;
4059  int i_nspacl;
4060  int i_rnspacl;
4061  int i_initnspacl;
4062  int i_initrnspacl;
4063 
4064  query = createPQExpBuffer();
4065 
4066  /* Make sure we are in proper schema */
4067  selectSourceSchema(fout, "pg_catalog");
4068 
4069  /*
4070  * we fetch all namespaces including system ones, so that every object we
4071  * read in can be linked to a containing namespace.
4072  */
4073  if (fout->remoteVersion >= 90600)
4074  {
4075  PQExpBuffer acl_subquery = createPQExpBuffer();
4076  PQExpBuffer racl_subquery = createPQExpBuffer();
4077  PQExpBuffer init_acl_subquery = createPQExpBuffer();
4078  PQExpBuffer init_racl_subquery = createPQExpBuffer();
4079 
4080  buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
4081  init_racl_subquery, "n.nspacl", "n.nspowner", "'n'",
4082  dopt->binary_upgrade);
4083 
4084  appendPQExpBuffer(query, "SELECT n.tableoid, n.oid, n.nspname, "
4085  "(%s nspowner) AS rolname, "
4086  "%s as nspacl, "
4087  "%s as rnspacl, "
4088  "%s as initnspacl, "
4089  "%s as initrnspacl "
4090  "FROM pg_namespace n "
4091  "LEFT JOIN pg_init_privs pip "
4092  "ON (n.oid = pip.objoid "
4093  "AND pip.classoid = 'pg_namespace'::regclass "
4094  "AND pip.objsubid = 0",
4096  acl_subquery->data,
4097  racl_subquery->data,
4098  init_acl_subquery->data,
4099  init_racl_subquery->data);
4100 
4101  /*
4102  * When we are doing a 'clean' run, we will be dropping and recreating
4103  * the 'public' schema (the only object which has that kind of
4104  * treatment in the backend and which has an entry in pg_init_privs)
4105  * and therefore we should not consider any initial privileges in
4106  * pg_init_privs in that case.
4107  *
4108  * See pg_backup_archiver.c:_printTocEntry() for the details on why
4109  * the public schema is special in this regard.
4110  *
4111  * Note that if the public schema is dropped and re-created, this is
4112  * essentially a no-op because the new public schema won't have an
4113  * entry in pg_init_privs anyway, as the entry will be removed when
4114  * the public schema is dropped.
4115  */
4116  if (dopt->outputClean)
4117  appendPQExpBuffer(query," AND pip.objoid <> 'public'::regnamespace");
4118 
4119  appendPQExpBuffer(query,") ");
4120 
4121  destroyPQExpBuffer(acl_subquery);
4122  destroyPQExpBuffer(racl_subquery);
4123  destroyPQExpBuffer(init_acl_subquery);
4124  destroyPQExpBuffer(init_racl_subquery);
4125  }
4126  else
4127  appendPQExpBuffer(query, "SELECT tableoid, oid, nspname, "
4128  "(%s nspowner) AS rolname, "
4129  "nspacl, NULL as rnspacl, "
4130  "NULL AS initnspacl, NULL as initrnspacl "
4131  "FROM pg_namespace",
4132  username_subquery);
4133 
4134  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4135 
4136  ntups = PQntuples(res);
4137 
4138  nsinfo = (NamespaceInfo *) pg_malloc(ntups * sizeof(NamespaceInfo));
4139 
4140  i_tableoid = PQfnumber(res, "tableoid");
4141  i_oid = PQfnumber(res, "oid");
4142  i_nspname = PQfnumber(res, "nspname");
4143  i_rolname = PQfnumber(res, "rolname");
4144  i_nspacl = PQfnumber(res, "nspacl");
4145  i_rnspacl = PQfnumber(res, "rnspacl");
4146  i_initnspacl = PQfnumber(res, "initnspacl");
4147  i_initrnspacl = PQfnumber(res, "initrnspacl");
4148 
4149  for (i = 0; i < ntups; i++)
4150  {
4151  nsinfo[i].dobj.objType = DO_NAMESPACE;
4152  nsinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4153  nsinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4154  AssignDumpId(&nsinfo[i].dobj);
4155  nsinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_nspname));
4156  nsinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4157  nsinfo[i].nspacl = pg_strdup(PQgetvalue(res, i, i_nspacl));
4158  nsinfo[i].rnspacl = pg_strdup(PQgetvalue(res, i, i_rnspacl));
4159  nsinfo[i].initnspacl = pg_strdup(PQgetvalue(res, i, i_initnspacl));
4160  nsinfo[i].initrnspacl = pg_strdup(PQgetvalue(res, i, i_initrnspacl));
4161 
4162  /* Decide whether to dump this namespace */
4163  selectDumpableNamespace(&nsinfo[i], fout);
4164 
4165  /*
4166  * Do not try to dump ACL if the ACL is empty or the default.
4167  *
4168  * This is useful because, for some schemas/objects, the only
4169  * component we would otherwise dump is the ACL; if we can clear that
4170  * bit, 'dump' goes to zero/false and the object is not considered
4171  * for dumping at all later on.
4172  */
4173  if (PQgetisnull(res, i, i_nspacl) && PQgetisnull(res, i, i_rnspacl) &&
4174  PQgetisnull(res, i, i_initnspacl) &&
4175  PQgetisnull(res, i, i_initrnspacl))
4176  nsinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4177 
4178  if (strlen(nsinfo[i].rolname) == 0)
4179  write_msg(NULL, "WARNING: owner of schema \"%s\" appears to be invalid\n",
4180  nsinfo[i].dobj.name);
4181  }
4182 
4183  PQclear(res);
4184  destroyPQExpBuffer(query);
4185 
4186  *numNamespaces = ntups;
4187 
4188  return nsinfo;
4189 }
4190 
4191 /*
4192  * findNamespace:
4193  * given a namespace OID, look up the info read by getNamespaces
4194  */
4195 static NamespaceInfo *
4196 findNamespace(Archive *fout, Oid nsoid)
4197 {
4198  NamespaceInfo *nsinfo;
4199 
4200  nsinfo = findNamespaceByOid(nsoid);
4201  if (nsinfo == NULL)
4202  exit_horribly(NULL, "schema with OID %u does not exist\n", nsoid);
4203  return nsinfo;
4204 }
4205 
4206 /*
4207  * getExtensions:
4208  * read all extensions in the system catalogs and return them in the
4209  * ExtensionInfo* structure
4210  *
4211  * numExtensions is set to the number of extensions read in
4212  */
4213 ExtensionInfo *
4214 getExtensions(Archive *fout, int *numExtensions)
4215 {
4216  DumpOptions *dopt = fout->dopt;
4217  PGresult *res;
4218  int ntups;
4219  int i;
4220  PQExpBuffer query;
4221  ExtensionInfo *extinfo;
4222  int i_tableoid;
4223  int i_oid;
4224  int i_extname;
4225  int i_nspname;
4226  int i_extrelocatable;
4227  int i_extversion;
4228  int i_extconfig;
4229  int i_extcondition;
4230 
4231  /*
4232  * Before 9.1, there are no extensions.
4233  */
4234  if (fout->remoteVersion < 90100)
4235  {
4236  *numExtensions = 0;
4237  return NULL;
4238  }
4239 
4240  query = createPQExpBuffer();
4241 
4242  /* Make sure we are in proper schema */
4243  selectSourceSchema(fout, "pg_catalog");
4244 
4245  appendPQExpBufferStr(query, "SELECT x.tableoid, x.oid, "
4246  "x.extname, n.nspname, x.extrelocatable, x.extversion, x.extconfig, x.extcondition "
4247  "FROM pg_extension x "
4248  "JOIN pg_namespace n ON n.oid = x.extnamespace");
4249 
4250  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4251 
4252  ntups = PQntuples(res);
4253 
4254  extinfo = (ExtensionInfo *) pg_malloc(ntups * sizeof(ExtensionInfo));
4255 
4256  i_tableoid = PQfnumber(res, "tableoid");
4257  i_oid = PQfnumber(res, "oid");
4258  i_extname = PQfnumber(res, "extname");
4259  i_nspname = PQfnumber(res, "nspname");
4260  i_extrelocatable = PQfnumber(res, "extrelocatable");
4261  i_extversion = PQfnumber(res, "extversion");
4262  i_extconfig = PQfnumber(res, "extconfig");
4263  i_extcondition = PQfnumber(res, "extcondition");
4264 
4265  for (i = 0; i < ntups; i++)
4266  {
4267  extinfo[i].dobj.objType = DO_EXTENSION;
4268  extinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4269  extinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4270  AssignDumpId(&extinfo[i].dobj);
4271  extinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_extname));
4272  extinfo[i].namespace = pg_strdup(PQgetvalue(res, i, i_nspname));
4273  extinfo[i].relocatable = *(PQgetvalue(res, i, i_extrelocatable)) == 't';
4274  extinfo[i].extversion = pg_strdup(PQgetvalue(res, i, i_extversion));
4275  extinfo[i].extconfig = pg_strdup(PQgetvalue(res, i, i_extconfig));
4276  extinfo[i].extcondition = pg_strdup(PQgetvalue(res, i, i_extcondition));
4277 
4278  /* Decide whether we want to dump it */
4279  selectDumpableExtension(&(extinfo[i]), dopt);
4280  }
4281 
4282  PQclear(res);
4283  destroyPQExpBuffer(query);
4284 
4285  *numExtensions = ntups;
4286 
4287  return extinfo;
4288 }
4289 
4290 /*
4291  * getTypes:
4292  * read all types in the system catalogs and return them in the
4293  * TypeInfo* structure
4294  *
4295  * numTypes is set to the number of types read in
4296  *
4297  * NB: this must run after getFuncs() because we assume we can do
4298  * findFuncByOid().
4299  */
4300 TypeInfo *
4301 getTypes(Archive *fout, int *numTypes)
4302 {
4303  DumpOptions *dopt = fout->dopt;
4304  PGresult *res;
4305  int ntups;
4306  int i;
4307  PQExpBuffer query = createPQExpBuffer();
4308  TypeInfo *tyinfo;
4309  ShellTypeInfo *stinfo;
4310  int i_tableoid;
4311  int i_oid;
4312  int i_typname;
4313  int i_typnamespace;
4314  int i_typacl;
4315  int i_rtypacl;
4316  int i_inittypacl;
4317  int i_initrtypacl;
4318  int i_rolname;
4319  int i_typelem;
4320  int i_typrelid;
4321  int i_typrelkind;
4322  int i_typtype;
4323  int i_typisdefined;
4324  int i_isarray;
4325 
4326  /*
4327  * we include even the built-in types because those may be used as array
4328  * elements by user-defined types
4329  *
4330  * we filter out the built-in types when we dump out the types
4331  *
4332  * same approach for undefined (shell) types and array types
4333  *
4334  * Note: as of 8.3 we can reliably detect whether a type is an
4335  * auto-generated array type by checking the element type's typarray.
4336  * (Before that the test is capable of generating false positives.) We
4337  * still check for name beginning with '_', though, so as to avoid the
4338  * cost of the subselect probe for all standard types. This would have to
4339  * be revisited if the backend ever allows renaming of array types.
4340  */
4341 
4342  /* Make sure we are in proper schema */
4343  selectSourceSchema(fout, "pg_catalog");
4344 
4345  if (fout->remoteVersion >= 90600)
4346  {
4347  PQExpBuffer acl_subquery = createPQExpBuffer();
4348  PQExpBuffer racl_subquery = createPQExpBuffer();
4349  PQExpBuffer initacl_subquery = createPQExpBuffer();
4350  PQExpBuffer initracl_subquery = createPQExpBuffer();
4351 
4352  buildACLQueries(acl_subquery, racl_subquery, initacl_subquery,
4353  initracl_subquery, "t.typacl", "t.typowner", "'T'",
4354  dopt->binary_upgrade);
4355 
4356  appendPQExpBuffer(query, "SELECT t.tableoid, t.oid, t.typname, "
4357  "t.typnamespace, "
4358  "%s AS typacl, "
4359  "%s AS rtypacl, "
4360  "%s AS inittypacl, "
4361  "%s AS initrtypacl, "
4362  "(%s t.typowner) AS rolname, "
4363  "t.typelem, t.typrelid, "
4364  "CASE WHEN t.typrelid = 0 THEN ' '::\"char\" "
4365  "ELSE (SELECT relkind FROM pg_class WHERE oid = t.typrelid) END AS typrelkind, "
4366  "t.typtype, t.typisdefined, "
4367  "t.typname[0] = '_' AND t.typelem != 0 AND "
4368  "(SELECT typarray FROM pg_type te WHERE oid = t.typelem) = t.oid AS isarray "
4369  "FROM pg_type t "
4370  "LEFT JOIN pg_init_privs pip ON "
4371  "(t.oid = pip.objoid "
4372  "AND pip.classoid = 'pg_type'::regclass "
4373  "AND pip.objsubid = 0) ",
4374  acl_subquery->data,
4375  racl_subquery->data,
4376  initacl_subquery->data,
4377  initracl_subquery->data,
4378  username_subquery);
4379 
4380  destroyPQExpBuffer(acl_subquery);
4381  destroyPQExpBuffer(racl_subquery);
4382  destroyPQExpBuffer(initacl_subquery);
4383  destroyPQExpBuffer(initracl_subquery);
4384  }
4385  else if (fout->remoteVersion >= 90200)
4386  {
4387  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4388  "typnamespace, typacl, NULL as rtypacl, "
4389  "NULL AS inittypacl, NULL AS initrtypacl, "
4390  "(%s typowner) AS rolname, "
4391  "typelem, typrelid, "
4392  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4393  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4394  "typtype, typisdefined, "
4395  "typname[0] = '_' AND typelem != 0 AND "
4396  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
4397  "FROM pg_type",
4398  username_subquery);
4399  }
4400  else if (fout->remoteVersion >= 80300)
4401  {
4402  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4403  "typnamespace, NULL AS typacl, NULL as rtypacl, "
4404  "NULL AS inittypacl, NULL AS initrtypacl, "
4405  "(%s typowner) AS rolname, "
4406  "typelem, typrelid, "
4407  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4408  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4409  "typtype, typisdefined, "
4410  "typname[0] = '_' AND typelem != 0 AND "
4411  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
4412  "FROM pg_type",
4413  username_subquery);
4414  }
4415  else
4416  {
4417  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4418  "typnamespace, NULL AS typacl, NULL as rtypacl, "
4419  "NULL AS inittypacl, NULL AS initrtypacl, "
4420  "(%s typowner) AS rolname, "
4421  "typelem, typrelid, "
4422  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4423  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4424  "typtype, typisdefined, "
4425  "typname[0] = '_' AND typelem != 0 AS isarray "
4426  "FROM pg_type",
4427  username_subquery);
4428  }
4429 
4430  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4431 
4432  ntups = PQntuples(res);
4433 
4434  tyinfo = (TypeInfo *) pg_malloc(ntups * sizeof(TypeInfo));
4435 
4436  i_tableoid = PQfnumber(res, "tableoid");
4437  i_oid = PQfnumber(res, "oid");
4438  i_typname = PQfnumber(res, "typname");
4439  i_typnamespace = PQfnumber(res, "typnamespace");
4440  i_typacl = PQfnumber(res, "typacl");
4441  i_rtypacl = PQfnumber(res, "rtypacl");
4442  i_inittypacl = PQfnumber(res, "inittypacl");
4443  i_initrtypacl = PQfnumber(res, "initrtypacl");
4444  i_rolname = PQfnumber(res, "rolname");
4445  i_typelem = PQfnumber(res, "typelem");
4446  i_typrelid = PQfnumber(res, "typrelid");
4447  i_typrelkind = PQfnumber(res, "typrelkind");
4448  i_typtype = PQfnumber(res, "typtype");
4449  i_typisdefined = PQfnumber(res, "typisdefined");
4450  i_isarray = PQfnumber(res, "isarray");
4451 
4452  for (i = 0; i < ntups; i++)
4453  {
4454  tyinfo[i].dobj.objType = DO_TYPE;
4455  tyinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4456  tyinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4457  AssignDumpId(&tyinfo[i].dobj);
4458  tyinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_typname));
4459  tyinfo[i].dobj.namespace =
4460  findNamespace(fout,
4461  atooid(PQgetvalue(res, i, i_typnamespace)));
4462  tyinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4463  tyinfo[i].typacl = pg_strdup(PQgetvalue(res, i, i_typacl));
4464  tyinfo[i].rtypacl = pg_strdup(PQgetvalue(res, i, i_rtypacl));
4465  tyinfo[i].inittypacl = pg_strdup(PQgetvalue(res, i, i_inittypacl));
4466  tyinfo[i].initrtypacl = pg_strdup(PQgetvalue(res, i, i_initrtypacl));
4467  tyinfo[i].typelem = atooid(PQgetvalue(res, i, i_typelem));
4468  tyinfo[i].typrelid = atooid(PQgetvalue(res, i, i_typrelid));
4469  tyinfo[i].typrelkind = *PQgetvalue(res, i, i_typrelkind);
4470  tyinfo[i].typtype = *PQgetvalue(res, i, i_typtype);
4471  tyinfo[i].shellType = NULL;
4472 
4473  if (strcmp(PQgetvalue(res, i, i_typisdefined), "t") == 0)
4474  tyinfo[i].isDefined = true;
4475  else
4476  tyinfo[i].isDefined = false;
4477 
4478  if (strcmp(PQgetvalue(res, i, i_isarray), "t") == 0)
4479  tyinfo[i].isArray = true;
4480  else
4481  tyinfo[i].isArray = false;
4482 
4483  /* Decide whether we want to dump it */
4484  selectDumpableType(&tyinfo[i], fout);
4485 
4486  /* Do not try to dump ACL if no ACL exists. */
4487  if (PQgetisnull(res, i, i_typacl) && PQgetisnull(res, i, i_rtypacl) &&
4488  PQgetisnull(res, i, i_inittypacl) &&
4489  PQgetisnull(res, i, i_initrtypacl))
4490  tyinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4491 
4492  /*
4493  * If it's a domain, fetch info about its constraints, if any
4494  */
4495  tyinfo[i].nDomChecks = 0;
4496  tyinfo[i].domChecks = NULL;
4497  if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
4498  tyinfo[i].typtype == TYPTYPE_DOMAIN)
4499  getDomainConstraints(fout, &(tyinfo[i]));
4500 
4501  /*
4502  * If it's a base type, make a DumpableObject representing a shell
4503  * definition of the type. We will need to dump that ahead of the I/O
4504  * functions for the type. Similarly, range types need a shell
4505  * definition in case they have a canonicalize function.
4506  *
4507  * Note: the shell type doesn't have a catId. You might think it
4508  * should copy the base type's catId, but then it might capture the
4509  * pg_depend entries for the type, which we don't want.
4510  */
4511  if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
4512  (tyinfo[i].typtype == TYPTYPE_BASE ||
4513  tyinfo[i].typtype == TYPTYPE_RANGE))
4514  {
4515  stinfo = (ShellTypeInfo *) pg_malloc(sizeof(ShellTypeInfo));
4516  stinfo->dobj.objType = DO_SHELL_TYPE;
4517  stinfo->dobj.catId = nilCatalogId;
4518  AssignDumpId(&stinfo->dobj);
4519  stinfo->dobj.name = pg_strdup(tyinfo[i].dobj.name);
4520  stinfo->dobj.namespace = tyinfo[i].dobj.namespace;
4521  stinfo->baseType = &(tyinfo[i]);
4522  tyinfo[i].shellType = stinfo;
4523 
4524  /*
4525  * Initially mark the shell type as not to be dumped. We'll only
4526  * dump it if the I/O or canonicalize functions need to be dumped;
4527  * this is taken care of while sorting dependencies.
4528  */
4529  stinfo->dobj.dump = DUMP_COMPONENT_NONE;
4530  }
4531 
4532  if (strlen(tyinfo[i].rolname) == 0)
4533  write_msg(NULL, "WARNING: owner of data type \"%s\" appears to be invalid\n",
4534  tyinfo[i].dobj.name);
4535  }
4536 
4537  *numTypes = ntups;
4538 
4539  PQclear(res);
4540 
4541  destroyPQExpBuffer(query);
4542 
4543  return tyinfo;
4544 }
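/*
 * Illustrative aside (not part of pg_dump.c): the isarray test built above
 * can be run standalone against a 9.6+ server roughly as follows, mirroring
 * the expression in the first branch:
 *
 *   SELECT t.oid, t.typname,
 *          t.typname[0] = '_' AND t.typelem != 0 AND
 *          (SELECT typarray FROM pg_type te WHERE te.oid = t.typelem) = t.oid AS isarray
 *   FROM pg_type t;
 *
 * An auto-generated array type such as _int4 passes all three tests; a type
 * whose name merely begins with '_' fails one of the other two tests.
 */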
4545 
4546 /*
4547  * getOperators:
4548  * read all operators in the system catalogs and return them in the
4549  * OprInfo* structure
4550  *
4551  * numOprs is set to the number of operators read in
4552  */
4553 OprInfo *
4554 getOperators(Archive *fout, int *numOprs)
4555 {
4556  PGresult *res;
4557  int ntups;
4558  int i;
4559  PQExpBuffer query = createPQExpBuffer();
4560  OprInfo *oprinfo;
4561  int i_tableoid;
4562  int i_oid;
4563  int i_oprname;
4564  int i_oprnamespace;
4565  int i_rolname;
4566  int i_oprkind;
4567  int i_oprcode;
4568 
4569  /*
4570  * find all operators, including builtin operators; we filter out
4571  * system-defined operators at dump-out time.
4572  */
4573 
4574  /* Make sure we are in proper schema */
4575  selectSourceSchema(fout, "pg_catalog");
4576 
4577  appendPQExpBuffer(query, "SELECT tableoid, oid, oprname, "
4578  "oprnamespace, "
4579  "(%s oprowner) AS rolname, "
4580  "oprkind, "
4581  "oprcode::oid AS oprcode "
4582  "FROM pg_operator",
4583  username_subquery);
4584 
4585  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4586 
4587  ntups = PQntuples(res);
4588  *numOprs = ntups;
4589 
4590  oprinfo = (OprInfo *) pg_malloc(ntups * sizeof(OprInfo));
4591 
4592  i_tableoid = PQfnumber(res, "tableoid");
4593  i_oid = PQfnumber(res, "oid");
4594  i_oprname = PQfnumber(res, "oprname");
4595  i_oprnamespace = PQfnumber(res, "oprnamespace");
4596  i_rolname = PQfnumber(res, "rolname");
4597  i_oprkind = PQfnumber(res, "oprkind");
4598  i_oprcode = PQfnumber(res, "oprcode");
4599 
4600  for (i = 0; i < ntups; i++)
4601  {
4602  oprinfo[i].dobj.objType = DO_OPERATOR;
4603  oprinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4604  oprinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4605  AssignDumpId(&oprinfo[i].dobj);
4606  oprinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oprname));
4607  oprinfo[i].dobj.namespace =
4608  findNamespace(fout,
4609  atooid(PQgetvalue(res, i, i_oprnamespace)));
4610  oprinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4611  oprinfo[i].oprkind = (PQgetvalue(res, i, i_oprkind))[0];
4612  oprinfo[i].oprcode = atooid(PQgetvalue(res, i, i_oprcode));
4613 
4614  /* Decide whether we want to dump it */
4615  selectDumpableObject(&(oprinfo[i].dobj), fout);
4616 
4617  /* Operators do not currently have ACLs. */
4618  oprinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4619 
4620  if (strlen(oprinfo[i].rolname) == 0)
4621  write_msg(NULL, "WARNING: owner of operator \"%s\" appears to be invalid\n",
4622  oprinfo[i].dobj.name);
4623  }
4624 
4625  PQclear(res);
4626 
4627  destroyPQExpBuffer(query);
4628 
4629  return oprinfo;
4630 }
4631 
4632 /*
4633  * getCollations:
4634  * read all collations in the system catalogs and return them in the
4635  * CollInfo* structure
4636  *
4637  * numCollations is set to the number of collations read in
4638  */
4639 CollInfo *
4640 getCollations(Archive *fout, int *numCollations)
4641 {
4642  PGresult *res;
4643  int ntups;
4644  int i;
4645  PQExpBuffer query;
4646  CollInfo *collinfo;
4647  int i_tableoid;
4648  int i_oid;
4649  int i_collname;
4650  int i_collnamespace;
4651  int i_rolname;
4652 
4653  /* Collations didn't exist pre-9.1 */
4654  if (fout->remoteVersion < 90100)
4655  {
4656  *numCollations = 0;
4657  return NULL;
4658  }
4659 
4660  query = createPQExpBuffer();
4661 
4662  /*
4663  * find all collations, including builtin collations; we filter out
4664  * system-defined collations at dump-out time.
4665  */
4666 
4667  /* Make sure we are in proper schema */
4668  selectSourceSchema(fout, "pg_catalog");
4669 
4670  appendPQExpBuffer(query, "SELECT tableoid, oid, collname, "
4671  "collnamespace, "
4672  "(%s collowner) AS rolname "
4673  "FROM pg_collation",
4674  username_subquery);
4675 
4676  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4677 
4678  ntups = PQntuples(res);
4679  *numCollations = ntups;
4680 
4681  collinfo = (CollInfo *) pg_malloc(ntups * sizeof(CollInfo));
4682 
4683  i_tableoid = PQfnumber(res, "tableoid");
4684  i_oid = PQfnumber(res, "oid");
4685  i_collname = PQfnumber(res, "collname");
4686  i_collnamespace = PQfnumber(res, "collnamespace");
4687  i_rolname = PQfnumber(res, "rolname");
4688 
4689  for (i = 0; i < ntups; i++)
4690  {
4691  collinfo[i].dobj.objType = DO_COLLATION;
4692  collinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4693  collinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4694  AssignDumpId(&collinfo[i].dobj);
4695  collinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_collname));
4696  collinfo[i].dobj.namespace =
4697  findNamespace(fout,
4698  atooid(PQgetvalue(res, i, i_collnamespace)));
4699  collinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4700 
4701  /* Decide whether we want to dump it */
4702  selectDumpableObject(&(collinfo[i].dobj), fout);
4703 
4704  /* Collations do not currently have ACLs. */
4705  collinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4706  }
4707 
4708  PQclear(res);
4709 
4710  destroyPQExpBuffer(query);
4711 
4712  return collinfo;
4713 }
4714 
4715 /*
4716  * getConversions:
4717  * read all conversions in the system catalogs and return them in the
4718  * ConvInfo* structure
4719  *
4720  * numConversions is set to the number of conversions read in
4721  */
4722 ConvInfo *
4723 getConversions(Archive *fout, int *numConversions)
4724 {
4725  PGresult *res;
4726  int ntups;
4727  int i;
4728  PQExpBuffer query;
4729  ConvInfo *convinfo;
4730  int i_tableoid;
4731  int i_oid;
4732  int i_conname;
4733  int i_connamespace;
4734  int i_rolname;
4735 
4736  query = createPQExpBuffer();
4737 
4738  /*
4739  * find all conversions, including builtin conversions; we filter out
4740  * system-defined conversions at dump-out time.
4741  */
4742 
4743  /* Make sure we are in proper schema */
4744  selectSourceSchema(fout, "pg_catalog");
4745 
4746  appendPQExpBuffer(query, "SELECT tableoid, oid, conname, "
4747  "connamespace, "
4748  "(%s conowner) AS rolname "
4749  "FROM pg_conversion",
4750  username_subquery);
4751 
4752  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4753 
4754  ntups = PQntuples(res);
4755  *numConversions = ntups;
4756 
4757  convinfo = (ConvInfo *) pg_malloc(ntups * sizeof(ConvInfo));
4758 
4759  i_tableoid = PQfnumber(res, "tableoid");
4760  i_oid = PQfnumber(res, "oid");
4761  i_conname = PQfnumber(res, "conname");
4762  i_connamespace = PQfnumber(res, "connamespace");
4763  i_rolname = PQfnumber(res, "rolname");
4764 
4765  for (i = 0; i < ntups; i++)
4766  {
4767  convinfo[i].dobj.objType = DO_CONVERSION;
4768  convinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4769  convinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4770  AssignDumpId(&convinfo[i].dobj);
4771  convinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_conname));
4772  convinfo[i].dobj.namespace =
4773  findNamespace(fout,
4774  atooid(PQgetvalue(res, i, i_connamespace)));
4775  convinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4776 
4777  /* Decide whether we want to dump it */
4778  selectDumpableObject(&(convinfo[i].dobj), fout);
4779 
4780  /* Conversions do not currently have ACLs. */
4781  convinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4782  }
4783 
4784  PQclear(res);
4785 
4786  destroyPQExpBuffer(query);
4787 
4788  return convinfo;
4789 }
4790 
4791 /*
4792  * getAccessMethods:
4793  * read all user-defined access methods in the system catalogs and return
4794  * them in the AccessMethodInfo* structure
4795  *
4796  * numAccessMethods is set to the number of access methods read in
4797  */
4798 AccessMethodInfo *
4799 getAccessMethods(Archive *fout, int *numAccessMethods)
4800 {
4801  PGresult *res;
4802  int ntups;
4803  int i;
4804  PQExpBuffer query;
4805  AccessMethodInfo *aminfo;
4806  int i_tableoid;
4807  int i_oid;
4808  int i_amname;
4809  int i_amhandler;
4810  int i_amtype;
4811 
4812  /* Before 9.6, there are no user-defined access methods */
4813  if (fout->remoteVersion < 90600)
4814  {
4815  *numAccessMethods = 0;
4816  return NULL;
4817  }
4818 
4819  query = createPQExpBuffer();
4820 
4821  /* Make sure we are in proper schema */
4822  selectSourceSchema(fout, "pg_catalog");
4823 
4824  /* Select all access methods from pg_am table */
4825  appendPQExpBuffer(query, "SELECT tableoid, oid, amname, amtype, "
4826  "amhandler::pg_catalog.regproc AS amhandler "
4827  "FROM pg_am");
4828 
4829  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4830 
4831  ntups = PQntuples(res);
4832  *numAccessMethods = ntups;
4833 
4834  aminfo = (AccessMethodInfo *) pg_malloc(ntups * sizeof(AccessMethodInfo));
4835 
4836  i_tableoid = PQfnumber(res, "tableoid");
4837  i_oid = PQfnumber(res, "oid");
4838  i_amname = PQfnumber(res, "amname");
4839  i_amhandler = PQfnumber(res, "amhandler");
4840  i_amtype = PQfnumber(res, "amtype");
4841 
4842  for (i = 0; i < ntups; i++)
4843  {
4844  aminfo[i].dobj.objType = DO_ACCESS_METHOD;
4845  aminfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4846  aminfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4847  AssignDumpId(&aminfo[i].dobj);
4848  aminfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_amname));
4849  aminfo[i].dobj.namespace = NULL;
4850  aminfo[i].amhandler = pg_strdup(PQgetvalue(res, i, i_amhandler));
4851  aminfo[i].amtype = *(PQgetvalue(res, i, i_amtype));
4852 
4853  /* Decide whether we want to dump it */
4854  selectDumpableAccessMethod(&(aminfo[i]), fout);
4855 
4856  /* Access methods do not currently have ACLs. */
4857  aminfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4858  }
4859 
4860  PQclear(res);
4861 
4862  destroyPQExpBuffer(query);
4863 
4864  return aminfo;
4865 }
4866 
4867 
4868 /*
4869  * getOpclasses:
4870  * read all opclasses in the system catalogs and return them in the
4871  * OpclassInfo* structure
4872  *
4873  * numOpclasses is set to the number of opclasses read in
4874  */
4875 OpclassInfo *
4876 getOpclasses(Archive *fout, int *numOpclasses)
4877 {
4878  PGresult *res;
4879  int ntups;
4880  int i;
4881  PQExpBuffer query = createPQExpBuffer();
4882  OpclassInfo *opcinfo;
4883  int i_tableoid;
4884  int i_oid;
4885  int i_opcname;
4886  int i_opcnamespace;
4887  int i_rolname;
4888 
4889  /*
4890  * find all opclasses, including builtin opclasses; we filter out
4891  * system-defined opclasses at dump-out time.
4892  */
4893 
4894  /* Make sure we are in proper schema */
4895  selectSourceSchema(fout, "pg_catalog");
4896 
4897  appendPQExpBuffer(query, "SELECT tableoid, oid, opcname, "
4898  "opcnamespace, "
4899  "(%s opcowner) AS rolname "
4900  "FROM pg_opclass",
4902 
4903  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4904 
4905  ntups = PQntuples(res);
4906  *numOpclasses = ntups;
4907 
4908  opcinfo = (OpclassInfo *) pg_malloc(ntups * sizeof(OpclassInfo));
4909 
4910  i_tableoid = PQfnumber(res, "tableoid");
4911  i_oid = PQfnumber(res, "oid");
4912  i_opcname = PQfnumber(res, "opcname");
4913  i_opcnamespace = PQfnumber(res, "opcnamespace");
4914  i_rolname = PQfnumber(res, "rolname");
4915 
4916  for (i = 0; i < ntups; i++)
4917  {
4918  opcinfo[i].dobj.objType = DO_OPCLASS;
4919  opcinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4920  opcinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4921  AssignDumpId(&opcinfo[i].dobj);
4922  opcinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opcname));
4923  opcinfo[i].dobj.namespace =
4924  findNamespace(fout,
4925  atooid(PQgetvalue(res, i, i_opcnamespace)));
4926  opcinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4927 
4928  /* Decide whether we want to dump it */
4929  selectDumpableObject(&(opcinfo[i].dobj), fout);
4930 
4931  /* Op Classes do not currently have ACLs. */
4932  opcinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4933 
4934  if (strlen(opcinfo[i].rolname) == 0)
4935  write_msg(NULL, "WARNING: owner of operator class \"%s\" appears to be invalid\n",
4936  opcinfo[i].dobj.name);
4937  }
4938 
4939  PQclear(res);
4940 
4941  destroyPQExpBuffer(query);
4942 
4943  return opcinfo;
4944 }
4945 
4946 /*
4947  * getOpfamilies:
4948  * read all opfamilies in the system catalogs and return them in the
4949  * OpfamilyInfo* structure
4950  *
4951  * numOpfamilies is set to the number of opfamilies read in
4952  */
4953 OpfamilyInfo *
4954 getOpfamilies(Archive *fout, int *numOpfamilies)
4955 {
4956  PGresult *res;
4957  int ntups;
4958  int i;
4959  PQExpBuffer query;
4960  OpfamilyInfo *opfinfo;
4961  int i_tableoid;
4962  int i_oid;
4963  int i_opfname;
4964  int i_opfnamespace;
4965  int i_rolname;
4966 
4967  /* Before 8.3, there is no separate concept of opfamilies */
4968  if (fout->remoteVersion < 80300)
4969  {
4970  *numOpfamilies = 0;
4971  return NULL;
4972  }
4973 
4974  query = createPQExpBuffer();
4975 
4976  /*
4977  * find all opfamilies, including builtin opfamilies; we filter out
4978  * system-defined opfamilies at dump-out time.
4979  */
4980 
4981  /* Make sure we are in proper schema */
4982  selectSourceSchema(fout, "pg_catalog");
4983 
4984  appendPQExpBuffer(query, "SELECT tableoid, oid, opfname, "
4985  "opfnamespace, "
4986  "(%s opfowner) AS rolname "
4987  "FROM pg_opfamily",
4988  username_subquery);
4989 
4990  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4991 
4992  ntups = PQntuples(res);
4993  *numOpfamilies = ntups;
4994 
4995  opfinfo = (OpfamilyInfo *) pg_malloc(ntups * sizeof(OpfamilyInfo));
4996 
4997  i_tableoid = PQfnumber(res, "tableoid");
4998  i_oid = PQfnumber(res, "oid");
4999  i_opfname = PQfnumber(res, "opfname");
5000  i_opfnamespace = PQfnumber(res, "opfnamespace");
5001  i_rolname = PQfnumber(res, "rolname");
5002 
5003  for (i = 0; i < ntups; i++)
5004  {
5005  opfinfo[i].dobj.objType = DO_OPFAMILY;
5006  opfinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5007  opfinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5008  AssignDumpId(&opfinfo[i].dobj);
5009  opfinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opfname));
5010  opfinfo[i].dobj.namespace =
5011  findNamespace(fout,
5012  atooid(PQgetvalue(res, i, i_opfnamespace)));
5013  opfinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5014 
5015  /* Decide whether we want to dump it */
5016  selectDumpableObject(&(opfinfo[i].dobj), fout);
5017 
5018  /* Operator families do not currently have ACLs. */
5019  opfinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5020 
5021  if (strlen(opfinfo[i].rolname) == 0)
5022  write_msg(NULL, "WARNING: owner of operator family \"%s\" appears to be invalid\n",
5023  opfinfo[i].dobj.name);
5024  }
5025 
5026  PQclear(res);
5027 
5028  destroyPQExpBuffer(query);
5029 
5030  return opfinfo;
5031 }
5032 
5033 /*
5034  * getAggregates:
5035  * read all the user-defined aggregates in the system catalogs and
5036  * return them in the AggInfo* structure
5037  *
5038  * numAggs is set to the number of aggregates read in
5039  */
5040 AggInfo *
5041 getAggregates(Archive *fout, int *numAggs)
5042 {
5043  DumpOptions *dopt = fout->dopt;
5044  PGresult *res;
5045  int ntups;
5046  int i;
5047  PQExpBuffer query = createPQExpBuffer();
5048  AggInfo *agginfo;
5049  int i_tableoid;
5050  int i_oid;
5051  int i_aggname;
5052  int i_aggnamespace;
5053  int i_pronargs;
5054  int i_proargtypes;
5055  int i_rolname;
5056  int i_aggacl;
5057  int i_raggacl;
5058  int i_initaggacl;
5059  int i_initraggacl;
5060 
5061  /* Make sure we are in proper schema */
5062  selectSourceSchema(fout, "pg_catalog");
5063 
5064  /*
5065  * Find all interesting aggregates. See comment in getFuncs() for the
5066  * rationale behind the filtering logic.
5067  */
5068  if (fout->remoteVersion >= 90600)
5069  {
5070  PQExpBuffer acl_subquery = createPQExpBuffer();
5071  PQExpBuffer racl_subquery = createPQExpBuffer();
5072  PQExpBuffer initacl_subquery = createPQExpBuffer();
5073  PQExpBuffer initracl_subquery = createPQExpBuffer();
5074 
5075  buildACLQueries(acl_subquery, racl_subquery, initacl_subquery,
5076  initracl_subquery, "p.proacl", "p.proowner", "'f'",
5077  dopt->binary_upgrade);
5078 
5079  appendPQExpBuffer(query, "SELECT p.tableoid, p.oid, "
5080  "p.proname AS aggname, "
5081  "p.pronamespace AS aggnamespace, "
5082  "p.pronargs, p.proargtypes, "
5083  "(%s p.proowner) AS rolname, "
5084  "%s AS aggacl, "
5085  "%s AS raggacl, "
5086  "%s AS initaggacl, "
5087  "%s AS initraggacl "
5088  "FROM pg_proc p "
5089  "LEFT JOIN pg_init_privs pip ON "
5090  "(p.oid = pip.objoid "
5091  "AND pip.classoid = 'pg_proc'::regclass "
5092  "AND pip.objsubid = 0) "
5093  "WHERE p.proisagg AND ("
5094  "p.pronamespace != "
5095  "(SELECT oid FROM pg_namespace "
5096  "WHERE nspname = 'pg_catalog') OR "
5097  "p.proacl IS DISTINCT FROM pip.initprivs",
5098  username_subquery,
5099  acl_subquery->data,
5100  racl_subquery->data,
5101  initacl_subquery->data,
5102  initracl_subquery->data);
5103  if (dopt->binary_upgrade)
5104  appendPQExpBufferStr(query,
5105  " OR EXISTS(SELECT 1 FROM pg_depend WHERE "
5106  "classid = 'pg_proc'::regclass AND "
5107  "objid = p.oid AND "
5108  "refclassid = 'pg_extension'::regclass AND "
5109  "deptype = 'e')");
5110  appendPQExpBufferChar(query, ')');
5111 
5112  destroyPQExpBuffer(acl_subquery);
5113  destroyPQExpBuffer(racl_subquery);
5114  destroyPQExpBuffer(initacl_subquery);
5115  destroyPQExpBuffer(initracl_subquery);
5116  }
5117  else if (fout->remoteVersion >= 80200)
5118  {
5119  appendPQExpBuffer(query, "SELECT tableoid, oid, proname AS aggname, "
5120  "pronamespace AS aggnamespace, "
5121  "pronargs, proargtypes, "
5122  "(%s proowner) AS rolname, "
5123  "proacl AS aggacl, "
5124  "NULL AS raggacl, "
5125  "NULL AS initaggacl, NULL AS initraggacl "
5126  "FROM pg_proc p "
5127  "WHERE proisagg AND ("
5128  "pronamespace != "
5129  "(SELECT oid FROM pg_namespace "
5130  "WHERE nspname = 'pg_catalog')",
5131  username_subquery);
5132  if (dopt->binary_upgrade && fout->remoteVersion >= 90100)
5133  appendPQExpBufferStr(query,
5134  " OR EXISTS(SELECT 1 FROM pg_depend WHERE "
5135  "classid = 'pg_proc'::regclass AND "
5136  "objid = p.oid AND "
5137  "refclassid = 'pg_extension'::regclass AND "
5138  "deptype = 'e')");
5139  appendPQExpBufferChar(query, ')');
5140  }
5141  else
5142  {
5143  appendPQExpBuffer(query, "SELECT tableoid, oid, proname AS aggname, "
5144  "pronamespace AS aggnamespace, "
5145  "CASE WHEN proargtypes[0] = 'pg_catalog.\"any\"'::pg_catalog.regtype THEN 0 ELSE 1 END AS pronargs, "
5146  "proargtypes, "