PostgreSQL Source Code  git master
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros
pg_dump.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * pg_dump.c
4  * pg_dump is a utility for dumping out a postgres database
5  * into a script file.
6  *
7  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994, Regents of the University of California
9  *
10  * pg_dump will read the system catalogs in a database and dump out a
11  * script that reproduces the schema in terms of SQL that is understood
12  * by PostgreSQL
13  *
14  * Note that pg_dump runs in a transaction-snapshot mode transaction,
15  * so it sees a consistent snapshot of the database including system
16  * catalogs. However, it relies in part on various specialized backend
17  * functions like pg_get_indexdef(), and those things tend to look at
18  * the currently committed state. So it is possible to get 'cache
19  * lookup failed' error if someone performs DDL changes while a dump is
20  * happening. The window for this sort of thing is from the acquisition
21  * of the transaction snapshot to getSchemaData() (when pg_dump acquires
22  * AccessShareLock on every table it intends to dump). It isn't very large,
23  * but it can happen.
24  *
25  * http://archives.postgresql.org/pgsql-bugs/2010-02/msg00187.php
26  *
27  * IDENTIFICATION
28  * src/bin/pg_dump/pg_dump.c
29  *
30  *-------------------------------------------------------------------------
31  */
32 #include "postgres_fe.h"
33 
34 #include <unistd.h>
35 #include <ctype.h>
36 #ifdef HAVE_TERMIOS_H
37 #include <termios.h>
38 #endif
39 
40 #include "getopt_long.h"
41 
42 #include "access/attnum.h"
43 #include "access/sysattr.h"
44 #include "access/transam.h"
45 #include "catalog/pg_am.h"
46 #include "catalog/pg_attribute.h"
47 #include "catalog/pg_cast.h"
48 #include "catalog/pg_class.h"
49 #include "catalog/pg_default_acl.h"
50 #include "catalog/pg_largeobject.h"
52 #include "catalog/pg_proc.h"
53 #include "catalog/pg_trigger.h"
54 #include "catalog/pg_type.h"
55 #include "libpq/libpq-fs.h"
56 
57 #include "dumputils.h"
58 #include "parallel.h"
59 #include "pg_backup_db.h"
60 #include "pg_backup_utils.h"
61 #include "pg_dump.h"
62 #include "fe_utils/string_utils.h"
63 
64 
65 typedef struct
66 {
67  const char *descr; /* comment for an object */
68  Oid classoid; /* object class (catalog OID) */
69  Oid objoid; /* object OID */
70  int objsubid; /* subobject (table column #) */
71 } CommentItem;
72 
73 typedef struct
74 {
75  const char *provider; /* label provider of this security label */
76  const char *label; /* security label for an object */
77  Oid classoid; /* object class (catalog OID) */
78  Oid objoid; /* object OID */
79  int objsubid; /* subobject (table column #) */
80 } SecLabelItem;
81 
/*
 * Flags controlling how getFormattedTypeName() renders an InvalidOid
 * (zero) type reference.
 *
 * NOTE(review): the extraction of this file had dropped all enumerators
 * except zeroAsAny; the siblings below are restored from upstream
 * pg_dump.c — confirm against the repository copy.
 */
typedef enum OidOptions
{
	zeroAsOpaque = 1,			/* render 0 as "opaque" */
	zeroAsAny = 2,				/* render 0 as "'any'" */
	zeroAsStar = 4,				/* render 0 as "*" */
	useBaseTypeName = 1024		/* use base type name for domains */
} OidOptions;
89 
90 /* global decls */
91 bool g_verbose; /* User wants verbose narration of our
92  * activities. */
93 static bool dosync = true; /* Issue fsync() to make dump durable on disk. */
94 
95 /* subquery used to convert user ID (eg, datdba) to user name */
96 static const char *username_subquery;
97 
98 /*
99  * For 8.0 and earlier servers, pulled from pg_database, for 8.1+ we use
100  * FirstNormalObjectId - 1.
101  */
102 static Oid g_last_builtin_oid; /* value of the last builtin oid */
103 
104 /* The specified names/patterns should to match at least one entity */
105 static int strict_names = 0;
106 
107 /*
108  * Object inclusion/exclusion lists
109  *
110  * The string lists record the patterns given by command-line switches,
111  * which we then convert to lists of OIDs of matching objects.
112  */
117 
124 
125 
126 char g_opaque_type[10]; /* name for the opaque type */
127 
128 /* placeholders for the delimiters for comments */
130 char g_comment_end[10];
131 
132 static const CatalogId nilCatalogId = {0, 0};
133 
134 static void help(const char *progname);
135 static void setup_connection(Archive *AH,
136  const char *dumpencoding, const char *dumpsnapshot,
137  char *use_role);
138 static ArchiveFormat parseArchiveFormat(const char *format, ArchiveMode *mode);
139 static void expand_schema_name_patterns(Archive *fout,
140  SimpleStringList *patterns,
141  SimpleOidList *oids,
142  bool strict_names);
143 static void expand_table_name_patterns(Archive *fout,
144  SimpleStringList *patterns,
145  SimpleOidList *oids,
146  bool strict_names);
147 static NamespaceInfo *findNamespace(Archive *fout, Oid nsoid);
148 static void dumpTableData(Archive *fout, TableDataInfo *tdinfo);
149 static void refreshMatViewData(Archive *fout, TableDataInfo *tdinfo);
150 static void guessConstraintInheritance(TableInfo *tblinfo, int numTables);
151 static void dumpComment(Archive *fout, const char *target,
152  const char *namespace, const char *owner,
153  CatalogId catalogId, int subid, DumpId dumpId);
154 static int findComments(Archive *fout, Oid classoid, Oid objoid,
155  CommentItem **items);
156 static int collectComments(Archive *fout, CommentItem **items);
157 static void dumpSecLabel(Archive *fout, const char *target,
158  const char *namespace, const char *owner,
159  CatalogId catalogId, int subid, DumpId dumpId);
160 static int findSecLabels(Archive *fout, Oid classoid, Oid objoid,
161  SecLabelItem **items);
162 static int collectSecLabels(Archive *fout, SecLabelItem **items);
163 static void dumpDumpableObject(Archive *fout, DumpableObject *dobj);
164 static void dumpNamespace(Archive *fout, NamespaceInfo *nspinfo);
165 static void dumpExtension(Archive *fout, ExtensionInfo *extinfo);
166 static void dumpType(Archive *fout, TypeInfo *tyinfo);
167 static void dumpBaseType(Archive *fout, TypeInfo *tyinfo);
168 static void dumpEnumType(Archive *fout, TypeInfo *tyinfo);
169 static void dumpRangeType(Archive *fout, TypeInfo *tyinfo);
170 static void dumpUndefinedType(Archive *fout, TypeInfo *tyinfo);
171 static void dumpDomain(Archive *fout, TypeInfo *tyinfo);
172 static void dumpCompositeType(Archive *fout, TypeInfo *tyinfo);
173 static void dumpCompositeTypeColComments(Archive *fout, TypeInfo *tyinfo);
174 static void dumpShellType(Archive *fout, ShellTypeInfo *stinfo);
175 static void dumpProcLang(Archive *fout, ProcLangInfo *plang);
176 static void dumpFunc(Archive *fout, FuncInfo *finfo);
177 static void dumpCast(Archive *fout, CastInfo *cast);
178 static void dumpTransform(Archive *fout, TransformInfo *transform);
179 static void dumpOpr(Archive *fout, OprInfo *oprinfo);
180 static void dumpAccessMethod(Archive *fout, AccessMethodInfo *oprinfo);
181 static void dumpOpclass(Archive *fout, OpclassInfo *opcinfo);
182 static void dumpOpfamily(Archive *fout, OpfamilyInfo *opfinfo);
183 static void dumpCollation(Archive *fout, CollInfo *collinfo);
184 static void dumpConversion(Archive *fout, ConvInfo *convinfo);
185 static void dumpRule(Archive *fout, RuleInfo *rinfo);
186 static void dumpAgg(Archive *fout, AggInfo *agginfo);
187 static void dumpTrigger(Archive *fout, TriggerInfo *tginfo);
188 static void dumpEventTrigger(Archive *fout, EventTriggerInfo *evtinfo);
189 static void dumpTable(Archive *fout, TableInfo *tbinfo);
190 static void dumpTableSchema(Archive *fout, TableInfo *tbinfo);
191 static void dumpAttrDef(Archive *fout, AttrDefInfo *adinfo);
192 static void dumpSequence(Archive *fout, TableInfo *tbinfo);
193 static void dumpSequenceData(Archive *fout, TableDataInfo *tdinfo);
194 static void dumpIndex(Archive *fout, IndxInfo *indxinfo);
195 static void dumpStatisticsExt(Archive *fout, StatsExtInfo *statsextinfo);
196 static void dumpConstraint(Archive *fout, ConstraintInfo *coninfo);
197 static void dumpTableConstraintComment(Archive *fout, ConstraintInfo *coninfo);
198 static void dumpTSParser(Archive *fout, TSParserInfo *prsinfo);
199 static void dumpTSDictionary(Archive *fout, TSDictInfo *dictinfo);
200 static void dumpTSTemplate(Archive *fout, TSTemplateInfo *tmplinfo);
201 static void dumpTSConfig(Archive *fout, TSConfigInfo *cfginfo);
202 static void dumpForeignDataWrapper(Archive *fout, FdwInfo *fdwinfo);
203 static void dumpForeignServer(Archive *fout, ForeignServerInfo *srvinfo);
204 static void dumpUserMappings(Archive *fout,
205  const char *servername, const char *namespace,
206  const char *owner, CatalogId catalogId, DumpId dumpId);
207 static void dumpDefaultACL(Archive *fout, DefaultACLInfo *daclinfo);
208 
209 static void dumpACL(Archive *fout, CatalogId objCatId, DumpId objDumpId,
210  const char *type, const char *name, const char *subname,
211  const char *tag, const char *nspname, const char *owner,
212  const char *acls, const char *racls,
213  const char *initacls, const char *initracls);
214 
215 static void getDependencies(Archive *fout);
216 static void BuildArchiveDependencies(Archive *fout);
218  DumpId **dependencies, int *nDeps, int *allocDeps);
219 
221 static void addBoundaryDependencies(DumpableObject **dobjs, int numObjs,
222  DumpableObject *boundaryObjs);
223 
224 static void getDomainConstraints(Archive *fout, TypeInfo *tyinfo);
225 static void getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, bool oids, char relkind);
226 static void makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo, bool oids);
227 static void buildMatViewRefreshDependencies(Archive *fout);
228 static void getTableDataFKConstraints(void);
229 static char *format_function_arguments(FuncInfo *finfo, char *funcargs,
230  bool is_agg);
231 static char *format_function_arguments_old(Archive *fout,
232  FuncInfo *finfo, int nallargs,
233  char **allargtypes,
234  char **argmodes,
235  char **argnames);
236 static char *format_function_signature(Archive *fout,
237  FuncInfo *finfo, bool honor_quotes);
238 static char *convertRegProcReference(Archive *fout,
239  const char *proc);
240 static char *convertOperatorReference(Archive *fout, const char *opr);
241 static char *convertTSFunction(Archive *fout, Oid funcOid);
242 static Oid findLastBuiltinOid_V71(Archive *fout, const char *);
243 static void selectSourceSchema(Archive *fout, const char *schemaName);
244 static char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts);
245 static void getBlobs(Archive *fout);
246 static void dumpBlob(Archive *fout, BlobInfo *binfo);
247 static int dumpBlobs(Archive *fout, void *arg);
248 static void dumpPolicy(Archive *fout, PolicyInfo *polinfo);
249 static void dumpPublication(Archive *fout, PublicationInfo *pubinfo);
250 static void dumpPublicationTable(Archive *fout, PublicationRelInfo *pubrinfo);
251 static void dumpSubscription(Archive *fout, SubscriptionInfo *subinfo);
252 static void dumpDatabase(Archive *AH);
253 static void dumpEncoding(Archive *AH);
254 static void dumpStdStrings(Archive *AH);
256  PQExpBuffer upgrade_buffer, Oid pg_type_oid);
258  PQExpBuffer upgrade_buffer, Oid pg_rel_oid);
259 static void binary_upgrade_set_pg_class_oids(Archive *fout,
260  PQExpBuffer upgrade_buffer,
261  Oid pg_class_oid, bool is_index);
262 static void binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
263  DumpableObject *dobj,
264  const char *objlabel);
265 static const char *getAttrName(int attrnum, TableInfo *tblInfo);
266 static const char *fmtCopyColumnList(const TableInfo *ti, PQExpBuffer buffer);
267 static bool nonemptyReloptions(const char *reloptions);
268 static void appendReloptionsArrayAH(PQExpBuffer buffer, const char *reloptions,
269  const char *prefix, Archive *fout);
270 static char *get_synchronized_snapshot(Archive *fout);
271 static void setupDumpWorker(Archive *AHX);
272 static TableInfo *getRootTableInfo(TableInfo *tbinfo);
273 
274 
275 int
276 main(int argc, char **argv)
277 {
278  int c;
279  const char *filename = NULL;
280  const char *format = "p";
281  TableInfo *tblinfo;
282  int numTables;
283  DumpableObject **dobjs;
284  int numObjs;
285  DumpableObject *boundaryObjs;
286  int i;
287  int optindex;
288  RestoreOptions *ropt;
289  Archive *fout; /* the script file */
290  const char *dumpencoding = NULL;
291  const char *dumpsnapshot = NULL;
292  char *use_role = NULL;
293  int numWorkers = 1;
294  trivalue prompt_password = TRI_DEFAULT;
295  int compressLevel = -1;
296  int plainText = 0;
297  ArchiveFormat archiveFormat = archUnknown;
298  ArchiveMode archiveMode;
299 
300  static DumpOptions dopt;
301 
302  static struct option long_options[] = {
303  {"data-only", no_argument, NULL, 'a'},
304  {"blobs", no_argument, NULL, 'b'},
305  {"no-blobs", no_argument, NULL, 'B'},
306  {"clean", no_argument, NULL, 'c'},
307  {"create", no_argument, NULL, 'C'},
308  {"dbname", required_argument, NULL, 'd'},
309  {"file", required_argument, NULL, 'f'},
310  {"format", required_argument, NULL, 'F'},
311  {"host", required_argument, NULL, 'h'},
312  {"jobs", 1, NULL, 'j'},
313  {"no-reconnect", no_argument, NULL, 'R'},
314  {"oids", no_argument, NULL, 'o'},
315  {"no-owner", no_argument, NULL, 'O'},
316  {"port", required_argument, NULL, 'p'},
317  {"schema", required_argument, NULL, 'n'},
318  {"exclude-schema", required_argument, NULL, 'N'},
319  {"schema-only", no_argument, NULL, 's'},
320  {"superuser", required_argument, NULL, 'S'},
321  {"table", required_argument, NULL, 't'},
322  {"exclude-table", required_argument, NULL, 'T'},
323  {"no-password", no_argument, NULL, 'w'},
324  {"password", no_argument, NULL, 'W'},
325  {"username", required_argument, NULL, 'U'},
326  {"verbose", no_argument, NULL, 'v'},
327  {"no-privileges", no_argument, NULL, 'x'},
328  {"no-acl", no_argument, NULL, 'x'},
329  {"compress", required_argument, NULL, 'Z'},
330  {"encoding", required_argument, NULL, 'E'},
331  {"help", no_argument, NULL, '?'},
332  {"version", no_argument, NULL, 'V'},
333 
334  /*
335  * the following options don't have an equivalent short option letter
336  */
337  {"attribute-inserts", no_argument, &dopt.column_inserts, 1},
338  {"binary-upgrade", no_argument, &dopt.binary_upgrade, 1},
339  {"column-inserts", no_argument, &dopt.column_inserts, 1},
340  {"disable-dollar-quoting", no_argument, &dopt.disable_dollar_quoting, 1},
341  {"disable-triggers", no_argument, &dopt.disable_triggers, 1},
342  {"enable-row-security", no_argument, &dopt.enable_row_security, 1},
343  {"exclude-table-data", required_argument, NULL, 4},
344  {"if-exists", no_argument, &dopt.if_exists, 1},
345  {"inserts", no_argument, &dopt.dump_inserts, 1},
346  {"lock-wait-timeout", required_argument, NULL, 2},
347  {"no-tablespaces", no_argument, &dopt.outputNoTablespaces, 1},
348  {"quote-all-identifiers", no_argument, &quote_all_identifiers, 1},
349  {"load-via-partition-root", no_argument, &dopt.load_via_partition_root, 1},
350  {"role", required_argument, NULL, 3},
351  {"section", required_argument, NULL, 5},
352  {"serializable-deferrable", no_argument, &dopt.serializable_deferrable, 1},
353  {"snapshot", required_argument, NULL, 6},
354  {"strict-names", no_argument, &strict_names, 1},
355  {"use-set-session-authorization", no_argument, &dopt.use_setsessauth, 1},
356  {"no-publications", no_argument, &dopt.no_publications, 1},
357  {"no-security-labels", no_argument, &dopt.no_security_labels, 1},
358  {"no-synchronized-snapshots", no_argument, &dopt.no_synchronized_snapshots, 1},
359  {"no-unlogged-table-data", no_argument, &dopt.no_unlogged_table_data, 1},
360  {"no-subscriptions", no_argument, &dopt.no_subscriptions, 1},
361  {"no-sync", no_argument, NULL, 7},
362 
363  {NULL, 0, NULL, 0}
364  };
365 
366  set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_dump"));
367 
368  /*
369  * Initialize what we need for parallel execution, especially for thread
370  * support on Windows.
371  */
373 
374  g_verbose = false;
375 
376  strcpy(g_comment_start, "-- ");
377  g_comment_end[0] = '\0';
378  strcpy(g_opaque_type, "opaque");
379 
380  progname = get_progname(argv[0]);
381 
382  if (argc > 1)
383  {
384  if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
385  {
386  help(progname);
387  exit_nicely(0);
388  }
389  if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
390  {
391  puts("pg_dump (PostgreSQL) " PG_VERSION);
392  exit_nicely(0);
393  }
394  }
395 
396  InitDumpOptions(&dopt);
397 
398  while ((c = getopt_long(argc, argv, "abBcCd:E:f:F:h:j:n:N:oOp:RsS:t:T:U:vwWxZ:",
399  long_options, &optindex)) != -1)
400  {
401  switch (c)
402  {
403  case 'a': /* Dump data only */
404  dopt.dataOnly = true;
405  break;
406 
407  case 'b': /* Dump blobs */
408  dopt.outputBlobs = true;
409  break;
410 
411  case 'B': /* Don't dump blobs */
412  dopt.dontOutputBlobs = true;
413  break;
414 
415  case 'c': /* clean (i.e., drop) schema prior to create */
416  dopt.outputClean = 1;
417  break;
418 
419  case 'C': /* Create DB */
420  dopt.outputCreateDB = 1;
421  break;
422 
423  case 'd': /* database name */
424  dopt.dbname = pg_strdup(optarg);
425  break;
426 
427  case 'E': /* Dump encoding */
428  dumpencoding = pg_strdup(optarg);
429  break;
430 
431  case 'f':
432  filename = pg_strdup(optarg);
433  break;
434 
435  case 'F':
436  format = pg_strdup(optarg);
437  break;
438 
439  case 'h': /* server host */
440  dopt.pghost = pg_strdup(optarg);
441  break;
442 
443  case 'j': /* number of dump jobs */
444  numWorkers = atoi(optarg);
445  break;
446 
447  case 'n': /* include schema(s) */
448  simple_string_list_append(&schema_include_patterns, optarg);
449  dopt.include_everything = false;
450  break;
451 
452  case 'N': /* exclude schema(s) */
453  simple_string_list_append(&schema_exclude_patterns, optarg);
454  break;
455 
456  case 'o': /* Dump oids */
457  dopt.oids = true;
458  break;
459 
460  case 'O': /* Don't reconnect to match owner */
461  dopt.outputNoOwner = 1;
462  break;
463 
464  case 'p': /* server port */
465  dopt.pgport = pg_strdup(optarg);
466  break;
467 
468  case 'R':
469  /* no-op, still accepted for backwards compatibility */
470  break;
471 
472  case 's': /* dump schema only */
473  dopt.schemaOnly = true;
474  break;
475 
476  case 'S': /* Username for superuser in plain text output */
478  break;
479 
480  case 't': /* include table(s) */
481  simple_string_list_append(&table_include_patterns, optarg);
482  dopt.include_everything = false;
483  break;
484 
485  case 'T': /* exclude table(s) */
486  simple_string_list_append(&table_exclude_patterns, optarg);
487  break;
488 
489  case 'U':
490  dopt.username = pg_strdup(optarg);
491  break;
492 
493  case 'v': /* verbose */
494  g_verbose = true;
495  break;
496 
497  case 'w':
498  prompt_password = TRI_NO;
499  break;
500 
501  case 'W':
502  prompt_password = TRI_YES;
503  break;
504 
505  case 'x': /* skip ACL dump */
506  dopt.aclsSkip = true;
507  break;
508 
509  case 'Z': /* Compression Level */
510  compressLevel = atoi(optarg);
511  if (compressLevel < 0 || compressLevel > 9)
512  {
513  write_msg(NULL, "compression level must be in range 0..9\n");
514  exit_nicely(1);
515  }
516  break;
517 
518  case 0:
519  /* This covers the long options. */
520  break;
521 
522  case 2: /* lock-wait-timeout */
524  break;
525 
526  case 3: /* SET ROLE */
527  use_role = pg_strdup(optarg);
528  break;
529 
530  case 4: /* exclude table(s) data */
531  simple_string_list_append(&tabledata_exclude_patterns, optarg);
532  break;
533 
534  case 5: /* section */
536  break;
537 
538  case 6: /* snapshot */
539  dumpsnapshot = pg_strdup(optarg);
540  break;
541 
542  case 7: /* no-sync */
543  dosync = false;
544  break;
545 
546  default:
547  fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
548  exit_nicely(1);
549  }
550  }
551 
552  /*
553  * Non-option argument specifies database name as long as it wasn't
554  * already specified with -d / --dbname
555  */
556  if (optind < argc && dopt.dbname == NULL)
557  dopt.dbname = argv[optind++];
558 
559  /* Complain if any arguments remain */
560  if (optind < argc)
561  {
562  fprintf(stderr, _("%s: too many command-line arguments (first is \"%s\")\n"),
563  progname, argv[optind]);
564  fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
565  progname);
566  exit_nicely(1);
567  }
568 
569  /* --column-inserts implies --inserts */
570  if (dopt.column_inserts)
571  dopt.dump_inserts = 1;
572 
573  /*
574  * Binary upgrade mode implies dumping sequence data even in schema-only
575  * mode. This is not exposed as a separate option, but kept separate
576  * internally for clarity.
577  */
578  if (dopt.binary_upgrade)
579  dopt.sequence_data = 1;
580 
581  if (dopt.dataOnly && dopt.schemaOnly)
582  {
583  write_msg(NULL, "options -s/--schema-only and -a/--data-only cannot be used together\n");
584  exit_nicely(1);
585  }
586 
587  if (dopt.dataOnly && dopt.outputClean)
588  {
589  write_msg(NULL, "options -c/--clean and -a/--data-only cannot be used together\n");
590  exit_nicely(1);
591  }
592 
593  if (dopt.dump_inserts && dopt.oids)
594  {
595  write_msg(NULL, "options --inserts/--column-inserts and -o/--oids cannot be used together\n");
596  write_msg(NULL, "(The INSERT command cannot set OIDs.)\n");
597  exit_nicely(1);
598  }
599 
600  if (dopt.if_exists && !dopt.outputClean)
601  exit_horribly(NULL, "option --if-exists requires option -c/--clean\n");
602 
603  /* Identify archive format to emit */
604  archiveFormat = parseArchiveFormat(format, &archiveMode);
605 
606  /* archiveFormat specific setup */
607  if (archiveFormat == archNull)
608  plainText = 1;
609 
610  /* Custom and directory formats are compressed by default, others not */
611  if (compressLevel == -1)
612  {
613 #ifdef HAVE_LIBZ
614  if (archiveFormat == archCustom || archiveFormat == archDirectory)
615  compressLevel = Z_DEFAULT_COMPRESSION;
616  else
617 #endif
618  compressLevel = 0;
619  }
620 
621 #ifndef HAVE_LIBZ
622  if (compressLevel != 0)
623  write_msg(NULL, "WARNING: requested compression not available in this "
624  "installation -- archive will be uncompressed\n");
625  compressLevel = 0;
626 #endif
627 
628  /*
629  * On Windows we can only have at most MAXIMUM_WAIT_OBJECTS (= 64 usually)
630  * parallel jobs because that's the maximum limit for the
631  * WaitForMultipleObjects() call.
632  */
633  if (numWorkers <= 0
634 #ifdef WIN32
635  || numWorkers > MAXIMUM_WAIT_OBJECTS
636 #endif
637  )
638  exit_horribly(NULL, "invalid number of parallel jobs\n");
639 
640  /* Parallel backup only in the directory archive format so far */
641  if (archiveFormat != archDirectory && numWorkers > 1)
642  exit_horribly(NULL, "parallel backup only supported by the directory format\n");
643 
644  /* Open the output file */
645  fout = CreateArchive(filename, archiveFormat, compressLevel, dosync,
646  archiveMode, setupDumpWorker);
647 
648  /* Make dump options accessible right away */
649  SetArchiveOptions(fout, &dopt, NULL);
650 
651  /* Register the cleanup hook */
652  on_exit_close_archive(fout);
653 
654  /* Let the archiver know how noisy to be */
655  fout->verbose = g_verbose;
656 
657  /*
658  * We allow the server to be back to 8.0, and up to any minor release of
659  * our own major version. (See also version check in pg_dumpall.c.)
660  */
661  fout->minRemoteVersion = 80000;
662  fout->maxRemoteVersion = (PG_VERSION_NUM / 100) * 100 + 99;
663 
664  fout->numWorkers = numWorkers;
665 
666  /*
667  * Open the database using the Archiver, so it knows about it. Errors mean
668  * death.
669  */
670  ConnectDatabase(fout, dopt.dbname, dopt.pghost, dopt.pgport, dopt.username, prompt_password);
671  setup_connection(fout, dumpencoding, dumpsnapshot, use_role);
672 
673  /*
674  * Disable security label support if server version < v9.1.x (prevents
675  * access to nonexistent pg_seclabel catalog)
676  */
677  if (fout->remoteVersion < 90100)
678  dopt.no_security_labels = 1;
679 
680  /*
681  * On hot standbys, never try to dump unlogged table data, since it will
682  * just throw an error.
683  */
684  if (fout->isStandby)
685  dopt.no_unlogged_table_data = true;
686 
687  /* Select the appropriate subquery to convert user IDs to names */
688  if (fout->remoteVersion >= 80100)
689  username_subquery = "SELECT rolname FROM pg_catalog.pg_roles WHERE oid =";
690  else
691  username_subquery = "SELECT usename FROM pg_catalog.pg_user WHERE usesysid =";
692 
693  /* check the version for the synchronized snapshots feature */
694  if (numWorkers > 1 && fout->remoteVersion < 90200
695  && !dopt.no_synchronized_snapshots)
697  "Synchronized snapshots are not supported by this server version.\n"
698  "Run with --no-synchronized-snapshots instead if you do not need\n"
699  "synchronized snapshots.\n");
700 
701  /* check the version when a snapshot is explicitly specified by user */
702  if (dumpsnapshot && fout->remoteVersion < 90200)
704  "Exported snapshots are not supported by this server version.\n");
705 
706  /*
707  * Find the last built-in OID, if needed (prior to 8.1)
708  *
709  * With 8.1 and above, we can just use FirstNormalObjectId - 1.
710  */
711  if (fout->remoteVersion < 80100)
713  PQdb(GetConnection(fout)));
714  else
716 
717  if (g_verbose)
718  write_msg(NULL, "last built-in OID is %u\n", g_last_builtin_oid);
719 
720  /* Expand schema selection patterns into OID lists */
721  if (schema_include_patterns.head != NULL)
722  {
723  expand_schema_name_patterns(fout, &schema_include_patterns,
724  &schema_include_oids,
725  strict_names);
726  if (schema_include_oids.head == NULL)
727  exit_horribly(NULL, "no matching schemas were found\n");
728  }
729  expand_schema_name_patterns(fout, &schema_exclude_patterns,
730  &schema_exclude_oids,
731  false);
732  /* non-matching exclusion patterns aren't an error */
733 
734  /* Expand table selection patterns into OID lists */
735  if (table_include_patterns.head != NULL)
736  {
737  expand_table_name_patterns(fout, &table_include_patterns,
738  &table_include_oids,
739  strict_names);
740  if (table_include_oids.head == NULL)
741  exit_horribly(NULL, "no matching tables were found\n");
742  }
743  expand_table_name_patterns(fout, &table_exclude_patterns,
744  &table_exclude_oids,
745  false);
746 
747  expand_table_name_patterns(fout, &tabledata_exclude_patterns,
748  &tabledata_exclude_oids,
749  false);
750 
751  /* non-matching exclusion patterns aren't an error */
752 
753  /*
754  * Dumping blobs is the default for dumps where an inclusion switch is not
755  * used (an "include everything" dump). -B can be used to exclude blobs
756  * from those dumps. -b can be used to include blobs even when an
757  * inclusion switch is used.
758  *
759  * -s means "schema only" and blobs are data, not schema, so we never
760  * include blobs when -s is used.
761  */
762  if (dopt.include_everything && !dopt.schemaOnly && !dopt.dontOutputBlobs)
763  dopt.outputBlobs = true;
764 
765  /*
766  * Now scan the database and create DumpableObject structs for all the
767  * objects we intend to dump.
768  */
769  tblinfo = getSchemaData(fout, &numTables);
770 
771  if (fout->remoteVersion < 80400)
772  guessConstraintInheritance(tblinfo, numTables);
773 
774  if (!dopt.schemaOnly)
775  {
776  getTableData(&dopt, tblinfo, numTables, dopt.oids, 0);
778  if (dopt.dataOnly)
780  }
781 
782  if (dopt.schemaOnly && dopt.sequence_data)
783  getTableData(&dopt, tblinfo, numTables, dopt.oids, RELKIND_SEQUENCE);
784 
785  /*
786  * In binary-upgrade mode, we do not have to worry about the actual blob
787  * data or the associated metadata that resides in the pg_largeobject and
788  * pg_largeobject_metadata tables, respectivly.
789  *
790  * However, we do need to collect blob information as there may be
791  * comments or other information on blobs that we do need to dump out.
792  */
793  if (dopt.outputBlobs || dopt.binary_upgrade)
794  getBlobs(fout);
795 
796  /*
797  * Collect dependency data to assist in ordering the objects.
798  */
799  getDependencies(fout);
800 
801  /* Lastly, create dummy objects to represent the section boundaries */
802  boundaryObjs = createBoundaryObjects();
803 
804  /* Get pointers to all the known DumpableObjects */
805  getDumpableObjects(&dobjs, &numObjs);
806 
807  /*
808  * Add dummy dependencies to enforce the dump section ordering.
809  */
810  addBoundaryDependencies(dobjs, numObjs, boundaryObjs);
811 
812  /*
813  * Sort the objects into a safe dump order (no forward references).
814  *
815  * We rely on dependency information to help us determine a safe order, so
816  * the initial sort is mostly for cosmetic purposes: we sort by name to
817  * ensure that logically identical schemas will dump identically.
818  */
819  sortDumpableObjectsByTypeName(dobjs, numObjs);
820 
821  /* If we do a parallel dump, we want the largest tables to go first */
822  if (archiveFormat == archDirectory && numWorkers > 1)
823  sortDataAndIndexObjectsBySize(dobjs, numObjs);
824 
825  sortDumpableObjects(dobjs, numObjs,
826  boundaryObjs[0].dumpId, boundaryObjs[1].dumpId);
827 
828  /*
829  * Create archive TOC entries for all the objects to be dumped, in a safe
830  * order.
831  */
832 
833  /* First the special ENCODING and STDSTRINGS entries. */
834  dumpEncoding(fout);
835  dumpStdStrings(fout);
836 
837  /* The database item is always next, unless we don't want it at all */
838  if (dopt.include_everything && !dopt.dataOnly)
839  dumpDatabase(fout);
840 
841  /* Now the rearrangeable objects. */
842  for (i = 0; i < numObjs; i++)
843  dumpDumpableObject(fout, dobjs[i]);
844 
845  /*
846  * Set up options info to ensure we dump what we want.
847  */
848  ropt = NewRestoreOptions();
849  ropt->filename = filename;
850 
851  /* if you change this list, see dumpOptionsFromRestoreOptions */
852  ropt->dropSchema = dopt.outputClean;
853  ropt->dataOnly = dopt.dataOnly;
854  ropt->schemaOnly = dopt.schemaOnly;
855  ropt->if_exists = dopt.if_exists;
856  ropt->column_inserts = dopt.column_inserts;
857  ropt->dumpSections = dopt.dumpSections;
858  ropt->aclsSkip = dopt.aclsSkip;
859  ropt->superuser = dopt.outputSuperuser;
860  ropt->createDB = dopt.outputCreateDB;
861  ropt->noOwner = dopt.outputNoOwner;
862  ropt->noTablespace = dopt.outputNoTablespaces;
863  ropt->disable_triggers = dopt.disable_triggers;
864  ropt->use_setsessauth = dopt.use_setsessauth;
866  ropt->dump_inserts = dopt.dump_inserts;
867  ropt->no_publications = dopt.no_publications;
869  ropt->no_subscriptions = dopt.no_subscriptions;
870  ropt->lockWaitTimeout = dopt.lockWaitTimeout;
873  ropt->sequence_data = dopt.sequence_data;
874  ropt->binary_upgrade = dopt.binary_upgrade;
875 
876  if (compressLevel == -1)
877  ropt->compression = 0;
878  else
879  ropt->compression = compressLevel;
880 
881  ropt->suppressDumpWarnings = true; /* We've already shown them */
882 
883  SetArchiveOptions(fout, &dopt, ropt);
884 
885  /* Mark which entries should be output */
887 
888  /*
889  * The archive's TOC entries are now marked as to which ones will actually
890  * be output, so we can set up their dependency lists properly. This isn't
891  * necessary for plain-text output, though.
892  */
893  if (!plainText)
895 
896  /*
897  * And finally we can do the actual output.
898  *
899  * Note: for non-plain-text output formats, the output file is written
900  * inside CloseArchive(). This is, um, bizarre; but not worth changing
901  * right now.
902  */
903  if (plainText)
904  RestoreArchive(fout);
905 
906  CloseArchive(fout);
907 
908  exit_nicely(0);
909 }
910 
911 
/*
 * help
 *		Print a usage summary and the full option list to stdout.
 *
 * Called for the -?/--help switch.  Every string goes through the _()
 * gettext macro, so each printf's literal is a separate translatable
 * message; do not merge or split them without updating the catalogs.
 */
static void
help(const char *progname)
{
	printf(_("%s dumps a database as a text file or to other formats.\n\n"), progname);
	printf(_("Usage:\n"));
	printf(_("  %s [OPTION]... [DBNAME]\n"), progname);

	/* General options: output destination, format, parallelism, version */
	printf(_("\nGeneral options:\n"));
	printf(_("  -f, --file=FILENAME          output file or directory name\n"));
	printf(_("  -F, --format=c|d|t|p         output file format (custom, directory, tar,\n"
			 "                               plain text (default))\n"));
	printf(_("  -j, --jobs=NUM               use this many parallel jobs to dump\n"));
	printf(_("  -v, --verbose                verbose mode\n"));
	printf(_("  -V, --version                output version information, then exit\n"));
	printf(_("  -Z, --compress=0-9           compression level for compressed formats\n"));
	printf(_("  --lock-wait-timeout=TIMEOUT  fail after waiting TIMEOUT for a table lock\n"));
	printf(_("  --no-sync                    do not wait for changes to be written safely to disk\n"));
	printf(_("  -?, --help                   show this help, then exit\n"));

	/* Content-selection options: what objects/data end up in the dump */
	printf(_("\nOptions controlling the output content:\n"));
	printf(_("  -a, --data-only              dump only the data, not the schema\n"));
	printf(_("  -b, --blobs                  include large objects in dump\n"));
	printf(_("  -B, --no-blobs               exclude large objects in dump\n"));
	printf(_("  -c, --clean                  clean (drop) database objects before recreating\n"));
	printf(_("  -C, --create                 include commands to create database in dump\n"));
	printf(_("  -E, --encoding=ENCODING      dump the data in encoding ENCODING\n"));
	printf(_("  -n, --schema=SCHEMA          dump the named schema(s) only\n"));
	printf(_("  -N, --exclude-schema=SCHEMA  do NOT dump the named schema(s)\n"));
	printf(_("  -o, --oids                   include OIDs in dump\n"));
	printf(_("  -O, --no-owner               skip restoration of object ownership in\n"
			 "                               plain-text format\n"));
	printf(_("  -s, --schema-only            dump only the schema, no data\n"));
	printf(_("  -S, --superuser=NAME         superuser user name to use in plain-text format\n"));
	printf(_("  -t, --table=TABLE            dump the named table(s) only\n"));
	printf(_("  -T, --exclude-table=TABLE    do NOT dump the named table(s)\n"));
	printf(_("  -x, --no-privileges          do not dump privileges (grant/revoke)\n"));
	printf(_("  --binary-upgrade             for use by upgrade utilities only\n"));
	printf(_("  --column-inserts             dump data as INSERT commands with column names\n"));
	printf(_("  --disable-dollar-quoting     disable dollar quoting, use SQL standard quoting\n"));
	printf(_("  --disable-triggers           disable triggers during data-only restore\n"));
	printf(_("  --enable-row-security        enable row security (dump only content user has\n"
			 "                               access to)\n"));
	printf(_("  --exclude-table-data=TABLE   do NOT dump data for the named table(s)\n"));
	printf(_("  --if-exists                  use IF EXISTS when dropping objects\n"));
	printf(_("  --inserts                    dump data as INSERT commands, rather than COPY\n"));
	printf(_("  --no-publications            do not dump publications\n"));
	printf(_("  --no-security-labels         do not dump security label assignments\n"));
	printf(_("  --no-subscriptions           do not dump subscriptions\n"));
	printf(_("  --no-synchronized-snapshots  do not use synchronized snapshots in parallel jobs\n"));
	printf(_("  --no-tablespaces             do not dump tablespace assignments\n"));
	printf(_("  --no-unlogged-table-data     do not dump unlogged table data\n"));
	printf(_("  --quote-all-identifiers      quote all identifiers, even if not key words\n"));
	printf(_("  --load-via-partition-root    load partitions via the root table\n"));
	printf(_("  --section=SECTION            dump named section (pre-data, data, or post-data)\n"));
	printf(_("  --serializable-deferrable    wait until the dump can run without anomalies\n"));
	printf(_("  --snapshot=SNAPSHOT          use given snapshot for the dump\n"));
	printf(_("  --strict-names               require table and/or schema include patterns to\n"
			 "                               match at least one entity each\n"));
	printf(_("  --use-set-session-authorization\n"
			 "                               use SET SESSION AUTHORIZATION commands instead of\n"
			 "                               ALTER OWNER commands to set ownership\n"));

	/* Connection options: how to reach the source server */
	printf(_("\nConnection options:\n"));
	printf(_("  -d, --dbname=DBNAME      database to dump\n"));
	printf(_("  -h, --host=HOSTNAME      database server host or socket directory\n"));
	printf(_("  -p, --port=PORT          database server port number\n"));
	printf(_("  -U, --username=NAME      connect as specified database user\n"));
	printf(_("  -w, --no-password        never prompt for password\n"));
	printf(_("  -W, --password           force password prompt (should happen automatically)\n"));
	printf(_("  --role=ROLENAME          do SET ROLE before dump\n"));

	printf(_("\nIf no database name is supplied, then the PGDATABASE environment\n"
			 "variable value is used.\n\n"));
	printf(_("Report bugs to <pgsql-bugs@postgresql.org>.\n"));
}
987 
988 static void
989 setup_connection(Archive *AH, const char *dumpencoding,
990  const char *dumpsnapshot, char *use_role)
991 {
992  DumpOptions *dopt = AH->dopt;
993  PGconn *conn = GetConnection(AH);
994  const char *std_strings;
995 
996  /*
997  * Set the client encoding if requested.
998  */
999  if (dumpencoding)
1000  {
1001  if (PQsetClientEncoding(conn, dumpencoding) < 0)
1002  exit_horribly(NULL, "invalid client encoding \"%s\" specified\n",
1003  dumpencoding);
1004  }
1005 
1006  /*
1007  * Get the active encoding and the standard_conforming_strings setting, so
1008  * we know how to escape strings.
1009  */
1010  AH->encoding = PQclientEncoding(conn);
1011 
1012  std_strings = PQparameterStatus(conn, "standard_conforming_strings");
1013  AH->std_strings = (std_strings && strcmp(std_strings, "on") == 0);
1014 
1015  /*
1016  * Set the role if requested. In a parallel dump worker, we'll be passed
1017  * use_role == NULL, but AH->use_role is already set (if user specified it
1018  * originally) and we should use that.
1019  */
1020  if (!use_role && AH->use_role)
1021  use_role = AH->use_role;
1022 
1023  /* Set the role if requested */
1024  if (use_role && AH->remoteVersion >= 80100)
1025  {
1026  PQExpBuffer query = createPQExpBuffer();
1027 
1028  appendPQExpBuffer(query, "SET ROLE %s", fmtId(use_role));
1029  ExecuteSqlStatement(AH, query->data);
1030  destroyPQExpBuffer(query);
1031 
1032  /* save it for possible later use by parallel workers */
1033  if (!AH->use_role)
1034  AH->use_role = pg_strdup(use_role);
1035  }
1036 
1037  /* Set the datestyle to ISO to ensure the dump's portability */
1038  ExecuteSqlStatement(AH, "SET DATESTYLE = ISO");
1039 
1040  /* Likewise, avoid using sql_standard intervalstyle */
1041  if (AH->remoteVersion >= 80400)
1042  ExecuteSqlStatement(AH, "SET INTERVALSTYLE = POSTGRES");
1043 
1044  /*
1045  * Set extra_float_digits so that we can dump float data exactly (given
1046  * correctly implemented float I/O code, anyway)
1047  */
1048  if (AH->remoteVersion >= 90000)
1049  ExecuteSqlStatement(AH, "SET extra_float_digits TO 3");
1050  else
1051  ExecuteSqlStatement(AH, "SET extra_float_digits TO 2");
1052 
1053  /*
1054  * If synchronized scanning is supported, disable it, to prevent
1055  * unpredictable changes in row ordering across a dump and reload.
1056  */
1057  if (AH->remoteVersion >= 80300)
1058  ExecuteSqlStatement(AH, "SET synchronize_seqscans TO off");
1059 
1060  /*
1061  * Disable timeouts if supported.
1062  */
1063  ExecuteSqlStatement(AH, "SET statement_timeout = 0");
1064  if (AH->remoteVersion >= 90300)
1065  ExecuteSqlStatement(AH, "SET lock_timeout = 0");
1066  if (AH->remoteVersion >= 90600)
1067  ExecuteSqlStatement(AH, "SET idle_in_transaction_session_timeout = 0");
1068 
1069  /*
1070  * Quote all identifiers, if requested.
1071  */
1072  if (quote_all_identifiers && AH->remoteVersion >= 90100)
1073  ExecuteSqlStatement(AH, "SET quote_all_identifiers = true");
1074 
1075  /*
1076  * Adjust row-security mode, if supported.
1077  */
1078  if (AH->remoteVersion >= 90500)
1079  {
1080  if (dopt->enable_row_security)
1081  ExecuteSqlStatement(AH, "SET row_security = on");
1082  else
1083  ExecuteSqlStatement(AH, "SET row_security = off");
1084  }
1085 
1086  /*
1087  * Start transaction-snapshot mode transaction to dump consistent data.
1088  */
1089  ExecuteSqlStatement(AH, "BEGIN");
1090  if (AH->remoteVersion >= 90100)
1091  {
1092  /*
1093  * To support the combination of serializable_deferrable with the jobs
1094  * option we use REPEATABLE READ for the worker connections that are
1095  * passed a snapshot. As long as the snapshot is acquired in a
1096  * SERIALIZABLE, READ ONLY, DEFERRABLE transaction, its use within a
1097  * REPEATABLE READ transaction provides the appropriate integrity
1098  * guarantees. This is a kluge, but safe for back-patching.
1099  */
1100  if (dopt->serializable_deferrable && AH->sync_snapshot_id == NULL)
1102  "SET TRANSACTION ISOLATION LEVEL "
1103  "SERIALIZABLE, READ ONLY, DEFERRABLE");
1104  else
1106  "SET TRANSACTION ISOLATION LEVEL "
1107  "REPEATABLE READ, READ ONLY");
1108  }
1109  else
1110  {
1112  "SET TRANSACTION ISOLATION LEVEL "
1113  "SERIALIZABLE, READ ONLY");
1114  }
1115 
1116  /*
1117  * If user specified a snapshot to use, select that. In a parallel dump
1118  * worker, we'll be passed dumpsnapshot == NULL, but AH->sync_snapshot_id
1119  * is already set (if the server can handle it) and we should use that.
1120  */
1121  if (dumpsnapshot)
1122  AH->sync_snapshot_id = pg_strdup(dumpsnapshot);
1123 
1124  if (AH->sync_snapshot_id)
1125  {
1126  PQExpBuffer query = createPQExpBuffer();
1127 
1128  appendPQExpBuffer(query, "SET TRANSACTION SNAPSHOT ");
1129  appendStringLiteralConn(query, AH->sync_snapshot_id, conn);
1130  ExecuteSqlStatement(AH, query->data);
1131  destroyPQExpBuffer(query);
1132  }
1133  else if (AH->numWorkers > 1 &&
1134  AH->remoteVersion >= 90200 &&
1136  {
1137  if (AH->isStandby && AH->remoteVersion < 100000)
1139  "Synchronized snapshots on standby servers are not supported by this server version.\n"
1140  "Run with --no-synchronized-snapshots instead if you do not need\n"
1141  "synchronized snapshots.\n");
1142 
1143 
1145  }
1146 }
1147 
1148 /* Set up connection for a parallel worker process */
1149 static void
1151 {
1152  /*
1153  * We want to re-select all the same values the master connection is
1154  * using. We'll have inherited directly-usable values in
1155  * AH->sync_snapshot_id and AH->use_role, but we need to translate the
1156  * inherited encoding value back to a string to pass to setup_connection.
1157  */
1158  setup_connection(AH,
1160  NULL,
1161  NULL);
1162 }
1163 
1164 static char *
1166 {
1167  char *query = "SELECT pg_catalog.pg_export_snapshot()";
1168  char *result;
1169  PGresult *res;
1170 
1171  res = ExecuteSqlQueryForSingleRow(fout, query);
1172  result = pg_strdup(PQgetvalue(res, 0, 0));
1173  PQclear(res);
1174 
1175  return result;
1176 }
1177 
1178 static ArchiveFormat
1180 {
1181  ArchiveFormat archiveFormat;
1182 
1183  *mode = archModeWrite;
1184 
1185  if (pg_strcasecmp(format, "a") == 0 || pg_strcasecmp(format, "append") == 0)
1186  {
1187  /* This is used by pg_dumpall, and is not documented */
1188  archiveFormat = archNull;
1189  *mode = archModeAppend;
1190  }
1191  else if (pg_strcasecmp(format, "c") == 0)
1192  archiveFormat = archCustom;
1193  else if (pg_strcasecmp(format, "custom") == 0)
1194  archiveFormat = archCustom;
1195  else if (pg_strcasecmp(format, "d") == 0)
1196  archiveFormat = archDirectory;
1197  else if (pg_strcasecmp(format, "directory") == 0)
1198  archiveFormat = archDirectory;
1199  else if (pg_strcasecmp(format, "p") == 0)
1200  archiveFormat = archNull;
1201  else if (pg_strcasecmp(format, "plain") == 0)
1202  archiveFormat = archNull;
1203  else if (pg_strcasecmp(format, "t") == 0)
1204  archiveFormat = archTar;
1205  else if (pg_strcasecmp(format, "tar") == 0)
1206  archiveFormat = archTar;
1207  else
1208  exit_horribly(NULL, "invalid output format \"%s\" specified\n", format);
1209  return archiveFormat;
1210 }
1211 
1212 /*
1213  * Find the OIDs of all schemas matching the given list of patterns,
1214  * and append them to the given OID list.
1215  */
1216 static void
1218  SimpleStringList *patterns,
1219  SimpleOidList *oids,
1220  bool strict_names)
1221 {
1222  PQExpBuffer query;
1223  PGresult *res;
1224  SimpleStringListCell *cell;
1225  int i;
1226 
1227  if (patterns->head == NULL)
1228  return; /* nothing to do */
1229 
1230  query = createPQExpBuffer();
1231 
1232  /*
1233  * The loop below runs multiple SELECTs might sometimes result in
1234  * duplicate entries in the OID list, but we don't care.
1235  */
1236 
1237  for (cell = patterns->head; cell; cell = cell->next)
1238  {
1239  appendPQExpBuffer(query,
1240  "SELECT oid FROM pg_catalog.pg_namespace n\n");
1241  processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1242  false, NULL, "n.nspname", NULL, NULL);
1243 
1244  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1245  if (strict_names && PQntuples(res) == 0)
1246  exit_horribly(NULL, "no matching schemas were found for pattern \"%s\"\n", cell->val);
1247 
1248  for (i = 0; i < PQntuples(res); i++)
1249  {
1250  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1251  }
1252 
1253  PQclear(res);
1254  resetPQExpBuffer(query);
1255  }
1256 
1257  destroyPQExpBuffer(query);
1258 }
1259 
1260 /*
1261  * Find the OIDs of all tables matching the given list of patterns,
1262  * and append them to the given OID list.
1263  */
1264 static void
1266  SimpleStringList *patterns, SimpleOidList *oids,
1267  bool strict_names)
1268 {
1269  PQExpBuffer query;
1270  PGresult *res;
1271  SimpleStringListCell *cell;
1272  int i;
1273 
1274  if (patterns->head == NULL)
1275  return; /* nothing to do */
1276 
1277  query = createPQExpBuffer();
1278 
1279  /*
1280  * this might sometimes result in duplicate entries in the OID list, but
1281  * we don't care.
1282  */
1283 
1284  for (cell = patterns->head; cell; cell = cell->next)
1285  {
1286  appendPQExpBuffer(query,
1287  "SELECT c.oid"
1288  "\nFROM pg_catalog.pg_class c"
1289  "\n LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace"
1290  "\nWHERE c.relkind in ('%c', '%c', '%c', '%c', '%c', '%c')\n",
1294  processSQLNamePattern(GetConnection(fout), query, cell->val, true,
1295  false, "n.nspname", "c.relname", NULL,
1296  "pg_catalog.pg_table_is_visible(c.oid)");
1297 
1298  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1299  if (strict_names && PQntuples(res) == 0)
1300  exit_horribly(NULL, "no matching tables were found for pattern \"%s\"\n", cell->val);
1301 
1302  for (i = 0; i < PQntuples(res); i++)
1303  {
1304  simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1305  }
1306 
1307  PQclear(res);
1308  resetPQExpBuffer(query);
1309  }
1310 
1311  destroyPQExpBuffer(query);
1312 }
1313 
1314 /*
1315  * checkExtensionMembership
1316  * Determine whether object is an extension member, and if so,
1317  * record an appropriate dependency and set the object's dump flag.
1318  *
1319  * It's important to call this for each object that could be an extension
1320  * member. Generally, we integrate this with determining the object's
1321  * to-be-dumped-ness, since extension membership overrides other rules for that.
1322  *
1323  * Returns true if object is an extension member, else false.
1324  */
1325 static bool
1327 {
1328  ExtensionInfo *ext = findOwningExtension(dobj->catId);
1329 
1330  if (ext == NULL)
1331  return false;
1332 
1333  dobj->ext_member = true;
1334 
1335  /* Record dependency so that getDependencies needn't deal with that */
1336  addObjectDependency(dobj, ext->dobj.dumpId);
1337 
1338  /*
1339  * In 9.6 and above, mark the member object to have any non-initial ACL,
1340  * policies, and security labels dumped.
1341  *
1342  * Note that any initial ACLs (see pg_init_privs) will be removed when we
1343  * extract the information about the object. We don't provide support for
1344  * initial policies and security labels and it seems unlikely for those to
1345  * ever exist, but we may have to revisit this later.
1346  *
1347  * Prior to 9.6, we do not include any extension member components.
1348  *
1349  * In binary upgrades, we still dump all components of the members
1350  * individually, since the idea is to exactly reproduce the database
1351  * contents rather than replace the extension contents with something
1352  * different.
1353  */
1354  if (fout->dopt->binary_upgrade)
1355  dobj->dump = ext->dobj.dump;
1356  else
1357  {
1358  if (fout->remoteVersion < 90600)
1359  dobj->dump = DUMP_COMPONENT_NONE;
1360  else
1361  dobj->dump = ext->dobj.dump_contains & (DUMP_COMPONENT_ACL |
1364  }
1365 
1366  return true;
1367 }
1368 
1369 /*
1370  * selectDumpableNamespace: policy-setting subroutine
1371  * Mark a namespace as to be dumped or not
1372  */
1373 static void
1375 {
1376  /*
1377  * If specific tables are being dumped, do not dump any complete
1378  * namespaces. If specific namespaces are being dumped, dump just those
1379  * namespaces. Otherwise, dump all non-system namespaces.
1380  */
1381  if (table_include_oids.head != NULL)
1382  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1383  else if (schema_include_oids.head != NULL)
1384  nsinfo->dobj.dump_contains = nsinfo->dobj.dump =
1385  simple_oid_list_member(&schema_include_oids,
1386  nsinfo->dobj.catId.oid) ?
1388  else if (fout->remoteVersion >= 90600 &&
1389  strcmp(nsinfo->dobj.name, "pg_catalog") == 0)
1390  {
1391  /*
1392  * In 9.6 and above, we dump out any ACLs defined in pg_catalog, if
1393  * they are interesting (and not the original ACLs which were set at
1394  * initdb time, see pg_init_privs).
1395  */
1396  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
1397  }
1398  else if (strncmp(nsinfo->dobj.name, "pg_", 3) == 0 ||
1399  strcmp(nsinfo->dobj.name, "information_schema") == 0)
1400  {
1401  /* Other system schemas don't get dumped */
1402  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1403  }
1404  else
1405  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ALL;
1406 
1407  /*
1408  * In any case, a namespace can be excluded by an exclusion switch
1409  */
1410  if (nsinfo->dobj.dump_contains &&
1411  simple_oid_list_member(&schema_exclude_oids,
1412  nsinfo->dobj.catId.oid))
1413  nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1414 
1415  /*
1416  * If the schema belongs to an extension, allow extension membership to
1417  * override the dump decision for the schema itself. However, this does
1418  * not change dump_contains, so this won't change what we do with objects
1419  * within the schema. (If they belong to the extension, they'll get
1420  * suppressed by it, otherwise not.)
1421  */
1422  (void) checkExtensionMembership(&nsinfo->dobj, fout);
1423 }
1424 
1425 /*
1426  * selectDumpableTable: policy-setting subroutine
1427  * Mark a table as to be dumped or not
1428  */
1429 static void
1431 {
1432  if (checkExtensionMembership(&tbinfo->dobj, fout))
1433  return; /* extension membership overrides all else */
1434 
1435  /*
1436  * If specific tables are being dumped, dump just those tables; else, dump
1437  * according to the parent namespace's dump flag.
1438  */
1439  if (table_include_oids.head != NULL)
1440  tbinfo->dobj.dump = simple_oid_list_member(&table_include_oids,
1441  tbinfo->dobj.catId.oid) ?
1443  else
1444  tbinfo->dobj.dump = tbinfo->dobj.namespace->dobj.dump_contains;
1445 
1446  /*
1447  * In any case, a table can be excluded by an exclusion switch
1448  */
1449  if (tbinfo->dobj.dump &&
1450  simple_oid_list_member(&table_exclude_oids,
1451  tbinfo->dobj.catId.oid))
1452  tbinfo->dobj.dump = DUMP_COMPONENT_NONE;
1453 }
1454 
1455 /*
1456  * selectDumpableType: policy-setting subroutine
1457  * Mark a type as to be dumped or not
1458  *
1459  * If it's a table's rowtype or an autogenerated array type, we also apply a
1460  * special type code to facilitate sorting into the desired order. (We don't
1461  * want to consider those to be ordinary types because that would bring tables
1462  * up into the datatype part of the dump order.) We still set the object's
1463  * dump flag; that's not going to cause the dummy type to be dumped, but we
1464  * need it so that casts involving such types will be dumped correctly -- see
1465  * dumpCast. This means the flag should be set the same as for the underlying
1466  * object (the table or base type).
1467  */
1468 static void
1470 {
1471  /* skip complex types, except for standalone composite types */
1472  if (OidIsValid(tyinfo->typrelid) &&
1474  {
1475  TableInfo *tytable = findTableByOid(tyinfo->typrelid);
1476 
1477  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1478  if (tytable != NULL)
1479  tyinfo->dobj.dump = tytable->dobj.dump;
1480  else
1481  tyinfo->dobj.dump = DUMP_COMPONENT_NONE;
1482  return;
1483  }
1484 
1485  /* skip auto-generated array types */
1486  if (tyinfo->isArray)
1487  {
1488  tyinfo->dobj.objType = DO_DUMMY_TYPE;
1489 
1490  /*
1491  * Fall through to set the dump flag; we assume that the subsequent
1492  * rules will do the same thing as they would for the array's base
1493  * type. (We cannot reliably look up the base type here, since
1494  * getTypes may not have processed it yet.)
1495  */
1496  }
1497 
1498  if (checkExtensionMembership(&tyinfo->dobj, fout))
1499  return; /* extension membership overrides all else */
1500 
1501  /* Dump based on if the contents of the namespace are being dumped */
1502  tyinfo->dobj.dump = tyinfo->dobj.namespace->dobj.dump_contains;
1503 }
1504 
1505 /*
1506  * selectDumpableDefaultACL: policy-setting subroutine
1507  * Mark a default ACL as to be dumped or not
1508  *
1509  * For per-schema default ACLs, dump if the schema is to be dumped.
1510  * Otherwise dump if we are dumping "everything". Note that dataOnly
1511  * and aclsSkip are checked separately.
1512  */
1513 static void
1515 {
1516  /* Default ACLs can't be extension members */
1517 
1518  if (dinfo->dobj.namespace)
1519  /* default ACLs are considered part of the namespace */
1520  dinfo->dobj.dump = dinfo->dobj.namespace->dobj.dump_contains;
1521  else
1522  dinfo->dobj.dump = dopt->include_everything ?
1524 }
1525 
1526 /*
1527  * selectDumpableCast: policy-setting subroutine
1528  * Mark a cast as to be dumped or not
1529  *
1530  * Casts do not belong to any particular namespace (since they haven't got
1531  * names), nor do they have identifiable owners. To distinguish user-defined
1532  * casts from built-in ones, we must resort to checking whether the cast's
1533  * OID is in the range reserved for initdb.
1534  */
1535 static void
1537 {
1538  if (checkExtensionMembership(&cast->dobj, fout))
1539  return; /* extension membership overrides all else */
1540 
1541  /*
1542  * This would be DUMP_COMPONENT_ACL for from-initdb casts, but they do not
1543  * support ACLs currently.
1544  */
1545  if (cast->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1546  cast->dobj.dump = DUMP_COMPONENT_NONE;
1547  else
1548  cast->dobj.dump = fout->dopt->include_everything ?
1550 }
1551 
1552 /*
1553  * selectDumpableProcLang: policy-setting subroutine
1554  * Mark a procedural language as to be dumped or not
1555  *
1556  * Procedural languages do not belong to any particular namespace. To
1557  * identify built-in languages, we must resort to checking whether the
1558  * language's OID is in the range reserved for initdb.
1559  */
1560 static void
1562 {
1563  if (checkExtensionMembership(&plang->dobj, fout))
1564  return; /* extension membership overrides all else */
1565 
1566  /*
1567  * Only include procedural languages when we are dumping everything.
1568  *
1569  * For from-initdb procedural languages, only include ACLs, as we do for
1570  * the pg_catalog namespace. We need this because procedural languages do
1571  * not live in any namespace.
1572  */
1573  if (!fout->dopt->include_everything)
1574  plang->dobj.dump = DUMP_COMPONENT_NONE;
1575  else
1576  {
1577  if (plang->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1578  plang->dobj.dump = fout->remoteVersion < 90600 ?
1580  else
1581  plang->dobj.dump = DUMP_COMPONENT_ALL;
1582  }
1583 }
1584 
1585 /*
1586  * selectDumpableAccessMethod: policy-setting subroutine
1587  * Mark an access method as to be dumped or not
1588  *
1589  * Access methods do not belong to any particular namespace. To identify
1590  * built-in access methods, we must resort to checking whether the
1591  * method's OID is in the range reserved for initdb.
1592  */
1593 static void
1595 {
1596  if (checkExtensionMembership(&method->dobj, fout))
1597  return; /* extension membership overrides all else */
1598 
1599  /*
1600  * This would be DUMP_COMPONENT_ACL for from-initdb access methods, but
1601  * they do not support ACLs currently.
1602  */
1603  if (method->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1604  method->dobj.dump = DUMP_COMPONENT_NONE;
1605  else
1606  method->dobj.dump = fout->dopt->include_everything ?
1608 }
1609 
1610 /*
1611  * selectDumpableExtension: policy-setting subroutine
1612  * Mark an extension as to be dumped or not
1613  *
1614  * Normally, we dump all extensions, or none of them if include_everything
1615  * is false (i.e., a --schema or --table switch was given). However, in
1616  * binary-upgrade mode it's necessary to skip built-in extensions, since we
1617  * assume those will already be installed in the target database. We identify
1618  * such extensions by their having OIDs in the range reserved for initdb.
1619  */
1620 static void
1622 {
1623  /*
1624  * Use DUMP_COMPONENT_ACL for from-initdb extensions, to allow users to
1625  * change permissions on those objects, if they wish to, and have those
1626  * changes preserved.
1627  */
1628  if (dopt->binary_upgrade && extinfo->dobj.catId.oid <= (Oid) g_last_builtin_oid)
1629  extinfo->dobj.dump = extinfo->dobj.dump_contains = DUMP_COMPONENT_ACL;
1630  else
1631  extinfo->dobj.dump = extinfo->dobj.dump_contains =
1634 }
1635 
1636 /*
1637  * selectDumpablePublicationTable: policy-setting subroutine
1638  * Mark a publication table as to be dumped or not
1639  *
1640  * Publication tables have schemas, but those are ignored in decision making,
1641  * because publications are only dumped when we are dumping everything.
1642  */
1643 static void
1645 {
1646  if (checkExtensionMembership(dobj, fout))
1647  return; /* extension membership overrides all else */
1648 
1649  dobj->dump = fout->dopt->include_everything ?
1651 }
1652 
1653 /*
1654  * selectDumpableObject: policy-setting subroutine
1655  * Mark a generic dumpable object as to be dumped or not
1656  *
1657  * Use this only for object types without a special-case routine above.
1658  */
1659 static void
1661 {
1662  if (checkExtensionMembership(dobj, fout))
1663  return; /* extension membership overrides all else */
1664 
1665  /*
1666  * Default policy is to dump if parent namespace is dumpable, or for
1667  * non-namespace-associated items, dump if we're dumping "everything".
1668  */
1669  if (dobj->namespace)
1670  dobj->dump = dobj->namespace->dobj.dump_contains;
1671  else
1672  dobj->dump = fout->dopt->include_everything ?
1674 }
1675 
1676 /*
1677  * Dump a table's contents for loading using the COPY command
1678  * - this routine is called by the Archiver when it wants the table
1679  * to be dumped.
1680  */
1681 
1682 static int
1683 dumpTableData_copy(Archive *fout, void *dcontext)
1684 {
1685  TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
1686  TableInfo *tbinfo = tdinfo->tdtable;
1687  const char *classname = tbinfo->dobj.name;
1688  const bool hasoids = tbinfo->hasoids;
1689  const bool oids = tdinfo->oids;
1691 
1692  /*
1693  * Note: can't use getThreadLocalPQExpBuffer() here, we're calling fmtId
1694  * which uses it already.
1695  */
1696  PQExpBuffer clistBuf = createPQExpBuffer();
1697  PGconn *conn = GetConnection(fout);
1698  PGresult *res;
1699  int ret;
1700  char *copybuf;
1701  const char *column_list;
1702 
1703  if (g_verbose)
1704  write_msg(NULL, "dumping contents of table \"%s.%s\"\n",
1705  tbinfo->dobj.namespace->dobj.name, classname);
1706 
1707  /*
1708  * Make sure we are in proper schema. We will qualify the table name
1709  * below anyway (in case its name conflicts with a pg_catalog table); but
1710  * this ensures reproducible results in case the table contains regproc,
1711  * regclass, etc columns.
1712  */
1713  selectSourceSchema(fout, tbinfo->dobj.namespace->dobj.name);
1714 
1715  /*
1716  * Specify the column list explicitly so that we have no possibility of
1717  * retrieving data in the wrong column order. (The default column
1718  * ordering of COPY will not be what we want in certain corner cases
1719  * involving ADD COLUMN and inheritance.)
1720  */
1721  column_list = fmtCopyColumnList(tbinfo, clistBuf);
1722 
1723  if (oids && hasoids)
1724  {
1725  appendPQExpBuffer(q, "COPY %s %s WITH OIDS TO stdout;",
1727  tbinfo->dobj.namespace->dobj.name,
1728  classname),
1729  column_list);
1730  }
1731  else if (tdinfo->filtercond)
1732  {
1733  /* Note: this syntax is only supported in 8.2 and up */
1734  appendPQExpBufferStr(q, "COPY (SELECT ");
1735  /* klugery to get rid of parens in column list */
1736  if (strlen(column_list) > 2)
1737  {
1738  appendPQExpBufferStr(q, column_list + 1);
1739  q->data[q->len - 1] = ' ';
1740  }
1741  else
1742  appendPQExpBufferStr(q, "* ");
1743  appendPQExpBuffer(q, "FROM %s %s) TO stdout;",
1745  tbinfo->dobj.namespace->dobj.name,
1746  classname),
1747  tdinfo->filtercond);
1748  }
1749  else
1750  {
1751  appendPQExpBuffer(q, "COPY %s %s TO stdout;",
1753  tbinfo->dobj.namespace->dobj.name,
1754  classname),
1755  column_list);
1756  }
1757  res = ExecuteSqlQuery(fout, q->data, PGRES_COPY_OUT);
1758  PQclear(res);
1759  destroyPQExpBuffer(clistBuf);
1760 
1761  for (;;)
1762  {
1763  ret = PQgetCopyData(conn, &copybuf, 0);
1764 
1765  if (ret < 0)
1766  break; /* done or error */
1767 
1768  if (copybuf)
1769  {
1770  WriteData(fout, copybuf, ret);
1771  PQfreemem(copybuf);
1772  }
1773 
1774  /* ----------
1775  * THROTTLE:
1776  *
1777  * There was considerable discussion in late July, 2000 regarding
1778  * slowing down pg_dump when backing up large tables. Users with both
1779  * slow & fast (multi-processor) machines experienced performance
1780  * degradation when doing a backup.
1781  *
1782  * Initial attempts based on sleeping for a number of ms for each ms
1783  * of work were deemed too complex, then a simple 'sleep in each loop'
1784  * implementation was suggested. The latter failed because the loop
1785  * was too tight. Finally, the following was implemented:
1786  *
1787  * If throttle is non-zero, then
1788  * See how long since the last sleep.
1789  * Work out how long to sleep (based on ratio).
1790  * If sleep is more than 100ms, then
1791  * sleep
1792  * reset timer
1793  * EndIf
1794  * EndIf
1795  *
1796  * where the throttle value was the number of ms to sleep per ms of
1797  * work. The calculation was done in each loop.
1798  *
1799  * Most of the hard work is done in the backend, and this solution
1800  * still did not work particularly well: on slow machines, the ratio
1801  * was 50:1, and on medium paced machines, 1:1, and on fast
1802  * multi-processor machines, it had little or no effect, for reasons
1803  * that were unclear.
1804  *
1805  * Further discussion ensued, and the proposal was dropped.
1806  *
1807  * For those people who want this feature, it can be implemented using
1808  * gettimeofday in each loop, calculating the time since last sleep,
1809  * multiplying that by the sleep ratio, then if the result is more
1810  * than a preset 'minimum sleep time' (say 100ms), call the 'select'
1811  * function to sleep for a subsecond period ie.
1812  *
1813  * select(0, NULL, NULL, NULL, &tvi);
1814  *
1815  * This will return after the interval specified in the structure tvi.
1816  * Finally, call gettimeofday again to save the 'last sleep time'.
1817  * ----------
1818  */
1819  }
1820  archprintf(fout, "\\.\n\n\n");
1821 
1822  if (ret == -2)
1823  {
1824  /* copy data transfer failed */
1825  write_msg(NULL, "Dumping the contents of table \"%s\" failed: PQgetCopyData() failed.\n", classname);
1826  write_msg(NULL, "Error message from server: %s", PQerrorMessage(conn));
1827  write_msg(NULL, "The command was: %s\n", q->data);
1828  exit_nicely(1);
1829  }
1830 
1831  /* Check command status and return to normal libpq state */
1832  res = PQgetResult(conn);
1833  if (PQresultStatus(res) != PGRES_COMMAND_OK)
1834  {
1835  write_msg(NULL, "Dumping the contents of table \"%s\" failed: PQgetResult() failed.\n", classname);
1836  write_msg(NULL, "Error message from server: %s", PQerrorMessage(conn));
1837  write_msg(NULL, "The command was: %s\n", q->data);
1838  exit_nicely(1);
1839  }
1840  PQclear(res);
1841 
1842  /* Do this to ensure we've pumped libpq back to idle state */
1843  if (PQgetResult(conn) != NULL)
1844  write_msg(NULL, "WARNING: unexpected extra results during COPY of table \"%s\"\n",
1845  classname);
1846 
1847  destroyPQExpBuffer(q);
1848  return 1;
1849 }
1850 
1851 /*
1852  * Dump table data using INSERT commands.
1853  *
1854  * Caution: when we restore from an archive file direct to database, the
1855  * INSERT commands emitted by this function have to be parsed by
1856  * pg_backup_db.c's ExecuteSimpleCommands(), which will not handle comments,
1857  * E'' strings, or dollar-quoted strings. So don't emit anything like that.
1858  */
1859 static int
1860 dumpTableData_insert(Archive *fout, void *dcontext)
1861 {
1862  TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
1863  TableInfo *tbinfo = tdinfo->tdtable;
1864  const char *classname = tbinfo->dobj.name;
1865  DumpOptions *dopt = fout->dopt;
1867  PQExpBuffer insertStmt = NULL;
1868  PGresult *res;
1869  int tuple;
1870  int nfields;
1871  int field;
1872 
1873  /*
1874  * Make sure we are in proper schema. We will qualify the table name
1875  * below anyway (in case its name conflicts with a pg_catalog table); but
1876  * this ensures reproducible results in case the table contains regproc,
1877  * regclass, etc columns.
1878  */
1879  selectSourceSchema(fout, tbinfo->dobj.namespace->dobj.name);
1880 
1881  appendPQExpBuffer(q, "DECLARE _pg_dump_cursor CURSOR FOR "
1882  "SELECT * FROM ONLY %s",
1884  tbinfo->dobj.namespace->dobj.name,
1885  classname));
1886  if (tdinfo->filtercond)
1887  appendPQExpBuffer(q, " %s", tdinfo->filtercond);
1888 
1889  ExecuteSqlStatement(fout, q->data);
1890 
1891  while (1)
1892  {
1893  res = ExecuteSqlQuery(fout, "FETCH 100 FROM _pg_dump_cursor",
1894  PGRES_TUPLES_OK);
1895  nfields = PQnfields(res);
1896  for (tuple = 0; tuple < PQntuples(res); tuple++)
1897  {
1898  /*
1899  * First time through, we build as much of the INSERT statement as
1900  * possible in "insertStmt", which we can then just print for each
1901  * line. If the table happens to have zero columns then this will
1902  * be a complete statement, otherwise it will end in "VALUES(" and
1903  * be ready to have the row's column values appended.
1904  */
1905  if (insertStmt == NULL)
1906  {
1907  insertStmt = createPQExpBuffer();
1908 
1909  /*
1910  * When load-via-partition-root is set, get the root table
1911  * name for the partition table, so that we can reload data
1912  * through the root table.
1913  */
1914  if (dopt->load_via_partition_root && tbinfo->ispartition)
1915  {
1916  TableInfo *parentTbinfo;
1917 
1918  parentTbinfo = getRootTableInfo(tbinfo);
1919 
1920  /*
1921  * When we loading data through the root, we will qualify
1922  * the table name. This is needed because earlier
1923  * search_path will be set for the partition table.
1924  */
1925  classname = (char *) fmtQualifiedId(fout->remoteVersion,
1926  parentTbinfo->dobj.namespace->dobj.name,
1927  parentTbinfo->dobj.name);
1928  }
1929  else
1930  classname = fmtId(tbinfo->dobj.name);
1931 
1932  appendPQExpBuffer(insertStmt, "INSERT INTO %s ",
1933  classname);
1934 
1935  /* corner case for zero-column table */
1936  if (nfields == 0)
1937  {
1938  appendPQExpBufferStr(insertStmt, "DEFAULT VALUES;\n");
1939  }
1940  else
1941  {
1942  /* append the list of column names if required */
1943  if (dopt->column_inserts)
1944  {
1945  appendPQExpBufferChar(insertStmt, '(');
1946  for (field = 0; field < nfields; field++)
1947  {
1948  if (field > 0)
1949  appendPQExpBufferStr(insertStmt, ", ");
1950  appendPQExpBufferStr(insertStmt,
1951  fmtId(PQfname(res, field)));
1952  }
1953  appendPQExpBufferStr(insertStmt, ") ");
1954  }
1955 
1956  if (tbinfo->needs_override)
1957  appendPQExpBufferStr(insertStmt, "OVERRIDING SYSTEM VALUE ");
1958 
1959  appendPQExpBufferStr(insertStmt, "VALUES (");
1960  }
1961  }
1962 
1963  archputs(insertStmt->data, fout);
1964 
1965  /* if it is zero-column table then we're done */
1966  if (nfields == 0)
1967  continue;
1968 
1969  for (field = 0; field < nfields; field++)
1970  {
1971  if (field > 0)
1972  archputs(", ", fout);
1973  if (PQgetisnull(res, tuple, field))
1974  {
1975  archputs("NULL", fout);
1976  continue;
1977  }
1978 
1979  /* XXX This code is partially duplicated in ruleutils.c */
1980  switch (PQftype(res, field))
1981  {
1982  case INT2OID:
1983  case INT4OID:
1984  case INT8OID:
1985  case OIDOID:
1986  case FLOAT4OID:
1987  case FLOAT8OID:
1988  case NUMERICOID:
1989  {
1990  /*
1991  * These types are printed without quotes unless
1992  * they contain values that aren't accepted by the
1993  * scanner unquoted (e.g., 'NaN'). Note that
1994  * strtod() and friends might accept NaN, so we
1995  * can't use that to test.
1996  *
1997  * In reality we only need to defend against
1998  * infinity and NaN, so we need not get too crazy
1999  * about pattern matching here.
2000  */
2001  const char *s = PQgetvalue(res, tuple, field);
2002 
2003  if (strspn(s, "0123456789 +-eE.") == strlen(s))
2004  archputs(s, fout);
2005  else
2006  archprintf(fout, "'%s'", s);
2007  }
2008  break;
2009 
2010  case BITOID:
2011  case VARBITOID:
2012  archprintf(fout, "B'%s'",
2013  PQgetvalue(res, tuple, field));
2014  break;
2015 
2016  case BOOLOID:
2017  if (strcmp(PQgetvalue(res, tuple, field), "t") == 0)
2018  archputs("true", fout);
2019  else
2020  archputs("false", fout);
2021  break;
2022 
2023  default:
2024  /* All other types are printed as string literals. */
2025  resetPQExpBuffer(q);
2027  PQgetvalue(res, tuple, field),
2028  fout);
2029  archputs(q->data, fout);
2030  break;
2031  }
2032  }
2033  archputs(");\n", fout);
2034  }
2035 
2036  if (PQntuples(res) <= 0)
2037  {
2038  PQclear(res);
2039  break;
2040  }
2041  PQclear(res);
2042  }
2043 
2044  archputs("\n\n", fout);
2045 
2046  ExecuteSqlStatement(fout, "CLOSE _pg_dump_cursor");
2047 
2048  destroyPQExpBuffer(q);
2049  if (insertStmt != NULL)
2050  destroyPQExpBuffer(insertStmt);
2051 
2052  return 1;
2053 }
2054 
2055 /*
2056  * getRootTableInfo:
2057  * get the root TableInfo for the given partition table.
2058  */
2059 static TableInfo *
2061 {
2062  TableInfo *parentTbinfo;
2063 
2064  Assert(tbinfo->ispartition);
2065  Assert(tbinfo->numParents == 1);
2066 
2067  parentTbinfo = tbinfo->parents[0];
2068  while (parentTbinfo->ispartition)
2069  {
2070  Assert(parentTbinfo->numParents == 1);
2071  parentTbinfo = parentTbinfo->parents[0];
2072  }
2073 
2074  return parentTbinfo;
2075 }
2076 
2077 /*
2078  * dumpTableData -
2079  * dump the contents of a single table
2080  *
2081  * Actually, this just makes an ArchiveEntry for the table contents.
2082  */
2083 static void
2085 {
2086  DumpOptions *dopt = fout->dopt;
2087  TableInfo *tbinfo = tdinfo->tdtable;
2088  PQExpBuffer copyBuf = createPQExpBuffer();
2089  PQExpBuffer clistBuf = createPQExpBuffer();
2090  DataDumperPtr dumpFn;
2091  char *copyStmt;
2092  const char *copyFrom;
2093 
2094  if (!dopt->dump_inserts)
2095  {
2096  /* Dump/restore using COPY */
2097  dumpFn = dumpTableData_copy;
2098 
2099  /*
2100  * When load-via-partition-root is set, get the root table name for
2101  * the partition table, so that we can reload data through the root
2102  * table.
2103  */
2104  if (dopt->load_via_partition_root && tbinfo->ispartition)
2105  {
2106  TableInfo *parentTbinfo;
2107 
2108  parentTbinfo = getRootTableInfo(tbinfo);
2109 
2110  /*
2111  * When we load data through the root, we will qualify the table
2112  * name, because search_path is set for the partition.
2113  */
2114  copyFrom = fmtQualifiedId(fout->remoteVersion,
2115  parentTbinfo->dobj.namespace->dobj.name,
2116  parentTbinfo->dobj.name);
2117  }
2118  else
2119  copyFrom = fmtId(tbinfo->dobj.name);
2120 
2121  /* must use 2 steps here 'cause fmtId is nonreentrant */
2122  appendPQExpBuffer(copyBuf, "COPY %s ",
2123  copyFrom);
2124  appendPQExpBuffer(copyBuf, "%s %sFROM stdin;\n",
2125  fmtCopyColumnList(tbinfo, clistBuf),
2126  (tdinfo->oids && tbinfo->hasoids) ? "WITH OIDS " : "");
2127  copyStmt = copyBuf->data;
2128  }
2129  else
2130  {
2131  /* Restore using INSERT */
2132  dumpFn = dumpTableData_insert;
2133  copyStmt = NULL;
2134  }
2135 
2136  /*
2137  * Note: although the TableDataInfo is a full DumpableObject, we treat its
2138  * dependency on its table as "special" and pass it to ArchiveEntry now.
2139  * See comments for BuildArchiveDependencies.
2140  */
2141  if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2142  ArchiveEntry(fout, tdinfo->dobj.catId, tdinfo->dobj.dumpId,
2143  tbinfo->dobj.name, tbinfo->dobj.namespace->dobj.name,
2144  NULL, tbinfo->rolname,
2145  false, "TABLE DATA", SECTION_DATA,
2146  "", "", copyStmt,
2147  &(tbinfo->dobj.dumpId), 1,
2148  dumpFn, tdinfo);
2149 
2150  destroyPQExpBuffer(copyBuf);
2151  destroyPQExpBuffer(clistBuf);
2152 }
2153 
2154 /*
2155  * refreshMatViewData -
2156  * load or refresh the contents of a single materialized view
2157  *
2158  * Actually, this just makes an ArchiveEntry for the REFRESH MATERIALIZED VIEW
2159  * statement.
2160  */
2161 static void
2163 {
2164  TableInfo *tbinfo = tdinfo->tdtable;
2165  PQExpBuffer q;
2166 
2167  /* If the materialized view is not flagged as populated, skip this. */
2168  if (!tbinfo->relispopulated)
2169  return;
2170 
2171  q = createPQExpBuffer();
2172 
2173  appendPQExpBuffer(q, "REFRESH MATERIALIZED VIEW %s;\n",
2174  fmtId(tbinfo->dobj.name));
2175 
2176  if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2177  ArchiveEntry(fout,
2178  tdinfo->dobj.catId, /* catalog ID */
2179  tdinfo->dobj.dumpId, /* dump ID */
2180  tbinfo->dobj.name, /* Name */
2181  tbinfo->dobj.namespace->dobj.name, /* Namespace */
2182  NULL, /* Tablespace */
2183  tbinfo->rolname, /* Owner */
2184  false, /* with oids */
2185  "MATERIALIZED VIEW DATA", /* Desc */
2186  SECTION_POST_DATA, /* Section */
2187  q->data, /* Create */
2188  "", /* Del */
2189  NULL, /* Copy */
2190  tdinfo->dobj.dependencies, /* Deps */
2191  tdinfo->dobj.nDeps, /* # Deps */
2192  NULL, /* Dumper */
2193  NULL); /* Dumper Arg */
2194 
2195  destroyPQExpBuffer(q);
2196 }
2197 
2198 /*
2199  * getTableData -
2200  * set up dumpable objects representing the contents of tables
2201  */
2202 static void
2203 getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, bool oids, char relkind)
2204 {
2205  int i;
2206 
2207  for (i = 0; i < numTables; i++)
2208  {
2209  if (tblinfo[i].dobj.dump & DUMP_COMPONENT_DATA &&
2210  (!relkind || tblinfo[i].relkind == relkind))
2211  makeTableDataInfo(dopt, &(tblinfo[i]), oids);
2212  }
2213 }
2214 
2215 /*
2216  * Make a dumpable object for the data of this specific table
2217  *
2218  * Note: we make a TableDataInfo if and only if we are going to dump the
2219  * table data; the "dump" flag in such objects isn't used.
2220  */
2221 static void
2222 makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo, bool oids)
2223 {
2224  TableDataInfo *tdinfo;
2225 
2226  /*
2227  * Nothing to do if we already decided to dump the table. This will
2228  * happen for "config" tables.
2229  */
2230  if (tbinfo->dataObj != NULL)
2231  return;
2232 
2233  /* Skip VIEWs (no data to dump) */
2234  if (tbinfo->relkind == RELKIND_VIEW)
2235  return;
2236  /* Skip FOREIGN TABLEs (no data to dump) */
2237  if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2238  return;
2239  /* Skip partitioned tables (data in partitions) */
2240  if (tbinfo->relkind == RELKIND_PARTITIONED_TABLE)
2241  return;
2242 
2243  /* Don't dump data in unlogged tables, if so requested */
2244  if (tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED &&
2245  dopt->no_unlogged_table_data)
2246  return;
2247 
2248  /* Check that the data is not explicitly excluded */
2249  if (simple_oid_list_member(&tabledata_exclude_oids,
2250  tbinfo->dobj.catId.oid))
2251  return;
2252 
2253  /* OK, let's dump it */
2254  tdinfo = (TableDataInfo *) pg_malloc(sizeof(TableDataInfo));
2255 
2256  if (tbinfo->relkind == RELKIND_MATVIEW)
2257  tdinfo->dobj.objType = DO_REFRESH_MATVIEW;
2258  else if (tbinfo->relkind == RELKIND_SEQUENCE)
2259  tdinfo->dobj.objType = DO_SEQUENCE_SET;
2260  else
2261  tdinfo->dobj.objType = DO_TABLE_DATA;
2262 
2263  /*
2264  * Note: use tableoid 0 so that this object won't be mistaken for
2265  * something that pg_depend entries apply to.
2266  */
2267  tdinfo->dobj.catId.tableoid = 0;
2268  tdinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
2269  AssignDumpId(&tdinfo->dobj);
2270  tdinfo->dobj.name = tbinfo->dobj.name;
2271  tdinfo->dobj.namespace = tbinfo->dobj.namespace;
2272  tdinfo->tdtable = tbinfo;
2273  tdinfo->oids = oids;
2274  tdinfo->filtercond = NULL; /* might get set later */
2275  addObjectDependency(&tdinfo->dobj, tbinfo->dobj.dumpId);
2276 
2277  tbinfo->dataObj = tdinfo;
2278 }
2279 
2280 /*
2281  * The refresh for a materialized view must be dependent on the refresh for
2282  * any materialized view that this one is dependent on.
2283  *
2284  * This must be called after all the objects are created, but before they are
2285  * sorted.
2286  */
2287 static void
2289 {
2290  PQExpBuffer query;
2291  PGresult *res;
2292  int ntups,
2293  i;
2294  int i_classid,
2295  i_objid,
2296  i_refobjid;
2297 
2298  /* No Mat Views before 9.3. */
2299  if (fout->remoteVersion < 90300)
2300  return;
2301 
2302  /* Make sure we are in proper schema */
2303  selectSourceSchema(fout, "pg_catalog");
2304 
2305  query = createPQExpBuffer();
2306 
2307  appendPQExpBufferStr(query, "WITH RECURSIVE w AS "
2308  "( "
2309  "SELECT d1.objid, d2.refobjid, c2.relkind AS refrelkind "
2310  "FROM pg_depend d1 "
2311  "JOIN pg_class c1 ON c1.oid = d1.objid "
2312  "AND c1.relkind = " CppAsString2(RELKIND_MATVIEW)
2313  " JOIN pg_rewrite r1 ON r1.ev_class = d1.objid "
2314  "JOIN pg_depend d2 ON d2.classid = 'pg_rewrite'::regclass "
2315  "AND d2.objid = r1.oid "
2316  "AND d2.refobjid <> d1.objid "
2317  "JOIN pg_class c2 ON c2.oid = d2.refobjid "
2318  "AND c2.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
2320  "WHERE d1.classid = 'pg_class'::regclass "
2321  "UNION "
2322  "SELECT w.objid, d3.refobjid, c3.relkind "
2323  "FROM w "
2324  "JOIN pg_rewrite r3 ON r3.ev_class = w.refobjid "
2325  "JOIN pg_depend d3 ON d3.classid = 'pg_rewrite'::regclass "
2326  "AND d3.objid = r3.oid "
2327  "AND d3.refobjid <> w.refobjid "
2328  "JOIN pg_class c3 ON c3.oid = d3.refobjid "
2329  "AND c3.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
2331  ") "
2332  "SELECT 'pg_class'::regclass::oid AS classid, objid, refobjid "
2333  "FROM w "
2334  "WHERE refrelkind = " CppAsString2(RELKIND_MATVIEW));
2335 
2336  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
2337 
2338  ntups = PQntuples(res);
2339 
2340  i_classid = PQfnumber(res, "classid");
2341  i_objid = PQfnumber(res, "objid");
2342  i_refobjid = PQfnumber(res, "refobjid");
2343 
2344  for (i = 0; i < ntups; i++)
2345  {
2346  CatalogId objId;
2347  CatalogId refobjId;
2348  DumpableObject *dobj;
2349  DumpableObject *refdobj;
2350  TableInfo *tbinfo;
2351  TableInfo *reftbinfo;
2352 
2353  objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
2354  objId.oid = atooid(PQgetvalue(res, i, i_objid));
2355  refobjId.tableoid = objId.tableoid;
2356  refobjId.oid = atooid(PQgetvalue(res, i, i_refobjid));
2357 
2358  dobj = findObjectByCatalogId(objId);
2359  if (dobj == NULL)
2360  continue;
2361 
2362  Assert(dobj->objType == DO_TABLE);
2363  tbinfo = (TableInfo *) dobj;
2364  Assert(tbinfo->relkind == RELKIND_MATVIEW);
2365  dobj = (DumpableObject *) tbinfo->dataObj;
2366  if (dobj == NULL)
2367  continue;
2368  Assert(dobj->objType == DO_REFRESH_MATVIEW);
2369 
2370  refdobj = findObjectByCatalogId(refobjId);
2371  if (refdobj == NULL)
2372  continue;
2373 
2374  Assert(refdobj->objType == DO_TABLE);
2375  reftbinfo = (TableInfo *) refdobj;
2376  Assert(reftbinfo->relkind == RELKIND_MATVIEW);
2377  refdobj = (DumpableObject *) reftbinfo->dataObj;
2378  if (refdobj == NULL)
2379  continue;
2380  Assert(refdobj->objType == DO_REFRESH_MATVIEW);
2381 
2382  addObjectDependency(dobj, refdobj->dumpId);
2383 
2384  if (!reftbinfo->relispopulated)
2385  tbinfo->relispopulated = false;
2386  }
2387 
2388  PQclear(res);
2389 
2390  destroyPQExpBuffer(query);
2391 }
2392 
2393 /*
2394  * getTableDataFKConstraints -
2395  * add dump-order dependencies reflecting foreign key constraints
2396  *
2397  * This code is executed only in a data-only dump --- in schema+data dumps
2398  * we handle foreign key issues by not creating the FK constraints until
2399  * after the data is loaded. In a data-only dump, however, we want to
2400  * order the table data objects in such a way that a table's referenced
2401  * tables are restored first. (In the presence of circular references or
2402  * self-references this may be impossible; we'll detect and complain about
2403  * that during the dependency sorting step.)
2404  */
2405 static void
2407 {
2408  DumpableObject **dobjs;
2409  int numObjs;
2410  int i;
2411 
2412  /* Search through all the dumpable objects for FK constraints */
2413  getDumpableObjects(&dobjs, &numObjs);
2414  for (i = 0; i < numObjs; i++)
2415  {
2416  if (dobjs[i]->objType == DO_FK_CONSTRAINT)
2417  {
2418  ConstraintInfo *cinfo = (ConstraintInfo *) dobjs[i];
2419  TableInfo *ftable;
2420 
2421  /* Not interesting unless both tables are to be dumped */
2422  if (cinfo->contable == NULL ||
2423  cinfo->contable->dataObj == NULL)
2424  continue;
2425  ftable = findTableByOid(cinfo->confrelid);
2426  if (ftable == NULL ||
2427  ftable->dataObj == NULL)
2428  continue;
2429 
2430  /*
2431  * Okay, make referencing table's TABLE_DATA object depend on the
2432  * referenced table's TABLE_DATA object.
2433  */
2435  ftable->dataObj->dobj.dumpId);
2436  }
2437  }
2438  free(dobjs);
2439 }
2440 
2441 
2442 /*
2443  * guessConstraintInheritance:
2444  * In pre-8.4 databases, we can't tell for certain which constraints
2445  * are inherited. We assume a CHECK constraint is inherited if its name
2446  * matches the name of any constraint in the parent. Originally this code
2447  * tried to compare the expression texts, but that can fail for various
2448  * reasons --- for example, if the parent and child tables are in different
2449  * schemas, reverse-listing of function calls may produce different text
2450  * (schema-qualified or not) depending on search path.
2451  *
2452  * In 8.4 and up we can rely on the conislocal field to decide which
2453  * constraints must be dumped; much safer.
2454  *
2455  * This function assumes all conislocal flags were initialized to TRUE.
2456  * It clears the flag on anything that seems to be inherited.
2457  */
2458 static void
2460 {
2461  int i,
2462  j,
2463  k;
2464 
2465  for (i = 0; i < numTables; i++)
2466  {
2467  TableInfo *tbinfo = &(tblinfo[i]);
2468  int numParents;
2469  TableInfo **parents;
2470  TableInfo *parent;
2471 
2472  /* Sequences and views never have parents */
2473  if (tbinfo->relkind == RELKIND_SEQUENCE ||
2474  tbinfo->relkind == RELKIND_VIEW)
2475  continue;
2476 
2477  /* Don't bother computing anything for non-target tables, either */
2478  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
2479  continue;
2480 
2481  numParents = tbinfo->numParents;
2482  parents = tbinfo->parents;
2483 
2484  if (numParents == 0)
2485  continue; /* nothing to see here, move along */
2486 
2487  /* scan for inherited CHECK constraints */
2488  for (j = 0; j < tbinfo->ncheck; j++)
2489  {
2490  ConstraintInfo *constr;
2491 
2492  constr = &(tbinfo->checkexprs[j]);
2493 
2494  for (k = 0; k < numParents; k++)
2495  {
2496  int l;
2497 
2498  parent = parents[k];
2499  for (l = 0; l < parent->ncheck; l++)
2500  {
2501  ConstraintInfo *pconstr = &(parent->checkexprs[l]);
2502 
2503  if (strcmp(pconstr->dobj.name, constr->dobj.name) == 0)
2504  {
2505  constr->conislocal = false;
2506  break;
2507  }
2508  }
2509  if (!constr->conislocal)
2510  break;
2511  }
2512  }
2513  }
2514 }
2515 
2516 
2517 /*
2518  * dumpDatabase:
2519  * dump the database definition
2520  */
2521 static void
2523 {
2524  DumpOptions *dopt = fout->dopt;
2525  PQExpBuffer dbQry = createPQExpBuffer();
2526  PQExpBuffer delQry = createPQExpBuffer();
2527  PQExpBuffer creaQry = createPQExpBuffer();
2528  PGconn *conn = GetConnection(fout);
2529  PGresult *res;
2530  int i_tableoid,
2531  i_oid,
2532  i_dba,
2533  i_encoding,
2534  i_collate,
2535  i_ctype,
2536  i_frozenxid,
2537  i_minmxid,
2538  i_tablespace;
2539  CatalogId dbCatId;
2540  DumpId dbDumpId;
2541  const char *datname,
2542  *dba,
2543  *encoding,
2544  *collate,
2545  *ctype,
2546  *tablespace;
2547  uint32 frozenxid,
2548  minmxid;
2549 
2550  datname = PQdb(conn);
2551 
2552  if (g_verbose)
2553  write_msg(NULL, "saving database definition\n");
2554 
2555  /* Make sure we are in proper schema */
2556  selectSourceSchema(fout, "pg_catalog");
2557 
2558  /* Get the database owner and parameters from pg_database */
2559  if (fout->remoteVersion >= 90300)
2560  {
2561  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
2562  "(%s datdba) AS dba, "
2563  "pg_encoding_to_char(encoding) AS encoding, "
2564  "datcollate, datctype, datfrozenxid, datminmxid, "
2565  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2566  "shobj_description(oid, 'pg_database') AS description "
2567 
2568  "FROM pg_database "
2569  "WHERE datname = ",
2571  appendStringLiteralAH(dbQry, datname, fout);
2572  }
2573  else if (fout->remoteVersion >= 80400)
2574  {
2575  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
2576  "(%s datdba) AS dba, "
2577  "pg_encoding_to_char(encoding) AS encoding, "
2578  "datcollate, datctype, datfrozenxid, 0 AS datminmxid, "
2579  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2580  "shobj_description(oid, 'pg_database') AS description "
2581 
2582  "FROM pg_database "
2583  "WHERE datname = ",
2585  appendStringLiteralAH(dbQry, datname, fout);
2586  }
2587  else if (fout->remoteVersion >= 80200)
2588  {
2589  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
2590  "(%s datdba) AS dba, "
2591  "pg_encoding_to_char(encoding) AS encoding, "
2592  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2593  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
2594  "shobj_description(oid, 'pg_database') AS description "
2595 
2596  "FROM pg_database "
2597  "WHERE datname = ",
2599  appendStringLiteralAH(dbQry, datname, fout);
2600  }
2601  else
2602  {
2603  appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
2604  "(%s datdba) AS dba, "
2605  "pg_encoding_to_char(encoding) AS encoding, "
2606  "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
2607  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace "
2608  "FROM pg_database "
2609  "WHERE datname = ",
2611  appendStringLiteralAH(dbQry, datname, fout);
2612  }
2613 
2614  res = ExecuteSqlQueryForSingleRow(fout, dbQry->data);
2615 
2616  i_tableoid = PQfnumber(res, "tableoid");
2617  i_oid = PQfnumber(res, "oid");
2618  i_dba = PQfnumber(res, "dba");
2619  i_encoding = PQfnumber(res, "encoding");
2620  i_collate = PQfnumber(res, "datcollate");
2621  i_ctype = PQfnumber(res, "datctype");
2622  i_frozenxid = PQfnumber(res, "datfrozenxid");
2623  i_minmxid = PQfnumber(res, "datminmxid");
2624  i_tablespace = PQfnumber(res, "tablespace");
2625 
2626  dbCatId.tableoid = atooid(PQgetvalue(res, 0, i_tableoid));
2627  dbCatId.oid = atooid(PQgetvalue(res, 0, i_oid));
2628  dba = PQgetvalue(res, 0, i_dba);
2629  encoding = PQgetvalue(res, 0, i_encoding);
2630  collate = PQgetvalue(res, 0, i_collate);
2631  ctype = PQgetvalue(res, 0, i_ctype);
2632  frozenxid = atooid(PQgetvalue(res, 0, i_frozenxid));
2633  minmxid = atooid(PQgetvalue(res, 0, i_minmxid));
2634  tablespace = PQgetvalue(res, 0, i_tablespace);
2635 
2636  appendPQExpBuffer(creaQry, "CREATE DATABASE %s WITH TEMPLATE = template0",
2637  fmtId(datname));
2638  if (strlen(encoding) > 0)
2639  {
2640  appendPQExpBufferStr(creaQry, " ENCODING = ");
2641  appendStringLiteralAH(creaQry, encoding, fout);
2642  }
2643  if (strlen(collate) > 0)
2644  {
2645  appendPQExpBufferStr(creaQry, " LC_COLLATE = ");
2646  appendStringLiteralAH(creaQry, collate, fout);
2647  }
2648  if (strlen(ctype) > 0)
2649  {
2650  appendPQExpBufferStr(creaQry, " LC_CTYPE = ");
2651  appendStringLiteralAH(creaQry, ctype, fout);
2652  }
2653  if (strlen(tablespace) > 0 && strcmp(tablespace, "pg_default") != 0 &&
2654  !dopt->outputNoTablespaces)
2655  appendPQExpBuffer(creaQry, " TABLESPACE = %s",
2656  fmtId(tablespace));
2657  appendPQExpBufferStr(creaQry, ";\n");
2658 
2659  if (dopt->binary_upgrade)
2660  {
2661  appendPQExpBufferStr(creaQry, "\n-- For binary upgrade, set datfrozenxid and datminmxid.\n");
2662  appendPQExpBuffer(creaQry, "UPDATE pg_catalog.pg_database\n"
2663  "SET datfrozenxid = '%u', datminmxid = '%u'\n"
2664  "WHERE datname = ",
2665  frozenxid, minmxid);
2666  appendStringLiteralAH(creaQry, datname, fout);
2667  appendPQExpBufferStr(creaQry, ";\n");
2668 
2669  }
2670 
2671  appendPQExpBuffer(delQry, "DROP DATABASE %s;\n",
2672  fmtId(datname));
2673 
2674  dbDumpId = createDumpId();
2675 
2676  ArchiveEntry(fout,
2677  dbCatId, /* catalog ID */
2678  dbDumpId, /* dump ID */
2679  datname, /* Name */
2680  NULL, /* Namespace */
2681  NULL, /* Tablespace */
2682  dba, /* Owner */
2683  false, /* with oids */
2684  "DATABASE", /* Desc */
2685  SECTION_PRE_DATA, /* Section */
2686  creaQry->data, /* Create */
2687  delQry->data, /* Del */
2688  NULL, /* Copy */
2689  NULL, /* Deps */
2690  0, /* # Deps */
2691  NULL, /* Dumper */
2692  NULL); /* Dumper Arg */
2693 
2694  /*
2695  * pg_largeobject and pg_largeobject_metadata come from the old system
2696  * intact, so set their relfrozenxids and relminmxids.
2697  */
2698  if (dopt->binary_upgrade)
2699  {
2700  PGresult *lo_res;
2701  PQExpBuffer loFrozenQry = createPQExpBuffer();
2702  PQExpBuffer loOutQry = createPQExpBuffer();
2703  int i_relfrozenxid,
2704  i_relminmxid;
2705 
2706  /*
2707  * pg_largeobject
2708  */
2709  if (fout->remoteVersion >= 90300)
2710  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid\n"
2711  "FROM pg_catalog.pg_class\n"
2712  "WHERE oid = %u;\n",
2714  else
2715  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid\n"
2716  "FROM pg_catalog.pg_class\n"
2717  "WHERE oid = %u;\n",
2719 
2720  lo_res = ExecuteSqlQueryForSingleRow(fout, loFrozenQry->data);
2721 
2722  i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
2723  i_relminmxid = PQfnumber(lo_res, "relminmxid");
2724 
2725  appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, set pg_largeobject relfrozenxid and relminmxid\n");
2726  appendPQExpBuffer(loOutQry, "UPDATE pg_catalog.pg_class\n"
2727  "SET relfrozenxid = '%u', relminmxid = '%u'\n"
2728  "WHERE oid = %u;\n",
2729  atoi(PQgetvalue(lo_res, 0, i_relfrozenxid)),
2730  atoi(PQgetvalue(lo_res, 0, i_relminmxid)),
2732  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2733  "pg_largeobject", NULL, NULL, "",
2734  false, "pg_largeobject", SECTION_PRE_DATA,
2735  loOutQry->data, "", NULL,
2736  NULL, 0,
2737  NULL, NULL);
2738 
2739  PQclear(lo_res);
2740 
2741  /*
2742  * pg_largeobject_metadata
2743  */
2744  if (fout->remoteVersion >= 90000)
2745  {
2746  resetPQExpBuffer(loFrozenQry);
2747  resetPQExpBuffer(loOutQry);
2748 
2749  if (fout->remoteVersion >= 90300)
2750  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid\n"
2751  "FROM pg_catalog.pg_class\n"
2752  "WHERE oid = %u;\n",
2754  else
2755  appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid\n"
2756  "FROM pg_catalog.pg_class\n"
2757  "WHERE oid = %u;\n",
2759 
2760  lo_res = ExecuteSqlQueryForSingleRow(fout, loFrozenQry->data);
2761 
2762  i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
2763  i_relminmxid = PQfnumber(lo_res, "relminmxid");
2764 
2765  appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, set pg_largeobject_metadata relfrozenxid and relminmxid\n");
2766  appendPQExpBuffer(loOutQry, "UPDATE pg_catalog.pg_class\n"
2767  "SET relfrozenxid = '%u', relminmxid = '%u'\n"
2768  "WHERE oid = %u;\n",
2769  atoi(PQgetvalue(lo_res, 0, i_relfrozenxid)),
2770  atoi(PQgetvalue(lo_res, 0, i_relminmxid)),
2772  ArchiveEntry(fout, nilCatalogId, createDumpId(),
2773  "pg_largeobject_metadata", NULL, NULL, "",
2774  false, "pg_largeobject_metadata", SECTION_PRE_DATA,
2775  loOutQry->data, "", NULL,
2776  NULL, 0,
2777  NULL, NULL);
2778 
2779  PQclear(lo_res);
2780  }
2781 
2782  destroyPQExpBuffer(loFrozenQry);
2783  destroyPQExpBuffer(loOutQry);
2784  }
2785 
2786  /* Dump DB comment if any */
2787  if (fout->remoteVersion >= 80200)
2788  {
2789  /*
2790  * 8.2 keeps comments on shared objects in a shared table, so we
2791  * cannot use the dumpComment used for other database objects.
2792  */
2793  char *comment = PQgetvalue(res, 0, PQfnumber(res, "description"));
2794 
2795  if (comment && strlen(comment))
2796  {
2797  resetPQExpBuffer(dbQry);
2798 
2799  /*
2800  * Generates warning when loaded into a differently-named
2801  * database.
2802  */
2803  appendPQExpBuffer(dbQry, "COMMENT ON DATABASE %s IS ", fmtId(datname));
2804  appendStringLiteralAH(dbQry, comment, fout);
2805  appendPQExpBufferStr(dbQry, ";\n");
2806 
2807  ArchiveEntry(fout, dbCatId, createDumpId(), datname, NULL, NULL,
2808  dba, false, "COMMENT", SECTION_NONE,
2809  dbQry->data, "", NULL,
2810  &dbDumpId, 1, NULL, NULL);
2811  }
2812  }
2813  else
2814  {
2815  resetPQExpBuffer(dbQry);
2816  appendPQExpBuffer(dbQry, "DATABASE %s", fmtId(datname));
2817  dumpComment(fout, dbQry->data, NULL, "",
2818  dbCatId, 0, dbDumpId);
2819  }
2820 
2821  /* Dump shared security label. */
2822  if (!dopt->no_security_labels && fout->remoteVersion >= 90200)
2823  {
2824  PGresult *shres;
2825  PQExpBuffer seclabelQry;
2826 
2827  seclabelQry = createPQExpBuffer();
2828 
2829  buildShSecLabelQuery(conn, "pg_database", dbCatId.oid, seclabelQry);
2830  shres = ExecuteSqlQuery(fout, seclabelQry->data, PGRES_TUPLES_OK);
2831  resetPQExpBuffer(seclabelQry);
2832  emitShSecLabels(conn, shres, seclabelQry, "DATABASE", datname);
2833  if (strlen(seclabelQry->data))
2834  ArchiveEntry(fout, dbCatId, createDumpId(), datname, NULL, NULL,
2835  dba, false, "SECURITY LABEL", SECTION_NONE,
2836  seclabelQry->data, "", NULL,
2837  &dbDumpId, 1, NULL, NULL);
2838  destroyPQExpBuffer(seclabelQry);
2839  PQclear(shres);
2840  }
2841 
2842  PQclear(res);
2843 
2844  destroyPQExpBuffer(dbQry);
2845  destroyPQExpBuffer(delQry);
2846  destroyPQExpBuffer(creaQry);
2847 }
2848 
2849 /*
2850  * dumpEncoding: put the correct encoding into the archive
2851  */
2852 static void
2854 {
2855  const char *encname = pg_encoding_to_char(AH->encoding);
2857 
2858  if (g_verbose)
2859  write_msg(NULL, "saving encoding = %s\n", encname);
2860 
2861  appendPQExpBufferStr(qry, "SET client_encoding = ");
2862  appendStringLiteralAH(qry, encname, AH);
2863  appendPQExpBufferStr(qry, ";\n");
2864 
2865  ArchiveEntry(AH, nilCatalogId, createDumpId(),
2866  "ENCODING", NULL, NULL, "",
2867  false, "ENCODING", SECTION_PRE_DATA,
2868  qry->data, "", NULL,
2869  NULL, 0,
2870  NULL, NULL);
2871 
2872  destroyPQExpBuffer(qry);
2873 }
2874 
2875 
2876 /*
2877  * dumpStdStrings: put the correct escape string behavior into the archive
2878  */
2879 static void
2881 {
2882  const char *stdstrings = AH->std_strings ? "on" : "off";
2884 
2885  if (g_verbose)
2886  write_msg(NULL, "saving standard_conforming_strings = %s\n",
2887  stdstrings);
2888 
2889  appendPQExpBuffer(qry, "SET standard_conforming_strings = '%s';\n",
2890  stdstrings);
2891 
2892  ArchiveEntry(AH, nilCatalogId, createDumpId(),
2893  "STDSTRINGS", NULL, NULL, "",
2894  false, "STDSTRINGS", SECTION_PRE_DATA,
2895  qry->data, "", NULL,
2896  NULL, 0,
2897  NULL, NULL);
2898 
2899  destroyPQExpBuffer(qry);
2900 }
2901 
2902 
2903 /*
2904  * getBlobs:
2905  * Collect schema-level data about large objects
2906  */
2907 static void
2909 {
2910  DumpOptions *dopt = fout->dopt;
2911  PQExpBuffer blobQry = createPQExpBuffer();
2912  BlobInfo *binfo;
2913  DumpableObject *bdata;
2914  PGresult *res;
2915  int ntups;
2916  int i;
2917  int i_oid;
2918  int i_lomowner;
2919  int i_lomacl;
2920  int i_rlomacl;
2921  int i_initlomacl;
2922  int i_initrlomacl;
2923 
2924  /* Verbose message */
2925  if (g_verbose)
2926  write_msg(NULL, "reading large objects\n");
2927 
2928  /* Make sure we are in proper schema */
2929  selectSourceSchema(fout, "pg_catalog");
2930 
2931  /* Fetch BLOB OIDs, and owner/ACL data if >= 9.0 */
2932  if (fout->remoteVersion >= 90600)
2933  {
2934  PQExpBuffer acl_subquery = createPQExpBuffer();
2935  PQExpBuffer racl_subquery = createPQExpBuffer();
2936  PQExpBuffer init_acl_subquery = createPQExpBuffer();
2937  PQExpBuffer init_racl_subquery = createPQExpBuffer();
2938 
2939  buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
2940  init_racl_subquery, "l.lomacl", "l.lomowner", "'L'",
2941  dopt->binary_upgrade);
2942 
2943  appendPQExpBuffer(blobQry,
2944  "SELECT l.oid, (%s l.lomowner) AS rolname, "
2945  "%s AS lomacl, "
2946  "%s AS rlomacl, "
2947  "%s AS initlomacl, "
2948  "%s AS initrlomacl "
2949  "FROM pg_largeobject_metadata l "
2950  "LEFT JOIN pg_init_privs pip ON "
2951  "(l.oid = pip.objoid "
2952  "AND pip.classoid = 'pg_largeobject'::regclass "
2953  "AND pip.objsubid = 0) ",
2955  acl_subquery->data,
2956  racl_subquery->data,
2957  init_acl_subquery->data,
2958  init_racl_subquery->data);
2959 
2960  destroyPQExpBuffer(acl_subquery);
2961  destroyPQExpBuffer(racl_subquery);
2962  destroyPQExpBuffer(init_acl_subquery);
2963  destroyPQExpBuffer(init_racl_subquery);
2964  }
2965  else if (fout->remoteVersion >= 90000)
2966  appendPQExpBuffer(blobQry,
2967  "SELECT oid, (%s lomowner) AS rolname, lomacl, "
2968  "NULL AS rlomacl, NULL AS initlomacl, "
2969  "NULL AS initrlomacl "
2970  " FROM pg_largeobject_metadata",
2972  else
2973  appendPQExpBufferStr(blobQry,
2974  "SELECT DISTINCT loid AS oid, "
2975  "NULL::name AS rolname, NULL::oid AS lomacl, "
2976  "NULL::oid AS rlomacl, NULL::oid AS initlomacl, "
2977  "NULL::oid AS initrlomacl "
2978  " FROM pg_largeobject");
2979 
2980  res = ExecuteSqlQuery(fout, blobQry->data, PGRES_TUPLES_OK);
2981 
2982  i_oid = PQfnumber(res, "oid");
2983  i_lomowner = PQfnumber(res, "rolname");
2984  i_lomacl = PQfnumber(res, "lomacl");
2985  i_rlomacl = PQfnumber(res, "rlomacl");
2986  i_initlomacl = PQfnumber(res, "initlomacl");
2987  i_initrlomacl = PQfnumber(res, "initrlomacl");
2988 
2989  ntups = PQntuples(res);
2990 
2991  /*
2992  * Each large object has its own BLOB archive entry.
2993  */
2994  binfo = (BlobInfo *) pg_malloc(ntups * sizeof(BlobInfo));
2995 
2996  for (i = 0; i < ntups; i++)
2997  {
2998  binfo[i].dobj.objType = DO_BLOB;
3000  binfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
3001  AssignDumpId(&binfo[i].dobj);
3002 
3003  binfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oid));
3004  binfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_lomowner));
3005  binfo[i].blobacl = pg_strdup(PQgetvalue(res, i, i_lomacl));
3006  binfo[i].rblobacl = pg_strdup(PQgetvalue(res, i, i_rlomacl));
3007  binfo[i].initblobacl = pg_strdup(PQgetvalue(res, i, i_initlomacl));
3008  binfo[i].initrblobacl = pg_strdup(PQgetvalue(res, i, i_initrlomacl));
3009 
3010  if (PQgetisnull(res, i, i_lomacl) &&
3011  PQgetisnull(res, i, i_rlomacl) &&
3012  PQgetisnull(res, i, i_initlomacl) &&
3013  PQgetisnull(res, i, i_initrlomacl))
3014  binfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
3015 
3016  /*
3017  * In binary-upgrade mode for blobs, we do *not* dump out the data or
3018  * the ACLs, should any exist. The data and ACL (if any) will be
3019  * copied by pg_upgrade, which simply copies the pg_largeobject and
3020  * pg_largeobject_metadata tables.
3021  *
3022  * We *do* dump out the definition of the blob because we need that to
3023  * make the restoration of the comments, and anything else, work since
3024  * pg_upgrade copies the files behind pg_largeobject and
3025  * pg_largeobject_metadata after the dump is restored.
3026  */
3027  if (dopt->binary_upgrade)
3029  }
3030 
3031  /*
3032  * If we have any large objects, a "BLOBS" archive entry is needed. This
3033  * is just a placeholder for sorting; it carries no data now.
3034  */
3035  if (ntups > 0)
3036  {
3037  bdata = (DumpableObject *) pg_malloc(sizeof(DumpableObject));
3038  bdata->objType = DO_BLOB_DATA;
3039  bdata->catId = nilCatalogId;
3040  AssignDumpId(bdata);
3041  bdata->name = pg_strdup("BLOBS");
3042  }
3043 
3044  PQclear(res);
3045  destroyPQExpBuffer(blobQry);
3046 }
3047 
3048 /*
3049  * dumpBlob
3050  *
3051  * dump the definition (metadata) of the given large object
3052  */
3053 static void
3054 dumpBlob(Archive *fout, BlobInfo *binfo)
3055 {
3056  PQExpBuffer cquery = createPQExpBuffer();
3057  PQExpBuffer dquery = createPQExpBuffer();
3058 
3059  appendPQExpBuffer(cquery,
3060  "SELECT pg_catalog.lo_create('%s');\n",
3061  binfo->dobj.name);
3062 
3063  appendPQExpBuffer(dquery,
3064  "SELECT pg_catalog.lo_unlink('%s');\n",
3065  binfo->dobj.name);
3066 
3067  if (binfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
3068  ArchiveEntry(fout, binfo->dobj.catId, binfo->dobj.dumpId,
3069  binfo->dobj.name,
3070  NULL, NULL,
3071  binfo->rolname, false,
3072  "BLOB", SECTION_PRE_DATA,
3073  cquery->data, dquery->data, NULL,
3074  NULL, 0,
3075  NULL, NULL);
3076 
3077  /* set up tag for comment and/or ACL */
3078  resetPQExpBuffer(cquery);
3079  appendPQExpBuffer(cquery, "LARGE OBJECT %s", binfo->dobj.name);
3080 
3081  /* Dump comment if any */
3082  if (binfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3083  dumpComment(fout, cquery->data,
3084  NULL, binfo->rolname,
3085  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3086 
3087  /* Dump security label if any */
3088  if (binfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3089  dumpSecLabel(fout, cquery->data,
3090  NULL, binfo->rolname,
3091  binfo->dobj.catId, 0, binfo->dobj.dumpId);
3092 
3093  /* Dump ACL if any */
3094  if (binfo->blobacl && (binfo->dobj.dump & DUMP_COMPONENT_ACL))
3095  dumpACL(fout, binfo->dobj.catId, binfo->dobj.dumpId, "LARGE OBJECT",
3096  binfo->dobj.name, NULL, cquery->data,
3097  NULL, binfo->rolname, binfo->blobacl, binfo->rblobacl,
3098  binfo->initblobacl, binfo->initrblobacl);
3099 
3100  destroyPQExpBuffer(cquery);
3101  destroyPQExpBuffer(dquery);
3102 }
3103 
3104 /*
3105  * dumpBlobs:
3106  * dump the data contents of all large objects
3107  */
static int
dumpBlobs(Archive *fout, void *arg)
{
	/* "arg" is unused here; presumably required by the data-dumper callback
	 * signature — confirm against the ArchiveEntry registration. */
	const char *blobQry;
	const char *blobFetchQry;
	PGconn	   *conn = GetConnection(fout);
	PGresult   *res;
	char		buf[LOBBUFSIZE];	/* chunk buffer for lo_read() */
	int			ntups;
	int			i;
	int			cnt;

	if (g_verbose)
		write_msg(NULL, "saving large objects\n");

	/* Make sure we are in proper schema */
	selectSourceSchema(fout, "pg_catalog");

	/*
	 * Currently, we re-fetch all BLOB OIDs using a cursor.  Consider scanning
	 * the already-in-memory dumpable objects instead...
	 */
	if (fout->remoteVersion >= 90000)
		blobQry = "DECLARE bloboid CURSOR FOR SELECT oid FROM pg_largeobject_metadata";
	else
		blobQry = "DECLARE bloboid CURSOR FOR SELECT DISTINCT loid FROM pg_largeobject";

	ExecuteSqlStatement(fout, blobQry);

	/* Command to fetch from cursor; batching keeps memory use bounded */
	blobFetchQry = "FETCH 1000 IN bloboid";

	do
	{
		/* Do a fetch */
		res = ExecuteSqlQuery(fout, blobFetchQry, PGRES_TUPLES_OK);

		/* Process the tuples, if any */
		ntups = PQntuples(res);
		for (i = 0; i < ntups; i++)
		{
			Oid			blobOid;
			int			loFd;

			blobOid = atooid(PQgetvalue(res, i, 0));
			/* Open the BLOB */
			loFd = lo_open(conn, blobOid, INV_READ);
			if (loFd == -1)
				exit_horribly(NULL, "could not open large object %u: %s",
							  blobOid, PQerrorMessage(conn));

			StartBlob(fout, blobOid);

			/* Now read it in chunks, sending data to archive */
			do
			{
				cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
				if (cnt < 0)
					exit_horribly(NULL, "error reading large object %u: %s",
								  blobOid, PQerrorMessage(conn));

				WriteData(fout, buf, cnt);
			} while (cnt > 0);	/* zero read marks end of the blob */

			lo_close(conn, loFd);

			EndBlob(fout, blobOid);
		}

		PQclear(res);
	} while (ntups > 0);		/* empty fetch means cursor is exhausted */

	return 1;
}
3182 
3183 /*
3184  * getPolicies
3185  * get information about policies on a dumpable table.
3186  */
void
getPolicies(Archive *fout, TableInfo tblinfo[], int numTables)
{
	PQExpBuffer query;
	PGresult   *res;
	PolicyInfo *polinfo;
	int			i_oid;
	int			i_tableoid;
	int			i_polname;
	int			i_polcmd;
	int			i_polpermissive;
	int			i_polroles;
	int			i_polqual;
	int			i_polwithcheck;
	int			i,
				j,
				ntups;

	/* Row-level security (pg_policy) does not exist before 9.5 */
	if (fout->remoteVersion < 90500)
		return;

	query = createPQExpBuffer();

	for (i = 0; i < numTables; i++)
	{
		TableInfo  *tbinfo = &tblinfo[i];

		/* Ignore row security on tables not to be dumped */
		if (!(tbinfo->dobj.dump & DUMP_COMPONENT_POLICY))
			continue;

		if (g_verbose)
			write_msg(NULL, "reading row security enabled for table \"%s.%s\"\n",
					  tbinfo->dobj.namespace->dobj.name,
					  tbinfo->dobj.name);

		/*
		 * Get row security enabled information for the table. We represent
		 * RLS enabled on a table by creating PolicyInfo object with an empty
		 * policy.
		 */
		if (tbinfo->rowsec)
		{
			/*
			 * Note: use tableoid 0 so that this object won't be mistaken for
			 * something that pg_depend entries apply to.
			 */
			polinfo = pg_malloc(sizeof(PolicyInfo));
			polinfo->dobj.objType = DO_POLICY;
			polinfo->dobj.catId.tableoid = 0;
			polinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
			AssignDumpId(&polinfo->dobj);
			polinfo->dobj.namespace = tbinfo->dobj.namespace;
			polinfo->dobj.name = pg_strdup(tbinfo->dobj.name);
			polinfo->poltable = tbinfo;
			/* NULL polname marks this as the ENABLE ROW LEVEL SECURITY entry */
			polinfo->polname = NULL;
			polinfo->polcmd = '\0';
			polinfo->polpermissive = 0;
			polinfo->polroles = NULL;
			polinfo->polqual = NULL;
			polinfo->polwithcheck = NULL;
		}

		if (g_verbose)
			write_msg(NULL, "reading policies for table \"%s.%s\"\n",
					  tbinfo->dobj.namespace->dobj.name,
					  tbinfo->dobj.name);

		/*
		 * select table schema to ensure regproc name is qualified if needed
		 */
		selectSourceSchema(fout, tbinfo->dobj.namespace->dobj.name);

		resetPQExpBuffer(query);

		/*
		 * Get the policies for the table.  polpermissive only exists in v10+;
		 * older servers have only permissive policies, so fake it as 't'.
		 */
		if (fout->remoteVersion >= 100000)
			appendPQExpBuffer(query,
							  "SELECT oid, tableoid, pol.polname, pol.polcmd, pol.polpermissive, "
							  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
							  "   pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
							  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
							  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
							  "FROM pg_catalog.pg_policy pol "
							  "WHERE polrelid = '%u'",
							  tbinfo->dobj.catId.oid);
		else
			appendPQExpBuffer(query,
							  "SELECT oid, tableoid, pol.polname, pol.polcmd, 't' as polpermissive, "
							  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
							  "   pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
							  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
							  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
							  "FROM pg_catalog.pg_policy pol "
							  "WHERE polrelid = '%u'",
							  tbinfo->dobj.catId.oid);
		res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);

		ntups = PQntuples(res);

		if (ntups == 0)
		{
			/*
			 * No explicit policies to handle (only the default-deny policy,
			 * which is handled as part of the table definition).  Clean up
			 * and return.
			 */
			PQclear(res);
			continue;
		}

		i_oid = PQfnumber(res, "oid");
		i_tableoid = PQfnumber(res, "tableoid");
		i_polname = PQfnumber(res, "polname");
		i_polcmd = PQfnumber(res, "polcmd");
		i_polpermissive = PQfnumber(res, "polpermissive");
		i_polroles = PQfnumber(res, "polroles");
		i_polqual = PQfnumber(res, "polqual");
		i_polwithcheck = PQfnumber(res, "polwithcheck");

		polinfo = pg_malloc(ntups * sizeof(PolicyInfo));

		for (j = 0; j < ntups; j++)
		{
			polinfo[j].dobj.objType = DO_POLICY;
			polinfo[j].dobj.catId.tableoid =
				atooid(PQgetvalue(res, j, i_tableoid));
			polinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
			AssignDumpId(&polinfo[j].dobj);
			polinfo[j].dobj.namespace = tbinfo->dobj.namespace;
			polinfo[j].poltable = tbinfo;
			polinfo[j].polname = pg_strdup(PQgetvalue(res, j, i_polname));
			polinfo[j].dobj.name = pg_strdup(polinfo[j].polname);

			/* polcmd is a single-character command code */
			polinfo[j].polcmd = *(PQgetvalue(res, j, i_polcmd));
			polinfo[j].polpermissive = *(PQgetvalue(res, j, i_polpermissive)) == 't';

			if (PQgetisnull(res, j, i_polroles))
				polinfo[j].polroles = NULL;
			else
				polinfo[j].polroles = pg_strdup(PQgetvalue(res, j, i_polroles));

			if (PQgetisnull(res, j, i_polqual))
				polinfo[j].polqual = NULL;
			else
				polinfo[j].polqual = pg_strdup(PQgetvalue(res, j, i_polqual));

			if (PQgetisnull(res, j, i_polwithcheck))
				polinfo[j].polwithcheck = NULL;
			else
				polinfo[j].polwithcheck
					= pg_strdup(PQgetvalue(res, j, i_polwithcheck));
		}
		PQclear(res);
	}
	destroyPQExpBuffer(query);
}
3344 
3345 /*
3346  * dumpPolicy
3347  * dump the definition of the given policy
3348  */
3349 static void
3351 {
3352  DumpOptions *dopt = fout->dopt;
3353  TableInfo *tbinfo = polinfo->poltable;
3354  PQExpBuffer query;
3355  PQExpBuffer delqry;
3356  const char *cmd;
3357  char *tag;
3358 
3359  if (dopt->dataOnly)
3360  return;
3361 
3362  /*
3363  * If polname is NULL, then this record is just indicating that ROW LEVEL
3364  * SECURITY is enabled for the table. Dump as ALTER TABLE <table> ENABLE
3365  * ROW LEVEL SECURITY.
3366  */
3367  if (polinfo->polname == NULL)
3368  {
3369  query = createPQExpBuffer();
3370 
3371  appendPQExpBuffer(query, "ALTER TABLE %s ENABLE ROW LEVEL SECURITY;",
3372  fmtId(polinfo->dobj.name));
3373 
3374  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3375  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3376  polinfo->dobj.name,
3377  polinfo->dobj.namespace->dobj.name,
3378  NULL,
3379  tbinfo->rolname, false,
3380  "ROW SECURITY", SECTION_POST_DATA,
3381  query->data, "", NULL,
3382  NULL, 0,
3383  NULL, NULL);
3384 
3385  destroyPQExpBuffer(query);
3386  return;
3387  }
3388 
3389  if (polinfo->polcmd == '*')
3390  cmd = "";
3391  else if (polinfo->polcmd == 'r')
3392  cmd = " FOR SELECT";
3393  else if (polinfo->polcmd == 'a')
3394  cmd = " FOR INSERT";
3395  else if (polinfo->polcmd == 'w')
3396  cmd = " FOR UPDATE";
3397  else if (polinfo->polcmd == 'd')
3398  cmd = " FOR DELETE";
3399  else
3400  {
3401  write_msg(NULL, "unexpected policy command type: %c\n",
3402  polinfo->polcmd);
3403  exit_nicely(1);
3404  }
3405 
3406  query = createPQExpBuffer();
3407  delqry = createPQExpBuffer();
3408 
3409  appendPQExpBuffer(query, "CREATE POLICY %s", fmtId(polinfo->polname));
3410 
3411  appendPQExpBuffer(query, " ON %s%s%s", fmtId(tbinfo->dobj.name),
3412  !polinfo->polpermissive ? " AS RESTRICTIVE" : "", cmd);
3413 
3414  if (polinfo->polroles != NULL)
3415  appendPQExpBuffer(query, " TO %s", polinfo->polroles);
3416 
3417  if (polinfo->polqual != NULL)
3418  appendPQExpBuffer(query, " USING (%s)", polinfo->polqual);
3419 
3420  if (polinfo->polwithcheck != NULL)
3421  appendPQExpBuffer(query, " WITH CHECK (%s)", polinfo->polwithcheck);
3422 
3423  appendPQExpBuffer(query, ";\n");
3424 
3425  appendPQExpBuffer(delqry, "DROP POLICY %s", fmtId(polinfo->polname));
3426  appendPQExpBuffer(delqry, " ON %s;\n", fmtId(tbinfo->dobj.name));
3427 
3428  tag = psprintf("%s %s", tbinfo->dobj.name, polinfo->dobj.name);
3429 
3430  if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
3431  ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
3432  tag,
3433  polinfo->dobj.namespace->dobj.name,
3434  NULL,
3435  tbinfo->rolname, false,
3436  "POLICY", SECTION_POST_DATA,
3437  query->data, delqry->data, NULL,
3438  NULL, 0,
3439  NULL, NULL);
3440 
3441  free(tag);
3442  destroyPQExpBuffer(query);
3443  destroyPQExpBuffer(delqry);
3444 }
3445 
3446 /*
3447  * getPublications
3448  * get information about publications
3449  */
3450 void
3452 {
3453  DumpOptions *dopt = fout->dopt;
3454  PQExpBuffer query;
3455  PGresult *res;
3456  PublicationInfo *pubinfo;
3457  int i_tableoid;
3458  int i_oid;
3459  int i_pubname;
3460  int i_rolname;
3461  int i_puballtables;
3462  int i_pubinsert;
3463  int i_pubupdate;
3464  int i_pubdelete;
3465  int i,
3466  ntups;
3467 
3468  if (dopt->no_publications || fout->remoteVersion < 100000)
3469  return;
3470 
3471  query = createPQExpBuffer();
3472 
3473  resetPQExpBuffer(query);
3474 
3475  /* Get the publications. */
3476  appendPQExpBuffer(query,
3477  "SELECT p.tableoid, p.oid, p.pubname, "
3478  "(%s p.pubowner) AS rolname, "
3479  "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete "
3480  "FROM pg_catalog.pg_publication p",
3482 
3483  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3484 
3485  ntups = PQntuples(res);
3486 
3487  i_tableoid = PQfnumber(res, "tableoid");
3488  i_oid = PQfnumber(res, "oid");
3489  i_pubname = PQfnumber(res, "pubname");
3490  i_rolname = PQfnumber(res, "rolname");
3491  i_puballtables = PQfnumber(res, "puballtables");
3492  i_pubinsert = PQfnumber(res, "pubinsert");
3493  i_pubupdate = PQfnumber(res, "pubupdate");
3494  i_pubdelete = PQfnumber(res, "pubdelete");
3495 
3496  pubinfo = pg_malloc(ntups * sizeof(PublicationInfo));
3497 
3498  for (i = 0; i < ntups; i++)
3499  {
3500  pubinfo[i].dobj.objType = DO_PUBLICATION;
3501  pubinfo[i].dobj.catId.tableoid =
3502  atooid(PQgetvalue(res, i, i_tableoid));
3503  pubinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
3504  AssignDumpId(&pubinfo[i].dobj);
3505  pubinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_pubname));
3506  pubinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
3507  pubinfo[i].puballtables =
3508  (strcmp(PQgetvalue(res, i, i_puballtables), "t") == 0);
3509  pubinfo[i].pubinsert =
3510  (strcmp(PQgetvalue(res, i, i_pubinsert), "t") == 0);
3511  pubinfo[i].pubupdate =
3512  (strcmp(PQgetvalue(res, i, i_pubupdate), "t") == 0);
3513  pubinfo[i].pubdelete =
3514  (strcmp(PQgetvalue(res, i, i_pubdelete), "t") == 0);
3515 
3516  if (strlen(pubinfo[i].rolname) == 0)
3517  write_msg(NULL, "WARNING: owner of publication \"%s\" appears to be invalid\n",
3518  pubinfo[i].dobj.name);
3519 
3520  /* Decide whether we want to dump it */
3521  selectDumpableObject(&(pubinfo[i].dobj), fout);
3522  }
3523  PQclear(res);
3524 
3525  destroyPQExpBuffer(query);
3526 }
3527 
3528 /*
3529  * dumpPublication
3530  * dump the definition of the given publication
3531  */
3532 static void
3534 {
3535  PQExpBuffer delq;
3536  PQExpBuffer query;
3537  PQExpBuffer labelq;
3538  bool first = true;
3539 
3540  if (!(pubinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3541  return;
3542 
3543  delq = createPQExpBuffer();
3544  query = createPQExpBuffer();
3545  labelq = createPQExpBuffer();
3546 
3547  appendPQExpBuffer(delq, "DROP PUBLICATION %s;\n",
3548  fmtId(pubinfo->dobj.name));
3549 
3550  appendPQExpBuffer(query, "CREATE PUBLICATION %s",
3551  fmtId(pubinfo->dobj.name));
3552 
3553  appendPQExpBuffer(labelq, "PUBLICATION %s", fmtId(pubinfo->dobj.name));
3554 
3555  if (pubinfo->puballtables)
3556  appendPQExpBufferStr(query, " FOR ALL TABLES");
3557 
3558  appendPQExpBufferStr(query, " WITH (publish = '");
3559  if (pubinfo->pubinsert)
3560  {
3561  appendPQExpBufferStr(query, "insert");
3562  first = false;
3563  }
3564 
3565  if (pubinfo->pubupdate)
3566  {
3567  if (!first)
3568  appendPQExpBufferStr(query, ", ");
3569 
3570  appendPQExpBufferStr(query, "update");
3571  first = false;
3572  }
3573 
3574  if (pubinfo->pubdelete)
3575  {
3576  if (!first)
3577  appendPQExpBufferStr(query, ", ");
3578 
3579  appendPQExpBufferStr(query, "delete");
3580  first = false;
3581  }
3582 
3583  appendPQExpBufferStr(query, "');\n");
3584 
3585  ArchiveEntry(fout, pubinfo->dobj.catId, pubinfo->dobj.dumpId,
3586  pubinfo->dobj.name,
3587  NULL,
3588  NULL,
3589  pubinfo->rolname, false,
3590  "PUBLICATION", SECTION_POST_DATA,
3591  query->data, delq->data, NULL,
3592  NULL, 0,
3593  NULL, NULL);
3594 
3595  if (pubinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3596  dumpComment(fout, labelq->data,
3597  NULL, pubinfo->rolname,
3598  pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
3599 
3600  if (pubinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3601  dumpSecLabel(fout, labelq->data,
3602  NULL, pubinfo->rolname,
3603  pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
3604 
3605  destroyPQExpBuffer(delq);
3606  destroyPQExpBuffer(query);
3607 }
3608 
3609 /*
3610  * getPublicationTables
3611  * get information about publication membership for dumpable tables.
3612  */
3613 void
3615 {
3616  PQExpBuffer query;
3617  PGresult *res;
3618  PublicationRelInfo *pubrinfo;
3619  int i_tableoid;
3620  int i_oid;
3621  int i_pubname;
3622  int i,
3623  j,
3624  ntups;
3625 
3626  if (fout->remoteVersion < 100000)
3627  return;
3628 
3629  query = createPQExpBuffer();
3630 
3631  for (i = 0; i < numTables; i++)
3632  {
3633  TableInfo *tbinfo = &tblinfo[i];
3634 
3635  /* Only plain tables can be aded to publications. */
3636  if (tbinfo->relkind != RELKIND_RELATION)
3637  continue;
3638 
3639  /*
3640  * Ignore publication membership of tables whose definitions are not
3641  * to be dumped.
3642  */
3643  if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3644  continue;
3645 
3646  if (g_verbose)
3647  write_msg(NULL, "reading publication membership for table \"%s.%s\"\n",
3648  tbinfo->dobj.namespace->dobj.name,
3649  tbinfo->dobj.name);
3650 
3651  resetPQExpBuffer(query);
3652 
3653  /* Get the publication membership for the table. */
3654  appendPQExpBuffer(query,
3655  "SELECT pr.tableoid, pr.oid, p.pubname "
3656  "FROM pg_catalog.pg_publication_rel pr,"
3657  " pg_catalog.pg_publication p "
3658  "WHERE pr.prrelid = '%u'"
3659  " AND p.oid = pr.prpubid",
3660  tbinfo->dobj.catId.oid);
3661  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3662 
3663  ntups = PQntuples(res);
3664 
3665  if (ntups == 0)
3666  {
3667  /*
3668  * Table is not member of any publications. Clean up and return.
3669  */
3670  PQclear(res);
3671  continue;
3672  }
3673 
3674  i_tableoid = PQfnumber(res, "tableoid");
3675  i_oid = PQfnumber(res, "oid");
3676  i_pubname = PQfnumber(res, "pubname");
3677 
3678  pubrinfo = pg_malloc(ntups * sizeof(PublicationRelInfo));
3679 
3680  for (j = 0; j < ntups; j++)
3681  {
3682  pubrinfo[j].dobj.objType = DO_PUBLICATION_REL;
3683  pubrinfo[j].dobj.catId.tableoid =
3684  atooid(PQgetvalue(res, j, i_tableoid));
3685  pubrinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
3686  AssignDumpId(&pubrinfo[j].dobj);
3687  pubrinfo[j].dobj.namespace = tbinfo->dobj.namespace;
3688  pubrinfo[j].dobj.name = tbinfo->dobj.name;
3689  pubrinfo[j].pubname = pg_strdup(PQgetvalue(res, j, i_pubname));
3690  pubrinfo[j].pubtable = tbinfo;
3691 
3692  /* Decide whether we want to dump it */
3693  selectDumpablePublicationTable(&(pubrinfo[j].dobj), fout);
3694  }
3695  PQclear(res);
3696  }
3697  destroyPQExpBuffer(query);
3698 }
3699 
3700 /*
3701  * dumpPublicationTable
3702  * dump the definition of the given publication table mapping
3703  */
3704 static void
3706 {
3707  TableInfo *tbinfo = pubrinfo->pubtable;
3708  PQExpBuffer query;
3709  char *tag;
3710 
3711  if (!(pubrinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3712  return;
3713 
3714  tag = psprintf("%s %s", pubrinfo->pubname, tbinfo->dobj.name);
3715 
3716  query = createPQExpBuffer();
3717 
3718  appendPQExpBuffer(query, "ALTER PUBLICATION %s ADD TABLE ONLY",
3719  fmtId(pubrinfo->pubname));
3720  appendPQExpBuffer(query, " %s;",
3721  fmtId(tbinfo->dobj.name));
3722 
3723  /*
3724  * There is no point in creating drop query as drop query as the drop is
3725  * done by table drop.
3726  */
3727  ArchiveEntry(fout, pubrinfo->dobj.catId, pubrinfo->dobj.dumpId,
3728  tag,
3729  tbinfo->dobj.namespace->dobj.name,
3730  NULL,
3731  "", false,
3732  "PUBLICATION TABLE", SECTION_POST_DATA,
3733  query->data, "", NULL,
3734  NULL, 0,
3735  NULL, NULL);
3736 
3737  free(tag);
3738  destroyPQExpBuffer(query);
3739 }
3740 
3741 /*
3742  * Is the currently connected user a superuser?
3743  */
3744 static bool
3746 {
3747  ArchiveHandle *AH = (ArchiveHandle *) fout;
3748  const char *val;
3749 
3750  val = PQparameterStatus(AH->connection, "is_superuser");
3751 
3752  if (val && strcmp(val, "on") == 0)
3753  return true;
3754 
3755  return false;
3756 }
3757 
3758 /*
3759  * getSubscriptions
3760  * get information about subscriptions
3761  */
3762 void
3764 {
3765  DumpOptions *dopt = fout->dopt;
3766  PQExpBuffer query;
3767  PGresult *res;
3768  SubscriptionInfo *subinfo;
3769  int i_tableoid;
3770  int i_oid;
3771  int i_subname;
3772  int i_rolname;
3773  int i_subconninfo;
3774  int i_subslotname;
3775  int i_subsynccommit;
3776  int i_subpublications;
3777  int i,
3778  ntups;
3779 
3780  if (dopt->no_subscriptions || fout->remoteVersion < 100000)
3781  return;
3782 
3783  if (!is_superuser(fout))
3784  {
3785  int n;
3786 
3787  res = ExecuteSqlQuery(fout,
3788  "SELECT count(*) FROM pg_subscription "
3789  "WHERE subdbid = (SELECT oid FROM pg_catalog.pg_database"
3790  " WHERE datname = current_database())",
3791  PGRES_TUPLES_OK);
3792  n = atoi(PQgetvalue(res, 0, 0));
3793  if (n > 0)
3794  write_msg(NULL, "WARNING: subscriptions not dumped because current user is not a superuser\n");
3795  PQclear(res);
3796  return;
3797  }
3798 
3799  query = createPQExpBuffer();
3800 
3801  resetPQExpBuffer(query);
3802 
3803  /* Get the subscriptions in current database. */
3804  appendPQExpBuffer(query,
3805  "SELECT s.tableoid, s.oid, s.subname,"
3806  "(%s s.subowner) AS rolname, "
3807  " s.subconninfo, s.subslotname, s.subsynccommit, "
3808  " s.subpublications "
3809  "FROM pg_catalog.pg_subscription s "
3810  "WHERE s.subdbid = (SELECT oid FROM pg_catalog.pg_database"
3811  " WHERE datname = current_database())",
3813  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3814 
3815  ntups = PQntuples(res);
3816 
3817  i_tableoid = PQfnumber(res, "tableoid");
3818  i_oid = PQfnumber(res, "oid");
3819  i_subname = PQfnumber(res, "subname");
3820  i_rolname = PQfnumber(res, "rolname");
3821  i_subconninfo = PQfnumber(res, "subconninfo");
3822  i_subslotname = PQfnumber(res, "subslotname");
3823  i_subsynccommit = PQfnumber(res, "subsynccommit");
3824  i_subpublications = PQfnumber(res, "subpublications");
3825 
3826  subinfo = pg_malloc(ntups * sizeof(SubscriptionInfo));
3827 
3828  for (i = 0; i < ntups; i++)
3829  {
3830  subinfo[i].dobj.objType = DO_SUBSCRIPTION;
3831  subinfo[i].dobj.catId.tableoid =
3832  atooid(PQgetvalue(res, i, i_tableoid));
3833  subinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
3834  AssignDumpId(&subinfo[i].dobj);
3835  subinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_subname));
3836  subinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
3837  subinfo[i].subconninfo = pg_strdup(PQgetvalue(res, i, i_subconninfo));
3838  if (PQgetisnull(res, i, i_subslotname))
3839  subinfo[i].subslotname = NULL;
3840  else
3841  subinfo[i].subslotname = pg_strdup(PQgetvalue(res, i, i_subslotname));
3842  subinfo[i].subsynccommit =
3843  pg_strdup(PQgetvalue(res, i, i_subsynccommit));
3844  subinfo[i].subpublications =
3845  pg_strdup(PQgetvalue(res, i, i_subpublications));
3846 
3847  if (strlen(subinfo[i].rolname) == 0)
3848  write_msg(NULL, "WARNING: owner of subscription \"%s\" appears to be invalid\n",
3849  subinfo[i].dobj.name);
3850 
3851  /* Decide whether we want to dump it */
3852  selectDumpableObject(&(subinfo[i].dobj), fout);
3853  }
3854  PQclear(res);
3855 
3856  destroyPQExpBuffer(query);
3857 }
3858 
3859 /*
3860  * dumpSubscription
3861  * dump the definition of the given subscription
3862  */
3863 static void
3865 {
3866  PQExpBuffer delq;
3867  PQExpBuffer query;
3868  PQExpBuffer labelq;
3869  PQExpBuffer publications;
3870  char **pubnames = NULL;
3871  int npubnames = 0;
3872  int i;
3873 
3874  if (!(subinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
3875  return;
3876 
3877  delq = createPQExpBuffer();
3878  query = createPQExpBuffer();
3879  labelq = createPQExpBuffer();
3880 
3881  appendPQExpBuffer(delq, "DROP SUBSCRIPTION %s;\n",
3882  fmtId(subinfo->dobj.name));
3883 
3884  appendPQExpBuffer(query, "CREATE SUBSCRIPTION %s CONNECTION ",
3885  fmtId(subinfo->dobj.name));
3886  appendStringLiteralAH(query, subinfo->subconninfo, fout);
3887 
3888  /* Build list of quoted publications and append them to query. */
3889  if (!parsePGArray(subinfo->subpublications, &pubnames, &npubnames))
3890  {
3891  write_msg(NULL,
3892  "WARNING: could not parse subpublications array\n");
3893  if (pubnames)
3894  free(pubnames);
3895  pubnames = NULL;
3896  npubnames = 0;
3897  }
3898 
3899  publications = createPQExpBuffer();
3900  for (i = 0; i < npubnames; i++)
3901  {
3902  if (i > 0)
3903  appendPQExpBufferStr(publications, ", ");
3904 
3905  appendPQExpBufferStr(publications, fmtId(pubnames[i]));
3906  }
3907 
3908  appendPQExpBuffer(query, " PUBLICATION %s WITH (connect = false, slot_name = ", publications->data);
3909  if (subinfo->subslotname)
3910  appendStringLiteralAH(query, subinfo->subslotname, fout);
3911  else
3912  appendPQExpBufferStr(query, "NONE");
3913 
3914  if (strcmp(subinfo->subsynccommit, "off") != 0)
3915  appendPQExpBuffer(query, ", synchronous_commit = %s", fmtId(subinfo->subsynccommit));
3916 
3917  appendPQExpBufferStr(query, ");\n");
3918 
3919  appendPQExpBuffer(labelq, "SUBSCRIPTION %s", fmtId(subinfo->dobj.name));
3920 
3921  ArchiveEntry(fout, subinfo->dobj.catId, subinfo->dobj.dumpId,
3922  subinfo->dobj.name,
3923  NULL,
3924  NULL,
3925  subinfo->rolname, false,
3926  "SUBSCRIPTION", SECTION_POST_DATA,
3927  query->data, delq->data, NULL,
3928  NULL, 0,
3929  NULL, NULL);
3930 
3931  if (subinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3932  dumpComment(fout, labelq->data,
3933  NULL, subinfo->rolname,
3934  subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
3935 
3936  if (subinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3937  dumpSecLabel(fout, labelq->data,
3938  NULL, subinfo->rolname,
3939  subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
3940 
3941  destroyPQExpBuffer(publications);
3942  if (pubnames)
3943  free(pubnames);
3944 
3945  destroyPQExpBuffer(delq);
3946  destroyPQExpBuffer(query);
3947 }
3948 
/*
 * binary_upgrade_set_type_oids_by_type_oid
 *
 * Append commands to upgrade_buffer that make a binary-upgrade-mode server
 * preserve the given pg_type OID for the next type created, and likewise
 * preserve the OID of the type's array type (looked up via pg_type.typarray)
 * when one exists.
 *
 * NOTE(review): the line carrying the function name was dropped when this
 * listing was extracted; the name above is taken from the call site in the
 * rel-oid variant below — confirm against upstream pg_dump.c.
 */
3949 static void
3951  PQExpBuffer upgrade_buffer,
3952  Oid pg_type_oid)
3953 {
3954  PQExpBuffer upgrade_query = createPQExpBuffer();
3955  PGresult *upgrade_res;
3956  Oid pg_type_array_oid;
3957 
 /* Pin the OID of the type itself. */
3958  appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type oid\n");
3959  appendPQExpBuffer(upgrade_buffer,
3960  "SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('%u'::pg_catalog.oid);\n\n",
3961  pg_type_oid);
3962 
3963  /* we only support old >= 8.3 for binary upgrades */
3964  appendPQExpBuffer(upgrade_query,
3965  "SELECT typarray "
3966  "FROM pg_catalog.pg_type "
3967  "WHERE pg_type.oid = '%u'::pg_catalog.oid;",
3968  pg_type_oid);
3969 
3970  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
3971 
 /* typarray is InvalidOid (0) when the type has no array type. */
3972  pg_type_array_oid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "typarray")));
3973 
3974  if (OidIsValid(pg_type_array_oid))
3975  {
3976  appendPQExpBufferStr(upgrade_buffer,
3977  "\n-- For binary upgrade, must preserve pg_type array oid\n");
3978  appendPQExpBuffer(upgrade_buffer,
3979  "SELECT pg_catalog.binary_upgrade_set_next_array_pg_type_oid('%u'::pg_catalog.oid);\n\n",
3980  pg_type_array_oid);
3981  }
3982 
3983  PQclear(upgrade_res);
3984  destroyPQExpBuffer(upgrade_query);
3985 }
3986 
/*
 * Given a relation OID, look up the relation's composite-type OID (and its
 * toast table's composite-type OID, if any) and append binary-upgrade
 * OID-preservation commands for them to upgrade_buffer.
 *
 * Returns true iff a toast-type OID was set.
 *
 * NOTE(review): the function-name line was dropped in extraction —
 * presumably binary_upgrade_set_type_oids_by_rel_oid; confirm upstream.
 */
3987 static bool
3989  PQExpBuffer upgrade_buffer,
3990  Oid pg_rel_oid)
3991 {
3992  PQExpBuffer upgrade_query = createPQExpBuffer();
3993  PGresult *upgrade_res;
3994  Oid pg_type_oid;
3995  bool toast_set = false;
3996 
3997  /* we only support old >= 8.3 for binary upgrades */
 /* LEFT JOIN: "trel" comes back NULL when the rel has no toast table. */
3998  appendPQExpBuffer(upgrade_query,
3999  "SELECT c.reltype AS crel, t.reltype AS trel "
4000  "FROM pg_catalog.pg_class c "
4001  "LEFT JOIN pg_catalog.pg_class t ON "
4002  " (c.reltoastrelid = t.oid) "
4003  "WHERE c.oid = '%u'::pg_catalog.oid;",
4004  pg_rel_oid);
4005 
4006  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4007 
4008  pg_type_oid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "crel")));
4009 
 /* Preserve the relation's own row type (and its array type, if any). */
4010  binary_upgrade_set_type_oids_by_type_oid(fout, upgrade_buffer,
4011  pg_type_oid);
4012 
4013  if (!PQgetisnull(upgrade_res, 0, PQfnumber(upgrade_res, "trel")))
4014  {
4015  /* Toast tables do not have pg_type array rows */
4016  Oid pg_type_toast_oid = atooid(PQgetvalue(upgrade_res, 0,
4017  PQfnumber(upgrade_res, "trel")));
4018 
4019  appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type toast oid\n");
4020  appendPQExpBuffer(upgrade_buffer,
4021  "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_type_oid('%u'::pg_catalog.oid);\n\n",
4022  pg_type_toast_oid);
4023 
4024  toast_set = true;
4025  }
4026 
4027  PQclear(upgrade_res);
4028  destroyPQExpBuffer(upgrade_query);
4029 
4030  return toast_set;
4031 }
4032 
/*
 * Append commands to upgrade_buffer that make a binary-upgrade-mode server
 * preserve the pg_class OID of the given relation.  For a table (is_index
 * false) this also preserves the OIDs of its toast table and toast index,
 * if it has any; for an index only the index's own pg_class OID is set.
 *
 * NOTE(review): the function-name line was dropped in extraction —
 * presumably binary_upgrade_set_pg_class_oids; confirm upstream.
 */
4033 static void
4035  PQExpBuffer upgrade_buffer, Oid pg_class_oid,
4036  bool is_index)
4037 {
4038  PQExpBuffer upgrade_query = createPQExpBuffer();
4039  PGresult *upgrade_res;
4040  Oid pg_class_reltoastrelid;
4041  Oid pg_index_indexrelid;
4042 
 /* Fetch the toast table's OID and its (valid) index's OID in one query. */
4043  appendPQExpBuffer(upgrade_query,
4044  "SELECT c.reltoastrelid, i.indexrelid "
4045  "FROM pg_catalog.pg_class c LEFT JOIN "
4046  "pg_catalog.pg_index i ON (c.reltoastrelid = i.indrelid AND i.indisvalid) "
4047  "WHERE c.oid = '%u'::pg_catalog.oid;",
4048  pg_class_oid);
4049 
4050  upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
4051 
4052  pg_class_reltoastrelid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "reltoastrelid")));
4053  pg_index_indexrelid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "indexrelid")));
4054 
4055  appendPQExpBufferStr(upgrade_buffer,
4056  "\n-- For binary upgrade, must preserve pg_class oids\n");
4057 
4058  if (!is_index)
4059  {
4060  appendPQExpBuffer(upgrade_buffer,
4061  "SELECT pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('%u'::pg_catalog.oid);\n",
4062  pg_class_oid);
4063  /* only tables have toast tables, not indexes */
4064  if (OidIsValid(pg_class_reltoastrelid))
4065  {
4066  /*
4067  * One complexity is that the table definition might not require
4068  * the creation of a TOAST table, and the TOAST table might have
4069  * been created long after table creation, when the table was
4070  * loaded with wide data. By setting the TOAST oid we force
4071  * creation of the TOAST heap and TOAST index by the backend so we
4072  * can cleanly copy the files during binary upgrade.
4073  */
4074 
4075  appendPQExpBuffer(upgrade_buffer,
4076  "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('%u'::pg_catalog.oid);\n",
4077  pg_class_reltoastrelid);
4078 
4079  /* every toast table has an index */
4080  appendPQExpBuffer(upgrade_buffer,
4081  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
4082  pg_index_indexrelid);
4083  }
4084  }
4085  else
4086  appendPQExpBuffer(upgrade_buffer,
4087  "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
4088  pg_class_oid);
4089 
4090  appendPQExpBufferChar(upgrade_buffer, '\n');
4091 
4092  PQclear(upgrade_res);
4093  destroyPQExpBuffer(upgrade_query);
4094 }
4095 
4096 /*
4097  * If the DumpableObject is a member of an extension, add a suitable
4098  * ALTER EXTENSION ADD command to the creation commands in upgrade_buffer.
 *
 * objlabel is the SQL identifier text ("KIND name") to place after ADD.
 * Exits via exit_horribly() if the object claims extension membership but
 * no extension is found among its direct dependencies.
 *
 * NOTE(review): the function-name line was dropped in extraction —
 * presumably binary_upgrade_extension_member; confirm upstream.
4099  */
4100 static void
4102  DumpableObject *dobj,
4103  const char *objlabel)
4104 {
4105  DumpableObject *extobj = NULL;
4106  int i;
4107 
 /* Fast exit for the common case: not part of any extension. */
4108  if (!dobj->ext_member)
4109  return;
4110 
4111  /*
4112  * Find the parent extension. We could avoid this search if we wanted to
4113  * add a link field to DumpableObject, but the space costs of that would
4114  * be considerable. We assume that member objects could only have a
4115  * direct dependency on their own extension, not any others.
4116  */
4117  for (i = 0; i < dobj->nDeps; i++)
4118  {
4119  extobj = findObjectByDumpId(dobj->dependencies[i]);
4120  if (extobj && extobj->objType == DO_EXTENSION)
4121  break;
4122  extobj = NULL;
4123  }
4124  if (extobj == NULL)
4125  exit_horribly(NULL, "could not find parent extension for %s\n", objlabel);
4126 
4127  appendPQExpBufferStr(upgrade_buffer,
4128  "\n-- For binary upgrade, handle extension membership the hard way\n");
4129  appendPQExpBuffer(upgrade_buffer, "ALTER EXTENSION %s ADD %s;\n",
4130  fmtId(extobj->name),
4131  objlabel);
4132 }
4133 
4134 /*
4135  * getNamespaces:
4136  * read all namespaces in the system catalogs and return them in the
4137  * NamespaceInfo* structure
4138  *
4139  * numNamespaces is set to the number of namespaces read in
 *
 * The returned array is pg_malloc'd; the caller owns it.  ACL columns are
 * NULL (and the rnspacl/init* columns always NULL) on servers older than
 * 9.6, where pg_init_privs does not exist.
4140  */
4141 NamespaceInfo *
4143 {
4144  DumpOptions *dopt = fout->dopt;
4145  PGresult *res;
4146  int ntups;
4147  int i;
4148  PQExpBuffer query;
4149  NamespaceInfo *nsinfo;
4150  int i_tableoid;
4151  int i_oid;
4152  int i_nspname;
4153  int i_rolname;
4154  int i_nspacl;
4155  int i_rnspacl;
4156  int i_initnspacl;
4157  int i_initrnspacl;
4158 
4159  query = createPQExpBuffer();
4160 
4161  /* Make sure we are in proper schema */
4162  selectSourceSchema(fout, "pg_catalog");
4163 
4164  /*
4165  * we fetch all namespaces including system ones, so that every object we
4166  * read in can be linked to a containing namespace.
4167  */
4168  if (fout->remoteVersion >= 90600)
4169  {
4170  PQExpBuffer acl_subquery = createPQExpBuffer();
4171  PQExpBuffer racl_subquery = createPQExpBuffer();
4172  PQExpBuffer init_acl_subquery = createPQExpBuffer();
4173  PQExpBuffer init_racl_subquery = createPQExpBuffer();
4174 
4175  buildACLQueries(acl_subquery, racl_subquery, init_acl_subquery,
4176  init_racl_subquery, "n.nspacl", "n.nspowner", "'n'",
4177  dopt->binary_upgrade);
4178 
 /*
 * NOTE(review): the first %s is filled by a username_subquery argument
 * whose line was dropped in extraction (original line 4190) — confirm
 * against upstream pg_dump.c.
 */
4179  appendPQExpBuffer(query, "SELECT n.tableoid, n.oid, n.nspname, "
4180  "(%s nspowner) AS rolname, "
4181  "%s as nspacl, "
4182  "%s as rnspacl, "
4183  "%s as initnspacl, "
4184  "%s as initrnspacl "
4185  "FROM pg_namespace n "
4186  "LEFT JOIN pg_init_privs pip "
4187  "ON (n.oid = pip.objoid "
4188  "AND pip.classoid = 'pg_namespace'::regclass "
4189  "AND pip.objsubid = 0",
4191  acl_subquery->data,
4192  racl_subquery->data,
4193  init_acl_subquery->data,
4194  init_racl_subquery->data);
4195 
4196  /*
4197  * When we are doing a 'clean' run, we will be dropping and recreating
4198  * the 'public' schema (the only object which has that kind of
4199  * treatment in the backend and which has an entry in pg_init_privs)
4200  * and therefore we should not consider any initial privileges in
4201  * pg_init_privs in that case.
4202  *
4203  * See pg_backup_archiver.c:_printTocEntry() for the details on why
4204  * the public schema is special in this regard.
4205  *
4206  * Note that if the public schema is dropped and re-created, this is
4207  * essentially a no-op because the new public schema won't have an
4208  * entry in pg_init_privs anyway, as the entry will be removed when
4209  * the public schema is dropped.
4210  *
4211  * Further, we have to handle the case where the public schema does
4212  * not exist at all.
4213  */
4214  if (dopt->outputClean)
4215  appendPQExpBuffer(query, " AND pip.objoid <> "
4216  "coalesce((select oid from pg_namespace "
4217  "where nspname = 'public'),0)");
4218 
 /* Close the LEFT JOIN's ON clause opened above. */
4219  appendPQExpBuffer(query, ") ");
4220 
4221  destroyPQExpBuffer(acl_subquery);
4222  destroyPQExpBuffer(racl_subquery);
4223  destroyPQExpBuffer(init_acl_subquery);
4224  destroyPQExpBuffer(init_racl_subquery);
4225  }
4226  else
 /*
 * NOTE(review): the username_subquery argument line (original line
 * 4232) was dropped in extraction — confirm against upstream.
 */
4227  appendPQExpBuffer(query, "SELECT tableoid, oid, nspname, "
4228  "(%s nspowner) AS rolname, "
4229  "nspacl, NULL as rnspacl, "
4230  "NULL AS initnspacl, NULL as initrnspacl "
4231  "FROM pg_namespace",
4233 
4234  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4235 
4236  ntups = PQntuples(res);
4237 
4238  nsinfo = (NamespaceInfo *) pg_malloc(ntups * sizeof(NamespaceInfo));
4239 
4240  i_tableoid = PQfnumber(res, "tableoid");
4241  i_oid = PQfnumber(res, "oid");
4242  i_nspname = PQfnumber(res, "nspname");
4243  i_rolname = PQfnumber(res, "rolname");
4244  i_nspacl = PQfnumber(res, "nspacl");
4245  i_rnspacl = PQfnumber(res, "rnspacl");
4246  i_initnspacl = PQfnumber(res, "initnspacl");
4247  i_initrnspacl = PQfnumber(res, "initrnspacl");
4248 
4249  for (i = 0; i < ntups; i++)
4250  {
4251  nsinfo[i].dobj.objType = DO_NAMESPACE;
4252  nsinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4253  nsinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4254  AssignDumpId(&nsinfo[i].dobj);
4255  nsinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_nspname));
4256  nsinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4257  nsinfo[i].nspacl = pg_strdup(PQgetvalue(res, i, i_nspacl));
4258  nsinfo[i].rnspacl = pg_strdup(PQgetvalue(res, i, i_rnspacl));
4259  nsinfo[i].initnspacl = pg_strdup(PQgetvalue(res, i, i_initnspacl));
4260  nsinfo[i].initrnspacl = pg_strdup(PQgetvalue(res, i, i_initrnspacl));
4261 
4262  /* Decide whether to dump this namespace */
4263  selectDumpableNamespace(&nsinfo[i], fout);
4264 
4265  /*
4266  * Do not try to dump ACL if the ACL is empty or the default.
4267  *
4268  * This is useful because, for some schemas/objects, the only
4269  * component we are going to try and dump is the ACL and if we can
4270  * remove that then 'dump' goes to zero/false and we don't consider
4271  * this object for dumping at all later on.
4272  */
4273  if (PQgetisnull(res, i, i_nspacl) && PQgetisnull(res, i, i_rnspacl) &&
4274  PQgetisnull(res, i, i_initnspacl) &&
4275  PQgetisnull(res, i, i_initrnspacl))
4276  nsinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4277 
4278  if (strlen(nsinfo[i].rolname) == 0)
4279  write_msg(NULL, "WARNING: owner of schema \"%s\" appears to be invalid\n",
4280  nsinfo[i].dobj.name);
4281  }
4282 
4283  PQclear(res);
4284  destroyPQExpBuffer(query);
4285 
4286  *numNamespaces = ntups;
4287 
4288  return nsinfo;
4289 }
4290 
4291 /*
4292  * findNamespace:
4293  * given a namespace OID, look up the info read by getNamespaces
 *
 * Never returns NULL: a missing namespace is treated as a corrupt-catalog
 * condition and aborts the dump via exit_horribly().
 *
 * NOTE(review): the function-name/parameter line (original line 4296) was
 * dropped in extraction — confirm the signature against upstream.
4294  */
4295 static NamespaceInfo *
4297 {
4298  NamespaceInfo *nsinfo;
4299 
4300  nsinfo = findNamespaceByOid(nsoid);
4301  if (nsinfo == NULL)
4302  exit_horribly(NULL, "schema with OID %u does not exist\n", nsoid);
4303  return nsinfo;
4304 }
4305 
4306 /*
4307  * getExtensions:
4308  * read all extensions in the system catalogs and return them in the
4309  * ExtensionInfo* structure
4310  *
4311  * numExtensions is set to the number of extensions read in
 *
 * Returns NULL (with *numExtensions = 0) for pre-9.1 servers, which have
 * no pg_extension catalog.  The returned array is pg_malloc'd.
4312  */
4313 ExtensionInfo *
4315 {
4316  DumpOptions *dopt = fout->dopt;
4317  PGresult *res;
4318  int ntups;
4319  int i;
4320  PQExpBuffer query;
4321  ExtensionInfo *extinfo;
4322  int i_tableoid;
4323  int i_oid;
4324  int i_extname;
4325  int i_nspname;
4326  int i_extrelocatable;
4327  int i_extversion;
4328  int i_extconfig;
4329  int i_extcondition;
4330 
4331  /*
4332  * Before 9.1, there are no extensions.
4333  */
4334  if (fout->remoteVersion < 90100)
4335  {
4336  *numExtensions = 0;
4337  return NULL;
4338  }
4339 
4340  query = createPQExpBuffer();
4341 
4342  /* Make sure we are in proper schema */
4343  selectSourceSchema(fout, "pg_catalog");
4344 
4345  appendPQExpBufferStr(query, "SELECT x.tableoid, x.oid, "
4346  "x.extname, n.nspname, x.extrelocatable, x.extversion, x.extconfig, x.extcondition "
4347  "FROM pg_extension x "
4348  "JOIN pg_namespace n ON n.oid = x.extnamespace");
4349 
4350  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4351 
4352  ntups = PQntuples(res);
4353 
4354  extinfo = (ExtensionInfo *) pg_malloc(ntups * sizeof(ExtensionInfo));
4355 
4356  i_tableoid = PQfnumber(res, "tableoid");
4357  i_oid = PQfnumber(res, "oid");
4358  i_extname = PQfnumber(res, "extname");
4359  i_nspname = PQfnumber(res, "nspname");
4360  i_extrelocatable = PQfnumber(res, "extrelocatable");
4361  i_extversion = PQfnumber(res, "extversion");
4362  i_extconfig = PQfnumber(res, "extconfig");
4363  i_extcondition = PQfnumber(res, "extcondition");
4364 
4365  for (i = 0; i < ntups; i++)
4366  {
4367  extinfo[i].dobj.objType = DO_EXTENSION;
4368  extinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4369  extinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4370  AssignDumpId(&extinfo[i].dobj);
4371  extinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_extname));
4372  extinfo[i].namespace = pg_strdup(PQgetvalue(res, i, i_nspname));
 /* Boolean columns come back as the single character 't' or 'f'. */
4373  extinfo[i].relocatable = *(PQgetvalue(res, i, i_extrelocatable)) == 't';
4374  extinfo[i].extversion = pg_strdup(PQgetvalue(res, i, i_extversion));
4375  extinfo[i].extconfig = pg_strdup(PQgetvalue(res, i, i_extconfig));
4376  extinfo[i].extcondition = pg_strdup(PQgetvalue(res, i, i_extcondition));
4377 
4378  /* Decide whether we want to dump it */
4379  selectDumpableExtension(&(extinfo[i]), dopt);
4380  }
4381 
4382  PQclear(res);
4383  destroyPQExpBuffer(query);
4384 
4385  *numExtensions = ntups;
4386 
4387  return extinfo;
4388 }
4389 
4390 /*
4391  * getTypes:
4392  * read all types in the system catalogs and return them in the
4393  * TypeInfo* structure
4394  *
4395  * numTypes is set to the number of types read in
4396  *
4397  * NB: this must run after getFuncs() because we assume we can do
4398  * findFuncByOid().
 *
 * Also creates a DO_SHELL_TYPE DumpableObject for every dumpable base or
 * range type, linked via tyinfo->shellType.  For domains, fetches their
 * check constraints via getDomainConstraints().
 *
 * NOTE(review): several argument lines carrying username_subquery were
 * dropped from this listing during extraction (original lines 4478, 4498,
 * 4513, 4527), as was the function-name line (4401) — confirm upstream.
4399  */
4400 TypeInfo *
4402 {
4403  DumpOptions *dopt = fout->dopt;
4404  PGresult *res;
4405  int ntups;
4406  int i;
4407  PQExpBuffer query = createPQExpBuffer();
4408  TypeInfo *tyinfo;
4409  ShellTypeInfo *stinfo;
4410  int i_tableoid;
4411  int i_oid;
4412  int i_typname;
4413  int i_typnamespace;
4414  int i_typacl;
4415  int i_rtypacl;
4416  int i_inittypacl;
4417  int i_initrtypacl;
4418  int i_rolname;
4419  int i_typelem;
4420  int i_typrelid;
4421  int i_typrelkind;
4422  int i_typtype;
4423  int i_typisdefined;
4424  int i_isarray;
4425 
4426  /*
4427  * we include even the built-in types because those may be used as array
4428  * elements by user-defined types
4429  *
4430  * we filter out the built-in types when we dump out the types
4431  *
4432  * same approach for undefined (shell) types and array types
4433  *
4434  * Note: as of 8.3 we can reliably detect whether a type is an
4435  * auto-generated array type by checking the element type's typarray.
4436  * (Before that the test is capable of generating false positives.) We
4437  * still check for name beginning with '_', though, so as to avoid the
4438  * cost of the subselect probe for all standard types. This would have to
4439  * be revisited if the backend ever allows renaming of array types.
4440  */
4441 
4442  /* Make sure we are in proper schema */
4443  selectSourceSchema(fout, "pg_catalog");
4444 
4445  if (fout->remoteVersion >= 90600)
4446  {
4447  PQExpBuffer acl_subquery = createPQExpBuffer();
4448  PQExpBuffer racl_subquery = createPQExpBuffer();
4449  PQExpBuffer initacl_subquery = createPQExpBuffer();
4450  PQExpBuffer initracl_subquery = createPQExpBuffer();
4451 
4452  buildACLQueries(acl_subquery, racl_subquery, initacl_subquery,
4453  initracl_subquery, "t.typacl", "t.typowner", "'T'",
4454  dopt->binary_upgrade);
4455 
4456  appendPQExpBuffer(query, "SELECT t.tableoid, t.oid, t.typname, "
4457  "t.typnamespace, "
4458  "%s AS typacl, "
4459  "%s AS rtypacl, "
4460  "%s AS inittypacl, "
4461  "%s AS initrtypacl, "
4462  "(%s t.typowner) AS rolname, "
4463  "t.typelem, t.typrelid, "
4464  "CASE WHEN t.typrelid = 0 THEN ' '::\"char\" "
4465  "ELSE (SELECT relkind FROM pg_class WHERE oid = t.typrelid) END AS typrelkind, "
4466  "t.typtype, t.typisdefined, "
4467  "t.typname[0] = '_' AND t.typelem != 0 AND "
4468  "(SELECT typarray FROM pg_type te WHERE oid = t.typelem) = t.oid AS isarray "
4469  "FROM pg_type t "
4470  "LEFT JOIN pg_init_privs pip ON "
4471  "(t.oid = pip.objoid "
4472  "AND pip.classoid = 'pg_type'::regclass "
4473  "AND pip.objsubid = 0) ",
4474  acl_subquery->data,
4475  racl_subquery->data,
4476  initacl_subquery->data,
4477  initracl_subquery->data,
4479 
4480  destroyPQExpBuffer(acl_subquery);
4481  destroyPQExpBuffer(racl_subquery);
4482  destroyPQExpBuffer(initacl_subquery);
4483  destroyPQExpBuffer(initracl_subquery);
4484  }
4485  else if (fout->remoteVersion >= 90200)
4486  {
 /* 9.2-9.5: typacl exists, but pg_init_privs does not. */
4487  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4488  "typnamespace, typacl, NULL as rtypacl, "
4489  "NULL AS inittypacl, NULL AS initrtypacl, "
4490  "(%s typowner) AS rolname, "
4491  "typelem, typrelid, "
4492  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4493  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4494  "typtype, typisdefined, "
4495  "typname[0] = '_' AND typelem != 0 AND "
4496  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
4497  "FROM pg_type",
4499  }
4500  else if (fout->remoteVersion >= 80300)
4501  {
 /* 8.3-9.1: no typacl either. */
4502  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4503  "typnamespace, NULL AS typacl, NULL as rtypacl, "
4504  "NULL AS inittypacl, NULL AS initrtypacl, "
4505  "(%s typowner) AS rolname, "
4506  "typelem, typrelid, "
4507  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4508  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4509  "typtype, typisdefined, "
4510  "typname[0] = '_' AND typelem != 0 AND "
4511  "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
4512  "FROM pg_type",
4514  }
4515  else
4516  {
 /* Pre-8.3: no typarray column; fall back to the name-based heuristic. */
4517  appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
4518  "typnamespace, NULL AS typacl, NULL as rtypacl, "
4519  "NULL AS inittypacl, NULL AS initrtypacl, "
4520  "(%s typowner) AS rolname, "
4521  "typelem, typrelid, "
4522  "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
4523  "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
4524  "typtype, typisdefined, "
4525  "typname[0] = '_' AND typelem != 0 AS isarray "
4526  "FROM pg_type",
4528  }
4529 
4530  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4531 
4532  ntups = PQntuples(res);
4533 
4534  tyinfo = (TypeInfo *) pg_malloc(ntups * sizeof(TypeInfo));
4535 
4536  i_tableoid = PQfnumber(res, "tableoid");
4537  i_oid = PQfnumber(res, "oid");
4538  i_typname = PQfnumber(res, "typname");
4539  i_typnamespace = PQfnumber(res, "typnamespace");
4540  i_typacl = PQfnumber(res, "typacl");
4541  i_rtypacl = PQfnumber(res, "rtypacl");
4542  i_inittypacl = PQfnumber(res, "inittypacl");
4543  i_initrtypacl = PQfnumber(res, "initrtypacl");
4544  i_rolname = PQfnumber(res, "rolname");
4545  i_typelem = PQfnumber(res, "typelem");
4546  i_typrelid = PQfnumber(res, "typrelid");
4547  i_typrelkind = PQfnumber(res, "typrelkind");
4548  i_typtype = PQfnumber(res, "typtype");
4549  i_typisdefined = PQfnumber(res, "typisdefined");
4550  i_isarray = PQfnumber(res, "isarray");
4551 
4552  for (i = 0; i < ntups; i++)
4553  {
4554  tyinfo[i].dobj.objType = DO_TYPE;
4555  tyinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4556  tyinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4557  AssignDumpId(&tyinfo[i].dobj);
4558  tyinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_typname));
4559  tyinfo[i].dobj.namespace =
4560  findNamespace(fout,
4561  atooid(PQgetvalue(res, i, i_typnamespace)));
4562  tyinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4563  tyinfo[i].typacl = pg_strdup(PQgetvalue(res, i, i_typacl));
4564  tyinfo[i].rtypacl = pg_strdup(PQgetvalue(res, i, i_rtypacl));
4565  tyinfo[i].inittypacl = pg_strdup(PQgetvalue(res, i, i_inittypacl));
4566  tyinfo[i].initrtypacl = pg_strdup(PQgetvalue(res, i, i_initrtypacl));
4567  tyinfo[i].typelem = atooid(PQgetvalue(res, i, i_typelem));
4568  tyinfo[i].typrelid = atooid(PQgetvalue(res, i, i_typrelid));
4569  tyinfo[i].typrelkind = *PQgetvalue(res, i, i_typrelkind);
4570  tyinfo[i].typtype = *PQgetvalue(res, i, i_typtype);
4571  tyinfo[i].shellType = NULL;
4572 
4573  if (strcmp(PQgetvalue(res, i, i_typisdefined), "t") == 0)
4574  tyinfo[i].isDefined = true;
4575  else
4576  tyinfo[i].isDefined = false;
4577 
4578  if (strcmp(PQgetvalue(res, i, i_isarray), "t") == 0)
4579  tyinfo[i].isArray = true;
4580  else
4581  tyinfo[i].isArray = false;
4582 
4583  /* Decide whether we want to dump it */
4584  selectDumpableType(&tyinfo[i], fout);
4585 
4586  /* Do not try to dump ACL if no ACL exists. */
4587  if (PQgetisnull(res, i, i_typacl) && PQgetisnull(res, i, i_rtypacl) &&
4588  PQgetisnull(res, i, i_inittypacl) &&
4589  PQgetisnull(res, i, i_initrtypacl))
4590  tyinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4591 
4592  /*
4593  * If it's a domain, fetch info about its constraints, if any
4594  */
4595  tyinfo[i].nDomChecks = 0;
4596  tyinfo[i].domChecks = NULL;
4597  if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
4598  tyinfo[i].typtype == TYPTYPE_DOMAIN)
4599  getDomainConstraints(fout, &(tyinfo[i]));
4600 
4601  /*
4602  * If it's a base type, make a DumpableObject representing a shell
4603  * definition of the type. We will need to dump that ahead of the I/O
4604  * functions for the type. Similarly, range types need a shell
4605  * definition in case they have a canonicalize function.
4606  *
4607  * Note: the shell type doesn't have a catId. You might think it
4608  * should copy the base type's catId, but then it might capture the
4609  * pg_depend entries for the type, which we don't want.
4610  */
4611  if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
4612  (tyinfo[i].typtype == TYPTYPE_BASE ||
4613  tyinfo[i].typtype == TYPTYPE_RANGE))
4614  {
4615  stinfo = (ShellTypeInfo *) pg_malloc(sizeof(ShellTypeInfo));
4616  stinfo->dobj.objType = DO_SHELL_TYPE;
4617  stinfo->dobj.catId = nilCatalogId;
4618  AssignDumpId(&stinfo->dobj);
4619  stinfo->dobj.name = pg_strdup(tyinfo[i].dobj.name);
4620  stinfo->dobj.namespace = tyinfo[i].dobj.namespace;
4621  stinfo->baseType = &(tyinfo[i]);
4622  tyinfo[i].shellType = stinfo;
4623 
4624  /*
4625  * Initially mark the shell type as not to be dumped. We'll only
4626  * dump it if the I/O or canonicalize functions need to be dumped;
4627  * this is taken care of while sorting dependencies.
4628  */
4629  stinfo->dobj.dump = DUMP_COMPONENT_NONE;
4630  }
4631 
4632  if (strlen(tyinfo[i].rolname) == 0)
4633  write_msg(NULL, "WARNING: owner of data type \"%s\" appears to be invalid\n",
4634  tyinfo[i].dobj.name);
4635  }
4636 
4637  *numTypes = ntups;
4638 
4639  PQclear(res);
4640 
4641  destroyPQExpBuffer(query);
4642 
4643  return tyinfo;
4644 }
4645 
4646 /*
4647  * getOperators:
4648  * read all operators in the system catalogs and return them in the
4649  * OprInfo* structure
4650  *
4651  * numOprs is set to the number of operators read in
 *
 * The returned array is pg_malloc'd.  Built-in operators are included and
 * filtered out later at dump time.
4652  */
4653 OprInfo *
4654 getOperators(Archive *fout, int *numOprs)
4655 {
4656  PGresult *res;
4657  int ntups;
4658  int i;
4659  PQExpBuffer query = createPQExpBuffer();
4660  OprInfo *oprinfo;
4661  int i_tableoid;
4662  int i_oid;
4663  int i_oprname;
4664  int i_oprnamespace;
4665  int i_rolname;
4666  int i_oprkind;
4667  int i_oprcode;
4668 
4669  /*
4670  * find all operators, including builtin operators; we filter out
4671  * system-defined operators at dump-out time.
4672  */
4673 
4674  /* Make sure we are in proper schema */
4675  selectSourceSchema(fout, "pg_catalog");
4676 
 /*
 * NOTE(review): the username_subquery argument line (original line 4683)
 * was dropped in extraction — confirm against upstream.
 */
4677  appendPQExpBuffer(query, "SELECT tableoid, oid, oprname, "
4678  "oprnamespace, "
4679  "(%s oprowner) AS rolname, "
4680  "oprkind, "
4681  "oprcode::oid AS oprcode "
4682  "FROM pg_operator",
4684 
4685  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4686 
4687  ntups = PQntuples(res);
4688  *numOprs = ntups;
4689 
4690  oprinfo = (OprInfo *) pg_malloc(ntups * sizeof(OprInfo));
4691 
4692  i_tableoid = PQfnumber(res, "tableoid");
4693  i_oid = PQfnumber(res, "oid");
4694  i_oprname = PQfnumber(res, "oprname");
4695  i_oprnamespace = PQfnumber(res, "oprnamespace");
4696  i_rolname = PQfnumber(res, "rolname");
4697  i_oprkind = PQfnumber(res, "oprkind");
4698  i_oprcode = PQfnumber(res, "oprcode");
4699 
4700  for (i = 0; i < ntups; i++)
4701  {
4702  oprinfo[i].dobj.objType = DO_OPERATOR;
4703  oprinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4704  oprinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4705  AssignDumpId(&oprinfo[i].dobj);
4706  oprinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oprname));
4707  oprinfo[i].dobj.namespace =
4708  findNamespace(fout,
4709  atooid(PQgetvalue(res, i, i_oprnamespace)));
4710  oprinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4711  oprinfo[i].oprkind = (PQgetvalue(res, i, i_oprkind))[0];
4712  oprinfo[i].oprcode = atooid(PQgetvalue(res, i, i_oprcode));
4713 
4714  /* Decide whether we want to dump it */
4715  selectDumpableObject(&(oprinfo[i].dobj), fout);
4716 
4717  /* Operators do not currently have ACLs. */
4718  oprinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4719 
4720  if (strlen(oprinfo[i].rolname) == 0)
4721  write_msg(NULL, "WARNING: owner of operator \"%s\" appears to be invalid\n",
4722  oprinfo[i].dobj.name);
4723  }
4724 
4725  PQclear(res);
4726 
4727  destroyPQExpBuffer(query);
4728 
4729  return oprinfo;
4730 }
4731 
4732 /*
4733  * getCollations:
4734  * read all collations in the system catalogs and return them in the
4735  * CollInfo* structure
4736  *
4737  * numCollations is set to the number of collations read in
 *
 * Returns NULL (with *numCollations = 0) for pre-9.1 servers, which have
 * no pg_collation catalog.
 *
 * NOTE(review): the function-name line (original 4740) and the
 * username_subquery argument line (original 4774) were dropped in
 * extraction — confirm against upstream.
4738  */
4739 CollInfo *
4741 {
4742  PGresult *res;
4743  int ntups;
4744  int i;
4745  PQExpBuffer query;
4746  CollInfo *collinfo;
4747  int i_tableoid;
4748  int i_oid;
4749  int i_collname;
4750  int i_collnamespace;
4751  int i_rolname;
4752 
4753  /* Collations didn't exist pre-9.1 */
4754  if (fout->remoteVersion < 90100)
4755  {
4756  *numCollations = 0;
4757  return NULL;
4758  }
4759 
4760  query = createPQExpBuffer();
4761 
4762  /*
4763  * find all collations, including builtin collations; we filter out
4764  * system-defined collations at dump-out time.
4765  */
4766 
4767  /* Make sure we are in proper schema */
4768  selectSourceSchema(fout, "pg_catalog");
4769 
4770  appendPQExpBuffer(query, "SELECT tableoid, oid, collname, "
4771  "collnamespace, "
4772  "(%s collowner) AS rolname "
4773  "FROM pg_collation",
4775 
4776  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4777 
4778  ntups = PQntuples(res);
4779  *numCollations = ntups;
4780 
4781  collinfo = (CollInfo *) pg_malloc(ntups * sizeof(CollInfo));
4782 
4783  i_tableoid = PQfnumber(res, "tableoid");
4784  i_oid = PQfnumber(res, "oid");
4785  i_collname = PQfnumber(res, "collname");
4786  i_collnamespace = PQfnumber(res, "collnamespace");
4787  i_rolname = PQfnumber(res, "rolname");
4788 
4789  for (i = 0; i < ntups; i++)
4790  {
4791  collinfo[i].dobj.objType = DO_COLLATION;
4792  collinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4793  collinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4794  AssignDumpId(&collinfo[i].dobj);
4795  collinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_collname));
4796  collinfo[i].dobj.namespace =
4797  findNamespace(fout,
4798  atooid(PQgetvalue(res, i, i_collnamespace)));
4799  collinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4800 
4801  /* Decide whether we want to dump it */
4802  selectDumpableObject(&(collinfo[i].dobj), fout);
4803 
4804  /* Collations do not currently have ACLs. */
4805  collinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4806  }
4807 
4808  PQclear(res);
4809 
4810  destroyPQExpBuffer(query);
4811 
4812  return collinfo;
4813 }
4814 
4815 /*
4816  * getConversions:
4817  * read all conversions in the system catalogs and return them in the
4818  * ConvInfo* structure
4819  *
4820  * numConversions is set to the number of conversions read in
 *
 * The returned array is pg_malloc'd.  Built-in conversions are included
 * and filtered out later at dump time.
4821  */
4822 ConvInfo *
4823 getConversions(Archive *fout, int *numConversions)
4824 {
4825  PGresult *res;
4826  int ntups;
4827  int i;
4828  PQExpBuffer query;
4829  ConvInfo *convinfo;
4830  int i_tableoid;
4831  int i_oid;
4832  int i_conname;
4833  int i_connamespace;
4834  int i_rolname;
4835 
4836  query = createPQExpBuffer();
4837 
4838  /*
4839  * find all conversions, including builtin conversions; we filter out
4840  * system-defined conversions at dump-out time.
4841  */
4842 
4843  /* Make sure we are in proper schema */
4844  selectSourceSchema(fout, "pg_catalog");
4845 
 /*
 * NOTE(review): the username_subquery argument line (original line 4850)
 * was dropped in extraction — confirm against upstream.
 */
4846  appendPQExpBuffer(query, "SELECT tableoid, oid, conname, "
4847  "connamespace, "
4848  "(%s conowner) AS rolname "
4849  "FROM pg_conversion",
4851 
4852  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4853 
4854  ntups = PQntuples(res);
4855  *numConversions = ntups;
4856 
4857  convinfo = (ConvInfo *) pg_malloc(ntups * sizeof(ConvInfo));
4858 
4859  i_tableoid = PQfnumber(res, "tableoid");
4860  i_oid = PQfnumber(res, "oid");
4861  i_conname = PQfnumber(res, "conname");
4862  i_connamespace = PQfnumber(res, "connamespace");
4863  i_rolname = PQfnumber(res, "rolname");
4864 
4865  for (i = 0; i < ntups; i++)
4866  {
4867  convinfo[i].dobj.objType = DO_CONVERSION;
4868  convinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4869  convinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4870  AssignDumpId(&convinfo[i].dobj);
4871  convinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_conname));
4872  convinfo[i].dobj.namespace =
4873  findNamespace(fout,
4874  atooid(PQgetvalue(res, i, i_connamespace)));
4875  convinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
4876 
4877  /* Decide whether we want to dump it */
4878  selectDumpableObject(&(convinfo[i].dobj), fout);
4879 
4880  /* Conversions do not currently have ACLs. */
4881  convinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4882  }
4883 
4884  PQclear(res);
4885 
4886  destroyPQExpBuffer(query);
4887 
4888  return convinfo;
4889 }
4890 
4891 /*
4892  * getAccessMethods:
4893  * read all user-defined access methods in the system catalogs and return
4894  * them in the AccessMethodInfo* structure
4895  *
4896  * numAccessMethods is set to the number of access methods read in
4897  */
4899 getAccessMethods(Archive *fout, int *numAccessMethods)
4900 {
4901  PGresult *res;
4902  int ntups;
4903  int i;
4904  PQExpBuffer query;
4905  AccessMethodInfo *aminfo;
4906  int i_tableoid;
4907  int i_oid;
4908  int i_amname;
4909  int i_amhandler;
4910  int i_amtype;
4911 
4912  /* Before 9.6, there are no user-defined access methods */
4913  if (fout->remoteVersion < 90600)
4914  {
4915  *numAccessMethods = 0;
4916  return NULL;
4917  }
4918 
4919  query = createPQExpBuffer();
4920 
4921  /* Make sure we are in proper schema */
4922  selectSourceSchema(fout, "pg_catalog");
4923 
4924  /* Select all access methods from pg_am table */
4925  appendPQExpBuffer(query, "SELECT tableoid, oid, amname, amtype, "
4926  "amhandler::pg_catalog.regproc AS amhandler "
4927  "FROM pg_am");
4928 
4929  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4930 
4931  ntups = PQntuples(res);
4932  *numAccessMethods = ntups;
4933 
4934  aminfo = (AccessMethodInfo *) pg_malloc(ntups * sizeof(AccessMethodInfo));
4935 
4936  i_tableoid = PQfnumber(res, "tableoid");
4937  i_oid = PQfnumber(res, "oid");
4938  i_amname = PQfnumber(res, "amname");
4939  i_amhandler = PQfnumber(res, "amhandler");
4940  i_amtype = PQfnumber(res, "amtype");
4941 
4942  for (i = 0; i < ntups; i++)
4943  {
4944  aminfo[i].dobj.objType = DO_ACCESS_METHOD;
4945  aminfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
4946  aminfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4947  AssignDumpId(&aminfo[i].dobj);
4948  aminfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_amname));
4949  aminfo[i].dobj.namespace = NULL;
4950  aminfo[i].amhandler = pg_strdup(PQgetvalue(res, i, i_amhandler));
4951  aminfo[i].amtype = *(PQgetvalue(res, i, i_amtype));
4952 
4953  /* Decide whether we want to dump it */
4954  selectDumpableAccessMethod(&(aminfo[i]), fout);
4955 
4956  /* Access methods do not currently have ACLs. */
4957  aminfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
4958  }
4959 
4960  PQclear(res);
4961 
4962  destroyPQExpBuffer(query);
4963 
4964  return aminfo;
4965 }
4966 
4967 
4968 /*
4969  * getOpclasses:
4970  * read all opclasses in the system catalogs and return them in the
4971  * OpclassInfo* structure
4972  *
4973  * numOpclasses is set to the number of opclasses read in
4974  */
4975 OpclassInfo *
4976 getOpclasses(Archive *fout, int *numOpclasses)
4977 {
4978  PGresult *res;
4979  int ntups;
4980  int i;
4981  PQExpBuffer query = createPQExpBuffer();
4982  OpclassInfo *opcinfo;
4983  int i_tableoid;
4984  int i_oid;
4985  int i_opcname;
4986  int i_opcnamespace;
4987  int i_rolname;
4988 
4989  /*
4990  * find all opclasses, including builtin opclasses; we filter out
4991  * system-defined opclasses at dump-out time.
4992  */
4993 
4994  /* Make sure we are in proper schema */
4995  selectSourceSchema(fout, "pg_catalog");
4996 
4997  appendPQExpBuffer(query, "SELECT tableoid, oid, opcname, "
4998  "opcnamespace, "
4999  "(%s opcowner) AS rolname "
5000  "FROM pg_opclass",
5002 
5003  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5004 
5005  ntups = PQntuples(res);
5006  *numOpclasses = ntups;
5007 
5008  opcinfo = (OpclassInfo *) pg_malloc(ntups * sizeof(OpclassInfo));
5009 
5010  i_tableoid = PQfnumber(res, "tableoid");
5011  i_oid = PQfnumber(res, "oid");
5012  i_opcname = PQfnumber(res, "opcname");
5013  i_opcnamespace = PQfnumber(res, "opcnamespace");
5014  i_rolname = PQfnumber(res, "rolname");
5015 
5016  for (i = 0; i < ntups; i++)
5017  {
5018  opcinfo[i].dobj.objType = DO_OPCLASS;
5019  opcinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5020  opcinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5021  AssignDumpId(&opcinfo[i].dobj);
5022  opcinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opcname));
5023  opcinfo[i].dobj.namespace =
5024  findNamespace(fout,
5025  atooid(PQgetvalue(res, i, i_opcnamespace)));
5026  opcinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5027 
5028  /* Decide whether we want to dump it */
5029  selectDumpableObject(&(opcinfo[i].dobj), fout);
5030 
5031  /* Op Classes do not currently have ACLs. */
5032  opcinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5033 
5034  if (strlen(opcinfo[i].rolname) == 0)
5035  write_msg(NULL, "WARNING: owner of operator class \"%s\" appears to be invalid\n",
5036  opcinfo[i].dobj.name);
5037  }
5038 
5039  PQclear(res);
5040 
5041  destroyPQExpBuffer(query);
5042 
5043  return opcinfo;
5044 }
5045 
5046 /*
5047  * getOpfamilies:
5048  * read all opfamilies in the system catalogs and return them in the
5049  * OpfamilyInfo* structure
5050  *
5051  * numOpfamilies is set to the number of opfamilies read in
5052  */
5053 OpfamilyInfo *
5054 getOpfamilies(Archive *fout, int *numOpfamilies)
5055 {
5056  PGresult *res;
5057  int ntups;
5058  int i;
5059  PQExpBuffer query;
5060  OpfamilyInfo *opfinfo;
5061  int i_tableoid;
5062  int i_oid;
5063  int i_opfname;
5064  int i_opfnamespace;
5065  int i_rolname;
5066 
5067  /* Before 8.3, there is no separate concept of opfamilies */
5068  if (fout->remoteVersion < 80300)
5069  {
5070  *numOpfamilies = 0;
5071  return NULL;
5072  }
5073 
5074  query = createPQExpBuffer();
5075 
5076  /*
5077  * find all opfamilies, including builtin opfamilies; we filter out
5078  * system-defined opfamilies at dump-out time.
5079  */
5080 
5081  /* Make sure we are in proper schema */
5082  selectSourceSchema(fout, "pg_catalog");
5083 
5084  appendPQExpBuffer(query, "SELECT tableoid, oid, opfname, "
5085  "opfnamespace, "
5086  "(%s opfowner) AS rolname "
5087  "FROM pg_opfamily",
5089 
5090  res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5091 
5092  ntups = PQntuples(res);
5093  *numOpfamilies = ntups;
5094 
5095  opfinfo = (OpfamilyInfo *) pg_malloc(ntups * sizeof(OpfamilyInfo));
5096 
5097  i_tableoid = PQfnumber(res, "tableoid");
5098  i_oid = PQfnumber(res, "oid");
5099  i_opfname = PQfnumber(res, "opfname");
5100  i_opfnamespace = PQfnumber(res, "opfnamespace");
5101  i_rolname = PQfnumber(res, "rolname");
5102 
5103  for (i = 0; i < ntups; i++)
5104  {
5105  opfinfo[i].dobj.objType = DO_OPFAMILY;
5106  opfinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5107  opfinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5108  AssignDumpId(&opfinfo[i].dobj);
5109  opfinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opfname));
5110  opfinfo[i].dobj.namespace =
5111  findNamespace(fout,
5112  atooid(PQgetvalue(res, i, i_opfnamespace)));
5113  opfinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
5114 
5115  /* Decide whether we want to dump it */
5116  selectDumpableObject(&(opfinfo[i].dobj), fout);
5117 
5118  /* Extensions do not currently have ACLs. */
5119  opfinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
5120 
5121  if (strlen(opfinfo[i].rolname) == 0)
5122  write_msg(NULL, "WARNING: owner of operator family \"%s\" appears to be invalid\n",
5123  opfinfo[i].dobj.name);
5124  }
5125 
5126  PQclear(res);
5127 
5128  destroyPQExpBuffer(query);
5129 
5130  return opfinfo;
5131 }
5132 
5133 /*
5134  * getAggregates:
5135  * read all the user-defined aggregates in the system catalogs and
5136  * return them in the AggInfo* structure
5137  *
5138  * numAggs is set to the number of aggregates read in
5139  */
5140 AggInfo *
5141 getAggregates(Archive *fout, int *numAggs)
5142 {
5143  DumpOptions *dopt = fout->dopt;
5144  PGresult *res;
5145  int ntups;
5146