/*-------------------------------------------------------------------------
 *
 * pg_dump.c
 *	  pg_dump is a utility for dumping out a postgres database
 *	  into a script file.
 *
 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * pg_dump will read the system catalogs in a database and dump out a
 * script that reproduces the schema in terms of SQL that is understood
 * by PostgreSQL
 *
 * Note that pg_dump runs in a transaction-snapshot mode transaction,
 * so it sees a consistent snapshot of the database including system
 * catalogs. However, it relies in part on various specialized backend
 * functions like pg_get_indexdef(), and those things tend to look at
 * the currently committed state. So it is possible to get a 'cache
 * lookup failed' error if someone performs DDL changes while a dump is
 * happening. The window for this sort of thing is from the acquisition
 * of the transaction snapshot to getSchemaData() (when pg_dump acquires
 * AccessShareLock on every table it intends to dump). It isn't very large,
 * but it can happen.
 *
 * http://archives.postgresql.org/pgsql-bugs/2010-02/msg00187.php
 *
 * IDENTIFICATION
 *	  src/bin/pg_dump/pg_dump.c
 *
 *-------------------------------------------------------------------------
 */
32#include "postgres_fe.h"
33
34#include <unistd.h>
35#include <ctype.h>
36#include <limits.h>
37#ifdef HAVE_TERMIOS_H
38#include <termios.h>
39#endif
40
41#include "access/attnum.h"
42#include "access/sysattr.h"
43#include "access/transam.h"
44#include "catalog/pg_aggregate_d.h"
45#include "catalog/pg_am_d.h"
46#include "catalog/pg_attribute_d.h"
47#include "catalog/pg_authid_d.h"
48#include "catalog/pg_cast_d.h"
49#include "catalog/pg_class_d.h"
50#include "catalog/pg_constraint_d.h"
51#include "catalog/pg_default_acl_d.h"
52#include "catalog/pg_largeobject_d.h"
53#include "catalog/pg_largeobject_metadata_d.h"
54#include "catalog/pg_proc_d.h"
55#include "catalog/pg_publication_d.h"
56#include "catalog/pg_shdepend_d.h"
57#include "catalog/pg_subscription_d.h"
58#include "catalog/pg_type_d.h"
59#include "common/connect.h"
60#include "common/int.h"
61#include "common/relpath.h"
62#include "common/shortest_dec.h"
63#include "compress_io.h"
64#include "dumputils.h"
67#include "filter.h"
68#include "getopt_long.h"
69#include "libpq/libpq-fs.h"
70#include "parallel.h"
71#include "pg_backup_db.h"
72#include "pg_backup_utils.h"
73#include "pg_dump.h"
75#include "storage/block.h"
76
77typedef struct
78{
79 Oid roleoid; /* role's OID */
80 const char *rolename; /* role's name */
82
83typedef struct
84{
85 const char *descr; /* comment for an object */
86 Oid classoid; /* object class (catalog OID) */
87 Oid objoid; /* object OID */
88 int objsubid; /* subobject (table column #) */
90
91typedef struct
92{
93 const char *provider; /* label provider of this security label */
94 const char *label; /* security label for an object */
95 Oid classoid; /* object class (catalog OID) */
96 Oid objoid; /* object OID */
97 int objsubid; /* subobject (table column #) */
99
100typedef struct
101{
102 Oid oid; /* object OID */
103 char relkind; /* object kind */
104 RelFileNumber relfilenumber; /* object filenode */
105 Oid toast_oid; /* toast table OID */
106 RelFileNumber toast_relfilenumber; /* toast table filenode */
107 Oid toast_index_oid; /* toast table index OID */
108 RelFileNumber toast_index_relfilenumber; /* toast table index filenode */
110
111/* sequence types */
118
/*
 * SQL type names corresponding to the values of the SeqType enum (the
 * enum declaration is elided in this excerpt); the table is indexed by
 * SeqType, and the nearby "array length mismatch" static assertion
 * checks that it stays in sync with the enum.
 */
119static const char *const SeqTypeNames[] =
120{
121 [SEQTYPE_SMALLINT] = "smallint",
122 [SEQTYPE_INTEGER] = "integer",
123 [SEQTYPE_BIGINT] = "bigint",
124};
125
127 "array length mismatch");
128
129typedef struct
130{
131 Oid oid; /* sequence OID */
132 SeqType seqtype; /* data type of sequence */
133 bool cycled; /* whether sequence cycles */
134 int64 minv; /* minimum value */
135 int64 maxv; /* maximum value */
136 int64 startv; /* start value */
137 int64 incby; /* increment value */
138 int64 cache; /* cache size */
139 int64 last_value; /* last value of sequence */
140 bool is_called; /* whether nextval advances before returning */
141 bool null_seqtuple; /* did pg_get_sequence_data return nulls? */
143
150
151/* global decls */
152static bool dosync = true; /* Issue fsync() to make dump durable on disk. */
153
154static Oid g_last_builtin_oid; /* value of the last builtin oid */
155
/* The specified names/patterns should match at least one entity */
157static int strict_names = 0;
158
160
161/*
162 * Object inclusion/exclusion lists
163 *
164 * The string lists record the patterns given by command-line switches,
165 * which we then convert to lists of OIDs of matching objects.
166 */
171
181
184
187
190
191static const CatalogId nilCatalogId = {0, 0};
192
193/* override for standard extra_float_digits setting */
194static bool have_extra_float_digits = false;
196
197/* sorted table of role names */
199static int nrolenames = 0;
200
201/* sorted table of comments */
203static int ncomments = 0;
204
205/* sorted table of security labels */
207static int nseclabels = 0;
208
209/* sorted table of pg_class information for binary upgrade */
212
213/* sorted table of sequences */
215static int nsequences = 0;
216
217/* Maximum number of relations to fetch in a fetchAttributeStats() call. */
218#define MAX_ATTR_STATS_RELS 64
219
220/*
221 * The default number of rows per INSERT when
222 * --inserts is specified without --rows-per-insert
223 */
224#define DUMP_DEFAULT_ROWS_PER_INSERT 1
225
226/*
227 * Maximum number of large objects to group into a single ArchiveEntry.
228 * At some point we might want to make this user-controllable, but for now
229 * a hard-wired setting will suffice.
230 */
231#define MAX_BLOBS_PER_ARCHIVE_ENTRY 1000
232
/*
 * Macro for producing the quoted, schema-qualified name of a dumpable
 * object.  Expands to a fmtQualifiedId() call on the object's namespace
 * name and its own name.  Note that "obj" is evaluated more than once,
 * so arguments with side effects should be avoided.
 */
236#define fmtQualifiedDumpable(obj) \
237 fmtQualifiedId((obj)->dobj.namespace->dobj.name, \
238 (obj)->dobj.name)
239
240static void help(const char *progname);
241static void setup_connection(Archive *AH,
242 const char *dumpencoding, const char *dumpsnapshot,
243 char *use_role);
247 SimpleOidList *oids,
248 bool strict_names);
251 SimpleOidList *oids,
252 bool strict_names);
255 SimpleOidList *oids);
258 SimpleOidList *oids,
259 bool strict_names,
260 bool with_child_tables);
261static void prohibit_crossdb_refs(PGconn *conn, const char *dbname,
262 const char *pattern);
263
265static void dumpTableData(Archive *fout, const TableDataInfo *tdinfo);
267static const char *getRoleName(const char *roleoid_str);
268static void collectRoleNames(Archive *fout);
269static void getAdditionalACLs(Archive *fout);
270static void dumpCommentExtended(Archive *fout, const char *type,
271 const char *name, const char *namespace,
272 const char *owner, CatalogId catalogId,
273 int subid, DumpId dumpId,
274 const char *initdb_comment);
275static inline void dumpComment(Archive *fout, const char *type,
276 const char *name, const char *namespace,
277 const char *owner, CatalogId catalogId,
278 int subid, DumpId dumpId);
279static int findComments(Oid classoid, Oid objoid, CommentItem **items);
280static void collectComments(Archive *fout);
281static void dumpSecLabel(Archive *fout, const char *type, const char *name,
282 const char *namespace, const char *owner,
283 CatalogId catalogId, int subid, DumpId dumpId);
284static int findSecLabels(Oid classoid, Oid objoid, SecLabelItem **items);
285static void collectSecLabels(Archive *fout);
286static void dumpDumpableObject(Archive *fout, DumpableObject *dobj);
287static void dumpNamespace(Archive *fout, const NamespaceInfo *nspinfo);
288static void dumpExtension(Archive *fout, const ExtensionInfo *extinfo);
289static void dumpType(Archive *fout, const TypeInfo *tyinfo);
290static void dumpBaseType(Archive *fout, const TypeInfo *tyinfo);
291static void dumpEnumType(Archive *fout, const TypeInfo *tyinfo);
292static void dumpRangeType(Archive *fout, const TypeInfo *tyinfo);
293static void dumpUndefinedType(Archive *fout, const TypeInfo *tyinfo);
294static void dumpDomain(Archive *fout, const TypeInfo *tyinfo);
295static void dumpCompositeType(Archive *fout, const TypeInfo *tyinfo);
297 PGresult *res);
298static void dumpShellType(Archive *fout, const ShellTypeInfo *stinfo);
299static void dumpProcLang(Archive *fout, const ProcLangInfo *plang);
300static void dumpFunc(Archive *fout, const FuncInfo *finfo);
301static void dumpCast(Archive *fout, const CastInfo *cast);
302static void dumpTransform(Archive *fout, const TransformInfo *transform);
303static void dumpOpr(Archive *fout, const OprInfo *oprinfo);
305static void dumpOpclass(Archive *fout, const OpclassInfo *opcinfo);
306static void dumpOpfamily(Archive *fout, const OpfamilyInfo *opfinfo);
307static void dumpCollation(Archive *fout, const CollInfo *collinfo);
308static void dumpConversion(Archive *fout, const ConvInfo *convinfo);
309static void dumpRule(Archive *fout, const RuleInfo *rinfo);
310static void dumpAgg(Archive *fout, const AggInfo *agginfo);
311static void dumpTrigger(Archive *fout, const TriggerInfo *tginfo);
313static void dumpTable(Archive *fout, const TableInfo *tbinfo);
314static void dumpTableSchema(Archive *fout, const TableInfo *tbinfo);
316static void dumpAttrDef(Archive *fout, const AttrDefInfo *adinfo);
317static void collectSequences(Archive *fout);
318static void dumpSequence(Archive *fout, const TableInfo *tbinfo);
319static void dumpSequenceData(Archive *fout, const TableDataInfo *tdinfo);
320static void dumpIndex(Archive *fout, const IndxInfo *indxinfo);
324static void dumpConstraint(Archive *fout, const ConstraintInfo *coninfo);
326static void dumpTSParser(Archive *fout, const TSParserInfo *prsinfo);
327static void dumpTSDictionary(Archive *fout, const TSDictInfo *dictinfo);
329static void dumpTSConfig(Archive *fout, const TSConfigInfo *cfginfo);
332static void dumpUserMappings(Archive *fout,
333 const char *servername, const char *namespace,
334 const char *owner, CatalogId catalogId, DumpId dumpId);
336
338 const char *type, const char *name, const char *subname,
339 const char *nspname, const char *tag, const char *owner,
340 const DumpableAcl *dacl);
341
342static void getDependencies(Archive *fout);
344static void findDumpableDependencies(ArchiveHandle *AH, const DumpableObject *dobj,
345 DumpId **dependencies, int *nDeps, int *allocDeps);
346
350
351static void addConstrChildIdxDeps(DumpableObject *dobj, const IndxInfo *refidx);
353static void getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, char relkind);
354static void makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo);
356static void getTableDataFKConstraints(void);
357static void determineNotNullFlags(Archive *fout, PGresult *res, int r,
358 TableInfo *tbinfo, int j,
359 int i_notnull_name,
365static char *format_function_arguments(const FuncInfo *finfo, const char *funcargs,
366 bool is_agg);
368 const FuncInfo *finfo, bool honor_quotes);
369static char *convertRegProcReference(const char *proc);
370static char *getFormattedOperatorName(const char *oproid);
371static char *convertTSFunction(Archive *fout, Oid funcOid);
372static const char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts);
373static void getLOs(Archive *fout);
374static void dumpLO(Archive *fout, const LoInfo *loinfo);
375static int dumpLOs(Archive *fout, const void *arg);
376static void dumpPolicy(Archive *fout, const PolicyInfo *polinfo);
379static void dumpSubscription(Archive *fout, const SubscriptionInfo *subinfo);
381static void dumpDatabase(Archive *fout);
382static void dumpDatabaseConfig(Archive *AH, PQExpBuffer outbuf,
383 const char *dbname, Oid dboid);
384static void dumpEncoding(Archive *AH);
385static void dumpStdStrings(Archive *AH);
386static void dumpSearchPath(Archive *AH);
390 bool force_array_type,
394 const TableInfo *tbinfo);
400 const DumpableObject *dobj,
401 const char *objtype,
402 const char *objname,
403 const char *objnamespace);
404static const char *getAttrName(int attrnum, const TableInfo *tblInfo);
405static const char *fmtCopyColumnList(const TableInfo *ti, PQExpBuffer buffer);
406static bool nonemptyReloptions(const char *reloptions);
407static void appendReloptionsArrayAH(PQExpBuffer buffer, const char *reloptions,
408 const char *prefix, Archive *fout);
410static void set_restrict_relation_kind(Archive *AH, const char *value);
411static void setupDumpWorker(Archive *AH);
413static bool forcePartitionRootLoad(const TableInfo *tbinfo);
414static void read_dump_filters(const char *filename, DumpOptions *dopt);
415
416
417int
418main(int argc, char **argv)
419{
420 int c;
421 const char *filename = NULL;
422 const char *format = "p";
423 TableInfo *tblinfo;
424 int numTables;
426 int numObjs;
428 int i;
429 int optindex;
430 RestoreOptions *ropt;
431 Archive *fout; /* the script file */
432 bool g_verbose = false;
433 const char *dumpencoding = NULL;
434 const char *dumpsnapshot = NULL;
435 char *use_role = NULL;
436 int numWorkers = 1;
437 int plainText = 0;
440 pg_compress_specification compression_spec = {0};
441 char *compression_detail = NULL;
442 char *compression_algorithm_str = "none";
443 char *error_detail = NULL;
444 bool user_compression_defined = false;
446 bool data_only = false;
447 bool schema_only = false;
448 bool statistics_only = false;
449 bool with_statistics = false;
450 bool no_data = false;
451 bool no_schema = false;
452 bool no_statistics = false;
453
454 static DumpOptions dopt;
455
456 static struct option long_options[] = {
457 {"data-only", no_argument, NULL, 'a'},
458 {"blobs", no_argument, NULL, 'b'},
459 {"large-objects", no_argument, NULL, 'b'},
460 {"no-blobs", no_argument, NULL, 'B'},
461 {"no-large-objects", no_argument, NULL, 'B'},
462 {"clean", no_argument, NULL, 'c'},
463 {"create", no_argument, NULL, 'C'},
464 {"dbname", required_argument, NULL, 'd'},
465 {"extension", required_argument, NULL, 'e'},
466 {"file", required_argument, NULL, 'f'},
467 {"format", required_argument, NULL, 'F'},
468 {"host", required_argument, NULL, 'h'},
469 {"jobs", 1, NULL, 'j'},
470 {"no-reconnect", no_argument, NULL, 'R'},
471 {"no-owner", no_argument, NULL, 'O'},
472 {"port", required_argument, NULL, 'p'},
473 {"schema", required_argument, NULL, 'n'},
474 {"exclude-schema", required_argument, NULL, 'N'},
475 {"schema-only", no_argument, NULL, 's'},
476 {"superuser", required_argument, NULL, 'S'},
477 {"table", required_argument, NULL, 't'},
478 {"exclude-table", required_argument, NULL, 'T'},
479 {"no-password", no_argument, NULL, 'w'},
480 {"password", no_argument, NULL, 'W'},
481 {"username", required_argument, NULL, 'U'},
482 {"verbose", no_argument, NULL, 'v'},
483 {"no-privileges", no_argument, NULL, 'x'},
484 {"no-acl", no_argument, NULL, 'x'},
485 {"compress", required_argument, NULL, 'Z'},
486 {"encoding", required_argument, NULL, 'E'},
487 {"help", no_argument, NULL, '?'},
488 {"version", no_argument, NULL, 'V'},
489
490 /*
491 * the following options don't have an equivalent short option letter
492 */
493 {"attribute-inserts", no_argument, &dopt.column_inserts, 1},
494 {"binary-upgrade", no_argument, &dopt.binary_upgrade, 1},
495 {"column-inserts", no_argument, &dopt.column_inserts, 1},
496 {"disable-dollar-quoting", no_argument, &dopt.disable_dollar_quoting, 1},
497 {"disable-triggers", no_argument, &dopt.disable_triggers, 1},
498 {"enable-row-security", no_argument, &dopt.enable_row_security, 1},
499 {"exclude-table-data", required_argument, NULL, 4},
500 {"extra-float-digits", required_argument, NULL, 8},
501 {"if-exists", no_argument, &dopt.if_exists, 1},
502 {"inserts", no_argument, NULL, 9},
503 {"lock-wait-timeout", required_argument, NULL, 2},
504 {"no-table-access-method", no_argument, &dopt.outputNoTableAm, 1},
505 {"no-tablespaces", no_argument, &dopt.outputNoTablespaces, 1},
506 {"quote-all-identifiers", no_argument, &quote_all_identifiers, 1},
507 {"load-via-partition-root", no_argument, &dopt.load_via_partition_root, 1},
508 {"role", required_argument, NULL, 3},
509 {"section", required_argument, NULL, 5},
510 {"serializable-deferrable", no_argument, &dopt.serializable_deferrable, 1},
511 {"snapshot", required_argument, NULL, 6},
512 {"statistics", no_argument, NULL, 22},
513 {"statistics-only", no_argument, NULL, 18},
514 {"strict-names", no_argument, &strict_names, 1},
515 {"use-set-session-authorization", no_argument, &dopt.use_setsessauth, 1},
516 {"no-comments", no_argument, &dopt.no_comments, 1},
517 {"no-data", no_argument, NULL, 19},
518 {"no-policies", no_argument, &dopt.no_policies, 1},
519 {"no-publications", no_argument, &dopt.no_publications, 1},
520 {"no-schema", no_argument, NULL, 20},
521 {"no-security-labels", no_argument, &dopt.no_security_labels, 1},
522 {"no-statistics", no_argument, NULL, 21},
523 {"no-subscriptions", no_argument, &dopt.no_subscriptions, 1},
524 {"no-toast-compression", no_argument, &dopt.no_toast_compression, 1},
525 {"no-unlogged-table-data", no_argument, &dopt.no_unlogged_table_data, 1},
526 {"no-sync", no_argument, NULL, 7},
527 {"on-conflict-do-nothing", no_argument, &dopt.do_nothing, 1},
528 {"rows-per-insert", required_argument, NULL, 10},
529 {"include-foreign-data", required_argument, NULL, 11},
530 {"table-and-children", required_argument, NULL, 12},
531 {"exclude-table-and-children", required_argument, NULL, 13},
532 {"exclude-table-data-and-children", required_argument, NULL, 14},
533 {"sync-method", required_argument, NULL, 15},
534 {"filter", required_argument, NULL, 16},
535 {"exclude-extension", required_argument, NULL, 17},
536 {"sequence-data", no_argument, &dopt.sequence_data, 1},
537 {"restrict-key", required_argument, NULL, 25},
538
539 {NULL, 0, NULL, 0}
540 };
541
542 pg_logging_init(argv[0]);
544 set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_dump"));
545
546 /*
547 * Initialize what we need for parallel execution, especially for thread
548 * support on Windows.
549 */
551
552 progname = get_progname(argv[0]);
553
554 if (argc > 1)
555 {
556 if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
557 {
558 help(progname);
559 exit_nicely(0);
560 }
561 if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
562 {
563 puts("pg_dump (PostgreSQL) " PG_VERSION);
564 exit_nicely(0);
565 }
566 }
567
568 InitDumpOptions(&dopt);
569
570 while ((c = getopt_long(argc, argv, "abBcCd:e:E:f:F:h:j:n:N:Op:RsS:t:T:U:vwWxXZ:",
571 long_options, &optindex)) != -1)
572 {
573 switch (c)
574 {
575 case 'a': /* Dump data only */
576 data_only = true;
577 break;
578
579 case 'b': /* Dump LOs */
580 dopt.outputLOs = true;
581 break;
582
583 case 'B': /* Don't dump LOs */
584 dopt.dontOutputLOs = true;
585 break;
586
587 case 'c': /* clean (i.e., drop) schema prior to create */
588 dopt.outputClean = 1;
589 break;
590
591 case 'C': /* Create DB */
592 dopt.outputCreateDB = 1;
593 break;
594
595 case 'd': /* database name */
597 break;
598
599 case 'e': /* include extension(s) */
601 dopt.include_everything = false;
602 break;
603
604 case 'E': /* Dump encoding */
606 break;
607
608 case 'f':
610 break;
611
612 case 'F':
614 break;
615
616 case 'h': /* server host */
618 break;
619
620 case 'j': /* number of dump jobs */
621 if (!option_parse_int(optarg, "-j/--jobs", 1,
623 &numWorkers))
624 exit_nicely(1);
625 break;
626
627 case 'n': /* include schema(s) */
629 dopt.include_everything = false;
630 break;
631
632 case 'N': /* exclude schema(s) */
634 break;
635
636 case 'O': /* Don't reconnect to match owner */
637 dopt.outputNoOwner = 1;
638 break;
639
640 case 'p': /* server port */
642 break;
643
644 case 'R':
645 /* no-op, still accepted for backwards compatibility */
646 break;
647
648 case 's': /* dump schema only */
649 schema_only = true;
650 break;
651
652 case 'S': /* Username for superuser in plain text output */
654 break;
655
656 case 't': /* include table(s) */
658 dopt.include_everything = false;
659 break;
660
661 case 'T': /* exclude table(s) */
663 break;
664
665 case 'U':
667 break;
668
669 case 'v': /* verbose */
670 g_verbose = true;
672 break;
673
674 case 'w':
676 break;
677
678 case 'W':
680 break;
681
682 case 'x': /* skip ACL dump */
683 dopt.aclsSkip = true;
684 break;
685
686 case 'Z': /* Compression */
690 break;
691
692 case 0:
693 /* This covers the long options. */
694 break;
695
696 case 2: /* lock-wait-timeout */
698 break;
699
700 case 3: /* SET ROLE */
701 use_role = pg_strdup(optarg);
702 break;
703
704 case 4: /* exclude table(s) data */
706 break;
707
708 case 5: /* section */
710 break;
711
712 case 6: /* snapshot */
714 break;
715
716 case 7: /* no-sync */
717 dosync = false;
718 break;
719
720 case 8:
722 if (!option_parse_int(optarg, "--extra-float-digits", -15, 3,
724 exit_nicely(1);
725 break;
726
727 case 9: /* inserts */
728
729 /*
730 * dump_inserts also stores --rows-per-insert, careful not to
731 * overwrite that.
732 */
733 if (dopt.dump_inserts == 0)
735 break;
736
737 case 10: /* rows per insert */
738 if (!option_parse_int(optarg, "--rows-per-insert", 1, INT_MAX,
739 &dopt.dump_inserts))
740 exit_nicely(1);
741 break;
742
743 case 11: /* include foreign data */
745 optarg);
746 break;
747
748 case 12: /* include table(s) and their children */
750 optarg);
751 dopt.include_everything = false;
752 break;
753
754 case 13: /* exclude table(s) and their children */
756 optarg);
757 break;
758
759 case 14: /* exclude data of table(s) and children */
761 optarg);
762 break;
763
764 case 15:
766 exit_nicely(1);
767 break;
768
769 case 16: /* read object filters from file */
771 break;
772
773 case 17: /* exclude extension(s) */
775 optarg);
776 break;
777
778 case 18:
779 statistics_only = true;
780 break;
781
782 case 19:
783 no_data = true;
784 break;
785
786 case 20:
787 no_schema = true;
788 break;
789
790 case 21:
791 no_statistics = true;
792 break;
793
794 case 22:
795 with_statistics = true;
796 break;
797
798 case 25:
800 break;
801
802 default:
803 /* getopt_long already emitted a complaint */
804 pg_log_error_hint("Try \"%s --help\" for more information.", progname);
805 exit_nicely(1);
806 }
807 }
808
809 /*
810 * Non-option argument specifies database name as long as it wasn't
811 * already specified with -d / --dbname
812 */
813 if (optind < argc && dopt.cparams.dbname == NULL)
814 dopt.cparams.dbname = argv[optind++];
815
816 /* Complain if any arguments remain */
817 if (optind < argc)
818 {
819 pg_log_error("too many command-line arguments (first is \"%s\")",
820 argv[optind]);
821 pg_log_error_hint("Try \"%s --help\" for more information.", progname);
822 exit_nicely(1);
823 }
824
825 /* --column-inserts implies --inserts */
826 if (dopt.column_inserts && dopt.dump_inserts == 0)
828
829 /* reject conflicting "-only" options */
830 if (data_only && schema_only)
831 pg_fatal("options %s and %s cannot be used together",
832 "-s/--schema-only", "-a/--data-only");
834 pg_fatal("options %s and %s cannot be used together",
835 "-s/--schema-only", "--statistics-only");
837 pg_fatal("options %s and %s cannot be used together",
838 "-a/--data-only", "--statistics-only");
839
840 /* reject conflicting "-only" and "no-" options */
841 if (data_only && no_data)
842 pg_fatal("options %s and %s cannot be used together",
843 "-a/--data-only", "--no-data");
844 if (schema_only && no_schema)
845 pg_fatal("options %s and %s cannot be used together",
846 "-s/--schema-only", "--no-schema");
848 pg_fatal("options %s and %s cannot be used together",
849 "--statistics-only", "--no-statistics");
850
851 /* reject conflicting "no-" options */
853 pg_fatal("options %s and %s cannot be used together",
854 "--statistics", "--no-statistics");
855
856 /* reject conflicting "-only" options */
858 pg_fatal("options %s and %s cannot be used together",
859 "-a/--data-only", "--statistics");
861 pg_fatal("options %s and %s cannot be used together",
862 "-s/--schema-only", "--statistics");
863
865 pg_fatal("options %s and %s cannot be used together",
866 "-s/--schema-only", "--include-foreign-data");
867
868 if (numWorkers > 1 && foreign_servers_include_patterns.head != NULL)
869 pg_fatal("option %s is not supported with parallel backup",
870 "--include-foreign-data");
871
872 if (data_only && dopt.outputClean)
873 pg_fatal("options %s and %s cannot be used together",
874 "-c/--clean", "-a/--data-only");
875
876 if (dopt.if_exists && !dopt.outputClean)
877 pg_fatal("option %s requires option %s",
878 "--if-exists", "-c/--clean");
879
880 /*
881 * Set derivative flags. Ambiguous or nonsensical combinations, e.g.
882 * "--schema-only --no-schema", will have already caused an error in one
883 * of the checks above.
884 */
885 dopt.dumpData = ((dopt.dumpData && !schema_only && !statistics_only) ||
886 data_only) && !no_data;
887 dopt.dumpSchema = ((dopt.dumpSchema && !data_only && !statistics_only) ||
889 dopt.dumpStatistics = ((dopt.dumpStatistics && !schema_only && !data_only) ||
891
892
893 /*
894 * --inserts are already implied above if --column-inserts or
895 * --rows-per-insert were specified.
896 */
897 if (dopt.do_nothing && dopt.dump_inserts == 0)
898 pg_fatal("option %s requires option %s, %s, or %s",
899 "--on-conflict-do-nothing",
900 "--inserts", "--rows-per-insert", "--column-inserts");
901
902 /* Identify archive format to emit */
904
905 /* archiveFormat specific setup */
906 if (archiveFormat == archNull)
907 {
908 plainText = 1;
909
910 /*
911 * If you don't provide a restrict key, one will be appointed for you.
912 */
913 if (!dopt.restrict_key)
915 if (!dopt.restrict_key)
916 pg_fatal("could not generate restrict key");
918 pg_fatal("invalid restrict key");
919 }
920 else if (dopt.restrict_key)
921 pg_fatal("option %s can only be used with %s",
922 "--restrict-key", "--format=plain");
923
924 /*
925 * Custom and directory formats are compressed by default with gzip when
926 * available, not the others. If gzip is not available, no compression is
927 * done by default.
928 */
931 {
932#ifdef HAVE_LIBZ
934#else
936#endif
937 }
938
939 /*
940 * Compression options
941 */
944 pg_fatal("unrecognized compression algorithm: \"%s\"",
946
948 &compression_spec);
950 if (error_detail != NULL)
951 pg_fatal("invalid compression specification: %s",
953
954 error_detail = supports_compression(compression_spec);
955 if (error_detail != NULL)
956 pg_fatal("%s", error_detail);
957
958 /*
959 * Disable support for zstd workers for now - these are based on
960 * threading, and it's unclear how it interacts with parallel dumps on
961 * platforms where that relies on threads too (e.g. Windows).
962 */
963 if (compression_spec.options & PG_COMPRESSION_OPTION_WORKERS)
964 pg_log_warning("compression option \"%s\" is not currently supported by pg_dump",
965 "workers");
966
967 /*
968 * If emitting an archive format, we always want to emit a DATABASE item,
969 * in case --create is specified at pg_restore time.
970 */
971 if (!plainText)
972 dopt.outputCreateDB = 1;
973
974 /* Parallel backup only in the directory archive format so far */
975 if (archiveFormat != archDirectory && numWorkers > 1)
976 pg_fatal("parallel backup only supported by the directory format");
977
978 /* Open the output file */
979 fout = CreateArchive(filename, archiveFormat, compression_spec,
981
982 /* Make dump options accessible right away */
983 SetArchiveOptions(fout, &dopt, NULL);
984
985 /* Register the cleanup hook */
987
988 /* Let the archiver know how noisy to be */
990
991
992 /*
993 * We allow the server to be back to 9.2, and up to any minor release of
994 * our own major version. (See also version check in pg_dumpall.c.)
995 */
996 fout->minRemoteVersion = 90200;
997 fout->maxRemoteVersion = (PG_VERSION_NUM / 100) * 100 + 99;
998
999 fout->numWorkers = numWorkers;
1000
1001 /*
1002 * Open the database using the Archiver, so it knows about it. Errors mean
1003 * death.
1004 */
1005 ConnectDatabaseAhx(fout, &dopt.cparams, false);
1007
1008 /*
1009 * On hot standbys, never try to dump unlogged table data, since it will
1010 * just throw an error.
1011 */
1012 if (fout->isStandby)
1013 dopt.no_unlogged_table_data = true;
1014
1015 /*
1016 * Find the last built-in OID, if needed (prior to 8.1)
1017 *
1018 * With 8.1 and above, we can just use FirstNormalObjectId - 1.
1019 */
1021
1022 pg_log_info("last built-in OID is %u", g_last_builtin_oid);
1023
1024 /* Expand schema selection patterns into OID lists */
1026 {
1029 strict_names);
1031 pg_fatal("no matching schemas were found");
1032 }
1035 false);
1036 /* non-matching exclusion patterns aren't an error */
1037
1038 /* Expand table selection patterns into OID lists */
1041 strict_names, false);
1044 strict_names, true);
1048 pg_fatal("no matching tables were found");
1049
1052 false, false);
1055 false, true);
1056
1059 false, false);
1062 false, true);
1063
1066
1067 /* non-matching exclusion patterns aren't an error */
1068
1069 /* Expand extension selection patterns into OID lists */
1071 {
1074 strict_names);
1076 pg_fatal("no matching extensions were found");
1077 }
1080 false);
1081 /* non-matching exclusion patterns aren't an error */
1082
1083 /*
1084 * Dumping LOs is the default for dumps where an inclusion switch is not
1085 * used (an "include everything" dump). -B can be used to exclude LOs
1086 * from those dumps. -b can be used to include LOs even when an inclusion
1087 * switch is used.
1088 *
1089 * -s means "schema only" and LOs are data, not schema, so we never
1090 * include LOs when -s is used.
1091 */
1092 if (dopt.include_everything && dopt.dumpData && !dopt.dontOutputLOs)
1093 dopt.outputLOs = true;
1094
1095 /*
1096 * Collect role names so we can map object owner OIDs to names.
1097 */
1099
1100 /*
1101 * Now scan the database and create DumpableObject structs for all the
1102 * objects we intend to dump.
1103 */
1104 tblinfo = getSchemaData(fout, &numTables);
1105
1106 if (dopt.dumpData)
1107 {
1108 getTableData(&dopt, tblinfo, numTables, 0);
1110 if (!dopt.dumpSchema)
1112 }
1113
1114 if (!dopt.dumpData && dopt.sequence_data)
1115 getTableData(&dopt, tblinfo, numTables, RELKIND_SEQUENCE);
1116
1117 /*
1118 * For binary upgrade mode, dump the pg_shdepend rows for large objects
1119 * and maybe even pg_largeobject_metadata (see comment below for details).
1120 * This is faster to restore than the equivalent set of large object
1121 * commands.
1122 */
1123 if (dopt.binary_upgrade)
1124 {
1126
1129
1130 /*
1131 * Only dump large object shdepend rows for this database.
1132 */
1133 shdepend->dataObj->filtercond = "WHERE classid = 'pg_largeobject'::regclass "
1134 "AND dbid = (SELECT oid FROM pg_database "
1135 " WHERE datname = current_database())";
1136
1137 /*
1138 * For binary upgrades from v16 and newer versions, we can copy
1139 * pg_largeobject_metadata's files from the old cluster, so we don't
1140 * need to dump its contents. pg_upgrade can't copy/link the files
1141 * from older versions because aclitem (needed by
1142 * pg_largeobject_metadata.lomacl) changed its storage format in v16.
1143 */
1144 if (fout->remoteVersion < 160000)
1145 {
1147
1150 }
1151 }
1152
1153 /*
1154 * In binary-upgrade mode, we do not have to worry about the actual LO
1155 * data or the associated metadata that resides in the pg_largeobject and
1156 * pg_largeobject_metadata tables, respectively.
1157 *
1158 * However, we do need to collect LO information as there may be comments
1159 * or other information on LOs that we do need to dump out.
1160 */
1161 if (dopt.outputLOs || dopt.binary_upgrade)
1162 getLOs(fout);
1163
1164 /*
1165 * Collect dependency data to assist in ordering the objects.
1166 */
1168
1169 /*
1170 * Collect ACLs, comments, and security labels, if wanted.
1171 */
1172 if (!dopt.aclsSkip)
1174 if (!dopt.no_comments)
1176 if (!dopt.no_security_labels)
1178
1179 /* For binary upgrade mode, collect required pg_class information. */
1180 if (dopt.binary_upgrade)
1182
1183 /* Collect sequence information. */
1185
1186 /* Lastly, create dummy objects to represent the section boundaries */
1188
1189 /* Get pointers to all the known DumpableObjects */
1191
1192 /*
1193 * Add dummy dependencies to enforce the dump section ordering.
1194 */
1196
1197 /*
1198 * Sort the objects into a safe dump order (no forward references).
1199 *
1200 * We rely on dependency information to help us determine a safe order, so
1201 * the initial sort is mostly for cosmetic purposes: we sort by name to
1202 * ensure that logically identical schemas will dump identically.
1203 */
1205
1207 boundaryObjs[0].dumpId, boundaryObjs[1].dumpId);
1208
1209 /*
1210 * Create archive TOC entries for all the objects to be dumped, in a safe
1211 * order.
1212 */
1213
1214 /*
1215 * First the special entries for ENCODING, STDSTRINGS, and SEARCHPATH.
1216 */
1220
1221 /* The database items are always next, unless we don't want them at all */
1222 if (dopt.outputCreateDB)
1224
1225 /* Now the rearrangeable objects. */
1226 for (i = 0; i < numObjs; i++)
1228
1229 /*
1230 * Set up options info to ensure we dump what we want.
1231 */
1232 ropt = NewRestoreOptions();
1233 ropt->filename = filename;
1234
1235 /* if you change this list, see dumpOptionsFromRestoreOptions */
1236 ropt->cparams.dbname = dopt.cparams.dbname ? pg_strdup(dopt.cparams.dbname) : NULL;
1237 ropt->cparams.pgport = dopt.cparams.pgport ? pg_strdup(dopt.cparams.pgport) : NULL;
1238 ropt->cparams.pghost = dopt.cparams.pghost ? pg_strdup(dopt.cparams.pghost) : NULL;
1241 ropt->dropSchema = dopt.outputClean;
1242 ropt->dumpData = dopt.dumpData;
1243 ropt->dumpSchema = dopt.dumpSchema;
1244 ropt->dumpStatistics = dopt.dumpStatistics;
1245 ropt->if_exists = dopt.if_exists;
1246 ropt->column_inserts = dopt.column_inserts;
1247 ropt->dumpSections = dopt.dumpSections;
1248 ropt->aclsSkip = dopt.aclsSkip;
1249 ropt->superuser = dopt.outputSuperuser;
1250 ropt->createDB = dopt.outputCreateDB;
1251 ropt->noOwner = dopt.outputNoOwner;
1252 ropt->noTableAm = dopt.outputNoTableAm;
1253 ropt->noTablespace = dopt.outputNoTablespaces;
1255 ropt->use_setsessauth = dopt.use_setsessauth;
1257 ropt->dump_inserts = dopt.dump_inserts;
1258 ropt->no_comments = dopt.no_comments;
1259 ropt->no_policies = dopt.no_policies;
1260 ropt->no_publications = dopt.no_publications;
1263 ropt->lockWaitTimeout = dopt.lockWaitTimeout;
1266 ropt->sequence_data = dopt.sequence_data;
1267 ropt->binary_upgrade = dopt.binary_upgrade;
1268 ropt->restrict_key = dopt.restrict_key ? pg_strdup(dopt.restrict_key) : NULL;
1269
1270 ropt->compression_spec = compression_spec;
1271
1272 ropt->suppressDumpWarnings = true; /* We've already shown them */
1273
1274 SetArchiveOptions(fout, &dopt, ropt);
1275
1276 /* Mark which entries should be output */
1278
1279 /*
1280 * The archive's TOC entries are now marked as to which ones will actually
1281 * be output, so we can set up their dependency lists properly. This isn't
1282 * necessary for plain-text output, though.
1283 */
1284 if (!plainText)
1286
1287 /*
1288 * And finally we can do the actual output.
1289 *
1290 * Note: for non-plain-text output formats, the output file is written
1291 * inside CloseArchive(). This is, um, bizarre; but not worth changing
1292 * right now.
1293 */
1294 if (plainText)
1295 RestoreArchive(fout, false);
1296
1298
1299 exit_nicely(0);
1300}
1301
1302
/*
 * help
 *		Print pg_dump's command-line usage text on stdout.
 *
 * Every user-visible string is wrapped in _() so it is a gettext msgid;
 * do not reword or merge these strings without updating the translation
 * catalogs.  Options are grouped into general, output-content, and
 * connection sections, roughly alphabetized within each group.
 */
static void
help(const char *progname)
{
	printf(_("%s exports a PostgreSQL database as an SQL script or to other formats.\n\n"), progname);
	printf(_("Usage:\n"));
	printf(_("  %s [OPTION]... [DBNAME]\n"), progname);

	printf(_("\nGeneral options:\n"));
	printf(_("  -f, --file=FILENAME          output file or directory name\n"));
	printf(_("  -F, --format=c|d|t|p         output file format (custom, directory, tar,\n"
			 "                               plain text (default))\n"));
	printf(_("  -j, --jobs=NUM               use this many parallel jobs to dump\n"));
	printf(_("  -v, --verbose                verbose mode\n"));
	printf(_("  -V, --version                output version information, then exit\n"));
	printf(_("  -Z, --compress=METHOD[:DETAIL]\n"
			 "                               compress as specified\n"));
	printf(_("  --lock-wait-timeout=TIMEOUT  fail after waiting TIMEOUT for a table lock\n"));
	printf(_("  --no-sync                    do not wait for changes to be written safely to disk\n"));
	printf(_("  --sync-method=METHOD         set method for syncing files to disk\n"));
	printf(_("  -?, --help                   show this help, then exit\n"));

	printf(_("\nOptions controlling the output content:\n"));
	printf(_("  -a, --data-only              dump only the data, not the schema or statistics\n"));
	printf(_("  -b, --large-objects          include large objects in dump\n"));
	printf(_("  --blobs                      (same as --large-objects, deprecated)\n"));
	printf(_("  -B, --no-large-objects       exclude large objects in dump\n"));
	printf(_("  --no-blobs                   (same as --no-large-objects, deprecated)\n"));
	printf(_("  -c, --clean                  clean (drop) database objects before recreating\n"));
	printf(_("  -C, --create                 include commands to create database in dump\n"));
	printf(_("  -e, --extension=PATTERN      dump the specified extension(s) only\n"));
	printf(_("  -E, --encoding=ENCODING      dump the data in encoding ENCODING\n"));
	printf(_("  -n, --schema=PATTERN         dump the specified schema(s) only\n"));
	printf(_("  -N, --exclude-schema=PATTERN do NOT dump the specified schema(s)\n"));
	printf(_("  -O, --no-owner               skip restoration of object ownership in\n"
			 "                               plain-text format\n"));
	printf(_("  -s, --schema-only            dump only the schema, no data or statistics\n"));
	printf(_("  -S, --superuser=NAME         superuser user name to use in plain-text format\n"));
	printf(_("  -t, --table=PATTERN          dump only the specified table(s)\n"));
	printf(_("  -T, --exclude-table=PATTERN  do NOT dump the specified table(s)\n"));
	printf(_("  -x, --no-privileges          do not dump privileges (grant/revoke)\n"));
	printf(_("  --binary-upgrade             for use by upgrade utilities only\n"));
	printf(_("  --column-inserts             dump data as INSERT commands with column names\n"));
	printf(_("  --disable-dollar-quoting     disable dollar quoting, use SQL standard quoting\n"));
	printf(_("  --disable-triggers           disable triggers during data-only restore\n"));
	printf(_("  --enable-row-security        enable row security (dump only content user has\n"
			 "                               access to)\n"));
	printf(_("  --exclude-extension=PATTERN  do NOT dump the specified extension(s)\n"));
	printf(_("  --exclude-table-and-children=PATTERN\n"
			 "                               do NOT dump the specified table(s), including\n"
			 "                               child and partition tables\n"));
	printf(_("  --exclude-table-data=PATTERN do NOT dump data for the specified table(s)\n"));
	printf(_("  --exclude-table-data-and-children=PATTERN\n"
			 "                               do NOT dump data for the specified table(s),\n"
			 "                               including child and partition tables\n"));
	printf(_("  --extra-float-digits=NUM     override default setting for extra_float_digits\n"));
	printf(_("  --filter=FILENAME            include or exclude objects and data from dump\n"
			 "                               based on expressions in FILENAME\n"));
	printf(_("  --if-exists                  use IF EXISTS when dropping objects\n"));
	printf(_("  --include-foreign-data=PATTERN\n"
			 "                               include data of foreign tables on foreign\n"
			 "                               servers matching PATTERN\n"));
	printf(_("  --inserts                    dump data as INSERT commands, rather than COPY\n"));
	printf(_("  --load-via-partition-root    load partitions via the root table\n"));
	printf(_("  --no-comments                do not dump comment commands\n"));
	printf(_("  --no-data                    do not dump data\n"));
	printf(_("  --no-policies                do not dump row security policies\n"));
	printf(_("  --no-publications            do not dump publications\n"));
	printf(_("  --no-schema                  do not dump schema\n"));
	printf(_("  --no-security-labels         do not dump security label assignments\n"));
	printf(_("  --no-statistics              do not dump statistics\n"));
	printf(_("  --no-subscriptions           do not dump subscriptions\n"));
	printf(_("  --no-table-access-method     do not dump table access methods\n"));
	printf(_("  --no-tablespaces             do not dump tablespace assignments\n"));
	printf(_("  --no-toast-compression       do not dump TOAST compression methods\n"));
	printf(_("  --no-unlogged-table-data     do not dump unlogged table data\n"));
	printf(_("  --on-conflict-do-nothing     add ON CONFLICT DO NOTHING to INSERT commands\n"));
	printf(_("  --quote-all-identifiers      quote all identifiers, even if not key words\n"));
	printf(_("  --restrict-key=RESTRICT_KEY  use provided string as psql \\restrict key\n"));
	printf(_("  --rows-per-insert=NROWS      number of rows per INSERT; implies --inserts\n"));
	printf(_("  --section=SECTION            dump named section (pre-data, data, or post-data)\n"));
	printf(_("  --sequence-data              include sequence data in dump\n"));
	printf(_("  --serializable-deferrable    wait until the dump can run without anomalies\n"));
	printf(_("  --snapshot=SNAPSHOT          use given snapshot for the dump\n"));
	printf(_("  --statistics                 dump the statistics\n"));
	printf(_("  --statistics-only            dump only the statistics, not schema or data\n"));
	printf(_("  --strict-names               require table and/or schema include patterns to\n"
			 "                               match at least one entity each\n"));
	printf(_("  --table-and-children=PATTERN dump only the specified table(s), including\n"
			 "                               child and partition tables\n"));
	printf(_("  --use-set-session-authorization\n"
			 "                               use SET SESSION AUTHORIZATION commands instead of\n"
			 "                               ALTER OWNER commands to set ownership\n"));

	printf(_("\nConnection options:\n"));
	printf(_("  -d, --dbname=DBNAME      database to dump\n"));
	printf(_("  -h, --host=HOSTNAME      database server host or socket directory\n"));
	printf(_("  -p, --port=PORT          database server port number\n"));
	printf(_("  -U, --username=NAME      connect as specified database user\n"));
	printf(_("  -w, --no-password        never prompt for password\n"));
	printf(_("  -W, --password           force password prompt (should happen automatically)\n"));
	printf(_("  --role=ROLENAME          do SET ROLE before dump\n"));

	printf(_("\nIf no database name is supplied, then the PGDATABASE environment\n"
			 "variable value is used.\n\n"));
	printf(_("Report bugs to <%s>.\n"), PACKAGE_BUGREPORT);
	printf(_("%s home page: <%s>\n"), PACKAGE_NAME, PACKAGE_URL);
}
1410
/*
 * setup_connection
 *		Configure a freshly opened session the way pg_dump needs it:
 *		client encoding, standard_conforming_strings, optional SET ROLE,
 *		portable datestyle/intervalstyle, float output precision, disabled
 *		timeouts, row-security mode, and finally the transaction-snapshot
 *		mode transaction (with a shared snapshot for parallel workers).
 *
 * NOTE(review): several lines of this function — including the first line
 * of the signature — were lost when this chunk was extracted; the gaps are
 * marked below and the surviving code is preserved byte-for-byte.
 */
static void
			const char *dumpsnapshot, char *use_role)
{
	DumpOptions *dopt = AH->dopt;
	PGconn	   *conn = GetConnection(AH);

	/* [extraction gap] */

	/*
	 * Set the client encoding if requested.
	 */
	if (dumpencoding)
	{
		/* [extraction gap: the encoding-setting call that can fail is missing] */
			pg_fatal("invalid client encoding \"%s\" specified",
					 dumpencoding);
	}

	/*
	 * Force standard_conforming_strings on, just in case we are dumping from
	 * an old server that has it disabled.  Without this, literals in views,
	 * expressions, etc, would be incorrect for modern servers.
	 */
	ExecuteSqlStatement(AH, "SET standard_conforming_strings = on");

	/*
	 * And reflect that to AH->std_strings.  You might think that we should
	 * just delete that variable and the code that checks it, but that would
	 * be problematic for pg_restore, which at least for now should still cope
	 * with archives containing the other setting (cf. processStdStringsEntry
	 * in pg_backup_archiver.c).
	 */
	AH->std_strings = true;

	/*
	 * Get the active encoding, so we know how to escape strings.
	 */
	/* [extraction gap: the statements recording the encoding are missing] */

	/*
	 * Set the role if requested.  In a parallel dump worker, we'll be passed
	 * use_role == NULL, but AH->use_role is already set (if user specified it
	 * originally) and we should use that.
	 */
	if (!use_role && AH->use_role)
		use_role = AH->use_role;

	/* Set the role if requested */
	if (use_role)
	{
		/* [extraction gap: query-buffer creation is missing] */

		appendPQExpBuffer(query, "SET ROLE %s", fmtId(use_role));
		ExecuteSqlStatement(AH, query->data);
		destroyPQExpBuffer(query);

		/* save it for possible later use by parallel workers */
		if (!AH->use_role)
			AH->use_role = pg_strdup(use_role);
	}

	/* Set the datestyle to ISO to ensure the dump's portability */
	ExecuteSqlStatement(AH, "SET DATESTYLE = ISO");

	/* Likewise, avoid using sql_standard intervalstyle */
	ExecuteSqlStatement(AH, "SET INTERVALSTYLE = POSTGRES");

	/*
	 * Use an explicitly specified extra_float_digits if it has been provided.
	 * Otherwise, set extra_float_digits so that we can dump float data
	 * exactly (given correctly implemented float I/O code, anyway).
	 */
	/* [extraction gap: the controlling if-condition is missing] */
	{
		/* [extraction gap: buffer creation is missing] */

		appendPQExpBuffer(q, "SET extra_float_digits TO %d",
		/* [extraction gap: the value argument and cleanup are missing] */
		ExecuteSqlStatement(AH, q->data);
	}
	else
		ExecuteSqlStatement(AH, "SET extra_float_digits TO 3");

	/*
	 * Disable synchronized scanning, to prevent unpredictable changes in row
	 * ordering across a dump and reload.
	 */
	ExecuteSqlStatement(AH, "SET synchronize_seqscans TO off");

	/*
	 * Disable timeouts if supported.
	 */
	ExecuteSqlStatement(AH, "SET statement_timeout = 0");
	if (AH->remoteVersion >= 90300)
		ExecuteSqlStatement(AH, "SET lock_timeout = 0");
	if (AH->remoteVersion >= 90600)
		ExecuteSqlStatement(AH, "SET idle_in_transaction_session_timeout = 0");
	if (AH->remoteVersion >= 170000)
		ExecuteSqlStatement(AH, "SET transaction_timeout = 0");

	/*
	 * Quote all identifiers, if requested.
	 */
	/* [extraction gap: the controlling if-condition is missing] */
		ExecuteSqlStatement(AH, "SET quote_all_identifiers = true");

	/*
	 * Adjust row-security mode, if supported.
	 */
	if (AH->remoteVersion >= 90500)
	{
		if (dopt->enable_row_security)
			ExecuteSqlStatement(AH, "SET row_security = on");
		else
			ExecuteSqlStatement(AH, "SET row_security = off");
	}

	/*
	 * For security reasons, we restrict the expansion of non-system views and
	 * access to foreign tables during the pg_dump process. This restriction
	 * is adjusted when dumping foreign table data.
	 */
	set_restrict_relation_kind(AH, "view, foreign-table");

	/*
	 * Initialize prepared-query state to "nothing prepared".  We do this here
	 * so that a parallel dump worker will have its own state.
	 */
	/* [extraction gap: the initialization statement is missing] */

	/*
	 * Start transaction-snapshot mode transaction to dump consistent data.
	 */
	ExecuteSqlStatement(AH, "BEGIN");

	/*
	 * To support the combination of serializable_deferrable with the jobs
	 * option we use REPEATABLE READ for the worker connections that are
	 * passed a snapshot.  As long as the snapshot is acquired in a
	 * SERIALIZABLE, READ ONLY, DEFERRABLE transaction, its use within a
	 * REPEATABLE READ transaction provides the appropriate integrity
	 * guarantees.  This is a kluge, but safe for back-patching.
	 */
	if (dopt->serializable_deferrable && AH->sync_snapshot_id == NULL)
		/* [extraction gap: the ExecuteSqlStatement call opener is missing] */
							"SET TRANSACTION ISOLATION LEVEL "
							"SERIALIZABLE, READ ONLY, DEFERRABLE");
	else
		/* [extraction gap: the ExecuteSqlStatement call opener is missing] */
							"SET TRANSACTION ISOLATION LEVEL "
							"REPEATABLE READ, READ ONLY");

	/*
	 * If user specified a snapshot to use, select that.  In a parallel dump
	 * worker, we'll be passed dumpsnapshot == NULL, but AH->sync_snapshot_id
	 * is already set (if the server can handle it) and we should use that.
	 */
	if (dumpsnapshot)
		/* [extraction gap: the assignment saving the snapshot id is missing] */

	if (AH->sync_snapshot_id)
	{
		/* [extraction gap: query-buffer creation is missing] */

		appendPQExpBufferStr(query, "SET TRANSACTION SNAPSHOT ");
		/* [extraction gap: the literal-append call is missing] */
		ExecuteSqlStatement(AH, query->data);
		destroyPQExpBuffer(query);
	}
	else if (AH->numWorkers > 1)
	{
		if (AH->isStandby && AH->remoteVersion < 100000)
			pg_fatal("parallel dumps from standby servers are not supported by this server version");
		/* [extraction gap: the snapshot-export call is missing] */
	}
}
1590
/*
 * Set up connection for a parallel worker process.
 *
 * NOTE(review): the function's name line and the setup_connection() call
 * opener were lost in extraction; only the trailing NULL arguments survive.
 */
static void
{
	/*
	 * We want to re-select all the same values the leader connection is
	 * using.  We'll have inherited directly-usable values in
	 * AH->sync_snapshot_id and AH->use_role, but we need to translate the
	 * inherited encoding value back to a string to pass to setup_connection.
	 */
	/* [extraction gap: the call to setup_connection(...) begins here] */
					 NULL,
					 NULL);
}
1606
/*
 * Export the current transaction's snapshot on the server and return its
 * identifier as a freshly pg_strdup'd string; the caller owns (and must
 * eventually free) the result.  Used to share one snapshot among parallel
 * worker connections.
 *
 * NOTE(review): the signature's name line was lost in extraction; the
 * parameter is evidently an Archive *fout.
 */
static char *
{
	char	   *query = "SELECT pg_catalog.pg_export_snapshot()";
	char	   *result;
	PGresult   *res;

	res = ExecuteSqlQueryForSingleRow(fout, query);
	result = pg_strdup(PQgetvalue(res, 0, 0));
	PQclear(res);

	return result;
}
1620
/*
 * Translate the user-supplied --format string (single letter or full word,
 * case-insensitive) into an ArchiveFormat code, or die with pg_fatal for an
 * unrecognized value.
 *
 * NOTE(review): the name line, local declarations, and the per-branch
 * assignment statements were lost in extraction; only the comparison
 * chain survives.
 */
static ArchiveFormat
{
	/* [extraction gap: local declarations / mode initialization missing] */

	if (pg_strcasecmp(format, "a") == 0 || pg_strcasecmp(format, "append") == 0)
	{
		/* This is used by pg_dumpall, and is not documented */
		/* [extraction gap: format/mode assignments missing] */
	}
	else if (pg_strcasecmp(format, "c") == 0)
		/* [extraction gap: assignment missing] */
	else if (pg_strcasecmp(format, "custom") == 0)
		/* [extraction gap: assignment missing] */
	else if (pg_strcasecmp(format, "d") == 0)
		/* [extraction gap: assignment missing] */
	else if (pg_strcasecmp(format, "directory") == 0)
		/* [extraction gap: assignment missing] */
	else if (pg_strcasecmp(format, "p") == 0)
		/* [extraction gap: assignment missing] */
	else if (pg_strcasecmp(format, "plain") == 0)
		/* [extraction gap: assignment missing] */
	else if (pg_strcasecmp(format, "t") == 0)
		/* [extraction gap: assignment missing] */
	else if (pg_strcasecmp(format, "tar") == 0)
		/* [extraction gap: assignment missing] */
	else
		pg_fatal("invalid output format \"%s\" specified", format);
	return archiveFormat;
}
1654
/*
 * Find the OIDs of all schemas matching the given list of patterns,
 * and append them to the given OID list.
 *
 * With strict_names, each pattern must match at least one schema or we die.
 *
 * NOTE(review): the signature's first line and a few statement lines were
 * lost in extraction; gaps are marked below.
 */
static void
							SimpleOidList *oids,
							bool strict_names)
{
	PQExpBuffer query;
	PGresult   *res;
	/* [extraction gap: list-cell declaration missing] */
	int			i;

	if (patterns->head == NULL)
		return;					/* nothing to do */

	query = createPQExpBuffer();

	/*
	 * The loop below runs multiple SELECTs, which might sometimes result in
	 * duplicate entries in the OID list, but we don't care.
	 */

	for (cell = patterns->head; cell; cell = cell->next)
	{
		/* [extraction gap: per-iteration buffer declaration missing] */
		int			dotcnt;

		/* [extraction gap: the query-append call opener is missing] */
							 "SELECT oid FROM pg_catalog.pg_namespace n\n");
		processSQLNamePattern(GetConnection(fout), query, cell->val, false,
							  false, NULL, "n.nspname", NULL, NULL, &dbbuf,
							  &dotcnt);
		if (dotcnt > 1)
			pg_fatal("improper qualified name (too many dotted names): %s",
					 cell->val);
		else if (dotcnt == 1)
			/* [extraction gap: cross-database check call missing] */

		res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
		if (strict_names && PQntuples(res) == 0)
			pg_fatal("no matching schemas were found for pattern \"%s\"", cell->val);

		for (i = 0; i < PQntuples(res); i++)
		{
			simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
		}

		PQclear(res);
		resetPQExpBuffer(query);
	}

	destroyPQExpBuffer(query);
}
1713
/*
 * Find the OIDs of all extensions matching the given list of patterns,
 * and append them to the given OID list.
 *
 * Extension names are not schema-qualified, so any dot in the pattern is an
 * error (dotcnt > 0).  With strict_names, each pattern must match at least
 * one extension or we die.
 *
 * NOTE(review): the signature's first line and a few statement lines were
 * lost in extraction; gaps are marked below.
 */
static void
							   SimpleOidList *oids,
							   bool strict_names)
{
	PQExpBuffer query;
	PGresult   *res;
	/* [extraction gap: list-cell declaration missing] */
	int			i;

	if (patterns->head == NULL)
		return;					/* nothing to do */

	query = createPQExpBuffer();

	/*
	 * The loop below runs multiple SELECTs, which might sometimes result in
	 * duplicate entries in the OID list, but we don't care.
	 */
	for (cell = patterns->head; cell; cell = cell->next)
	{
		int			dotcnt;

		/* [extraction gap: the query-append call opener is missing] */
							 "SELECT oid FROM pg_catalog.pg_extension e\n");
		processSQLNamePattern(GetConnection(fout), query, cell->val, false,
							  false, NULL, "e.extname", NULL, NULL, NULL,
							  &dotcnt);
		if (dotcnt > 0)
			pg_fatal("improper qualified name (too many dotted names): %s",
					 cell->val);

		res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
		if (strict_names && PQntuples(res) == 0)
			pg_fatal("no matching extensions were found for pattern \"%s\"", cell->val);

		for (i = 0; i < PQntuples(res); i++)
		{
			simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
		}

		PQclear(res);
		resetPQExpBuffer(query);
	}

	destroyPQExpBuffer(query);
}
1766
/*
 * Find the OIDs of all foreign servers matching the given list of patterns,
 * and append them to the given OID list.
 *
 * Unlike the schema/extension variants, a non-matching pattern is always
 * fatal here (there is no strict_names parameter); server names cannot be
 * qualified, so any dot is an error.
 *
 * NOTE(review): the signature's first line and a few statement lines were
 * lost in extraction; gaps are marked below.
 */
static void
									SimpleOidList *oids)
{
	PQExpBuffer query;
	PGresult   *res;
	/* [extraction gap: list-cell declaration missing] */
	int			i;

	if (patterns->head == NULL)
		return;					/* nothing to do */

	query = createPQExpBuffer();

	/*
	 * The loop below runs multiple SELECTs, which might sometimes result in
	 * duplicate entries in the OID list, but we don't care.
	 */

	for (cell = patterns->head; cell; cell = cell->next)
	{
		int			dotcnt;

		/* [extraction gap: the query-append call opener is missing] */
							 "SELECT oid FROM pg_catalog.pg_foreign_server s\n");
		processSQLNamePattern(GetConnection(fout), query, cell->val, false,
							  false, NULL, "s.srvname", NULL, NULL, NULL,
							  &dotcnt);
		if (dotcnt > 0)
			pg_fatal("improper qualified name (too many dotted names): %s",
					 cell->val);

		res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
		if (PQntuples(res) == 0)
			pg_fatal("no matching foreign servers were found for pattern \"%s\"", cell->val);

		for (i = 0; i < PQntuples(res); i++)
			simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));

		PQclear(res);
		resetPQExpBuffer(query);
	}

	destroyPQExpBuffer(query);
}
1817
/*
 * Find the OIDs of all tables matching the given list of patterns,
 * and append them to the given OID list. See also expand_dbname_patterns()
 * in pg_dumpall.c
 *
 * When requested, the match set is closed over inheritance/partitioning by
 * wrapping the base query in a recursive CTE over pg_inherits.
 *
 * NOTE(review): the signature lines and several statement lines were lost
 * in extraction; gaps are marked below.
 */
static void
{
	PQExpBuffer query;
	PGresult   *res;
	/* [extraction gap: list-cell declaration missing] */
	int			i;

	if (patterns->head == NULL)
		return;					/* nothing to do */

	query = createPQExpBuffer();

	/*
	 * This might sometimes result in duplicate entries in the OID list, but
	 * we don't care.
	 */

	for (cell = patterns->head; cell; cell = cell->next)
	{
		/* [extraction gap: per-iteration buffer declaration missing] */
		int			dotcnt;

		/*
		 * Query must remain ABSOLUTELY devoid of unqualified names.  This
		 * would be unnecessary given a pg_table_is_visible() variant taking a
		 * search_path argument.
		 *
		 * For with_child_tables, we start with the basic query's results and
		 * recursively search the inheritance tree to add child tables.
		 */
		/* [extraction gap: the controlling if (with_child_tables) is missing] */
		{
			appendPQExpBufferStr(query, "WITH RECURSIVE partition_tree (relid) AS (\n");
		}

		appendPQExpBuffer(query,
						  "SELECT c.oid"
						  "\nFROM pg_catalog.pg_class c"
						  "\n     LEFT JOIN pg_catalog.pg_namespace n"
						  "\n     ON n.oid OPERATOR(pg_catalog.=) c.relnamespace"
						  "\nWHERE c.relkind OPERATOR(pg_catalog.=) ANY"
						  "\n    (array['%c', '%c', '%c', '%c', '%c', '%c'])\n",
		/* [extraction gap: the RELKIND_* argument lines are missing] */
		processSQLNamePattern(GetConnection(fout), query, cell->val, true,
							  false, "n.nspname", "c.relname", NULL,
							  "pg_catalog.pg_table_is_visible(c.oid)", &dbbuf,
							  &dotcnt);
		if (dotcnt > 2)
			pg_fatal("improper relation name (too many dotted names): %s",
					 cell->val);
		else if (dotcnt == 2)
			/* [extraction gap: cross-database check call missing] */

		/* [extraction gap: the controlling if (with_child_tables) is missing] */
		{
			appendPQExpBufferStr(query, "UNION"
								 "\nSELECT i.inhrelid"
								 "\nFROM partition_tree p"
								 "\n     JOIN pg_catalog.pg_inherits i"
								 "\n     ON p.relid OPERATOR(pg_catalog.=) i.inhparent"
								 "\n)"
								 "\nSELECT relid FROM partition_tree");
		}

		ExecuteSqlStatement(fout, "RESET search_path");
		res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
		/* [extraction gap: the search_path re-securing call is missing] */
		if (strict_names && PQntuples(res) == 0)
			pg_fatal("no matching tables were found for pattern \"%s\"", cell->val);

		for (i = 0; i < PQntuples(res); i++)
		{
			simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
		}

		PQclear(res);
		resetPQExpBuffer(query);
	}

	destroyPQExpBuffer(query);
}
1912
1913/*
1914 * Verifies that the connected database name matches the given database name,
1915 * and if not, dies with an error about the given pattern.
1916 *
1917 * The 'dbname' argument should be a literal name parsed from 'pattern'.
1918 */
1919static void
1920prohibit_crossdb_refs(PGconn *conn, const char *dbname, const char *pattern)
1921{
1922 const char *db;
1923
1924 db = PQdb(conn);
1925 if (db == NULL)
1926 pg_fatal("You are currently not connected to a database.");
1927
1928 if (strcmp(db, dbname) != 0)
1929 pg_fatal("cross-database references are not implemented: %s",
1930 pattern);
1931}
1932
/*
 * checkExtensionMembership
 *		Determine whether object is an extension member, and if so,
 *		record an appropriate dependency and set the object's dump flag.
 *
 * It's important to call this for each object that could be an extension
 * member.  Generally, we integrate this with determining the object's
 * to-be-dumped-ness, since extension membership overrides other rules for that.
 *
 * Returns true if object is an extension member, else false.
 *
 * NOTE(review): the name line and the lookup of the owning extension were
 * lost in extraction; 'ext' below comes from that missing lookup.
 */
static bool
{
	/* [extraction gap: owning-extension lookup missing] */

	if (ext == NULL)
		return false;

	dobj->ext_member = true;

	/* Record dependency so that getDependencies needn't deal with that */
	addObjectDependency(dobj, ext->dobj.dumpId);

	/*
	 * In 9.6 and above, mark the member object to have any non-initial ACLs
	 * dumped.  (Any initial ACLs will be removed later, using data from
	 * pg_init_privs, so that we'll dump only the delta from the extension's
	 * initial setup.)
	 *
	 * Prior to 9.6, we do not include any extension member components.
	 *
	 * In binary upgrades, we still dump all components of the members
	 * individually, since the idea is to exactly reproduce the database
	 * contents rather than replace the extension contents with something
	 * different.
	 *
	 * Note: it might be interesting someday to implement storage and delta
	 * dumping of extension members' RLS policies and/or security labels.
	 * However there is a pitfall for RLS policies: trying to dump them
	 * requires getting a lock on their tables, and the calling user might not
	 * have privileges for that.  We need no lock to examine a table's ACLs,
	 * so the current feature doesn't have a problem of that sort.
	 */
	if (fout->dopt->binary_upgrade)
		dobj->dump = ext->dobj.dump;
	else
	{
		if (fout->remoteVersion < 90600)
			dobj->dump = DUMP_COMPONENT_NONE;
		else
			dobj->dump = ext->dobj.dump_contains & (DUMP_COMPONENT_ACL);
	}

	return true;
}
1989
/*
 * selectDumpableNamespace: policy-setting subroutine
 *		Mark a namespace as to be dumped or not
 *
 * NOTE(review): the name line and portions of the include/exclude
 * conditionals were lost in extraction; gaps are marked below.
 */
static void
{
	/*
	 * DUMP_COMPONENT_DEFINITION typically implies a CREATE SCHEMA statement
	 * and (for --clean) a DROP SCHEMA statement.  (In the absence of
	 * DUMP_COMPONENT_DEFINITION, this value is irrelevant.)
	 */
	nsinfo->create = true;

	/*
	 * If specific tables are being dumped, do not dump any complete
	 * namespaces.  If specific namespaces are being dumped, dump just those
	 * namespaces.  Otherwise, dump all non-system namespaces.
	 */
	/* [extraction gap: the table-include-list if-condition is missing] */
		nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
	else if (schema_include_oids.head != NULL)
		nsinfo->dobj.dump_contains = nsinfo->dobj.dump =
			/* [extraction gap: the membership-test call opener is missing] */
								   nsinfo->dobj.catId.oid) ?
			/* [extraction gap: the ternary result values are missing] */
	else if (fout->remoteVersion >= 90600 &&
			 strcmp(nsinfo->dobj.name, "pg_catalog") == 0)
	{
		/*
		 * In 9.6 and above, we dump out any ACLs defined in pg_catalog, if
		 * they are interesting (and not the original ACLs which were set at
		 * initdb time, see pg_init_privs).
		 */
		nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
	}
	else if (strncmp(nsinfo->dobj.name, "pg_", 3) == 0 ||
			 strcmp(nsinfo->dobj.name, "information_schema") == 0)
	{
		/* Other system schemas don't get dumped */
		nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
	}
	else if (strcmp(nsinfo->dobj.name, "public") == 0)
	{
		/*
		 * The public schema is a strange beast that sits in a sort of
		 * no-mans-land between being a system object and a user object.
		 * CREATE SCHEMA would fail, so its DUMP_COMPONENT_DEFINITION is just
		 * a comment and an indication of ownership.  If the owner is the
		 * default, omit that superfluous DUMP_COMPONENT_DEFINITION.  Before
		 * v15, the default owner was BOOTSTRAP_SUPERUSERID.
		 */
		nsinfo->create = false;
		nsinfo->dobj.dump = DUMP_COMPONENT_ALL;
		if (nsinfo->nspowner == ROLE_PG_DATABASE_OWNER)
			nsinfo->dobj.dump &= ~DUMP_COMPONENT_DEFINITION;
		nsinfo->dobj.dump_contains = DUMP_COMPONENT_ALL;

		/*
		 * Also, make like it has a comment even if it doesn't; this is so
		 * that we'll emit a command to drop the comment, if appropriate.
		 * (Without this, we'd not call dumpCommentExtended for it.)
		 */
		nsinfo->dobj.components |= DUMP_COMPONENT_COMMENT;
	}
	else
		nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ALL;

	/*
	 * In any case, a namespace can be excluded by an exclusion switch
	 */
	if (nsinfo->dobj.dump_contains &&
		/* [extraction gap: the exclusion membership-test opener is missing] */
		nsinfo->dobj.catId.oid))
		nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;

	/*
	 * If the schema belongs to an extension, allow extension membership to
	 * override the dump decision for the schema itself.  However, this does
	 * not change dump_contains, so this won't change what we do with objects
	 * within the schema.  (If they belong to the extension, they'll get
	 * suppressed by it, otherwise not.)
	 */
	/* [extraction gap: the checkExtensionMembership call is missing] */
}
2075
/*
 * selectDumpableTable: policy-setting subroutine
 *		Mark a table as to be dumped or not
 *
 * NOTE(review): the name line and parts of the include-list conditional
 * were lost in extraction; gaps are marked below.
 */
static void
{
	/* [extraction gap: the extension-membership if-condition is missing] */
		return;					/* extension membership overrides all else */

	/*
	 * If specific tables are being dumped, dump just those tables; else, dump
	 * according to the parent namespace's dump flag.
	 */
	/* [extraction gap: the include-list test and assignment opener are missing] */
								  tbinfo->dobj.catId.oid) ?
		/* [extraction gap: the ternary result values are missing] */
	else
		tbinfo->dobj.dump = tbinfo->dobj.namespace->dobj.dump_contains;

	/*
	 * In any case, a table can be excluded by an exclusion switch
	 */
	if (tbinfo->dobj.dump &&
		/* [extraction gap: the exclusion membership-test opener is missing] */
		tbinfo->dobj.catId.oid))
		tbinfo->dobj.dump = DUMP_COMPONENT_NONE;
}
2105
/*
 * selectDumpableType: policy-setting subroutine
 *		Mark a type as to be dumped or not
 *
 * If it's a table's rowtype or an autogenerated array type, we also apply a
 * special type code to facilitate sorting into the desired order.  (We don't
 * want to consider those to be ordinary types because that would bring tables
 * up into the datatype part of the dump order.)  We still set the object's
 * dump flag; that's not going to cause the dummy type to be dumped, but we
 * need it so that casts involving such types will be dumped correctly -- see
 * dumpCast.  This means the flag should be set the same as for the underlying
 * object (the table or base type).
 *
 * NOTE(review): the name line and the extension-membership check were lost
 * in extraction; gaps are marked below.
 */
static void
{
	/* skip complex types, except for standalone composite types */
	if (OidIsValid(tyinfo->typrelid) &&
		tyinfo->typrelkind != RELKIND_COMPOSITE_TYPE)
	{
		TableInfo  *tytable = findTableByOid(tyinfo->typrelid);

		tyinfo->dobj.objType = DO_DUMMY_TYPE;
		if (tytable != NULL)
			tyinfo->dobj.dump = tytable->dobj.dump;
		else
			tyinfo->dobj.dump = DUMP_COMPONENT_NONE;
		return;
	}

	/* skip auto-generated array and multirange types */
	if (tyinfo->isArray || tyinfo->isMultirange)
	{
		tyinfo->dobj.objType = DO_DUMMY_TYPE;

		/*
		 * Fall through to set the dump flag; we assume that the subsequent
		 * rules will do the same thing as they would for the array's base
		 * type or multirange's range type.  (We cannot reliably look up the
		 * base type here, since getTypes may not have processed it yet.)
		 */
	}

	/* [extraction gap: the extension-membership if-condition is missing] */
		return;					/* extension membership overrides all else */

	/* Dump based on if the contents of the namespace are being dumped */
	tyinfo->dobj.dump = tyinfo->dobj.namespace->dobj.dump_contains;
}
2155
2156/*
2157 * selectDumpableDefaultACL: policy-setting subroutine
2158 * Mark a default ACL as to be dumped or not
2159 *
2160 * For per-schema default ACLs, dump if the schema is to be dumped.
2161 * Otherwise dump if we are dumping "everything". Note that dumpSchema
2162 * and aclsSkip are checked separately.
2163 */
2164static void
2166{
2167 /* Default ACLs can't be extension members */
2168
2169 if (dinfo->dobj.namespace)
2170 /* default ACLs are considered part of the namespace */
2171 dinfo->dobj.dump = dinfo->dobj.namespace->dobj.dump_contains;
2172 else
2173 dinfo->dobj.dump = dopt->include_everything ?
2175}
2176
2177/*
2178 * selectDumpableCast: policy-setting subroutine
2179 * Mark a cast as to be dumped or not
2180 *
2181 * Casts do not belong to any particular namespace (since they haven't got
2182 * names), nor do they have identifiable owners. To distinguish user-defined
2183 * casts from built-in ones, we must resort to checking whether the cast's
2184 * OID is in the range reserved for initdb.
2185 */
2186static void
2188{
2189 if (checkExtensionMembership(&cast->dobj, fout))
2190 return; /* extension membership overrides all else */
2191
2192 /*
2193 * This would be DUMP_COMPONENT_ACL for from-initdb casts, but they do not
2194 * support ACLs currently.
2195 */
2196 if (cast->dobj.catId.oid <= g_last_builtin_oid)
2197 cast->dobj.dump = DUMP_COMPONENT_NONE;
2198 else
2199 cast->dobj.dump = fout->dopt->include_everything ?
2201}
2202
2203/*
2204 * selectDumpableProcLang: policy-setting subroutine
2205 * Mark a procedural language as to be dumped or not
2206 *
2207 * Procedural languages do not belong to any particular namespace. To
2208 * identify built-in languages, we must resort to checking whether the
2209 * language's OID is in the range reserved for initdb.
2210 */
2211static void
2213{
2214 if (checkExtensionMembership(&plang->dobj, fout))
2215 return; /* extension membership overrides all else */
2216
2217 /*
2218 * Only include procedural languages when we are dumping everything.
2219 *
2220 * For from-initdb procedural languages, only include ACLs, as we do for
2221 * the pg_catalog namespace. We need this because procedural languages do
2222 * not live in any namespace.
2223 */
2225 plang->dobj.dump = DUMP_COMPONENT_NONE;
2226 else
2227 {
2228 if (plang->dobj.catId.oid <= g_last_builtin_oid)
2229 plang->dobj.dump = fout->remoteVersion < 90600 ?
2231 else
2232 plang->dobj.dump = DUMP_COMPONENT_ALL;
2233 }
2234}
2235
2236/*
2237 * selectDumpableAccessMethod: policy-setting subroutine
2238 * Mark an access method as to be dumped or not
2239 *
2240 * Access methods do not belong to any particular namespace. To identify
2241 * built-in access methods, we must resort to checking whether the
2242 * method's OID is in the range reserved for initdb.
2243 */
2244static void
2246{
2247 /* see getAccessMethods() comment about v9.6. */
2248 if (fout->remoteVersion < 90600)
2249 {
2250 method->dobj.dump = DUMP_COMPONENT_NONE;
2251 return;
2252 }
2253
2254 if (checkExtensionMembership(&method->dobj, fout))
2255 return; /* extension membership overrides all else */
2256
2257 /*
2258 * This would be DUMP_COMPONENT_ACL for from-initdb access methods, but
2259 * they do not support ACLs currently.
2260 */
2261 if (method->dobj.catId.oid <= g_last_builtin_oid)
2262 method->dobj.dump = DUMP_COMPONENT_NONE;
2263 else
2264 method->dobj.dump = fout->dopt->include_everything ?
2266}
2267
2268/*
2269 * selectDumpableExtension: policy-setting subroutine
2270 * Mark an extension as to be dumped or not
2271 *
2272 * Built-in extensions should be skipped except for checking ACLs, since we
2273 * assume those will already be installed in the target database. We identify
2274 * such extensions by their having OIDs in the range reserved for initdb.
2275 * We dump all user-added extensions by default. No extensions are dumped
2276 * if include_everything is false (i.e., a --schema or --table switch was
2277 * given), except if --extension specifies a list of extensions to dump.
2278 */
2279static void
2281{
2282 /*
2283 * Use DUMP_COMPONENT_ACL for built-in extensions, to allow users to
2284 * change permissions on their member objects, if they wish to, and have
2285 * those changes preserved.
2286 */
2287 if (extinfo->dobj.catId.oid <= g_last_builtin_oid)
2288 extinfo->dobj.dump = extinfo->dobj.dump_contains = DUMP_COMPONENT_ACL;
2289 else
2290 {
2291 /* check if there is a list of extensions to dump */
2293 extinfo->dobj.dump = extinfo->dobj.dump_contains =
2295 extinfo->dobj.catId.oid) ?
2297 else
2298 extinfo->dobj.dump = extinfo->dobj.dump_contains =
2299 dopt->include_everything ?
2301
2302 /* check that the extension is not explicitly excluded */
2303 if (extinfo->dobj.dump &&
2305 extinfo->dobj.catId.oid))
2306 extinfo->dobj.dump = extinfo->dobj.dump_contains = DUMP_COMPONENT_NONE;
2307 }
2308}
2309
2310/*
2311 * selectDumpablePublicationObject: policy-setting subroutine
2312 * Mark a publication object as to be dumped or not
2313 *
2314 * A publication can have schemas and tables which have schemas, but those are
2315 * ignored in decision making, because publications are only dumped when we are
2316 * dumping everything.
2317 */
2318static void
2320{
2321 if (checkExtensionMembership(dobj, fout))
2322 return; /* extension membership overrides all else */
2323
2324 dobj->dump = fout->dopt->include_everything ?
2326}
2327
2328/*
2329 * selectDumpableStatisticsObject: policy-setting subroutine
2330 * Mark an extended statistics object as to be dumped or not
2331 *
2332 * We dump an extended statistics object if the schema it's in and the table
2333 * it's for are being dumped. (This'll need more thought if statistics
2334 * objects ever support cross-table stats.)
2335 */
2336static void
2338{
2339 if (checkExtensionMembership(&sobj->dobj, fout))
2340 return; /* extension membership overrides all else */
2341
2342 sobj->dobj.dump = sobj->dobj.namespace->dobj.dump_contains;
2343 if (sobj->stattable == NULL ||
2344 !(sobj->stattable->dobj.dump & DUMP_COMPONENT_DEFINITION))
2345 sobj->dobj.dump = DUMP_COMPONENT_NONE;
2346}
2347
2348/*
2349 * selectDumpableObject: policy-setting subroutine
2350 * Mark a generic dumpable object as to be dumped or not
2351 *
2352 * Use this only for object types without a special-case routine above.
2353 */
2354static void
2356{
2357 if (checkExtensionMembership(dobj, fout))
2358 return; /* extension membership overrides all else */
2359
2360 /*
2361 * Default policy is to dump if parent namespace is dumpable, or for
2362 * non-namespace-associated items, dump if we're dumping "everything".
2363 */
2364 if (dobj->namespace)
2365 dobj->dump = dobj->namespace->dobj.dump_contains;
2366 else
2367 dobj->dump = fout->dopt->include_everything ?
2369}
2370
2371/*
2372 * Dump a table's contents for loading using the COPY command
2373 * - this routine is called by the Archiver when it wants the table
2374 * to be dumped.
2375 */
2376static int
2378{
2379 const TableDataInfo *tdinfo = dcontext;
2380 const TableInfo *tbinfo = tdinfo->tdtable;
2381 const char *classname = tbinfo->dobj.name;
2383
2384 /*
2385 * Note: can't use getThreadLocalPQExpBuffer() here, we're calling fmtId
2386 * which uses it already.
2387 */
2390 PGresult *res;
2391 int ret;
2392 char *copybuf;
2393 const char *column_list;
2394
2395 pg_log_info("dumping contents of table \"%s.%s\"",
2396 tbinfo->dobj.namespace->dobj.name, classname);
2397
2398 /*
2399 * Specify the column list explicitly so that we have no possibility of
2400 * retrieving data in the wrong column order. (The default column
2401 * ordering of COPY will not be what we want in certain corner cases
2402 * involving ADD COLUMN and inheritance.)
2403 */
2405
2406 /*
2407 * Use COPY (SELECT ...) TO when dumping a foreign table's data, when a
2408 * filter condition was specified, and when in binary upgrade mode and
2409 * dumping an old pg_largeobject_metadata defined WITH OIDS. For other
2410 * cases a simple COPY suffices.
2411 */
2412 if (tdinfo->filtercond || tbinfo->relkind == RELKIND_FOREIGN_TABLE ||
2413 (fout->dopt->binary_upgrade && fout->remoteVersion < 120000 &&
2414 tbinfo->dobj.catId.oid == LargeObjectMetadataRelationId))
2415 {
 2416 /* Temporarily allow access to foreign tables, so their data can be dumped */
2417 if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2419
2420 appendPQExpBufferStr(q, "COPY (SELECT ");
2421 /* klugery to get rid of parens in column list */
2422 if (strlen(column_list) > 2)
2423 {
2425 q->data[q->len - 1] = ' ';
2426 }
2427 else
2428 appendPQExpBufferStr(q, "* ");
2429
2430 appendPQExpBuffer(q, "FROM %s %s) TO stdout;",
2432 tdinfo->filtercond ? tdinfo->filtercond : "");
2433 }
2434 else
2435 {
2436 appendPQExpBuffer(q, "COPY %s %s TO stdout;",
2438 column_list);
2439 }
2441 PQclear(res);
2443
2444 for (;;)
2445 {
2446 ret = PQgetCopyData(conn, &copybuf, 0);
2447
2448 if (ret < 0)
2449 break; /* done or error */
2450
2451 if (copybuf)
2452 {
2453 WriteData(fout, copybuf, ret);
2455 }
2456
2457 /* ----------
2458 * THROTTLE:
2459 *
2460 * There was considerable discussion in late July, 2000 regarding
2461 * slowing down pg_dump when backing up large tables. Users with both
2462 * slow & fast (multi-processor) machines experienced performance
2463 * degradation when doing a backup.
2464 *
2465 * Initial attempts based on sleeping for a number of ms for each ms
2466 * of work were deemed too complex, then a simple 'sleep in each loop'
2467 * implementation was suggested. The latter failed because the loop
2468 * was too tight. Finally, the following was implemented:
2469 *
2470 * If throttle is non-zero, then
2471 * See how long since the last sleep.
2472 * Work out how long to sleep (based on ratio).
2473 * If sleep is more than 100ms, then
2474 * sleep
2475 * reset timer
2476 * EndIf
2477 * EndIf
2478 *
2479 * where the throttle value was the number of ms to sleep per ms of
2480 * work. The calculation was done in each loop.
2481 *
2482 * Most of the hard work is done in the backend, and this solution
2483 * still did not work particularly well: on slow machines, the ratio
2484 * was 50:1, and on medium paced machines, 1:1, and on fast
2485 * multi-processor machines, it had little or no effect, for reasons
2486 * that were unclear.
2487 *
2488 * Further discussion ensued, and the proposal was dropped.
2489 *
2490 * For those people who want this feature, it can be implemented using
2491 * gettimeofday in each loop, calculating the time since last sleep,
2492 * multiplying that by the sleep ratio, then if the result is more
2493 * than a preset 'minimum sleep time' (say 100ms), call the 'select'
2494 * function to sleep for a subsecond period ie.
2495 *
2496 * select(0, NULL, NULL, NULL, &tvi);
2497 *
2498 * This will return after the interval specified in the structure tvi.
2499 * Finally, call gettimeofday again to save the 'last sleep time'.
2500 * ----------
2501 */
2502 }
2503 archprintf(fout, "\\.\n\n\n");
2504
2505 if (ret == -2)
2506 {
2507 /* copy data transfer failed */
2508 pg_log_error("Dumping the contents of table \"%s\" failed: PQgetCopyData() failed.", classname);
2509 pg_log_error_detail("Error message from server: %s", PQerrorMessage(conn));
2510 pg_log_error_detail("Command was: %s", q->data);
2511 exit_nicely(1);
2512 }
2513
2514 /* Check command status and return to normal libpq state */
2515 res = PQgetResult(conn);
2516 if (PQresultStatus(res) != PGRES_COMMAND_OK)
2517 {
2518 pg_log_error("Dumping the contents of table \"%s\" failed: PQgetResult() failed.", classname);
2519 pg_log_error_detail("Error message from server: %s", PQerrorMessage(conn));
2520 pg_log_error_detail("Command was: %s", q->data);
2521 exit_nicely(1);
2522 }
2523 PQclear(res);
2524
2525 /* Do this to ensure we've pumped libpq back to idle state */
2526 if (PQgetResult(conn) != NULL)
2527 pg_log_warning("unexpected extra results during COPY of table \"%s\"",
2528 classname);
2529
2531
2532 /* Revert back the setting */
2533 if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2534 set_restrict_relation_kind(fout, "view, foreign-table");
2535
2536 return 1;
2537}
2538
2539/*
2540 * Dump table data using INSERT commands.
2541 *
2542 * Caution: when we restore from an archive file direct to database, the
2543 * INSERT commands emitted by this function have to be parsed by
2544 * pg_backup_db.c's ExecuteSimpleCommands(), which will not handle comments,
2545 * E'' strings, or dollar-quoted strings. So don't emit anything like that.
2546 */
2547static int
2549{
2550 const TableDataInfo *tdinfo = dcontext;
2551 const TableInfo *tbinfo = tdinfo->tdtable;
2552 DumpOptions *dopt = fout->dopt;
2555 char *attgenerated;
2556 PGresult *res;
2557 int nfields,
2558 i;
2559 int rows_per_statement = dopt->dump_inserts;
2560 int rows_this_statement = 0;
2561
 2562 /* Temporarily allow access to foreign tables, so their data can be dumped */
2563 if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2565
2566 /*
2567 * If we're going to emit INSERTs with column names, the most efficient
2568 * way to deal with generated columns is to exclude them entirely. For
2569 * INSERTs without column names, we have to emit DEFAULT rather than the
2570 * actual column value --- but we can save a few cycles by fetching nulls
2571 * rather than the uninteresting-to-us value.
2572 */
2573 attgenerated = pg_malloc_array(char, tbinfo->numatts);
2574 appendPQExpBufferStr(q, "DECLARE _pg_dump_cursor CURSOR FOR SELECT ");
2575 nfields = 0;
2576 for (i = 0; i < tbinfo->numatts; i++)
2577 {
2578 if (tbinfo->attisdropped[i])
2579 continue;
2580 if (tbinfo->attgenerated[i] && dopt->column_inserts)
2581 continue;
2582 if (nfields > 0)
2583 appendPQExpBufferStr(q, ", ");
2584 if (tbinfo->attgenerated[i])
2585 appendPQExpBufferStr(q, "NULL");
2586 else
2587 appendPQExpBufferStr(q, fmtId(tbinfo->attnames[i]));
2588 attgenerated[nfields] = tbinfo->attgenerated[i];
2589 nfields++;
2590 }
2591 /* Servers before 9.4 will complain about zero-column SELECT */
2592 if (nfields == 0)
2593 appendPQExpBufferStr(q, "NULL");
2594 appendPQExpBuffer(q, " FROM ONLY %s",
2596 if (tdinfo->filtercond)
2597 appendPQExpBuffer(q, " %s", tdinfo->filtercond);
2598
2600
2601 while (1)
2602 {
2603 res = ExecuteSqlQuery(fout, "FETCH 100 FROM _pg_dump_cursor",
2605
2606 /* cross-check field count, allowing for dummy NULL if any */
2607 if (nfields != PQnfields(res) &&
2608 !(nfields == 0 && PQnfields(res) == 1))
2609 pg_fatal("wrong number of fields retrieved from table \"%s\"",
2610 tbinfo->dobj.name);
2611
2612 /*
2613 * First time through, we build as much of the INSERT statement as
2614 * possible in "insertStmt", which we can then just print for each
2615 * statement. If the table happens to have zero dumpable columns then
2616 * this will be a complete statement, otherwise it will end in
2617 * "VALUES" and be ready to have the row's column values printed.
2618 */
2619 if (insertStmt == NULL)
2620 {
2621 const TableInfo *targettab;
2622
2624
2625 /*
2626 * When load-via-partition-root is set or forced, get the root
2627 * table name for the partition table, so that we can reload data
2628 * through the root table.
2629 */
2630 if (tbinfo->ispartition &&
2631 (dopt->load_via_partition_root ||
2634 else
2635 targettab = tbinfo;
2636
2637 appendPQExpBuffer(insertStmt, "INSERT INTO %s ",
2639
2640 /* corner case for zero-column table */
2641 if (nfields == 0)
2642 {
2643 appendPQExpBufferStr(insertStmt, "DEFAULT VALUES;\n");
2644 }
2645 else
2646 {
2647 /* append the list of column names if required */
2648 if (dopt->column_inserts)
2649 {
2651 for (int field = 0; field < nfields; field++)
2652 {
2653 if (field > 0)
2656 fmtId(PQfname(res, field)));
2657 }
2659 }
2660
2661 if (tbinfo->needs_override)
2662 appendPQExpBufferStr(insertStmt, "OVERRIDING SYSTEM VALUE ");
2663
2665 }
2666 }
2667
2668 for (int tuple = 0; tuple < PQntuples(res); tuple++)
2669 {
2670 /* Write the INSERT if not in the middle of a multi-row INSERT. */
2671 if (rows_this_statement == 0)
2672 archputs(insertStmt->data, fout);
2673
2674 /*
2675 * If it is zero-column table then we've already written the
2676 * complete statement, which will mean we've disobeyed
2677 * --rows-per-insert when it's set greater than 1. We do support
2678 * a way to make this multi-row with: SELECT UNION ALL SELECT
2679 * UNION ALL ... but that's non-standard so we should avoid it
2680 * given that using INSERTs is mostly only ever needed for
2681 * cross-database exports.
2682 */
2683 if (nfields == 0)
2684 continue;
2685
2686 /* Emit a row heading */
2687 if (rows_per_statement == 1)
2688 archputs(" (", fout);
2689 else if (rows_this_statement > 0)
2690 archputs(",\n\t(", fout);
2691 else
2692 archputs("\n\t(", fout);
2693
2694 for (int field = 0; field < nfields; field++)
2695 {
2696 if (field > 0)
2697 archputs(", ", fout);
2698 if (attgenerated[field])
2699 {
2700 archputs("DEFAULT", fout);
2701 continue;
2702 }
2703 if (PQgetisnull(res, tuple, field))
2704 {
2705 archputs("NULL", fout);
2706 continue;
2707 }
2708
2709 /* XXX This code is partially duplicated in ruleutils.c */
2710 switch (PQftype(res, field))
2711 {
2712 case INT2OID:
2713 case INT4OID:
2714 case INT8OID:
2715 case OIDOID:
2716 case FLOAT4OID:
2717 case FLOAT8OID:
2718 case NUMERICOID:
2719 {
2720 /*
2721 * These types are printed without quotes unless
2722 * they contain values that aren't accepted by the
2723 * scanner unquoted (e.g., 'NaN'). Note that
2724 * strtod() and friends might accept NaN, so we
2725 * can't use that to test.
2726 *
2727 * In reality we only need to defend against
2728 * infinity and NaN, so we need not get too crazy
2729 * about pattern matching here.
2730 */
2731 const char *s = PQgetvalue(res, tuple, field);
2732
2733 if (strspn(s, "0123456789 +-eE.") == strlen(s))
2734 archputs(s, fout);
2735 else
2736 archprintf(fout, "'%s'", s);
2737 }
2738 break;
2739
2740 case BITOID:
2741 case VARBITOID:
2742 archprintf(fout, "B'%s'",
2743 PQgetvalue(res, tuple, field));
2744 break;
2745
2746 case BOOLOID:
2747 if (strcmp(PQgetvalue(res, tuple, field), "t") == 0)
2748 archputs("true", fout);
2749 else
2750 archputs("false", fout);
2751 break;
2752
2753 default:
2754 /* All other types are printed as string literals. */
2757 PQgetvalue(res, tuple, field),
2758 fout);
2759 archputs(q->data, fout);
2760 break;
2761 }
2762 }
2763
2764 /* Terminate the row ... */
2765 archputs(")", fout);
2766
2767 /* ... and the statement, if the target no. of rows is reached */
2769 {
2770 if (dopt->do_nothing)
2771 archputs(" ON CONFLICT DO NOTHING;\n", fout);
2772 else
2773 archputs(";\n", fout);
2774 /* Reset the row counter */
2776 }
2777 }
2778
2779 if (PQntuples(res) <= 0)
2780 {
2781 PQclear(res);
2782 break;
2783 }
2784 PQclear(res);
2785 }
2786
2787 /* Terminate any statements that didn't make the row count. */
2788 if (rows_this_statement > 0)
2789 {
2790 if (dopt->do_nothing)
2791 archputs(" ON CONFLICT DO NOTHING;\n", fout);
2792 else
2793 archputs(";\n", fout);
2794 }
2795
2796 archputs("\n\n", fout);
2797
2798 ExecuteSqlStatement(fout, "CLOSE _pg_dump_cursor");
2799
2801 if (insertStmt != NULL)
2803 free(attgenerated);
2804
2805 /* Revert back the setting */
2806 if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2807 set_restrict_relation_kind(fout, "view, foreign-table");
2808
2809 return 1;
2810}
2811
2812/*
2813 * getRootTableInfo:
2814 * get the root TableInfo for the given partition table.
2815 */
2816static TableInfo *
2818{
2820
2821 Assert(tbinfo->ispartition);
2822 Assert(tbinfo->numParents == 1);
2823
2824 parentTbinfo = tbinfo->parents[0];
2825 while (parentTbinfo->ispartition)
2826 {
2827 Assert(parentTbinfo->numParents == 1);
2828 parentTbinfo = parentTbinfo->parents[0];
2829 }
2830
2831 return parentTbinfo;
2832}
2833
2834/*
2835 * forcePartitionRootLoad
2836 * Check if we must force load_via_partition_root for this partition.
2837 *
2838 * This is required if any level of ancestral partitioned table has an
2839 * unsafe partitioning scheme.
2840 */
2841static bool
2843{
2845
2846 Assert(tbinfo->ispartition);
2847 Assert(tbinfo->numParents == 1);
2848
2849 parentTbinfo = tbinfo->parents[0];
2850 if (parentTbinfo->unsafe_partitions)
2851 return true;
2852 while (parentTbinfo->ispartition)
2853 {
2854 Assert(parentTbinfo->numParents == 1);
2855 parentTbinfo = parentTbinfo->parents[0];
2856 if (parentTbinfo->unsafe_partitions)
2857 return true;
2858 }
2859
2860 return false;
2861}
2862
2863/*
2864 * dumpTableData -
2865 * dump the contents of a single table
2866 *
2867 * Actually, this just makes an ArchiveEntry for the table contents.
2868 */
2869static void
2871{
2872 DumpOptions *dopt = fout->dopt;
2873 const TableInfo *tbinfo = tdinfo->tdtable;
2876 DataDumperPtr dumpFn;
2877 char *tdDefn = NULL;
2878 char *copyStmt;
2879 const char *copyFrom;
2880
2881 /* We had better have loaded per-column details about this table */
2882 Assert(tbinfo->interesting);
2883
2884 /*
2885 * When load-via-partition-root is set or forced, get the root table name
2886 * for the partition table, so that we can reload data through the root
2887 * table. Then construct a comment to be inserted into the TOC entry's
2888 * defn field, so that such cases can be identified reliably.
2889 */
2890 if (tbinfo->ispartition &&
2891 (dopt->load_via_partition_root ||
2893 {
2894 const TableInfo *parentTbinfo;
2895 char *sanitized;
2896
2900 printfPQExpBuffer(copyBuf, "-- load via partition root %s",
2901 sanitized);
2902 free(sanitized);
2903 tdDefn = pg_strdup(copyBuf->data);
2904 }
2905 else
2907
2908 if (dopt->dump_inserts == 0)
2909 {
2910 /* Dump/restore using COPY */
2911 dumpFn = dumpTableData_copy;
2912 /* must use 2 steps here 'cause fmtId is nonreentrant */
2913 printfPQExpBuffer(copyBuf, "COPY %s ",
2914 copyFrom);
2915 appendPQExpBuffer(copyBuf, "%s FROM stdin;\n",
2917 copyStmt = copyBuf->data;
2918 }
2919 else
2920 {
2921 /* Restore using INSERT */
2922 dumpFn = dumpTableData_insert;
2923 copyStmt = NULL;
2924 }
2925
2926 /*
2927 * Note: although the TableDataInfo is a full DumpableObject, we treat its
2928 * dependency on its table as "special" and pass it to ArchiveEntry now.
2929 * See comments for BuildArchiveDependencies.
2930 */
2931 if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2932 {
2933 TocEntry *te;
2934
2935 te = ArchiveEntry(fout, tdinfo->dobj.catId, tdinfo->dobj.dumpId,
2936 ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
2937 .namespace = tbinfo->dobj.namespace->dobj.name,
2938 .owner = tbinfo->rolname,
2939 .description = "TABLE DATA",
2940 .section = SECTION_DATA,
2941 .createStmt = tdDefn,
2942 .copyStmt = copyStmt,
2943 .deps = &(tbinfo->dobj.dumpId),
2944 .nDeps = 1,
2945 .dumpFn = dumpFn,
2946 .dumpArg = tdinfo));
2947
2948 /*
2949 * Set the TocEntry's dataLength in case we are doing a parallel dump
2950 * and want to order dump jobs by table size. We choose to measure
2951 * dataLength in table pages (including TOAST pages) during dump, so
2952 * no scaling is needed.
2953 *
2954 * However, relpages is declared as "integer" in pg_class, and hence
2955 * also in TableInfo, but it's really BlockNumber a/k/a unsigned int.
2956 * Cast so that we get the right interpretation of table sizes
2957 * exceeding INT_MAX pages.
2958 */
2959 te->dataLength = (BlockNumber) tbinfo->relpages;
2960 te->dataLength += (BlockNumber) tbinfo->toastpages;
2961
2962 /*
2963 * If pgoff_t is only 32 bits wide, the above refinement is useless,
2964 * and instead we'd better worry about integer overflow. Clamp to
2965 * INT_MAX if the correct result exceeds that.
2966 */
2967 if (sizeof(te->dataLength) == 4 &&
2968 (tbinfo->relpages < 0 || tbinfo->toastpages < 0 ||
2969 te->dataLength < 0))
2970 te->dataLength = INT_MAX;
2971 }
2972
2975}
2976
2977/*
2978 * refreshMatViewData -
2979 * load or refresh the contents of a single materialized view
2980 *
2981 * Actually, this just makes an ArchiveEntry for the REFRESH MATERIALIZED VIEW
2982 * statement.
2983 */
2984static void
2986{
2987 TableInfo *tbinfo = tdinfo->tdtable;
2988 PQExpBuffer q;
2989
2990 /* If the materialized view is not flagged as populated, skip this. */
2991 if (!tbinfo->relispopulated)
2992 return;
2993
2994 q = createPQExpBuffer();
2995
2996 appendPQExpBuffer(q, "REFRESH MATERIALIZED VIEW %s;\n",
2998
2999 if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
3001 tdinfo->dobj.catId, /* catalog ID */
3002 tdinfo->dobj.dumpId, /* dump ID */
3003 ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
3004 .namespace = tbinfo->dobj.namespace->dobj.name,
3005 .owner = tbinfo->rolname,
3006 .description = "MATERIALIZED VIEW DATA",
3007 .section = SECTION_POST_DATA,
3008 .createStmt = q->data,
3009 .deps = tdinfo->dobj.dependencies,
3010 .nDeps = tdinfo->dobj.nDeps));
3011
3013}
3014
3015/*
3016 * getTableData -
3017 * set up dumpable objects representing the contents of tables
3018 */
3019static void
3020getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, char relkind)
3021{
3022 int i;
3023
3024 for (i = 0; i < numTables; i++)
3025 {
3026 if (tblinfo[i].dobj.dump & DUMP_COMPONENT_DATA &&
3027 (!relkind || tblinfo[i].relkind == relkind))
3028 makeTableDataInfo(dopt, &(tblinfo[i]));
3029 }
3030}
3031
3032/*
3033 * Make a dumpable object for the data of this specific table
3034 *
3035 * Note: we make a TableDataInfo if and only if we are going to dump the
3036 * table data; the "dump" field in such objects isn't very interesting.
3037 */
3038static void
3040{
3042
3043 /*
3044 * Nothing to do if we already decided to dump the table. This will
3045 * happen for "config" tables.
3046 */
3047 if (tbinfo->dataObj != NULL)
3048 return;
3049
3050 /* Skip VIEWs (no data to dump) */
3051 if (tbinfo->relkind == RELKIND_VIEW)
3052 return;
3053 /* Skip FOREIGN TABLEs (no data to dump) unless requested explicitly */
3054 if (tbinfo->relkind == RELKIND_FOREIGN_TABLE &&
3057 tbinfo->foreign_server)))
3058 return;
3059 /* Skip partitioned tables (data in partitions) */
3060 if (tbinfo->relkind == RELKIND_PARTITIONED_TABLE)
3061 return;
3062
3063 /* Don't dump data in unlogged tables, if so requested */
3064 if (tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED &&
3066 return;
3067
3068 /* Check that the data is not explicitly excluded */
3070 tbinfo->dobj.catId.oid))
3071 return;
3072
3073 /* OK, let's dump it */
3075
3076 if (tbinfo->relkind == RELKIND_MATVIEW)
3077 tdinfo->dobj.objType = DO_REFRESH_MATVIEW;
3078 else if (tbinfo->relkind == RELKIND_SEQUENCE)
3079 tdinfo->dobj.objType = DO_SEQUENCE_SET;
3080 else
3081 tdinfo->dobj.objType = DO_TABLE_DATA;
3082
3083 /*
3084 * Note: use tableoid 0 so that this object won't be mistaken for
3085 * something that pg_depend entries apply to.
3086 */
3087 tdinfo->dobj.catId.tableoid = 0;
3088 tdinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
3089 AssignDumpId(&tdinfo->dobj);
3090 tdinfo->dobj.name = tbinfo->dobj.name;
3091 tdinfo->dobj.namespace = tbinfo->dobj.namespace;
3092 tdinfo->tdtable = tbinfo;
3093 tdinfo->filtercond = NULL; /* might get set later */
3094 addObjectDependency(&tdinfo->dobj, tbinfo->dobj.dumpId);
3095
3096 /* A TableDataInfo contains data, of course */
3097 tdinfo->dobj.components |= DUMP_COMPONENT_DATA;
3098
3099 tbinfo->dataObj = tdinfo;
3100
3101 /*
3102 * Materialized view statistics must be restored after the data, because
3103 * REFRESH MATERIALIZED VIEW replaces the storage and resets the stats.
3104 *
3105 * The dependency is added here because the statistics objects are created
3106 * first.
3107 */
3108 if (tbinfo->relkind == RELKIND_MATVIEW && tbinfo->stats != NULL)
3109 {
3110 tbinfo->stats->section = SECTION_POST_DATA;
3111 addObjectDependency(&tbinfo->stats->dobj, tdinfo->dobj.dumpId);
3112 }
3113
3114 /* Make sure that we'll collect per-column info for this table. */
3115 tbinfo->interesting = true;
3116}
3117
3118/*
3119 * The refresh for a materialized view must be dependent on the refresh for
3120 * any materialized view that this one is dependent on.
3121 *
3122 * This must be called after all the objects are created, but before they are
3123 * sorted.
3124 */
3125static void
3127{
3128 PQExpBuffer query;
3129 PGresult *res;
3130 int ntups,
3131 i;
3132 int i_classid,
3133 i_objid,
3134 i_refobjid;
3135
3136 /* No Mat Views before 9.3. */
3137 if (fout->remoteVersion < 90300)
3138 return;
3139
3140 query = createPQExpBuffer();
3141
3142 appendPQExpBufferStr(query, "WITH RECURSIVE w AS "
3143 "( "
3144 "SELECT d1.objid, d2.refobjid, c2.relkind AS refrelkind "
3145 "FROM pg_depend d1 "
3146 "JOIN pg_class c1 ON c1.oid = d1.objid "
3147 "AND c1.relkind = " CppAsString2(RELKIND_MATVIEW)
3148 " JOIN pg_rewrite r1 ON r1.ev_class = d1.objid "
3149 "JOIN pg_depend d2 ON d2.classid = 'pg_rewrite'::regclass "
3150 "AND d2.objid = r1.oid "
3151 "AND d2.refobjid <> d1.objid "
3152 "JOIN pg_class c2 ON c2.oid = d2.refobjid "
3153 "AND c2.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
3155 "WHERE d1.classid = 'pg_class'::regclass "
3156 "UNION "
3157 "SELECT w.objid, d3.refobjid, c3.relkind "
3158 "FROM w "
3159 "JOIN pg_rewrite r3 ON r3.ev_class = w.refobjid "
3160 "JOIN pg_depend d3 ON d3.classid = 'pg_rewrite'::regclass "
3161 "AND d3.objid = r3.oid "
3162 "AND d3.refobjid <> w.refobjid "
3163 "JOIN pg_class c3 ON c3.oid = d3.refobjid "
3164 "AND c3.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
3166 ") "
3167 "SELECT 'pg_class'::regclass::oid AS classid, objid, refobjid "
3168 "FROM w "
3169 "WHERE refrelkind = " CppAsString2(RELKIND_MATVIEW));
3170
3171 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3172
3173 ntups = PQntuples(res);
3174
3175 i_classid = PQfnumber(res, "classid");
3176 i_objid = PQfnumber(res, "objid");
3177 i_refobjid = PQfnumber(res, "refobjid");
3178
3179 for (i = 0; i < ntups; i++)
3180 {
3181 CatalogId objId;
3183 DumpableObject *dobj;
3187
3188 objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
3189 objId.oid = atooid(PQgetvalue(res, i, i_objid));
3190 refobjId.tableoid = objId.tableoid;
3191 refobjId.oid = atooid(PQgetvalue(res, i, i_refobjid));
3192
3193 dobj = findObjectByCatalogId(objId);
3194 if (dobj == NULL)
3195 continue;
3196
3197 Assert(dobj->objType == DO_TABLE);
3198 tbinfo = (TableInfo *) dobj;
3199 Assert(tbinfo->relkind == RELKIND_MATVIEW);
3200 dobj = (DumpableObject *) tbinfo->dataObj;
3201 if (dobj == NULL)
3202 continue;
3204
3206 if (refdobj == NULL)
3207 continue;
3208
3209 Assert(refdobj->objType == DO_TABLE);
3211 Assert(reftbinfo->relkind == RELKIND_MATVIEW);
3212 refdobj = (DumpableObject *) reftbinfo->dataObj;
3213 if (refdobj == NULL)
3214 continue;
3215 Assert(refdobj->objType == DO_REFRESH_MATVIEW);
3216
3218
3219 if (!reftbinfo->relispopulated)
3220 tbinfo->relispopulated = false;
3221 }
3222
3223 PQclear(res);
3224
3225 destroyPQExpBuffer(query);
3226}
3227
3228/*
3229 * getTableDataFKConstraints -
3230 * add dump-order dependencies reflecting foreign key constraints
3231 *
3232 * This code is executed only in a data-only dump --- in schema+data dumps
3233 * we handle foreign key issues by not creating the FK constraints until
3234 * after the data is loaded. In a data-only dump, however, we want to
3235 * order the table data objects in such a way that a table's referenced
3236 * tables are restored first. (In the presence of circular references or
3237 * self-references this may be impossible; we'll detect and complain about
3238 * that during the dependency sorting step.)
3239 */
3240static void
3242{
/* NOTE(review): extraction elided the signature/name line (3241) and the
 * declarations of dobjs/cinfo/ftable plus the getDumpableObjects() call
 * (3243, 3248, 3253-3254) -- confirm against the upstream file. */
3244 int numObjs;
3245 int i;
3246
3247 /* Search through all the dumpable objects for FK constraints */
3249 for (i = 0; i < numObjs; i++)
3250 {
3251 if (dobjs[i]->objType == DO_FK_CONSTRAINT)
3252 {
3255
3256 /* Not interesting unless both tables are to be dumped */
3257 if (cinfo->contable == NULL ||
3258 cinfo->contable->dataObj == NULL)
3259 continue;
3260 ftable = findTableByOid(cinfo->confrelid);
3261 if (ftable == NULL ||
3262 ftable->dataObj == NULL)
3263 continue;
3264
3265 /*
3266 * Okay, make referencing table's TABLE_DATA object depend on the
3267 * referenced table's TABLE_DATA object.
3268 */
3269 addObjectDependency(&cinfo->contable->dataObj->dobj,
3270 ftable->dataObj->dobj.dumpId);
3271 }
3272 }
/* Release the flat object array obtained above; the objects themselves
 * are owned elsewhere and are not freed here. */
3273 free(dobjs);
3274}
3275
3276
3277/*
3278 * dumpDatabase:
3279 * dump the database definition
3280 */
3281static void
3283{
/* NOTE(review): this doxygen extraction elided many lines (gaps in the
 * embedded numbering).  In particular the signature line (3282), the
 * PQExpBuffer declarations (dbQry, delQry, creaQry, labelq), several
 * column-index and string locals (i_datlocprovider, i_datlocale, etc.),
 * the ExecuteSqlQueryForSingleRow() call, and most ArchiveEntry(...)
 * opening lines are not visible here -- verify against upstream. */
3284 DumpOptions *dopt = fout->dopt;
3290 PGresult *res;
3291 int i_tableoid,
3292 i_oid,
3293 i_datname,
3294 i_datdba,
3295 i_encoding,
3297 i_collate,
3298 i_ctype,
3302 i_minmxid,
3303 i_datacl,
3312 const char *datname,
3313 *dba,
3314 *encoding,
3316 *collate,
3317 *ctype,
3318 *locale,
3319 *icurules,
3321 *datconnlimit,
3322 *tablespace;
3323 uint32 frozenxid,
3324 minmxid;
3325 char *qdatname;
3326
3327 pg_log_info("saving database definition");
3328
3329 /*
3330 * Fetch the database-level properties for this database.
3331 */
3332 appendPQExpBufferStr(dbQry, "SELECT tableoid, oid, datname, "
3333 "datdba, "
3334 "pg_encoding_to_char(encoding) AS encoding, "
3335 "datcollate, datctype, datfrozenxid, "
3336 "datacl, acldefault('d', datdba) AS acldefault, "
3337 "datistemplate, datconnlimit, ");
/* Version-dependent columns: older servers lack these catalog fields, so
 * substitute constants to keep the result-set shape uniform. */
3338 if (fout->remoteVersion >= 90300)
3339 appendPQExpBufferStr(dbQry, "datminmxid, ");
3340 else
3341 appendPQExpBufferStr(dbQry, "0 AS datminmxid, ");
3342 if (fout->remoteVersion >= 170000)
3343 appendPQExpBufferStr(dbQry, "datlocprovider, datlocale, datcollversion, ");
3344 else if (fout->remoteVersion >= 150000)
3345 appendPQExpBufferStr(dbQry, "datlocprovider, daticulocale AS datlocale, datcollversion, ");
3346 else
3347 appendPQExpBufferStr(dbQry, "'c' AS datlocprovider, NULL AS datlocale, NULL AS datcollversion, ");
3348 if (fout->remoteVersion >= 160000)
3349 appendPQExpBufferStr(dbQry, "daticurules, ");
3350 else
3351 appendPQExpBufferStr(dbQry, "NULL AS daticurules, ");
3353 "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
3354 "shobj_description(oid, 'pg_database') AS description "
3355 "FROM pg_database "
3356 "WHERE datname = current_database()");
3357
3359
/* Resolve column indexes once; PQfnumber returns -1 for a missing column,
 * but all of these are guaranteed by the query built above. */
3360 i_tableoid = PQfnumber(res, "tableoid");
3361 i_oid = PQfnumber(res, "oid");
3362 i_datname = PQfnumber(res, "datname");
3363 i_datdba = PQfnumber(res, "datdba");
3364 i_encoding = PQfnumber(res, "encoding");
3365 i_datlocprovider = PQfnumber(res, "datlocprovider");
3366 i_collate = PQfnumber(res, "datcollate");
3367 i_ctype = PQfnumber(res, "datctype");
3368 i_datlocale = PQfnumber(res, "datlocale");
3369 i_daticurules = PQfnumber(res, "daticurules");
3370 i_frozenxid = PQfnumber(res, "datfrozenxid");
3371 i_minmxid = PQfnumber(res, "datminmxid");
3372 i_datacl = PQfnumber(res, "datacl");
3373 i_acldefault = PQfnumber(res, "acldefault");
3374 i_datistemplate = PQfnumber(res, "datistemplate");
3375 i_datconnlimit = PQfnumber(res, "datconnlimit");
3376 i_datcollversion = PQfnumber(res, "datcollversion");
3377 i_tablespace = PQfnumber(res, "tablespace");
3378
/* Single-row result: all values are read from tuple 0. */
3379 dbCatId.tableoid = atooid(PQgetvalue(res, 0, i_tableoid));
3380 dbCatId.oid = atooid(PQgetvalue(res, 0, i_oid));
3381 datname = PQgetvalue(res, 0, i_datname);
3382 dba = getRoleName(PQgetvalue(res, 0, i_datdba));
3383 encoding = PQgetvalue(res, 0, i_encoding);
3385 collate = PQgetvalue(res, 0, i_collate);
3386 ctype = PQgetvalue(res, 0, i_ctype);
3387 if (!PQgetisnull(res, 0, i_datlocale))
3388 locale = PQgetvalue(res, 0, i_datlocale);
3389 else
3390 locale = NULL;
3391 if (!PQgetisnull(res, 0, i_daticurules))
3393 else
3394 icurules = NULL;
3395 frozenxid = atooid(PQgetvalue(res, 0, i_frozenxid));
3396 minmxid = atooid(PQgetvalue(res, 0, i_minmxid));
3397 dbdacl.acl = PQgetvalue(res, 0, i_datacl);
3398 dbdacl.acldefault = PQgetvalue(res, 0, i_acldefault);
3402
3404
3405 /*
3406 * Prepare the CREATE DATABASE command. We must specify OID (if we want
3407 * to preserve that), as well as the encoding, locale, and tablespace
3408 * since those can't be altered later. Other DB properties are left to
3409 * the DATABASE PROPERTIES entry, so that they can be applied after
3410 * reconnecting to the target DB.
3411 *
3412 * For binary upgrade, we use the FILE_COPY strategy because testing has
3413 * shown it to be faster. When the server is in binary upgrade mode, it
3414 * will also skip the checkpoints this strategy ordinarily performs.
3415 */
3416 if (dopt->binary_upgrade)
3417 {
3419 "CREATE DATABASE %s WITH TEMPLATE = template0 "
3420 "OID = %u STRATEGY = FILE_COPY",
3421 qdatname, dbCatId.oid);
3422 }
3423 else
3424 {
3425 appendPQExpBuffer(creaQry, "CREATE DATABASE %s WITH TEMPLATE = template0",
3426 qdatname);
3427 }
3428 if (strlen(encoding) > 0)
3429 {
3430 appendPQExpBufferStr(creaQry, " ENCODING = ");
3432 }
3433
/* Map the single-character pg_database.datlocprovider code to the
 * keyword CREATE DATABASE expects ('b' builtin, 'c' libc, 'i' ICU). */
3434 appendPQExpBufferStr(creaQry, " LOCALE_PROVIDER = ");
3435 if (datlocprovider[0] == 'b')
3436 appendPQExpBufferStr(creaQry, "builtin");
3437 else if (datlocprovider[0] == 'c')
3439 else if (datlocprovider[0] == 'i')
3441 else
3442 pg_fatal("unrecognized locale provider: %s",
3444
/* Emit a single LOCALE clause when collate and ctype match; otherwise
 * emit them separately. */
3445 if (strlen(collate) > 0 && strcmp(collate, ctype) == 0)
3446 {
3447 appendPQExpBufferStr(creaQry, " LOCALE = ");
3449 }
3450 else
3451 {
3452 if (strlen(collate) > 0)
3453 {
3454 appendPQExpBufferStr(creaQry, " LC_COLLATE = ");
3456 }
3457 if (strlen(ctype) > 0)
3458 {
3459 appendPQExpBufferStr(creaQry, " LC_CTYPE = ");
3461 }
3462 }
3463 if (locale)
3464 {
3465 if (datlocprovider[0] == 'b')
3466 appendPQExpBufferStr(creaQry, " BUILTIN_LOCALE = ");
3467 else
3468 appendPQExpBufferStr(creaQry, " ICU_LOCALE = ");
3469
3471 }
3472
3473 if (icurules)
3474 {
3475 appendPQExpBufferStr(creaQry, " ICU_RULES = ");
3477 }
3478
3479 /*
3480 * For binary upgrade, carry over the collation version. For normal
3481 * dump/restore, omit the version, so that it is computed upon restore.
3482 */
3483 if (dopt->binary_upgrade)
3484 {
3485 if (!PQgetisnull(res, 0, i_datcollversion))
3486 {
3487 appendPQExpBufferStr(creaQry, " COLLATION_VERSION = ");
3490 fout);
3491 }
3492 }
3493
3494 /*
3495 * Note: looking at dopt->outputNoTablespaces here is completely the wrong
3496 * thing; the decision whether to specify a tablespace should be left till
3497 * pg_restore, so that pg_restore --no-tablespaces applies. Ideally we'd
3498 * label the DATABASE entry with the tablespace and let the normal
3499 * tablespace selection logic work ... but CREATE DATABASE doesn't pay
3500 * attention to default_tablespace, so that won't work.
3501 */
3502 if (strlen(tablespace) > 0 && strcmp(tablespace, "pg_default") != 0 &&
3503 !dopt->outputNoTablespaces)
3504 appendPQExpBuffer(creaQry, " TABLESPACE = %s",
3505 fmtId(tablespace));
3507
3508 appendPQExpBuffer(delQry, "DROP DATABASE %s;\n",
3509 qdatname);
3510
3512
3514 dbCatId, /* catalog ID */
3515 dbDumpId, /* dump ID */
3516 ARCHIVE_OPTS(.tag = datname,
3517 .owner = dba,
3518 .description = "DATABASE",
3519 .section = SECTION_PRE_DATA,
3520 .createStmt = creaQry->data,
3521 .dropStmt = delQry->data));
3522
3523 /* Compute correct tag for archive entry */
3524 appendPQExpBuffer(labelq, "DATABASE %s", qdatname);
3525
3526 /* Dump DB comment if any */
3527 {
3528 /*
3529 * 8.2 and up keep comments on shared objects in a shared table, so we
3530 * cannot use the dumpComment() code used for other database objects.
3531 * Be careful that the ArchiveEntry parameters match that function.
3532 */
3533 char *comment = PQgetvalue(res, 0, PQfnumber(res, "description"));
3534
3535 if (comment && *comment && !dopt->no_comments)
3536 {
3538
3539 /*
3540 * Generates warning when loaded into a differently-named
3541 * database.
3542 */
3543 appendPQExpBuffer(dbQry, "COMMENT ON DATABASE %s IS ", qdatname);
3546
3548 ARCHIVE_OPTS(.tag = labelq->data,
3549 .owner = dba,
3550 .description = "COMMENT",
3551 .section = SECTION_NONE,
3552 .createStmt = dbQry->data,
3553 .deps = &dbDumpId,
3554 .nDeps = 1));
3555 }
3556 }
3557
3558 /* Dump DB security label, if enabled */
3559 if (!dopt->no_security_labels)
3560 {
3561 PGresult *shres;
3563
3565
3566 buildShSecLabelQuery("pg_database", dbCatId.oid, seclabelQry);
3570 if (seclabelQry->len > 0)
3572 ARCHIVE_OPTS(.tag = labelq->data,
3573 .owner = dba,
3574 .description = "SECURITY LABEL",
3575 .section = SECTION_NONE,
3576 .createStmt = seclabelQry->data,
3577 .deps = &dbDumpId,
3578 .nDeps = 1));
3580 PQclear(shres);
3581 }
3582
3583 /*
3584 * Dump ACL if any. Note that we do not support initial privileges
3585 * (pg_init_privs) on databases.
3586 */
3587 dbdacl.privtype = 0;
3588 dbdacl.initprivs = NULL;
3589
3590 dumpACL(fout, dbDumpId, InvalidDumpId, "DATABASE",
3591 qdatname, NULL, NULL,
3592 NULL, dba, &dbdacl);
3593
3594 /*
3595 * Now construct a DATABASE PROPERTIES archive entry to restore any
3596 * non-default database-level properties. (The reason this must be
3597 * separate is that we cannot put any additional commands into the TOC
3598 * entry that has CREATE DATABASE. pg_restore would execute such a group
3599 * in an implicit transaction block, and the backend won't allow CREATE
3600 * DATABASE in that context.)
3601 */
3604
3605 if (strlen(datconnlimit) > 0 && strcmp(datconnlimit, "-1") != 0)
3606 appendPQExpBuffer(creaQry, "ALTER DATABASE %s CONNECTION LIMIT = %s;\n",
3608
3609 if (strcmp(datistemplate, "t") == 0)
3610 {
3611 appendPQExpBuffer(creaQry, "ALTER DATABASE %s IS_TEMPLATE = true;\n",
3612 qdatname);
3613
3614 /*
3615 * The backend won't accept DROP DATABASE on a template database. We
3616 * can deal with that by removing the template marking before the DROP
3617 * gets issued. We'd prefer to use ALTER DATABASE IF EXISTS here, but
3618 * since no such command is currently supported, fake it with a direct
3619 * UPDATE on pg_database.
3620 */
3621 appendPQExpBufferStr(delQry, "UPDATE pg_catalog.pg_database "
3622 "SET datistemplate = false WHERE datname = ");
3625 }
3626
3627 /*
3628 * We do not restore pg_database.dathasloginevt because it is set
3629 * automatically on login event trigger creation.
3630 */
3631
3632 /* Add database-specific SET options */
3634
3635 /*
3636 * We stick this binary-upgrade query into the DATABASE PROPERTIES archive
3637 * entry, too, for lack of a better place.
3638 */
3639 if (dopt->binary_upgrade)
3640 {
3641 appendPQExpBufferStr(creaQry, "\n-- For binary upgrade, set datfrozenxid and datminmxid.\n");
3642 appendPQExpBuffer(creaQry, "UPDATE pg_catalog.pg_database\n"
3643 "SET datfrozenxid = '%u', datminmxid = '%u'\n"
3644 "WHERE datname = ",
3645 frozenxid, minmxid);
3648 }
3649
3650 if (creaQry->len > 0)
3652 ARCHIVE_OPTS(.tag = datname,
3653 .owner = dba,
3654 .description = "DATABASE PROPERTIES",
3655 .section = SECTION_PRE_DATA,
3656 .createStmt = creaQry->data,
3657 .dropStmt = delQry->data,
3658 .deps = &dbDumpId));
3659
3660 /*
3661 * pg_largeobject comes from the old system intact, so set its
3662 * relfrozenxids, relminmxids and relfilenode.
3663 *
3664 * pg_largeobject_metadata also comes from the old system intact for
3665 * upgrades from v16 and newer, so set its relfrozenxids, relminmxids, and
3666 * relfilenode, too. pg_upgrade can't copy/link the files from older
3667 * versions because aclitem (needed by pg_largeobject_metadata.lomacl)
3668 * changed its storage format in v16.
3669 */
3670 if (dopt->binary_upgrade)
3671 {
/* NOTE(review): declarations of lo_res and the four query buffers
 * (loFrozenQry, loHorizonQry/lomHorizonQry, loOutQry/lomOutQry) were
 * elided by the extraction (lines 3672-3677, 3679, 3681). */
3678 int ii_relfrozenxid,
3680 ii_oid,
3682
3683 if (fout->remoteVersion >= 90300)
3684 appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid, relfilenode, oid\n"
3685 "FROM pg_catalog.pg_class\n"
3686 "WHERE oid IN (%u, %u, %u, %u);\n",
3689 else
3690 appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid, relfilenode, oid\n"
3691 "FROM pg_catalog.pg_class\n"
3692 "WHERE oid IN (%u, %u);\n",
3694
3696
3697 ii_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
3698 ii_relminmxid = PQfnumber(lo_res, "relminmxid");
3699 ii_relfilenode = PQfnumber(lo_res, "relfilenode");
3700 ii_oid = PQfnumber(lo_res, "oid");
3701
3702 appendPQExpBufferStr(loHorizonQry, "\n-- For binary upgrade, set pg_largeobject relfrozenxid and relminmxid\n");
3703 appendPQExpBufferStr(lomHorizonQry, "\n-- For binary upgrade, set pg_largeobject_metadata relfrozenxid and relminmxid\n");
3704 appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, preserve pg_largeobject and index relfilenodes\n");
3705 appendPQExpBufferStr(lomOutQry, "\n-- For binary upgrade, preserve pg_largeobject_metadata and index relfilenodes\n");
3706 for (int i = 0; i < PQntuples(lo_res); ++i)
3707 {
3708 Oid oid;
3709 RelFileNumber relfilenumber;
3712
3713 oid = atooid(PQgetvalue(lo_res, i, ii_oid));
3714 relfilenumber = atooid(PQgetvalue(lo_res, i, ii_relfilenode));
3715
/* Route each catalog/index to the pg_largeobject or the
 * pg_largeobject_metadata output buffers respectively. */
3716 if (oid == LargeObjectRelationId ||
3718 {
3720 outQry = loOutQry;
3721 }
3722 else
3723 {
3725 outQry = lomOutQry;
3726 }
3727
3728 appendPQExpBuffer(horizonQry, "UPDATE pg_catalog.pg_class\n"
3729 "SET relfrozenxid = '%u', relminmxid = '%u'\n"
3730 "WHERE oid = %u;\n",
3734
3735 if (oid == LargeObjectRelationId ||
3738 "SELECT pg_catalog.binary_upgrade_set_next_heap_relfilenode('%u'::pg_catalog.oid);\n",
3739 relfilenumber);
3740 else if (oid == LargeObjectLOidPNIndexId ||
3743 "SELECT pg_catalog.binary_upgrade_set_next_index_relfilenode('%u'::pg_catalog.oid);\n",
3744 relfilenumber);
3745 }
3746
3748 "TRUNCATE pg_catalog.pg_largeobject;\n");
3750 "TRUNCATE pg_catalog.pg_largeobject_metadata;\n");
3751
3754
3756 ARCHIVE_OPTS(.tag = "pg_largeobject",
3757 .description = "pg_largeobject",
3758 .section = SECTION_PRE_DATA,
3759 .createStmt = loOutQry->data));
3760
/* pg_largeobject_metadata is only carried over for v16+ sources. */
3761 if (fout->remoteVersion >= 160000)
3763 ARCHIVE_OPTS(.tag = "pg_largeobject_metadata",
3764 .description = "pg_largeobject_metadata",
3765 .section = SECTION_PRE_DATA,
3766 .createStmt = lomOutQry->data));
3767
3768 PQclear(lo_res);
3769
3775 }
3776
3777 PQclear(res);
3778
3779 free(qdatname);
3784}
3785
3786/*
3787 * Collect any database-specific or role-and-database-specific SET options
3788 * for this database, and append them to outbuf.
3789 */
3790static void
/* NOTE(review): the function-name half of the signature (line 3791) and the
 * declaration of buf (line 3795) were elided by the extraction. */
3792 const char *dbname, Oid dboid)
3793{
3794 PGconn *conn = GetConnection(AH);
3796 PGresult *res;
3797
3798 /* First collect database-specific options */
3799 printfPQExpBuffer(buf, "SELECT unnest(setconfig) FROM pg_db_role_setting "
3800 "WHERE setrole = 0 AND setdatabase = '%u'::oid",
3801 dboid);
3802
3803 res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);
3804
/* One ALTER DATABASE ... SET per setconfig entry (the call that formats
 * each entry sits on an elided line, 3806). */
3805 for (int i = 0; i < PQntuples(res); i++)
3807 "DATABASE", dbname, NULL, NULL,
3808 outbuf);
3809
3810 PQclear(res);
3811
3812 /* Now look for role-and-database-specific options */
3813 printfPQExpBuffer(buf, "SELECT rolname, unnest(setconfig) "
3814 "FROM pg_db_role_setting s, pg_roles r "
3815 "WHERE setrole = r.oid AND setdatabase = '%u'::oid",
3816 dboid);
3817
3818 res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);
3819
/* One ALTER ROLE ... IN DATABASE ... SET per (role, setting) pair. */
3820 for (int i = 0; i < PQntuples(res); i++)
3822 "ROLE", PQgetvalue(res, i, 0),
3823 "DATABASE", dbname,
3824 outbuf);
3825
3826 PQclear(res);
3827
3829}
3830
3831/*
3832 * dumpEncoding: put the correct encoding into the archive
3833 */
3834static void
/* NOTE(review): signature line (3835), the qry buffer declaration (3838),
 * the appendStringLiteralAH call (3843), and the ArchiveEntry opener
 * (3846) were elided by the extraction. */
3836{
3837 const char *encname = pg_encoding_to_char(AH->encoding);
3839
3840 pg_log_info("saving encoding = %s", encname);
3841
/* Build "SET client_encoding = '<name>';" as the TOC entry's create
 * statement so restores reproduce the dump-time client encoding. */
3842 appendPQExpBufferStr(qry, "SET client_encoding = ");
3844 appendPQExpBufferStr(qry, ";\n");
3845
3847 ARCHIVE_OPTS(.tag = "ENCODING",
3848 .description = "ENCODING",
3849 .section = SECTION_PRE_DATA,
3850 .createStmt = qry->data));
3851
3852 destroyPQExpBuffer(qry);
3853}
3854
3855
3856/*
3857 * dumpStdStrings: put the correct escape string behavior into the archive
3858 */
3859static void
/* NOTE(review): signature line (3860), qry declaration (3863), and the
 * ArchiveEntry opener (3871) were elided by the extraction. */
3861{
/* Record the dump-time setting so the archive replays the same escape
 * string behavior on restore. */
3862 const char *stdstrings = AH->std_strings ? "on" : "off";
3864
3865 pg_log_info("saving \"standard_conforming_strings = %s\"",
3866 stdstrings);
3867
3868 appendPQExpBuffer(qry, "SET standard_conforming_strings = '%s';\n",
3869 stdstrings);
3870
3872 ARCHIVE_OPTS(.tag = "STDSTRINGS",
3873 .description = "STDSTRINGS",
3874 .section = SECTION_PRE_DATA,
3875 .createStmt = qry->data));
3876
3877 destroyPQExpBuffer(qry);
3878}
3879
3880/*
3881 * dumpSearchPath: record the active search_path in the archive
3882 */
3883static void
/* NOTE(review): signature line (3884), the qry/path buffer declarations
 * (3886-3887), the ExecuteSqlQueryForSingleRow call (3900), the per-schema
 * append (3916), the ArchiveEntry opener (3925), and the free of
 * schemanames (3934) were elided by the extraction. */
3885{
3888 PGresult *res;
3889 char **schemanames = NULL;
3890 int nschemanames = 0;
3891 int i;
3892
3893 /*
3894 * We use the result of current_schemas(), not the search_path GUC,
3895 * because that might contain wildcards such as "$user", which won't
3896 * necessarily have the same value during restore. Also, this way avoids
3897 * listing schemas that may appear in search_path but not actually exist,
3898 * which seems like a prudent exclusion.
3899 */
3901 "SELECT pg_catalog.current_schemas(false)");
3902
3903 if (!parsePGArray(PQgetvalue(res, 0, 0), &schemanames, &nschemanames))
3904 pg_fatal("could not parse result of current_schemas()");
3905
3906 /*
3907 * We use set_config(), not a simple "SET search_path" command, because
3908 * the latter has less-clean behavior if the search path is empty. While
3909 * that's likely to get fixed at some point, it seems like a good idea to
3910 * be as backwards-compatible as possible in what we put into archives.
3911 */
3912 for (i = 0; i < nschemanames; i++)
3913 {
3914 if (i > 0)
3915 appendPQExpBufferStr(path, ", ");
3917 }
3918
3919 appendPQExpBufferStr(qry, "SELECT pg_catalog.set_config('search_path', ");
3920 appendStringLiteralAH(qry, path->data, AH);
3921 appendPQExpBufferStr(qry, ", false);\n");
3922
3923 pg_log_info("saving \"search_path = %s\"", path->data);
3924
3926 ARCHIVE_OPTS(.tag = "SEARCHPATH",
3927 .description = "SEARCHPATH",
3928 .section = SECTION_PRE_DATA,
3929 .createStmt = qry->data));
3930
3931 /* Also save it in AH->searchpath, in case we're doing plain text dump */
3932 AH->searchpath = pg_strdup(qry->data);
3933
3935 PQclear(res);
3936 destroyPQExpBuffer(qry);
3937 destroyPQExpBuffer(path);
3938}
3939
3940
3941/*
3942 * getLOs:
3943 * Collect schema-level data about large objects
3944 */
3945static void
/* NOTE(review): signature line (3946), the loQry buffer declaration (3949),
 * the ExecuteSqlQuery call (3988), and several other statements (gaps in
 * the embedded numbering) were elided by the extraction. */
3947{
3948 DumpOptions *dopt = fout->dopt;
3950 PGresult *res;
3951 int ntups;
3952 int i;
3953 int n;
3954 int i_oid;
3955 int i_lomowner;
3956 int i_lomacl;
3957 int i_acldefault;
3958
3959 pg_log_info("reading large objects");
3960
3961 /*
3962 * Fetch LO OIDs and owner/ACL data. Order the data so that all the blobs
3963 * with the same owner/ACL appear together.
3964 */
3966 "SELECT oid, lomowner, lomacl, "
3967 "acldefault('L', lomowner) AS acldefault "
3968 "FROM pg_largeobject_metadata ");
3969
3970 /*
3971 * For binary upgrades, we transfer pg_largeobject_metadata via COPY or by
3972 * copying/linking its files from the old cluster. On such upgrades, we
3973 * only need to consider large objects that have comments or security
3974 * labels, since we still restore those objects via COMMENT/SECURITY LABEL
3975 * commands.
3976 */
3977 if (dopt->binary_upgrade)
3979 "WHERE oid IN "
3980 "(SELECT objoid FROM pg_description "
3981 "WHERE classoid = " CppAsString2(LargeObjectRelationId) " "
3982 "UNION SELECT objoid FROM pg_seclabel "
3983 "WHERE classoid = " CppAsString2(LargeObjectRelationId) ") ");
3984
3986 "ORDER BY lomowner, lomacl::pg_catalog.text, oid");
3987
3989
3990 i_oid = PQfnumber(res, "oid");
3991 i_lomowner = PQfnumber(res, "lomowner");
3992 i_lomacl = PQfnumber(res, "lomacl");
3993 i_acldefault = PQfnumber(res, "acldefault");
3994
3995 ntups = PQntuples(res);
3996
3997 /*
3998 * Group the blobs into suitably-sized groups that have the same owner and
3999 * ACL setting, and build a metadata and a data DumpableObject for each
4000 * group. (If we supported initprivs for blobs, we'd have to insist that
4001 * groups also share initprivs settings, since the DumpableObject only has
4002 * room for one.) i is the index of the first tuple in the current group,
4003 * and n is the number of tuples we include in the group.
4004 */
4005 for (i = 0; i < ntups; i += n)
4006 {
4007 Oid thisoid = atooid(PQgetvalue(res, i, i_oid));
4008 char *thisowner = PQgetvalue(res, i, i_lomowner);
4009 char *thisacl = PQgetvalue(res, i, i_lomacl);
4010 LoInfo *loinfo;
4012 char namebuf[64];
4013
4014 /* Scan to find first tuple not to be included in group */
4015 n = 1;
4016 while (n < MAX_BLOBS_PER_ARCHIVE_ENTRY && i + n < ntups)
4017 {
4018 if (strcmp(thisowner, PQgetvalue(res, i + n, i_lomowner)) != 0 ||
4019 strcmp(thisacl, PQgetvalue(res, i + n, i_lomacl)) != 0)
4020 break;
4021 n++;
4022 }
4023
4024 /* Build the metadata DumpableObject */
4025 loinfo = (LoInfo *) pg_malloc(offsetof(LoInfo, looids) + n * sizeof(Oid));
4026
4028 loinfo->dobj.catId.tableoid = LargeObjectRelationId;
4029 loinfo->dobj.catId.oid = thisoid;
4030 AssignDumpId(&loinfo->dobj);
4031
/* Name is the covered OID range ("lo..hi") or a single OID. */
4032 if (n > 1)
4033 snprintf(namebuf, sizeof(namebuf), "%u..%u", thisoid,
4034 atooid(PQgetvalue(res, i + n - 1, i_oid)));
4035 else
4036 snprintf(namebuf, sizeof(namebuf), "%u", thisoid);
4037 loinfo->dobj.name = pg_strdup(namebuf);
4038 loinfo->dacl.acl = pg_strdup(thisacl);
4039 loinfo->dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
4040 loinfo->dacl.privtype = 0;
4041 loinfo->dacl.initprivs = NULL;
4042 loinfo->rolname = getRoleName(thisowner);
4043 loinfo->numlos = n;
4044 loinfo->looids[0] = thisoid;
4045 /* Collect OIDs of the remaining blobs in this group */
4046 for (int k = 1; k < n; k++)
4047 {
4049
4050 loinfo->looids[k] = atooid(PQgetvalue(res, i + k, i_oid));
4051
4052 /* Make sure we can look up loinfo by any of the blobs' OIDs */
4053 extraID.tableoid = LargeObjectRelationId;
4054 extraID.oid = loinfo->looids[k];
4056 }
4057
4058 /* LOs have data */
4059 loinfo->dobj.components |= DUMP_COMPONENT_DATA;
4060
4061 /* Mark whether LO group has a non-empty ACL */
4062 if (!PQgetisnull(res, i, i_lomacl))
4063 loinfo->dobj.components |= DUMP_COMPONENT_ACL;
4064
4065 /*
4066 * In binary upgrade mode, pg_largeobject and pg_largeobject_metadata
4067 * are transferred via COPY or by copying/linking the files from the
4068 * old cluster. Thus, we do not need to dump LO data, definitions, or
4069 * ACLs.
4070 */
4071 if (dopt->binary_upgrade)
4073
4074 /*
4075 * Create a "BLOBS" data item for the group, too. This is just a
4076 * placeholder for sorting; it carries no data now.
4077 */
4079 lodata->objType = DO_LARGE_OBJECT_DATA;
4080 lodata->catId = nilCatalogId;
4082 lodata->name = pg_strdup(namebuf);
4083 lodata->components |= DUMP_COMPONENT_DATA;
4084 /* Set up explicit dependency from data to metadata */
4085 lodata->dependencies = pg_malloc_object(DumpId);
4086 lodata->dependencies[0] = loinfo->dobj.dumpId;
4087 lodata->nDeps = lodata->allocDeps = 1;
4088 }
4089
4090 PQclear(res);
4092}
4093
4094/*
4095 * dumpLO
4096 *
4097 * dump the definition (metadata) of the given large object group
4098 */
4099static void
/* NOTE(review): signature line (4100), the cquery buffer creation (4102),
 * the DUMP_COMPONENT_SECLABEL half of the mask test (4125), and the
 * cquery destruction (4182) were elided by the extraction. */
4101{
4103
4104 /*
4105 * The "definition" is just a newline-separated list of OIDs. We need to
4106 * put something into the dropStmt too, but it can just be a comment.
4107 */
4108 for (int i = 0; i < loinfo->numlos; i++)
4109 appendPQExpBuffer(cquery, "%u\n", loinfo->looids[i]);
4110
4111 if (loinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
4112 ArchiveEntry(fout, loinfo->dobj.catId, loinfo->dobj.dumpId,
4113 ARCHIVE_OPTS(.tag = loinfo->dobj.name,
4114 .owner = loinfo->rolname,
4115 .description = "BLOB METADATA",
4116 .section = SECTION_DATA,
4117 .createStmt = cquery->data,
4118 .dropStmt = "-- dummy"));
4119
4120 /*
4121 * Dump per-blob comments and seclabels if any. We assume these are rare
4122 * enough that it's okay to generate retail TOC entries for them.
4123 */
4124 if (loinfo->dobj.dump & (DUMP_COMPONENT_COMMENT |
4126 {
4127 for (int i = 0; i < loinfo->numlos; i++)
4128 {
4129 CatalogId catId;
4130 char namebuf[32];
4131
4132 /* Build identifying info for this blob */
4133 catId.tableoid = loinfo->dobj.catId.tableoid;
4134 catId.oid = loinfo->looids[i];
4135 snprintf(namebuf, sizeof(namebuf), "%u", loinfo->looids[i]);
4136
4137 if (loinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
4138 dumpComment(fout, "LARGE OBJECT", namebuf,
4139 NULL, loinfo->rolname,
4140 catId, 0, loinfo->dobj.dumpId);
4141
4142 if (loinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
4143 dumpSecLabel(fout, "LARGE OBJECT", namebuf,
4144 NULL, loinfo->rolname,
4145 catId, 0, loinfo->dobj.dumpId);
4146 }
4147 }
4148
4149 /*
4150 * Dump the ACLs if any (remember that all blobs in the group will have
4151 * the same ACL). If there's just one blob, dump a simple ACL entry; if
4152 * there's more, make a "LARGE OBJECTS" entry that really contains only
4153 * the ACL for the first blob. _printTocEntry() will be cued by the tag
4154 * string to emit a mutated version for each blob.
4155 */
4156 if (loinfo->dobj.dump & DUMP_COMPONENT_ACL)
4157 {
4158 char namebuf[32];
4159
4160 /* Build identifying info for the first blob */
4161 snprintf(namebuf, sizeof(namebuf), "%u", loinfo->looids[0]);
4162
4163 if (loinfo->numlos > 1)
4164 {
4165 char tagbuf[64];
4166
4167 snprintf(tagbuf, sizeof(tagbuf), "LARGE OBJECTS %u..%u",
4168 loinfo->looids[0], loinfo->looids[loinfo->numlos - 1]);
4169
4170 dumpACL(fout, loinfo->dobj.dumpId, InvalidDumpId,
4171 "LARGE OBJECT", namebuf, NULL, NULL,
4172 tagbuf, loinfo->rolname, &loinfo->dacl);
4173 }
4174 else
4175 {
4176 dumpACL(fout, loinfo->dobj.dumpId, InvalidDumpId,
4177 "LARGE OBJECT", namebuf, NULL, NULL,
4178 NULL, loinfo->rolname, &loinfo->dacl);
4179 }
4180 }
4181
4183}
4184
4185/*
4186 * dumpLOs:
4187 * dump the data contents of the large objects in the given group
4188 */
4189static int
4190dumpLOs(Archive *fout, const void *arg)
4191{
/* NOTE(review): the declaration of conn (line 4193, presumably
 * GetConnection(fout)) and the PQerrorMessage argument lines of the two
 * pg_fatal calls (4208, 4218) were elided by the extraction. */
4192 const LoInfo *loinfo = (const LoInfo *) arg;
4194 char buf[LOBBUFSIZE];
4195
4196 pg_log_info("saving large objects \"%s\"", loinfo->dobj.name);
4197
4198 for (int i = 0; i < loinfo->numlos; i++)
4199 {
4200 Oid loOid = loinfo->looids[i];
4201 int loFd;
4202 int cnt;
4203
4204 /* Open the LO */
4205 loFd = lo_open(conn, loOid, INV_READ);
4206 if (loFd == -1)
4207 pg_fatal("could not open large object %u: %s",
4209
4210 StartLO(fout, loOid);
4211
4212 /* Now read it in chunks, sending data to archive */
4213 do
4214 {
4215 cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
4216 if (cnt < 0)
4217 pg_fatal("error reading large object %u: %s",
4219
4220 WriteData(fout, buf, cnt);
4221 } while (cnt > 0);
4222
4223 lo_close(conn, loFd);
4224
4225 EndLO(fout, loOid);
4226 }
4227
/* Data-dumper callbacks return 1 on success. */
4228 return 1;
4229}
4230
4231/*
4232 * getPolicies
4233 * get information about all RLS policies on dumpable tables.
4234 */
4235void
/* NOTE(review): the signature line (4236), the tbloids buffer declaration
 * and initialization (4240, 4265, 4270, 4286, 4317), the polinfo array
 * allocation (4357), and the per-row tbinfo/polinfo lookups (4361-4363)
 * were elided by the extraction. */
4237{
4238 DumpOptions *dopt = fout->dopt;
4239 PQExpBuffer query;
4241 PGresult *res;
4243 int i_oid;
4244 int i_tableoid;
4245 int i_polrelid;
4246 int i_polname;
4247 int i_polcmd;
4248 int i_polpermissive;
4249 int i_polroles;
4250 int i_polqual;
4251 int i_polwithcheck;
4252 int i,
4253 j,
4254 ntups;
4255
4256 /* No policies before 9.5 */
4257 if (fout->remoteVersion < 90500)
4258 return;
4259
4260 /* Skip if --no-policies was specified */
4261 if (dopt->no_policies)
4262 return;
4263
4264 query = createPQExpBuffer();
4266
4267 /*
4268 * Identify tables of interest, and check which ones have RLS enabled.
4269 */
4271 for (i = 0; i < numTables; i++)
4272 {
4273 TableInfo *tbinfo = &tblinfo[i];
4274
4275 /* Ignore row security on tables not to be dumped */
4276 if (!(tbinfo->dobj.dump & DUMP_COMPONENT_POLICY))
4277 continue;
4278
4279 /* It can't have RLS or policies if it's not a table */
4280 if (tbinfo->relkind != RELKIND_RELATION &&
4282 continue;
4283
4284 /* Add it to the list of table OIDs to be probed below */
4285 if (tbloids->len > 1) /* do we have more than the '{'? */
4287 appendPQExpBuffer(tbloids, "%u", tbinfo->dobj.catId.oid);
4288
4289 /* Is RLS enabled? (That's separate from whether it has policies) */
4290 if (tbinfo->rowsec)
4291 {
4292 tbinfo->dobj.components |= DUMP_COMPONENT_POLICY;
4293
4294 /*
4295 * We represent RLS being enabled on a table by creating a
4296 * PolicyInfo object with null polname.
4297 *
4298 * Note: use tableoid 0 so that this object won't be mistaken for
4299 * something that pg_depend entries apply to.
4300 */
4302 polinfo->dobj.objType = DO_POLICY;
4303 polinfo->dobj.catId.tableoid = 0;
4304 polinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
4305 AssignDumpId(&polinfo->dobj);
4306 polinfo->dobj.namespace = tbinfo->dobj.namespace;
4307 polinfo->dobj.name = pg_strdup(tbinfo->dobj.name);
4308 polinfo->poltable = tbinfo;
4309 polinfo->polname = NULL;
4310 polinfo->polcmd = '\0';
4311 polinfo->polpermissive = 0;
4312 polinfo->polroles = NULL;
4313 polinfo->polqual = NULL;
4314 polinfo->polwithcheck = NULL;
4315 }
4316 }
4318
4319 /*
4320 * Now, read all RLS policies belonging to the tables of interest, and
4321 * create PolicyInfo objects for them. (Note that we must filter the
4322 * results server-side not locally, because we dare not apply pg_get_expr
4323 * to tables we don't have lock on.)
4324 */
4325 pg_log_info("reading row-level security policies");
4326
4327 printfPQExpBuffer(query,
4328 "SELECT pol.oid, pol.tableoid, pol.polrelid, pol.polname, pol.polcmd, ");
/* polpermissive column was added in v10; fake it as true for older
 * servers (all pre-10 policies were permissive). */
4329 if (fout->remoteVersion >= 100000)
4330 appendPQExpBufferStr(query, "pol.polpermissive, ");
4331 else
4332 appendPQExpBufferStr(query, "'t' as polpermissive, ");
4333 appendPQExpBuffer(query,
4334 "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
4335 " pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
4336 "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
4337 "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
4338 "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
4339 "JOIN pg_catalog.pg_policy pol ON (src.tbloid = pol.polrelid)",
4340 tbloids->data);
4341
4342 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4343
4344 ntups = PQntuples(res);
4345 if (ntups > 0)
4346 {
4347 i_oid = PQfnumber(res, "oid");
4348 i_tableoid = PQfnumber(res, "tableoid");
4349 i_polrelid = PQfnumber(res, "polrelid");
4350 i_polname = PQfnumber(res, "polname");
4351 i_polcmd = PQfnumber(res, "polcmd");
4352 i_polpermissive = PQfnumber(res, "polpermissive");
4353 i_polroles = PQfnumber(res, "polroles");
4354 i_polqual = PQfnumber(res, "polqual");
4355 i_polwithcheck = PQfnumber(res, "polwithcheck");
4356
4358
4359 for (j = 0; j < ntups; j++)
4360 {
4363
4364 tbinfo->dobj.components |= DUMP_COMPONENT_POLICY;
4365
4366 polinfo[j].dobj.objType = DO_POLICY;
4367 polinfo[j].dobj.catId.tableoid =
4368 atooid(PQgetvalue(res, j, i_tableoid));
4369 polinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
4370 AssignDumpId(&polinfo[j].dobj);
4371 polinfo[j].dobj.namespace = tbinfo->dobj.namespace;
4372 polinfo[j].poltable = tbinfo;
4373 polinfo[j].polname = pg_strdup(PQgetvalue(res, j, i_polname));
4374 polinfo[j].dobj.name = pg_strdup(polinfo[j].polname);
4375
4376 polinfo[j].polcmd = *(PQgetvalue(res, j, i_polcmd));
4377 polinfo[j].polpermissive = *(PQgetvalue(res, j, i_polpermissive)) == 't';
4378
/* NULLable columns: keep NULL pointers rather than empty strings so
 * dumpPolicy() can omit the corresponding clauses. */
4379 if (PQgetisnull(res, j, i_polroles))
4380 polinfo[j].polroles = NULL;
4381 else
4382 polinfo[j].polroles = pg_strdup(PQgetvalue(res, j, i_polroles));
4383
4384 if (PQgetisnull(res, j, i_polqual))
4385 polinfo[j].polqual = NULL;
4386 else
4387 polinfo[j].polqual = pg_strdup(PQgetvalue(res, j, i_polqual));
4388
4389 if (PQgetisnull(res, j, i_polwithcheck))
4390 polinfo[j].polwithcheck = NULL;
4391 else
4392 polinfo[j].polwithcheck
4394 }
4395 }
4396
4397 PQclear(res);
4398
4399 destroyPQExpBuffer(query);
4401}
4402
4403/*
4404 * dumpPolicy
4405 * dump the definition of the given policy
4406 */
/*
 * dumpPolicy: emits either an ALTER TABLE ... ENABLE ROW LEVEL SECURITY
 * statement (when polname is NULL) or a full CREATE POLICY statement for
 * the given PolicyInfo, registering the result as an archive entry.
 *
 * NOTE(review): this listing is a partial doxygen extraction; several
 * original lines are elided (the signature/parameter list, the delqry and
 * polprefix buffer declarations/creations, and parts of some calls), so a
 * few statements below appear truncated.  Comments describe only what the
 * visible code shows.
 */
4407 static void
4409 {
4410 DumpOptions *dopt = fout->dopt;
4411 TableInfo *tbinfo = polinfo->poltable;
4412 PQExpBuffer query;
4415 char *qtabname;
4416 const char *cmd;
4417 char *tag;
4418
4419 /* Do nothing if not dumping schema */
4420 if (!dopt->dumpSchema)
4421 return;
4422
4423 /*
4424 * If polname is NULL, then this record is just indicating that ROW LEVEL
4425 * SECURITY is enabled for the table. Dump as ALTER TABLE <table> ENABLE
4426 * ROW LEVEL SECURITY.
4427 */
4428 if (polinfo->polname == NULL)
4429 {
4430 query = createPQExpBuffer();
4431
/* NOTE(review): the table-name argument line of this call is elided here */
4432 appendPQExpBuffer(query, "ALTER TABLE %s ENABLE ROW LEVEL SECURITY;",
4434
4435 /*
4436 * We must emit the ROW SECURITY object's dependency on its table
4437 * explicitly, because it will not match anything in pg_depend (unlike
4438 * the case for other PolicyInfo objects).
4439 */
4440 if (polinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
4441 ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
4442 ARCHIVE_OPTS(.tag = polinfo->dobj.name,
4443 .namespace = polinfo->dobj.namespace->dobj.name,
4444 .owner = tbinfo->rolname,
4445 .description = "ROW SECURITY",
4446 .section = SECTION_POST_DATA,
4447 .createStmt = query->data,
4448 .deps = &(tbinfo->dobj.dumpId),
4449 .nDeps = 1));
4450
4451 destroyPQExpBuffer(query);
4452 return;
4453 }
4454
/* Map the single-character polcmd code to its SQL FOR clause */
4455 if (polinfo->polcmd == '*')
4456 cmd = "";
4457 else if (polinfo->polcmd == 'r')
4458 cmd = " FOR SELECT";
4459 else if (polinfo->polcmd == 'a')
4460 cmd = " FOR INSERT";
4461 else if (polinfo->polcmd == 'w')
4462 cmd = " FOR UPDATE";
4463 else if (polinfo->polcmd == 'd')
4464 cmd = " FOR DELETE";
4465 else
4466 pg_fatal("unexpected policy command type: %c",
4467 polinfo->polcmd);
4468
4469 query = createPQExpBuffer();
4472
4473 qtabname = pg_strdup(fmtId(tbinfo->dobj.name));
4474
4475 appendPQExpBuffer(query, "CREATE POLICY %s", fmtId(polinfo->polname));
4476
4477 appendPQExpBuffer(query, " ON %s%s%s", fmtQualifiedDumpable(tbinfo),
4478 !polinfo->polpermissive ? " AS RESTRICTIVE" : "", cmd);
4479
4480 if (polinfo->polroles != NULL)
4481 appendPQExpBuffer(query, " TO %s", polinfo->polroles);
4482
4483 if (polinfo->polqual != NULL)
4484 appendPQExpBuffer(query, " USING (%s)", polinfo->polqual);
4485
4486 if (polinfo->polwithcheck != NULL)
4487 appendPQExpBuffer(query, " WITH CHECK (%s)", polinfo->polwithcheck);
4488
4489 appendPQExpBufferStr(query, ";\n");
4490
/* NOTE(review): delqry/polprefix are created on lines elided from this view */
4491 appendPQExpBuffer(delqry, "DROP POLICY %s", fmtId(polinfo->polname));
4493
4494 appendPQExpBuffer(polprefix, "POLICY %s ON",
4495 fmtId(polinfo->polname));
4496
4497 tag = psprintf("%s %s", tbinfo->dobj.name, polinfo->dobj.name);
4498
4499 if (polinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
4500 ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
4501 ARCHIVE_OPTS(.tag = tag,
4502 .namespace = polinfo->dobj.namespace->dobj.name,
4503 .owner = tbinfo->rolname,
4504 .description = "POLICY",
4505 .section = SECTION_POST_DATA,
4506 .createStmt = query->data,
4507 .dropStmt = delqry->data));
4508
/* NOTE(review): the dumpComment call's first line is elided from this view */
4509 if (polinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
4511 tbinfo->dobj.namespace->dobj.name, tbinfo->rolname,
4512 polinfo->dobj.catId, 0, polinfo->dobj.dumpId);
4513
4514 free(tag);
4515 destroyPQExpBuffer(query);
4518 free(qtabname);
4519 }
4520
4521/*
4522 * getPublications
4523 * get information about publications
4524 */
/*
 * getPublications: reads pg_publication into an array of PublicationInfo,
 * building the SELECT column list per server version so that columns added
 * in later releases get safe defaults on older servers.
 *
 * NOTE(review): partial doxygen extraction — the parameter list,
 * pubinfo/puballsequences declarations, the pubinfo allocation, and the
 * selectDumpableObject call are on elided lines.
 */
4525 void
4527 {
4528 DumpOptions *dopt = fout->dopt;
4529 PQExpBuffer query;
4530 PGresult *res;
4532 int i_tableoid;
4533 int i_oid;
4534 int i_pubname;
4535 int i_pubowner;
4536 int i_puballtables;
4538 int i_pubinsert;
4539 int i_pubupdate;
4540 int i_pubdelete;
4541 int i_pubtruncate;
4542 int i_pubviaroot;
4543 int i_pubgencols;
4544 int i,
4545 ntups;
4546
/* Publications exist only since PG 10; honor --no-publications */
4547 if (dopt->no_publications || fout->remoteVersion < 100000)
4548 return;
4549
4550 query = createPQExpBuffer();
4551
4552 /* Get the publications. */
4553 appendPQExpBufferStr(query, "SELECT p.tableoid, p.oid, p.pubname, "
4554 "p.pubowner, p.puballtables, p.pubinsert, "
4555 "p.pubupdate, p.pubdelete, ");
4556
/* Columns added in later versions get literal defaults on older servers */
4557 if (fout->remoteVersion >= 110000)
4558 appendPQExpBufferStr(query, "p.pubtruncate, ");
4559 else
4560 appendPQExpBufferStr(query, "false AS pubtruncate, ");
4561
4562 if (fout->remoteVersion >= 130000)
4563 appendPQExpBufferStr(query, "p.pubviaroot, ");
4564 else
4565 appendPQExpBufferStr(query, "false AS pubviaroot, ");
4566
4567 if (fout->remoteVersion >= 180000)
4568 appendPQExpBufferStr(query, "p.pubgencols, ");
4569 else
4570 appendPQExpBuffer(query, "'%c' AS pubgencols, ", PUBLISH_GENCOLS_NONE);
4571
4572 if (fout->remoteVersion >= 190000)
4573 appendPQExpBufferStr(query, "p.puballsequences ");
4574 else
4575 appendPQExpBufferStr(query, "false AS puballsequences ");
4576
4577 appendPQExpBufferStr(query, "FROM pg_publication p");
4578
4579 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4580
4581 ntups = PQntuples(res);
4582
4583 if (ntups == 0)
4584 goto cleanup;
4585
4586 i_tableoid = PQfnumber(res, "tableoid");
4587 i_oid = PQfnumber(res, "oid");
4588 i_pubname = PQfnumber(res, "pubname");
4589 i_pubowner = PQfnumber(res, "pubowner");
4590 i_puballtables = PQfnumber(res, "puballtables");
4591 i_puballsequences = PQfnumber(res, "puballsequences");
4592 i_pubinsert = PQfnumber(res, "pubinsert");
4593 i_pubupdate = PQfnumber(res, "pubupdate");
4594 i_pubdelete = PQfnumber(res, "pubdelete");
4595 i_pubtruncate = PQfnumber(res, "pubtruncate");
4596 i_pubviaroot = PQfnumber(res, "pubviaroot");
4597 i_pubgencols = PQfnumber(res, "pubgencols");
4598
/* NOTE(review): the pubinfo array allocation line is elided from this view */
4600
4601 for (i = 0; i < ntups; i++)
4602 {
4603 pubinfo[i].dobj.objType = DO_PUBLICATION;
4604 pubinfo[i].dobj.catId.tableoid =
4605 atooid(PQgetvalue(res, i, i_tableoid));
4606 pubinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4607 AssignDumpId(&pubinfo[i].dobj);
4608 pubinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_pubname));
4609 pubinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_pubowner));
4610 pubinfo[i].puballtables =
4611 (strcmp(PQgetvalue(res, i, i_puballtables), "t") == 0);
4612 pubinfo[i].puballsequences =
4613 (strcmp(PQgetvalue(res, i, i_puballsequences), "t") == 0);
4614 pubinfo[i].pubinsert =
4615 (strcmp(PQgetvalue(res, i, i_pubinsert), "t") == 0);
4616 pubinfo[i].pubupdate =
4617 (strcmp(PQgetvalue(res, i, i_pubupdate), "t") == 0);
4618 pubinfo[i].pubdelete =
4619 (strcmp(PQgetvalue(res, i, i_pubdelete), "t") == 0);
4620 pubinfo[i].pubtruncate =
4621 (strcmp(PQgetvalue(res, i, i_pubtruncate), "t") == 0);
4622 pubinfo[i].pubviaroot =
4623 (strcmp(PQgetvalue(res, i, i_pubviaroot), "t") == 0);
4624 pubinfo[i].pubgencols_type =
4625 *(PQgetvalue(res, i, i_pubgencols));
4626
4627 /* Decide whether we want to dump it */
4629 }
4630
4631cleanup:
4632 PQclear(res);
4633
4634 destroyPQExpBuffer(query);
4635 }
4636
4637/*
4638 * dumpPublication
4639 * dump the definition of the given publication
4640 */
/*
 * dumpPublication: builds the CREATE PUBLICATION statement (FOR ALL
 * TABLES/SEQUENCES clause, publish = '...' option list, version-specific
 * options) plus the matching DROP, and registers the archive entry,
 * comment, and security label.
 *
 * NOTE(review): partial doxygen extraction — the signature, the pubinfo
 * and delq declarations, the delq createPQExpBuffer() call, and the final
 * destroyPQExpBuffer(delq) are on elided lines.
 */
4641 static void
4643 {
4644 DumpOptions *dopt = fout->dopt;
4646 PQExpBuffer query;
4647 char *qpubname;
4648 bool first = true;
4649
4650 /* Do nothing if not dumping schema */
4651 if (!dopt->dumpSchema)
4652 return;
4653
4655 query = createPQExpBuffer();
4656
4657 qpubname = pg_strdup(fmtId(pubinfo->dobj.name));
4658
4659 appendPQExpBuffer(delq, "DROP PUBLICATION %s;\n",
4660 qpubname);
4661
4662 appendPQExpBuffer(query, "CREATE PUBLICATION %s",
4663 qpubname);
4664
4665 if (pubinfo->puballtables && pubinfo->puballsequences)
4666 appendPQExpBufferStr(query, " FOR ALL TABLES, ALL SEQUENCES");
4667 else if (pubinfo->puballtables)
4668 appendPQExpBufferStr(query, " FOR ALL TABLES");
4669 else if (pubinfo->puballsequences)
4670 appendPQExpBufferStr(query, " FOR ALL SEQUENCES");
4671
/* Build the comma-separated publish = '...' action list; 'first' tracks
 * whether a separator is needed before the next action keyword. */
4672 appendPQExpBufferStr(query, " WITH (publish = '");
4673 if (pubinfo->pubinsert)
4674 {
4675 appendPQExpBufferStr(query, "insert");
4676 first = false;
4677 }
4678
4679 if (pubinfo->pubupdate)
4680 {
4681 if (!first)
4682 appendPQExpBufferStr(query, ", ");
4683
4684 appendPQExpBufferStr(query, "update");
4685 first = false;
4686 }
4687
4688 if (pubinfo->pubdelete)
4689 {
4690 if (!first)
4691 appendPQExpBufferStr(query, ", ");
4692
4693 appendPQExpBufferStr(query, "delete");
4694 first = false;
4695 }
4696
4697 if (pubinfo->pubtruncate)
4698 {
4699 if (!first)
4700 appendPQExpBufferStr(query, ", ");
4701
4702 appendPQExpBufferStr(query, "truncate");
4703 first = false;
4704 }
4705
4706 appendPQExpBufferChar(query, '\'');
4707
4708 if (pubinfo->pubviaroot)
4709 appendPQExpBufferStr(query, ", publish_via_partition_root = true");
4710
4711 if (pubinfo->pubgencols_type == PUBLISH_GENCOLS_STORED)
4712 appendPQExpBufferStr(query, ", publish_generated_columns = stored");
4713
4714 appendPQExpBufferStr(query, ");\n");
4715
4716 if (pubinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
4717 ArchiveEntry(fout, pubinfo->dobj.catId, pubinfo->dobj.dumpId,
4718 ARCHIVE_OPTS(.tag = pubinfo->dobj.name,
4719 .owner = pubinfo->rolname,
4720 .description = "PUBLICATION",
4721 .section = SECTION_POST_DATA,
4722 .createStmt = query->data,
4723 .dropStmt = delq->data));
4724
4725 if (pubinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
4726 dumpComment(fout, "PUBLICATION", qpubname,
4727 NULL, pubinfo->rolname,
4728 pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
4729
4730 if (pubinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
4731 dumpSecLabel(fout, "PUBLICATION", qpubname,
4732 NULL, pubinfo->rolname,
4733 pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
4734
4736 destroyPQExpBuffer(query);
4737 free(qpubname);
4738 }
4739
4740/*
4741 * getPublicationNamespaces
4742 * get information about publication membership for dumpable schemas.
4743 */
/*
 * getPublicationNamespaces: reads pg_publication_namespace (PG 15+) and
 * creates a DumpableObject for each publication/schema pair whose
 * publication and schema are both of interest.
 *
 * NOTE(review): partial doxygen extraction — the parameter list, the
 * pubsinfo allocation, the pubinfo/nspinfo lookups inside the loop, and
 * the selectDumpableObject call are on elided lines.
 */
4744 void
4746 {
4747 PQExpBuffer query;
4748 PGresult *res;
4750 DumpOptions *dopt = fout->dopt;
4751 int i_tableoid;
4752 int i_oid;
4753 int i_pnpubid;
4754 int i_pnnspid;
4755 int i,
4756 j,
4757 ntups;
4758
/* pg_publication_namespace exists only since PG 15 */
4759 if (dopt->no_publications || fout->remoteVersion < 150000)
4760 return;
4761
4762 query = createPQExpBuffer();
4763
4764 /* Collect all publication membership info. */
4766 "SELECT tableoid, oid, pnpubid, pnnspid "
4767 "FROM pg_catalog.pg_publication_namespace");
4768 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4769
4770 ntups = PQntuples(res);
4771
4772 i_tableoid = PQfnumber(res, "tableoid");
4773 i_oid = PQfnumber(res, "oid");
4774 i_pnpubid = PQfnumber(res, "pnpubid");
4775 i_pnnspid = PQfnumber(res, "pnnspid");
4776
4777 /* this allocation may be more than we need */
4779 j = 0;
4780
4781 for (i = 0; i < ntups; i++)
4782 {
4787
4788 /*
4789 * Ignore any entries for which we aren't interested in either the
4790 * publication or the rel.
4791 */
4793 if (pubinfo == NULL)
4794 continue;
4796 if (nspinfo == NULL)
4797 continue;
4798
4799 /* OK, make a DumpableObject for this relationship */
4801 pubsinfo[j].dobj.catId.tableoid =
4802 atooid(PQgetvalue(res, i, i_tableoid));
4803 pubsinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4804 AssignDumpId(&pubsinfo[j].dobj);
4805 pubsinfo[j].dobj.namespace = nspinfo->dobj.namespace;
4806 pubsinfo[j].dobj.name = nspinfo->dobj.name;
4807 pubsinfo[j].publication = pubinfo;
4808 pubsinfo[j].pubschema = nspinfo;
4809
4810 /* Decide whether we want to dump it */
4812
4813 j++;
4814 }
4815
4816 PQclear(res);
4817 destroyPQExpBuffer(query);
4818 }
4819
4820/*
4821 * getPublicationTables
4822 * get information about publication membership for dumpable tables.
4823 */
/*
 * getPublicationTables: reads pg_publication_rel and creates a
 * DumpableObject per publication/table pair, including the optional row
 * filter (prqual, rendered via pg_get_expr) and column list (prattrs,
 * resolved to attribute names) on PG 15+.
 *
 * NOTE(review): partial doxygen extraction — the parameter list, the
 * pubrinfo allocation, the pubinfo/tbinfo lookups, the attribs buffer
 * creation, and the selectDumpableObject call are on elided lines.
 */
4824 void
4826 {
4827 PQExpBuffer query;
4828 PGresult *res;
4830 DumpOptions *dopt = fout->dopt;
4831 int i_tableoid;
4832 int i_oid;
4833 int i_prpubid;
4834 int i_prrelid;
4835 int i_prrelqual;
4836 int i_prattrs;
4837 int i,
4838 j,
4839 ntups;
4840
4841 if (dopt->no_publications || fout->remoteVersion < 100000)
4842 return;
4843
4844 query = createPQExpBuffer();
4845
4846 /* Collect all publication membership info. */
4847 if (fout->remoteVersion >= 150000)
4849 "SELECT tableoid, oid, prpubid, prrelid, "
4850 "pg_catalog.pg_get_expr(prqual, prrelid) AS prrelqual, "
4851 "(CASE\n"
4852 " WHEN pr.prattrs IS NOT NULL THEN\n"
4853 " (SELECT array_agg(attname)\n"
4854 " FROM\n"
4855 " pg_catalog.generate_series(0, pg_catalog.array_upper(pr.prattrs::pg_catalog.int2[], 1)) s,\n"
4856 " pg_catalog.pg_attribute\n"
4857 " WHERE attrelid = pr.prrelid AND attnum = prattrs[s])\n"
4858 " ELSE NULL END) prattrs "
4859 "FROM pg_catalog.pg_publication_rel pr");
4860 else
4862 "SELECT tableoid, oid, prpubid, prrelid, "
4863 "NULL AS prrelqual, NULL AS prattrs "
4864 "FROM pg_catalog.pg_publication_rel");
4865 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4866
4867 ntups = PQntuples(res);
4868
4869 i_tableoid = PQfnumber(res, "tableoid");
4870 i_oid = PQfnumber(res, "oid");
4871 i_prpubid = PQfnumber(res, "prpubid");
4872 i_prrelid = PQfnumber(res, "prrelid");
4873 i_prrelqual = PQfnumber(res, "prrelqual");
4874 i_prattrs = PQfnumber(res, "prattrs");
4875
4876 /* this allocation may be more than we need */
4878 j = 0;
4879
4880 for (i = 0; i < ntups; i++)
4881 {
4886
4887 /*
4888 * Ignore any entries for which we aren't interested in either the
4889 * publication or the rel.
4890 */
4892 if (pubinfo == NULL)
4893 continue;
4895 if (tbinfo == NULL)
4896 continue;
4897
4898 /* OK, make a DumpableObject for this relationship */
4899 pubrinfo[j].dobj.objType = DO_PUBLICATION_REL;
4900 pubrinfo[j].dobj.catId.tableoid =
4901 atooid(PQgetvalue(res, i, i_tableoid));
4902 pubrinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4903 AssignDumpId(&pubrinfo[j].dobj);
4904 pubrinfo[j].dobj.namespace = tbinfo->dobj.namespace;
4905 pubrinfo[j].dobj.name = tbinfo->dobj.name;
4906 pubrinfo[j].publication = pubinfo;
4907 pubrinfo[j].pubtable = tbinfo;
4908 if (PQgetisnull(res, i, i_prrelqual))
4909 pubrinfo[j].pubrelqual = NULL;
4910 else
4911 pubrinfo[j].pubrelqual = pg_strdup(PQgetvalue(res, i, i_prrelqual));
4912
/* Convert the prattrs name array into a quoted, comma-separated list */
4913 if (!PQgetisnull(res, i, i_prattrs))
4914 {
4915 char **attnames;
4916 int nattnames;
4918
4919 if (!parsePGArray(PQgetvalue(res, i, i_prattrs),
4920 &attnames, &nattnames))
4921 pg_fatal("could not parse %s array", "prattrs");
4923 for (int k = 0; k < nattnames; k++)
4924 {
4925 if (k > 0)
4927
4928 appendPQExpBufferStr(attribs, fmtId(attnames[k]));
4929 }
4930 pubrinfo[j].pubrattrs = attribs->data;
4931 free(attribs); /* but not attribs->data */
4932 free(attnames);
4933 }
4934 else
4935 pubrinfo[j].pubrattrs = NULL;
4936
4937 /* Decide whether we want to dump it */
4939
4940 j++;
4941 }
4942
4943 PQclear(res);
4944 destroyPQExpBuffer(query);
4945 }
4946
4947/*
4948 * dumpPublicationNamespace
4949 * dump the definition of the given publication schema mapping.
4950 */
/*
 * dumpPublicationNamespace: emits ALTER PUBLICATION ... ADD TABLES IN
 * SCHEMA for one publication/schema mapping.  No drop statement is
 * generated: dropping the schema removes the membership implicitly.
 *
 * NOTE(review): partial doxygen extraction — the signature/parameter list
 * line is elided from this view.
 */
4951 static void
4953 {
4954 DumpOptions *dopt = fout->dopt;
4955 NamespaceInfo *schemainfo = pubsinfo->pubschema;
4956 PublicationInfo *pubinfo = pubsinfo->publication;
4957 PQExpBuffer query;
4958 char *tag;
4959
4960 /* Do nothing if not dumping schema */
4961 if (!dopt->dumpSchema)
4962 return;
4963
4964 tag = psprintf("%s %s", pubinfo->dobj.name, schemainfo->dobj.name);
4965
4966 query = createPQExpBuffer();
4967
/* Two separate appends: fmtId() returns a static buffer, so its result
 * must be consumed before the next fmtId() call. */
4968 appendPQExpBuffer(query, "ALTER PUBLICATION %s ", fmtId(pubinfo->dobj.name));
4969 appendPQExpBuffer(query, "ADD TABLES IN SCHEMA %s;\n", fmtId(schemainfo->dobj.name));
4970
4971 /*
4972 * There is no point in creating drop query as the drop is done by schema
4973 * drop.
4974 */
4975 if (pubsinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
4976 ArchiveEntry(fout, pubsinfo->dobj.catId, pubsinfo->dobj.dumpId,
4977 ARCHIVE_OPTS(.tag = tag,
4978 .namespace = schemainfo->dobj.name,
4979 .owner = pubinfo->rolname,
4980 .description = "PUBLICATION TABLES IN SCHEMA",
4981 .section = SECTION_POST_DATA,
4982 .createStmt = query->data));
4983
4984 /* These objects can't currently have comments or seclabels */
4985
4986 free(tag);
4987 destroyPQExpBuffer(query);
4988 }
4989
4990/*
4991 * dumpPublicationTable
4992 * dump the definition of the given publication table mapping
4993 */
/*
 * dumpPublicationTable: emits ALTER PUBLICATION ... ADD TABLE ONLY for one
 * publication/table mapping, including the optional column list and row
 * filter.  No drop statement: dropping the table removes the membership.
 *
 * NOTE(review): partial doxygen extraction — the signature line and the
 * table-name argument line of one append call are elided from this view.
 */
4994 static void
4996 {
4997 DumpOptions *dopt = fout->dopt;
4998 PublicationInfo *pubinfo = pubrinfo->publication;
4999 TableInfo *tbinfo = pubrinfo->pubtable;
5000 PQExpBuffer query;
5001 char *tag;
5002
5003 /* Do nothing if not dumping schema */
5004 if (!dopt->dumpSchema)
5005 return;
5006
5007 tag = psprintf("%s %s", pubinfo->dobj.name, tbinfo->dobj.name);
5008
5009 query = createPQExpBuffer();
5010
5011 appendPQExpBuffer(query, "ALTER PUBLICATION %s ADD TABLE ONLY",
5012 fmtId(pubinfo->dobj.name));
5013 appendPQExpBuffer(query, " %s",
5015
5016 if (pubrinfo->pubrattrs)
5017 appendPQExpBuffer(query, " (%s)", pubrinfo->pubrattrs);
5018
5019 if (pubrinfo->pubrelqual)
5020 {
5021 /*
5022 * It's necessary to add parentheses around the expression because
5023 * pg_get_expr won't supply the parentheses for things like WHERE
5024 * TRUE.
5025 */
5026 appendPQExpBuffer(query, " WHERE (%s)", pubrinfo->pubrelqual);
5027 }
5028 appendPQExpBufferStr(query, ";\n");
5029
5030 /*
5031 * There is no point in creating a drop query as the drop is done by table
5032 * drop. (If you think to change this, see also _printTocEntry().)
5033 * Although this object doesn't really have ownership as such, set the
5034 * owner field anyway to ensure that the command is run by the correct
5035 * role at restore time.
5036 */
5037 if (pubrinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
5038 ArchiveEntry(fout, pubrinfo->dobj.catId, pubrinfo->dobj.dumpId,
5039 ARCHIVE_OPTS(.tag = tag,
5040 .namespace = tbinfo->dobj.namespace->dobj.name,
5041 .owner = pubinfo->rolname,
5042 .description = "PUBLICATION TABLE",
5043 .section = SECTION_POST_DATA,
5044 .createStmt = query->data));
5045
5046 /* These objects can't currently have comments or seclabels */
5047
5048 free(tag);
5049 destroyPQExpBuffer(query);
5050 }
5051
5052/*
5053 * Is the currently connected user a superuser?
5054 */
/*
 * is_superuser: returns true iff the connected role is a superuser,
 * judged from the server-reported "is_superuser" parameter status.
 *
 * NOTE(review): partial doxygen extraction — the signature line and the
 * declaration of AH are elided from this view.
 */
5055 static bool
5057 {
5059 const char *val;
5060
/* PQparameterStatus may return NULL if the parameter was never reported */
5061 val = PQparameterStatus(AH->connection, "is_superuser");
5062
5063 if (val && strcmp(val, "on") == 0)
5064 return true;
5065
5066 return false;
5067 }
5068
5069/*
5070 * Set the given value to restrict_nonsystem_relation_kind value. Since
5071 * restrict_nonsystem_relation_kind is introduced in minor version releases,
5072 * the setting query is effective only where available.
5073 */
/*
 * set_restrict_relation_kind: sets restrict_nonsystem_relation_kind to the
 * given value via set_config().  The WHERE clause against pg_settings makes
 * the statement a no-op on servers that lack the GUC (it was added in minor
 * releases), instead of erroring out.
 *
 * NOTE(review): partial doxygen extraction — the signature line and the
 * query buffer creation are elided from this view.
 */
5074 static void
5076 {
5078 PGresult *res;
5079
5080 appendPQExpBuffer(query,
5081 "SELECT set_config(name, '%s', false) "
5082 "FROM pg_settings "
5083 "WHERE name = 'restrict_nonsystem_relation_kind'",
5084 value);
5085 res = ExecuteSqlQuery(AH, query->data, PGRES_TUPLES_OK);
5086
5087 PQclear(res);
5088 destroyPQExpBuffer(query);
5089 }
5090
5091/*
5092 * getSubscriptions
5093 * get information about subscriptions
5094 */
/*
 * getSubscriptions: reads pg_subscription for the current database into an
 * array of SubscriptionInfo.  Requires superuser; otherwise it only warns
 * (if any subscriptions exist) and returns.  The SELECT list is built per
 * server version, substituting literal defaults for columns that the
 * connected server predates.  subskiplsn is deliberately not dumped.
 *
 * NOTE(review): partial doxygen extraction — the parameter list, several
 * i_* index declarations, and a number of append/assignment continuation
 * lines are elided, so some statements below appear truncated.
 */
5095 void
5097 {
5098 DumpOptions *dopt = fout->dopt;
5099 PQExpBuffer query;
5100 PGresult *res;
5101 SubscriptionInfo *subinfo;
5102 int i_tableoid;
5103 int i_oid;
5104 int i_subname;
5105 int i_subowner;
5106 int i_subbinary;
5107 int i_substream;
5111 int i_subrunasowner;
5112 int i_subconninfo;
5113 int i_subslotname;
5114 int i_subsynccommit;
5117 int i_suborigin;
5119 int i_subenabled;
5120 int i_subfailover;
5123 int i,
5124 ntups;
5125
5126 if (dopt->no_subscriptions || fout->remoteVersion < 100000)
5127 return;
5128
/* Non-superusers cannot read subscription connection info; warn and bail */
5129 if (!is_superuser(fout))
5130 {
5131 int n;
5132
5133 res = ExecuteSqlQuery(fout,
5134 "SELECT count(*) FROM pg_subscription "
5135 "WHERE subdbid = (SELECT oid FROM pg_database"
5136 " WHERE datname = current_database())",
5138 n = atoi(PQgetvalue(res, 0, 0));
5139 if (n > 0)
5140 pg_log_warning("subscriptions not dumped because current user is not a superuser");
5141 PQclear(res);
5142 return;
5143 }
5144
5145 query = createPQExpBuffer();
5146
5147 /* Get the subscriptions in current database. */
5149 "SELECT s.tableoid, s.oid, s.subname,\n"
5150 " s.subowner,\n"
5151 " s.subconninfo, s.subslotname, s.subsynccommit,\n"
5152 " s.subpublications,\n");
5153
/* Version-dependent columns get literal defaults on older servers */
5154 if (fout->remoteVersion >= 140000)
5155 appendPQExpBufferStr(query, " s.subbinary,\n");
5156 else
5157 appendPQExpBufferStr(query, " false AS subbinary,\n");
5158
5159 if (fout->remoteVersion >= 140000)
5160 appendPQExpBufferStr(query, " s.substream,\n");
5161 else
5162 appendPQExpBufferStr(query, " 'f' AS substream,\n");
5163
5164 if (fout->remoteVersion >= 150000)
5166 " s.subtwophasestate,\n"
5167 " s.subdisableonerr,\n");
5168 else
5169 appendPQExpBuffer(query,
5170 " '%c' AS subtwophasestate,\n"
5171 " false AS subdisableonerr,\n",
5173
5174 if (fout->remoteVersion >= 160000)
5176 " s.subpasswordrequired,\n"
5177 " s.subrunasowner,\n"
5178 " s.suborigin,\n");
5179 else
5180 appendPQExpBuffer(query,
5181 " 't' AS subpasswordrequired,\n"
5182 " 't' AS subrunasowner,\n"
5183 " '%s' AS suborigin,\n",
5185
/* remote_lsn/subenabled are only needed to resume replication after
 * a binary upgrade */
5186 if (dopt->binary_upgrade && fout->remoteVersion >= 170000)
5187 appendPQExpBufferStr(query, " o.remote_lsn AS suboriginremotelsn,\n"
5188 " s.subenabled,\n");
5189 else
5190 appendPQExpBufferStr(query, " NULL AS suboriginremotelsn,\n"
5191 " false AS subenabled,\n");
5192
5193 if (fout->remoteVersion >= 170000)
5195 " s.subfailover,\n");
5196 else
5198 " false AS subfailover,\n");
5199
5200 if (fout->remoteVersion >= 190000)
5202 " s.subretaindeadtuples,\n");
5203 else
5205 " false AS subretaindeadtuples,\n");
5206
5207 if (fout->remoteVersion >= 190000)
5209 " s.submaxretention,\n");
5210 else
5211 appendPQExpBuffer(query,
5212 " 0 AS submaxretention,\n");
5213
5214 if (fout->remoteVersion >= 190000)
5216 " s.subwalrcvtimeout\n");
5217 else
5219 " '-1' AS subwalrcvtimeout\n");
5220
5222 "FROM pg_subscription s\n");
5223
5224 if (dopt->binary_upgrade && fout->remoteVersion >= 170000)
5226 "LEFT JOIN pg_catalog.pg_replication_origin_status o \n"
5227 " ON o.external_id = 'pg_' || s.oid::text \n");
5228
5230 "WHERE s.subdbid = (SELECT oid FROM pg_database\n"
5231 " WHERE datname = current_database())");
5232
5233 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5234
5235 ntups = PQntuples(res);
5236
5237 /*
5238 * Get subscription fields. We don't include subskiplsn in the dump as
5239 * after restoring the dump this value may no longer be relevant.
5240 */
5241 i_tableoid = PQfnumber(res, "tableoid");
5242 i_oid = PQfnumber(res, "oid");
5243 i_subname = PQfnumber(res, "subname");
5244 i_subowner = PQfnumber(res, "subowner");
5245 i_subenabled = PQfnumber(res, "subenabled");
5246 i_subbinary = PQfnumber(res, "subbinary");
5247 i_substream = PQfnumber(res, "substream");
5248 i_subtwophasestate = PQfnumber(res, "subtwophasestate");
5249 i_subdisableonerr = PQfnumber(res, "subdisableonerr");
5250 i_subpasswordrequired = PQfnumber(res, "subpasswordrequired");
5251 i_subrunasowner = PQfnumber(res, "subrunasowner");
5252 i_subfailover = PQfnumber(res, "subfailover");
5253 i_subretaindeadtuples = PQfnumber(res, "subretaindeadtuples");
5254 i_submaxretention = PQfnumber(res, "submaxretention");
5255 i_subconninfo = PQfnumber(res, "subconninfo");
5256 i_subslotname = PQfnumber(res, "subslotname");
5257 i_subsynccommit = PQfnumber(res, "subsynccommit");
5258 i_subwalrcvtimeout = PQfnumber(res, "subwalrcvtimeout");
5259 i_subpublications = PQfnumber(res, "subpublications");
5260 i_suborigin = PQfnumber(res, "suborigin");
5261 i_suboriginremotelsn = PQfnumber(res, "suboriginremotelsn");
5262
5263 subinfo = pg_malloc_array(SubscriptionInfo, ntups);
5264
5265 for (i = 0; i < ntups; i++)
5266 {
5267 subinfo[i].dobj.objType = DO_SUBSCRIPTION;
5268 subinfo[i].dobj.catId.tableoid =
5269 atooid(PQgetvalue(res, i, i_tableoid));
5270 subinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5271 AssignDumpId(&subinfo[i].dobj);
5272 subinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_subname));
5273 subinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_subowner));
5274
5275 subinfo[i].subenabled =
5276 (strcmp(PQgetvalue(res, i, i_subenabled), "t") == 0);
5277 subinfo[i].subbinary =
5278 (strcmp(PQgetvalue(res, i, i_subbinary), "t") == 0);
5279 subinfo[i].substream = *(PQgetvalue(res, i, i_substream));
5280 subinfo[i].subtwophasestate = *(PQgetvalue(res, i, i_subtwophasestate));
5281 subinfo[i].subdisableonerr =
5282 (strcmp(PQgetvalue(res, i, i_subdisableonerr), "t") == 0);
5283 subinfo[i].subpasswordrequired =
5284 (strcmp(PQgetvalue(res, i, i_subpasswordrequired), "t") == 0);
5285 subinfo[i].subrunasowner =
5286 (strcmp(PQgetvalue(res, i, i_subrunasowner), "t") == 0);
5287 subinfo[i].subfailover =
5288 (strcmp(PQgetvalue(res, i, i_subfailover), "t") == 0);
5289 subinfo[i].subretaindeadtuples =
5290 (strcmp(PQgetvalue(res, i, i_subretaindeadtuples), "t") == 0);
/* NOTE(review): the right-hand sides of the next several assignments are
 * on lines elided from this extraction. */
5291 subinfo[i].submaxretention =
5293 subinfo[i].subconninfo =
5295 if (PQgetisnull(res, i, i_subslotname))
5296 subinfo[i].subslotname = NULL;
5297 else
5298 subinfo[i].subslotname =
5300 subinfo[i].subsynccommit =
5302 subinfo[i].subwalrcvtimeout =
5304 subinfo[i].subpublications =
5306 subinfo[i].suborigin = pg_strdup(PQgetvalue(res, i, i_suborigin));
5308 subinfo[i].suboriginremotelsn = NULL;
5309 else
5310 subinfo[i].suboriginremotelsn =
5312
5313 /* Decide whether we want to dump it */
5314 selectDumpableObject(&(subinfo[i].dobj), fout);
5315 }
5316 PQclear(res);
5317
5318 destroyPQExpBuffer(query);
5319 }
5320
5321/*
5322 * getSubscriptionRelations
5323 * Get information about subscription membership for dumpable relations. This
5324 * will be used only in binary-upgrade mode for PG17 or later versions.
5325 */
/*
 * getSubscriptionRelations: reads pg_subscription_rel (ordered by srsubid)
 * and creates a DumpableObject per subscription/relation pair.  Only runs
 * in binary-upgrade mode against PG 17+ servers.
 *
 * NOTE(review): partial doxygen extraction — the parameter list, the
 * subrinfo allocation, the cur_srsubid tracking variables, and the
 * subscription-lookup lines inside the loop are elided from this view.
 */
5326 void
5328 {
5329 DumpOptions *dopt = fout->dopt;
5330 SubscriptionInfo *subinfo = NULL;
5332 PGresult *res;
5333 int i_srsubid;
5334 int i_srrelid;
5335 int i_srsubstate;
5336 int i_srsublsn;
5337 int ntups;
5339
5340 if (dopt->no_subscriptions || !dopt->binary_upgrade ||
5341 fout->remoteVersion < 170000)
5342 return;
5343
/* ORDER BY srsubid lets the loop reuse the subscription lookup while
 * consecutive rows belong to the same subscription */
5344 res = ExecuteSqlQuery(fout,
5345 "SELECT srsubid, srrelid, srsubstate, srsublsn "
5346 "FROM pg_catalog.pg_subscription_rel "
5347 "ORDER BY srsubid",
5349 ntups = PQntuples(res);
5350 if (ntups == 0)
5351 goto cleanup;
5352
5353 /* Get pg_subscription_rel attributes */
5354 i_srsubid = PQfnumber(res, "srsubid");
5355 i_srrelid = PQfnumber(res, "srrelid");
5356 i_srsubstate = PQfnumber(res, "srsubstate");
5357 i_srsublsn = PQfnumber(res, "srsublsn");
5358
5360 for (int i = 0; i < ntups; i++)
5361 {
5363 Oid relid = atooid(PQgetvalue(res, i, i_srrelid));
5364 TableInfo *tblinfo;
5365
5366 /*
5367 * If we switched to a new subscription, check if the subscription
5368 * exists.
5369 */
5371 {
5373 if (subinfo == NULL)
5374 pg_fatal("subscription with OID %u does not exist", cur_srsubid);
5377 }
5378
5379 tblinfo = findTableByOid(relid);
5380 if (tblinfo == NULL)
5381 pg_fatal("failed sanity check, relation with OID %u not found",
5382 relid);
5383
5384 /* OK, make a DumpableObject for this relationship */
5385 subrinfo[i].dobj.objType = DO_SUBSCRIPTION_REL;
5386 subrinfo[i].dobj.catId.tableoid = relid;
5387 subrinfo[i].dobj.catId.oid = cur_srsubid;
5388 AssignDumpId(&subrinfo[i].dobj);
5389 subrinfo[i].dobj.namespace = tblinfo->dobj.namespace;
5390 subrinfo[i].dobj.name = tblinfo->dobj.name;
5391 subrinfo[i].subinfo = subinfo;
5392 subrinfo[i].tblinfo = tblinfo;
5393 subrinfo[i].srsubstate = PQgetvalue(res, i, i_srsubstate)[0];
5394 if (PQgetisnull(res, i, i_srsublsn))
5395 subrinfo[i].srsublsn = NULL;
5396 else
5397 subrinfo[i].srsublsn = pg_strdup(PQgetvalue(res, i, i_srsublsn));
5398
5399 /* Decide whether we want to dump it */
5401 }
5402
5403cleanup:
5404 PQclear(res);
5405 }
5406
5407/*
5408 * dumpSubscriptionTable
5409 * Dump the definition of the given subscription table mapping. This will be
5410 * used only in binary-upgrade mode for PG17 or later versions.
5411 */
/*
 * dumpSubscriptionTable: emits a call to
 * pg_catalog.binary_upgrade_add_sub_rel_state() that re-creates one
 * pg_subscription_rel row (subscription name, relation OID, sync state,
 * and optional LSN) at restore time.  Binary-upgrade only, PG 17+.
 *
 * NOTE(review): partial doxygen extraction — the signature line and an
 * Assert (or similar) before the tag assignment are elided from this view.
 */
5412 static void
5414 {
5415 DumpOptions *dopt = fout->dopt;
5416 SubscriptionInfo *subinfo = subrinfo->subinfo;
5417 PQExpBuffer query;
5418 char *tag;
5419
5420 /* Do nothing if not dumping schema */
5421 if (!dopt->dumpSchema)
5422 return;
5423
5425
5426 tag = psprintf("%s %s", subinfo->dobj.name, subrinfo->tblinfo->dobj.name);
5427
5428 query = createPQExpBuffer();
5429
5430 if (subinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
5431 {
5432 /*
5433 * binary_upgrade_add_sub_rel_state will add the subscription relation
5434 * to pg_subscription_rel table. This will be used only in
5435 * binary-upgrade mode.
5436 */
5438 "\n-- For binary upgrade, must preserve the subscriber table.\n");
5440 "SELECT pg_catalog.binary_upgrade_add_sub_rel_state(");
5441 appendStringLiteralAH(query, subinfo->dobj.name, fout);
5442 appendPQExpBuffer(query,
5443 ", %u, '%c'",
5444 subrinfo->tblinfo->dobj.catId.oid,
5445 subrinfo->srsubstate);
5446
/* srsublsn may be empty/NULL; emit SQL NULL rather than an empty literal */
5447 if (subrinfo->srsublsn && subrinfo->srsublsn[0] != '\0')
5448 appendPQExpBuffer(query, ", '%s'", subrinfo->srsublsn);
5449 else
5450 appendPQExpBufferStr(query, ", NULL");
5451
5452 appendPQExpBufferStr(query, ");\n");
5453 }
5454
5455 /*
5456 * There is no point in creating a drop query as the drop is done by table
5457 * drop. (If you think to change this, see also _printTocEntry().)
5458 * Although this object doesn't really have ownership as such, set the
5459 * owner field anyway to ensure that the command is run by the correct
5460 * role at restore time.
5461 */
5462 if (subrinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
5463 ArchiveEntry(fout, subrinfo->dobj.catId, subrinfo->dobj.dumpId,
5464 ARCHIVE_OPTS(.tag = tag,
5465 .namespace = subrinfo->tblinfo->dobj.namespace->dobj.name,
5466 .owner = subinfo->rolname,
5467 .description = "SUBSCRIPTION TABLE",
5468 .section = SECTION_POST_DATA,
5469 .createStmt = query->data));
5470
5471 /* These objects can't currently have comments or seclabels */
5472
5473 free(tag);
5474 destroyPQExpBuffer(query);
5475 }
5476
5477/*
5478 * dumpSubscription
5479 * dump the definition of the given subscription
5480 */
5481static void
5483{
5484 DumpOptions *dopt = fout->dopt;
5486 PQExpBuffer query;
5487 PQExpBuffer publications;
5488 char *qsubname;
5489 char **pubnames = NULL;
5490 int npubnames = 0;
5491 int i;
5492
5493 /* Do nothing if not dumping schema */
5494 if (!dopt->dumpSchema)
5495 return;
5496
5498 query = createPQExpBuffer();
5499
5500 qsubname = pg_strdup(fmtId(subinfo->dobj.name));
5501
5502 appendPQExpBuffer(delq, "DROP SUBSCRIPTION %s;\n",
5503 qsubname);
5504
5505 appendPQExpBuffer(query, "CREATE SUBSCRIPTION %s CONNECTION ",
5506 qsubname);
5507 appendStringLiteralAH(query, subinfo->subconninfo, fout);
5508
5509 /* Build list of quoted publications and append them to query. */
5511 pg_fatal("could not parse %s array", "subpublications");
5512
5513 publications = createPQExpBuffer();
5514 for (i = 0; i < npubnames; i++)
5515 {
5516 if (i > 0)
5517 appendPQExpBufferStr(publications, ", ");
5518
5519 appendPQExpBufferStr(publications, fmtId(pubnames[i]));
5520 }
5521
5522 appendPQExpBuffer(query, " PUBLICATION %s WITH (connect = false, slot_name = ", publications->data);
5523 if (subinfo->subslotname)
5524 appendStringLiteralAH(query, subinfo->subslotname, fout);
5525 else
5526 appendPQExpBufferStr(query, "NONE");
5527
5528 if (subinfo->subbinary)
5529 appendPQExpBufferStr(query, ", binary = true");
5530
5531 if (subinfo->substream == LOGICALREP_STREAM_ON)
5532 appendPQExpBufferStr(query, ", streaming = on");
5533 else if (subinfo->substream == LOGICALREP_STREAM_PARALLEL)
5534 appendPQExpBufferStr(query, ", streaming = parallel");
5535 else
5536 appendPQExpBufferStr(query, ", streaming = off");
5537
5539 appendPQExpBufferStr(query, ", two_phase = on");
5540
5541 if (subinfo->subdisableonerr)
5542 appendPQExpBufferStr(query, ", disable_on_error = true");
5543
5544 if (!subinfo->subpasswordrequired)
5545 appendPQExpBufferStr(query, ", password_required = false");
5546
5547 if (subinfo->subrunasowner)
5548 appendPQExpBufferStr(query, ", run_as_owner = true");
5549
5550 if (subinfo->subfailover)
5551 appendPQExpBufferStr(query, ", failover = true");
5552
5553 if (subinfo->subretaindeadtuples)
5554 appendPQExpBufferStr(query, ", retain_dead_tuples = true");
5555
5556 if (subinfo->submaxretention)
5557 appendPQExpBuffer(query, ", max_retention_duration = %d", subinfo->submaxretention);
5558
5559 if (strcmp(subinfo->subsynccommit, "off") != 0)
5560 appendPQExpBuffer(query, ", synchronous_commit = %s", fmtId(subinfo->subsynccommit));
5561
5562 if (strcmp(subinfo->subwalrcvtimeout, "-1") != 0)
5563 appendPQExpBuffer(query, ", wal_receiver_timeout = %s", fmtId(subinfo->subwalrcvtimeout));
5564
5565 if (pg_strcasecmp(subinfo->suborigin, LOGICALREP_ORIGIN_ANY) != 0)
5566 appendPQExpBuffer(query, ", origin = %s", subinfo->suborigin);
5567
5568 appendPQExpBufferStr(query, ");\n");
5569
5570 /*
5571 * In binary-upgrade mode, we allow the replication to continue after the
5572 * upgrade.
5573 */
5574 if (dopt->binary_upgrade && fout->remoteVersion >= 170000)
5575 {
5576 if (subinfo->suboriginremotelsn)
5577 {
5578 /*
5579 * Preserve the remote_lsn for the subscriber's replication
5580 * origin. This value is required to start the replication from
5581 * the position before the upgrade. This value will be stale if
5582 * the publisher gets upgraded before the subscriber node.
5583 * However, this shouldn't be a problem as the upgrade of the
5584 * publisher ensures that all the transactions were replicated
5585 * before upgrading it.
5586 */
5588 "\n-- For binary upgrade, must preserve the remote_lsn for the subscriber's replication origin.\n");
5590 "SELECT pg_catalog.binary_upgrade_replorigin_advance(");
5591 appendStringLiteralAH(query, subinfo->dobj.name, fout);
5592 appendPQExpBuffer(query, ", '%s');\n", subinfo->suboriginremotelsn);
5593 }
5594
5595 if (subinfo->subenabled)
5596 {
5597 /*
5598 * Enable the subscription to allow the replication to continue
5599 * after the upgrade.
5600 */
5602 "\n-- For binary upgrade, must preserve the subscriber's running state.\n");
5603 appendPQExpBuffer(query, "ALTER SUBSCRIPTION %s ENABLE;\n", qsubname);
5604 }
5605 }
5606
5607 if (subinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
5608 ArchiveEntry(fout, subinfo->dobj.catId, subinfo->dobj.dumpId,
5609 ARCHIVE_OPTS(.tag = subinfo->dobj.name,
5610 .owner = subinfo->rolname,
5611 .description = "SUBSCRIPTION",
5612 .section = SECTION_POST_DATA,
5613 .createStmt = query->data,
5614 .dropStmt = delq->data));
5615
5616 if (subinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
5617 dumpComment(fout, "SUBSCRIPTION", qsubname,
5618 NULL, subinfo->rolname,
5619 subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
5620
5621 if (subinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
5622 dumpSecLabel(fout, "SUBSCRIPTION", qsubname,
5623 NULL, subinfo->rolname,
5624 subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
5625
5626 destroyPQExpBuffer(publications);
5627 free(pubnames);
5628
5630 destroyPQExpBuffer(query);
5631 free(qsubname);
5632}
5633
5634/*
5635 * Given a "create query", append as many ALTER ... DEPENDS ON EXTENSION as
5636 * the object needs.
5637 */
/*
 * NOTE(review): the line carrying this function's name and first parameter
 * was elided in this rendering; confirm the full signature against the
 * complete source before relying on it.
 */
5638static void
5640 PQExpBuffer create,
5641 const DumpableObject *dobj,
5642 const char *catalog,
5643 const char *keyword,
5644 const char *objname)
5645{
5646 if (dobj->depends_on_ext)
5647 {
5648 char *nm;
5649 PGresult *res;
5650 PQExpBuffer query;
5651 int ntups;
5652 int i_extname;
5653 int i;
5654
5655 /* dodge fmtId() non-reentrancy */
5656 nm = pg_strdup(objname);
5657
5658 query = createPQExpBuffer();
/*
 * Find every extension on which this object has an explicit 'x'
 * (DEPENDS ON EXTENSION) dependency recorded in pg_depend.
 */
5659 appendPQExpBuffer(query,
5660 "SELECT e.extname "
5661 "FROM pg_catalog.pg_depend d, pg_catalog.pg_extension e "
5662 "WHERE d.refobjid = e.oid AND classid = '%s'::pg_catalog.regclass "
5663 "AND objid = '%u'::pg_catalog.oid AND deptype = 'x' "
5664 "AND refclassid = 'pg_catalog.pg_extension'::pg_catalog.regclass",
5665 catalog,
5666 dobj->catId.oid);
5667 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5668 ntups = PQntuples(res);
5669 i_extname = PQfnumber(res, "extname");
5670 for (i = 0; i < ntups; i++)
5671 {
/* Emit one ALTER ... DEPENDS ON EXTENSION clause per dependency found. */
5672 appendPQExpBuffer(create, "\nALTER %s %s DEPENDS ON EXTENSION %s;",
5673 keyword, nm,
5674 fmtId(PQgetvalue(res, i, i_extname)));
5675 }
5676
5677 PQclear(res);
5678 destroyPQExpBuffer(query);
5679 pg_free(nm);
5680 }
5681}
5682
/*
 * Choose a pg_type OID that is not currently in use, for assignment to an
 * array type that the old cluster never created.
 *
 * NOTE(review): the function-name/parameter line, the static candidate-OID
 * state, and the query-execution/advance lines were elided in this
 * rendering; verify the loop body against the complete source.
 */
5683static Oid
5685{
5686 /*
5687 * If the old version didn't assign an array type, but the new version
5688 * does, we must select an unused type OID to assign. This currently only
5689 * happens for domains, when upgrading pre-v11 to v11 and up.
5690 *
5691 * Note: local state here is kind of ugly, but we must have some, since we
5692 * mustn't choose the same unused OID more than once.
5693 */
5695 PGresult *res;
5696 bool is_dup;
5697
5698 do
5699 {
/* Probe the candidate OID; keep looping until one is absent from pg_type. */
5702 "SELECT EXISTS(SELECT 1 "
5703 "FROM pg_catalog.pg_type "
5704 "WHERE oid = '%u'::pg_catalog.oid);",
5707 is_dup = (PQgetvalue(res, 0, 0)[0] == 't');
5708 PQclear(res);
5709 } while (is_dup);
5710
5712}
5713
/*
 * Emit binary_upgrade_set_next_*_pg_type_oid() calls into upgrade_buffer so
 * that the new cluster assigns this type the same pg_type OID, the same
 * array-type OID, and — for range types on v14+ servers — the same
 * multirange and multirange-array OIDs.
 *
 * NOTE(review): the signature line and several declaration/appendPQExpBuffer
 * call lines were elided in this rendering; presumably this is
 * binary_upgrade_set_type_oids_by_type_oid — confirm against full source.
 */
5714static void
5718 bool force_array_type,
5720{
5722 PGresult *res;
5726 TypeInfo *tinfo;
5727
5728 appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type oid\n");
5730 "SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('%u'::pg_catalog.oid);\n\n",
5731 pg_type_oid);
5732
/* Look up the type's array type OID from the already-collected catalog data. */
5734 if (tinfo)
5735 pg_type_array_oid = tinfo->typarray;
5736 else
5738
5741
5743 {
5745 "\n-- For binary upgrade, must preserve pg_type array oid\n");
5747 "SELECT pg_catalog.binary_upgrade_set_next_array_pg_type_oid('%u'::pg_catalog.oid);\n\n",
5749 }
5750
5751 /*
5752 * Pre-set the multirange type oid and its own array type oid.
5753 */
5755 {
5756 if (fout->remoteVersion >= 140000)
5757 {
/* Multiranges exist only on v14+; fetch their OIDs from pg_range. */
5759 "SELECT t.oid, t.typarray "
5760 "FROM pg_catalog.pg_type t "
5761 "JOIN pg_catalog.pg_range r "
5762 "ON t.oid = r.rngmultitypid "
5763 "WHERE r.rngtypid = '%u'::pg_catalog.oid;",
5764 pg_type_oid);
5765
5767
5768 pg_type_multirange_oid = atooid(PQgetvalue(res, 0, PQfnumber(res, "oid")));
5769 pg_type_multirange_array_oid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typarray")));
5770
5771 PQclear(res);
5772 }
5773 else
5774 {
5777 }
5778
5780 "\n-- For binary upgrade, must preserve multirange pg_type oid\n");
5782 "SELECT pg_catalog.binary_upgrade_set_next_multirange_pg_type_oid('%u'::pg_catalog.oid);\n\n",
5785 "\n-- For binary upgrade, must preserve multirange pg_type array oid\n");
5787 "SELECT pg_catalog.binary_upgrade_set_next_multirange_array_pg_type_oid('%u'::pg_catalog.oid);\n\n",
5789 }
5790
5792}
5793
5794static void
5805
5806/*
5807 * bsearch() comparator for BinaryUpgradeClassOidItem
5808 */
5809static int
5810BinaryUpgradeClassOidItemCmp(const void *p1, const void *p2)
5811{
/*
 * NOTE(review): the two lines unpacking p1/p2 into local v1/v2 values were
 * elided in this rendering.  The result is the unsigned comparison of the
 * two items' oid fields (negative/zero/positive per qsort convention).
 */
5814
5815 return pg_cmp_u32(v1.oid, v2.oid);
5816}
5817
5818/*
5819 * collectBinaryUpgradeClassOids
5820 *
5821 * Construct a table of pg_class information required for
5822 * binary_upgrade_set_pg_class_oids(). The table is sorted by OID for speed in
5823 * lookup.
5824 */
5825static void
5827{
5828 PGresult *res;
5829 const char *query;
5830
/*
 * One row per relation, with its toast table and the toast table's valid
 * index pulled in via LEFT JOINs; ORDER BY c.oid keeps the result sorted so
 * later bsearch() lookups work.
 */
5831 query = "SELECT c.oid, c.relkind, c.relfilenode, c.reltoastrelid, "
5832 "ct.relfilenode, i.indexrelid, cti.relfilenode "
5833 "FROM pg_catalog.pg_class c LEFT JOIN pg_catalog.pg_index i "
5834 "ON (c.reltoastrelid = i.indrelid AND i.indisvalid) "
5835 "LEFT JOIN pg_catalog.pg_class ct ON (c.reltoastrelid = ct.oid) "
5836 "LEFT JOIN pg_catalog.pg_class AS cti ON (i.indexrelid = cti.oid) "
5837 "ORDER BY c.oid;"
5838
5839 res = ExecuteSqlQuery(fout, query, PGRES_TUPLES_OK);
5840
/* NOTE(review): allocation of the result array was elided in this rendering. */
5844
5845 for (int i = 0; i < nbinaryUpgradeClassOids; i++)
5846 {
/* NOTE(review): the per-row field assignments were elided in this rendering. */
5854 }
5855
5856 PQclear(res);
5857}
5858
/*
 * Emit binary_upgrade_set_next_*() calls that pin the pg_class OIDs and
 * relfilenumbers of a relation — and of its TOAST table and TOAST index,
 * when present — so the new cluster recreates them with identical values.
 *
 * NOTE(review): the signature line was elided in this rendering; presumably
 * binary_upgrade_set_pg_class_oids(fout, upgrade_buffer, pg_class_oid) —
 * confirm against the full source.
 */
5859static void
5862{
5863 BinaryUpgradeClassOidItem key = {0};
5865
5867
5868 /*
5869 * Preserve the OID and relfilenumber of the table, table's index, table's
5870 * toast table and toast table's index if any.
5871 *
5872 * One complexity is that the current table definition might not require
5873 * the creation of a TOAST table, but the old database might have a TOAST
5874 * table that was created earlier, before some wide columns were dropped.
5875 * By setting the TOAST oid we force creation of the TOAST heap and index
5876 * by the new backend, so we can copy the files during binary upgrade
5877 * without worrying about this case.
5878 */
5879 key.oid = pg_class_oid;
/* NOTE(review): the bsearch() that fills 'entry' was elided in this rendering. */
5883
5885 "\n-- For binary upgrade, must preserve pg_class oids and relfilenodes\n");
5886
5887 if (entry->relkind != RELKIND_INDEX &&
5889 {
5891 "SELECT pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('%u'::pg_catalog.oid);\n",
5892 pg_class_oid);
5893
5894 /*
5895 * Not every relation has storage. Also, in a pre-v12 database,
5896 * partitioned tables have a relfilenumber, which should not be
5897 * preserved when upgrading.
5898 */
5899 if (RelFileNumberIsValid(entry->relfilenumber) &&
5902 "SELECT pg_catalog.binary_upgrade_set_next_heap_relfilenode('%u'::pg_catalog.oid);\n",
5903 entry->relfilenumber);
5904
5905 /*
5906 * In a pre-v12 database, partitioned tables might be marked as having
5907 * toast tables, but we should ignore them if so.
5908 */
5909 if (OidIsValid(entry->toast_oid) &&
5911 {
5913 "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('%u'::pg_catalog.oid);\n",
5914 entry->toast_oid);
5916 "SELECT pg_catalog.binary_upgrade_set_next_toast_relfilenode('%u'::pg_catalog.oid);\n",
5917 entry->toast_relfilenumber);
5918
5919 /* every toast table has an index */
5921 "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
5922 entry->toast_index_oid);
5924 "SELECT pg_catalog.binary_upgrade_set_next_index_relfilenode('%u'::pg_catalog.oid);\n",
5926 }
5927 }
5928 else
5929 {
5930 /* Preserve the OID and relfilenumber of the index */
5932 "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
5933 pg_class_oid);
5935 "SELECT pg_catalog.binary_upgrade_set_next_index_relfilenode('%u'::pg_catalog.oid);\n",
5936 entry->relfilenumber);
5937 }
5938
5940}
5941
5942/*
5943 * If the DumpableObject is a member of an extension, add a suitable
5944 * ALTER EXTENSION ADD command to the creation commands in upgrade_buffer.
5945 *
5946 * For somewhat historical reasons, objname should already be quoted,
5947 * but not objnamespace (if any).
5948 */
/*
 * NOTE(review): the first line of the signature (buffer parameter) was
 * elided in this rendering; confirm against the full source.
 */
5949static void
5951 const DumpableObject *dobj,
5952 const char *objtype,
5953 const char *objname,
5954 const char *objnamespace)
5955{
5957 int i;
5958
/* Non-members need no ALTER EXTENSION ADD command. */
5959 if (!dobj->ext_member)
5960 return;
5961
5962 /*
5963 * Find the parent extension. We could avoid this search if we wanted to
5964 * add a link field to DumpableObject, but the space costs of that would
5965 * be considerable. We assume that member objects could only have a
5966 * direct dependency on their own extension, not any others.
5967 */
5968 for (i = 0; i < dobj->nDeps; i++)
5969 {
5971 if (extobj && extobj->objType == DO_EXTENSION)
5972 break;
5973 extobj = NULL;
5974 }
5975 if (extobj == NULL)
5976 pg_fatal("could not find parent extension for %s %s",
5977 objtype, objname);
5978
5980 "\n-- For binary upgrade, handle extension membership the hard way\n");
5981 appendPQExpBuffer(upgrade_buffer, "ALTER EXTENSION %s ADD %s ",
5982 fmtId(extobj->name),
5983 objtype);
/* Schema-qualify the object name when a namespace was supplied. */
5984 if (objnamespace && *objnamespace)
5986 appendPQExpBuffer(upgrade_buffer, "%s;\n", objname);
5987}
5988
5989/*
5990 * getNamespaces:
5991 * get information about all namespaces in the system catalogs
5992 */
5993void
5995{
5996 PGresult *res;
5997 int ntups;
5998 int i;
5999 PQExpBuffer query;
6001 int i_tableoid;
6002 int i_oid;
6003 int i_nspname;
6004 int i_nspowner;
6005 int i_nspacl;
6006 int i_acldefault;
6007
6008 query = createPQExpBuffer();
6009
6010 /*
6011 * we fetch all namespaces including system ones, so that every object we
6012 * read in can be linked to a containing namespace.
6013 */
6014 appendPQExpBufferStr(query, "SELECT n.tableoid, n.oid, n.nspname, "
6015 "n.nspowner, "
6016 "n.nspacl, "
6017 "acldefault('n', n.nspowner) AS acldefault "
6018 "FROM pg_namespace n");
6019
6020 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6021
6022 ntups = PQntuples(res);
6023
/* NOTE(review): allocation of the nsinfo array was elided in this rendering. */
6025
6026 i_tableoid = PQfnumber(res, "tableoid");
6027 i_oid = PQfnumber(res, "oid");
6028 i_nspname = PQfnumber(res, "nspname");
6029 i_nspowner = PQfnumber(res, "nspowner");
6030 i_nspacl = PQfnumber(res, "nspacl");
6031 i_acldefault = PQfnumber(res, "acldefault");
6032
6033 for (i = 0; i < ntups; i++)
6034 {
6035 const char *nspowner;
6036
6037 nsinfo[i].dobj.objType = DO_NAMESPACE;
6038 nsinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6039 nsinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6040 AssignDumpId(&nsinfo[i].dobj);
6041 nsinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_nspname));
6042 nsinfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_nspacl));
6043 nsinfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
6044 nsinfo[i].dacl.privtype = 0;
6045 nsinfo[i].dacl.initprivs = NULL;
6046 nspowner = PQgetvalue(res, i, i_nspowner);
6047 nsinfo[i].nspowner = atooid(nspowner);
6048 nsinfo[i].rolname = getRoleName(nspowner);
6049
6050 /* Decide whether to dump this namespace */
6052
6053 /* Mark whether namespace has an ACL */
6054 if (!PQgetisnull(res, i, i_nspacl))
6055 nsinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
6056
6057 /*
6058 * We ignore any pg_init_privs.initprivs entry for the public schema
6059 * and assume a predetermined default, for several reasons. First,
6060 * dropping and recreating the schema removes its pg_init_privs entry,
6061 * but an empty destination database starts with this ACL nonetheless.
6062 * Second, we support dump/reload of public schema ownership changes.
6063 * ALTER SCHEMA OWNER filters nspacl through aclnewowner(), but
6064 * initprivs continues to reflect the initial owner. Hence,
6065 * synthesize the value that nspacl will have after the restore's
6066 * ALTER SCHEMA OWNER. Third, this makes the destination database
6067 * match the source's ACL, even if the latter was an initdb-default
6068 * ACL, which changed in v15. An upgrade pulls in changes to most
6069 * system object ACLs that the DBA had not customized. We've made the
6070 * public schema depart from that, because changing its ACL so easily
6071 * breaks applications.
6072 */
6073 if (strcmp(nsinfo[i].dobj.name, "public") == 0)
6074 {
/*
 * NOTE(review): the lines building the synthesized ACL array string
 * (aclarray) were elided in this rendering.
 */
6077
6078 /* Standard ACL as of v15 is {owner=UC/owner,=U/owner} */
6089
6090 nsinfo[i].dacl.privtype = 'i';
6091 nsinfo[i].dacl.initprivs = pstrdup(aclarray->data);
6092 nsinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
6093
6096 }
6097 }
6098
6099 PQclear(res);
6100 destroyPQExpBuffer(query);
6101}
6102
6103/*
6104 * findNamespace:
6105 * given a namespace OID, look up the info read by getNamespaces
6106 */
6107static NamespaceInfo *
6109{
/* NOTE(review): the lookup call that sets nsinfo was elided in this rendering. */
6111
/* Failing to find the schema is a hard error: every object must map to one. */
6113 if (nsinfo == NULL)
6114 pg_fatal("schema with OID %u does not exist", nsoid);
6115 return nsinfo;
6116}
6117
6118/*
6119 * getExtensions:
6120 * read all extensions in the system catalogs and return them in the
6121 * ExtensionInfo* structure
6122 *
6123 * numExtensions is set to the number of extensions read in
6124 */
6127{
6128 DumpOptions *dopt = fout->dopt;
6129 PGresult *res;
6130 int ntups;
6131 int i;
6132 PQExpBuffer query;
6134 int i_tableoid;
6135 int i_oid;
6136 int i_extname;
6137 int i_nspname;
6138 int i_extrelocatable;
6139 int i_extversion;
6140 int i_extconfig;
6141 int i_extcondition;
6142
6143 query = createPQExpBuffer();
6144
6145 appendPQExpBufferStr(query, "SELECT x.tableoid, x.oid, "
6146 "x.extname, n.nspname, x.extrelocatable, x.extversion, x.extconfig, x.extcondition "
6147 "FROM pg_extension x "
6148 "JOIN pg_namespace n ON n.oid = x.extnamespace");
6149
6150 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6151
6152 ntups = PQntuples(res);
/* With no extensions there is nothing to allocate or scan; just clean up. */
6153 if (ntups == 0)
6154 goto cleanup;
6155
/* NOTE(review): allocation of the extinfo array was elided in this rendering. */
6157
6158 i_tableoid = PQfnumber(res, "tableoid");
6159 i_oid = PQfnumber(res, "oid");
6160 i_extname = PQfnumber(res, "extname");
6161 i_nspname = PQfnumber(res, "nspname");
6162 i_extrelocatable = PQfnumber(res, "extrelocatable");
6163 i_extversion = PQfnumber(res, "extversion");
6164 i_extconfig = PQfnumber(res, "extconfig");
6165 i_extcondition = PQfnumber(res, "extcondition");
6166
6167 for (i = 0; i < ntups; i++)
6168 {
6169 extinfo[i].dobj.objType = DO_EXTENSION;
6170 extinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6171 extinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6172 AssignDumpId(&extinfo[i].dobj);
6173 extinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_extname));
6174 extinfo[i].namespace = pg_strdup(PQgetvalue(res, i, i_nspname));
6175 extinfo[i].relocatable = *(PQgetvalue(res, i, i_extrelocatable)) == 't';
6176 extinfo[i].extversion = pg_strdup(PQgetvalue(res, i, i_extversion));
6177 extinfo[i].extconfig = pg_strdup(PQgetvalue(res, i, i_extconfig));
6178 extinfo[i].extcondition = pg_strdup(PQgetvalue(res, i, i_extcondition));
6179
6180 /* Decide whether we want to dump it */
6182 }
6183
6184cleanup:
6185 PQclear(res);
6186 destroyPQExpBuffer(query);
6187
6188 *numExtensions = ntups;
6189
6190 return extinfo;
6191}
6192
6193/*
6194 * getTypes:
6195 * get information about all types in the system catalogs
6196 *
6197 * NB: this must run after getFuncs() because we assume we can do
6198 * findFuncByOid().
6199 */
6200void
6202{
6203 PGresult *res;
6204 int ntups;
6205 int i;
6209 int i_tableoid;
6210 int i_oid;
6211 int i_typname;
6212 int i_typnamespace;
6213 int i_typacl;
6214 int i_acldefault;
6215 int i_typowner;
6216 int i_typelem;
6217 int i_typrelid;
6218 int i_typrelkind;
6219 int i_typtype;
6220 int i_typisdefined;
6221 int i_isarray;
6222 int i_typarray;
6223
6224 /*
6225 * we include even the built-in types because those may be used as array
6226 * elements by user-defined types
6227 *
6228 * we filter out the built-in types when we dump out the types
6229 *
6230 * same approach for undefined (shell) types and array types
6231 *
6232 * Note: as of 8.3 we can reliably detect whether a type is an
6233 * auto-generated array type by checking the element type's typarray.
6234 * (Before that the test is capable of generating false positives.) We
6235 * still check for name beginning with '_', though, so as to avoid the
6236 * cost of the subselect probe for all standard types. This would have to
6237 * be revisited if the backend ever allows renaming of array types.
6238 */
6239 appendPQExpBufferStr(query, "SELECT tableoid, oid, typname, "
6240 "typnamespace, typacl, "
6241 "acldefault('T', typowner) AS acldefault, "
6242 "typowner, "
6243 "typelem, typrelid, typarray, "
6244 "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
6245 "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
6246 "typtype, typisdefined, "
6247 "typname[0] = '_' AND typelem != 0 AND "
6248 "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
6249 "FROM pg_type");
6250
6251 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6252
6253 ntups = PQntuples(res);
6254
/* NOTE(review): allocation of the tyinfo array was elided in this rendering. */
6256
6257 i_tableoid = PQfnumber(res, "tableoid");
6258 i_oid = PQfnumber(res, "oid");
6259 i_typname = PQfnumber(res, "typname");
6260 i_typnamespace = PQfnumber(res, "typnamespace");
6261 i_typacl = PQfnumber(res, "typacl");
6262 i_acldefault = PQfnumber(res, "acldefault");
6263 i_typowner = PQfnumber(res, "typowner");
6264 i_typelem = PQfnumber(res, "typelem");
6265 i_typrelid = PQfnumber(res, "typrelid");
6266 i_typrelkind = PQfnumber(res, "typrelkind");
6267 i_typtype = PQfnumber(res, "typtype");
6268 i_typisdefined = PQfnumber(res, "typisdefined");
6269 i_isarray = PQfnumber(res, "isarray");
6270 i_typarray = PQfnumber(res, "typarray");
6271
6272 for (i = 0; i < ntups; i++)
6273 {
6274 tyinfo[i].dobj.objType = DO_TYPE;
6275 tyinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6276 tyinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6277 AssignDumpId(&tyinfo[i].dobj);
6278 tyinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_typname));
6279 tyinfo[i].dobj.namespace =
6281 tyinfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_typacl));
6282 tyinfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
6283 tyinfo[i].dacl.privtype = 0;
6284 tyinfo[i].dacl.initprivs = NULL;
6285 tyinfo[i].ftypname = NULL; /* may get filled later */
6286 tyinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_typowner));
6287 tyinfo[i].typelem = atooid(PQgetvalue(res, i, i_typelem));
6288 tyinfo[i].typrelid = atooid(PQgetvalue(res, i, i_typrelid));
6289 tyinfo[i].typrelkind = *PQgetvalue(res, i, i_typrelkind);
6290 tyinfo[i].typtype = *PQgetvalue(res, i, i_typtype);
6291 tyinfo[i].shellType = NULL;
6292
6293 if (strcmp(PQgetvalue(res, i, i_typisdefined), "t") == 0)
6294 tyinfo[i].isDefined = true;
6295 else
6296 tyinfo[i].isDefined = false;
6297
6298 if (strcmp(PQgetvalue(res, i, i_isarray), "t") == 0)
6299 tyinfo[i].isArray = true;
6300 else
6301 tyinfo[i].isArray = false;
6302
6303 tyinfo[i].typarray = atooid(PQgetvalue(res, i, i_typarray));
6304
6305 if (tyinfo[i].typtype == TYPTYPE_MULTIRANGE)
6306 tyinfo[i].isMultirange = true;
6307 else
6308 tyinfo[i].isMultirange = false;
6309
6310 /* Decide whether we want to dump it */
6312
6313 /* Mark whether type has an ACL */
6314 if (!PQgetisnull(res, i, i_typacl))
6315 tyinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
6316
6317 /*
6318 * If it's a domain, fetch info about its constraints, if any
6319 */
6320 tyinfo[i].nDomChecks = 0;
6321 tyinfo[i].domChecks = NULL;
6322 tyinfo[i].notnull = NULL;
6323 if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
6324 tyinfo[i].typtype == TYPTYPE_DOMAIN)
6326
6327 /*
6328 * If it's a base type, make a DumpableObject representing a shell
6329 * definition of the type. We will need to dump that ahead of the I/O
6330 * functions for the type. Similarly, range types need a shell
6331 * definition in case they have a canonicalize function.
6332 *
6333 * Note: the shell type doesn't have a catId. You might think it
6334 * should copy the base type's catId, but then it might capture the
6335 * pg_depend entries for the type, which we don't want.
6336 */
6337 if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
6338 (tyinfo[i].typtype == TYPTYPE_BASE ||
6339 tyinfo[i].typtype == TYPTYPE_RANGE))
6340 {
/* NOTE(review): allocation of the ShellTypeInfo was elided in this rendering. */
6342 stinfo->dobj.objType = DO_SHELL_TYPE;
6343 stinfo->dobj.catId = nilCatalogId;
6344 AssignDumpId(&stinfo->dobj);
6345 stinfo->dobj.name = pg_strdup(tyinfo[i].dobj.name);
6346 stinfo->dobj.namespace = tyinfo[i].dobj.namespace;
6347 stinfo->baseType = &(tyinfo[i]);
6348 tyinfo[i].shellType = stinfo;
6349
6350 /*
6351 * Initially mark the shell type as not to be dumped. We'll only
6352 * dump it if the I/O or canonicalize functions need to be dumped;
6353 * this is taken care of while sorting dependencies.
6354 */
6355 stinfo->dobj.dump = DUMP_COMPONENT_NONE;
6356 }
6357 }
6358
6359 PQclear(res);
6360
6361 destroyPQExpBuffer(query);
6362}
6363
6364/*
6365 * getOperators:
6366 * get information about all operators in the system catalogs
6367 */
6368void
6370{
6371 PGresult *res;
6372 int ntups;
6373 int i;
6376 int i_tableoid;
6377 int i_oid;
6378 int i_oprname;
6379 int i_oprnamespace;
6380 int i_oprowner;
6381 int i_oprkind;
6382 int i_oprleft;
6383 int i_oprright;
6384 int i_oprcode;
6385
6386 /*
6387 * find all operators, including builtin operators; we filter out
6388 * system-defined operators at dump-out time.
6389 */
6390
6391 appendPQExpBufferStr(query, "SELECT tableoid, oid, oprname, "
6392 "oprnamespace, "
6393 "oprowner, "
6394 "oprkind, "
6395 "oprleft, "
6396 "oprright, "
6397 "oprcode::oid AS oprcode "
6398 "FROM pg_operator");
6399
6400 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6401
6402 ntups = PQntuples(res);
6403
/* NOTE(review): allocation of the oprinfo array was elided in this rendering. */
6405
6406 i_tableoid = PQfnumber(res, "tableoid");
6407 i_oid = PQfnumber(res, "oid");
6408 i_oprname = PQfnumber(res, "oprname");
6409 i_oprnamespace = PQfnumber(res, "oprnamespace");
6410 i_oprowner = PQfnumber(res, "oprowner");
6411 i_oprkind = PQfnumber(res, "oprkind");
6412 i_oprleft = PQfnumber(res, "oprleft");
6413 i_oprright = PQfnumber(res, "oprright");
6414 i_oprcode = PQfnumber(res, "oprcode");
6415
6416 for (i = 0; i < ntups; i++)
6417 {
6418 oprinfo[i].dobj.objType = DO_OPERATOR;
6419 oprinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6420 oprinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6421 AssignDumpId(&oprinfo[i].dobj);
6422 oprinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oprname));
6423 oprinfo[i].dobj.namespace =
6425 oprinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_oprowner));
6426 oprinfo[i].oprkind = (PQgetvalue(res, i, i_oprkind))[0];
6427 oprinfo[i].oprleft = atooid(PQgetvalue(res, i, i_oprleft));
6428 oprinfo[i].oprright = atooid(PQgetvalue(res, i, i_oprright));
6429 oprinfo[i].oprcode = atooid(PQgetvalue(res, i, i_oprcode));
6430
6431 /* Decide whether we want to dump it */
6433 }
6434
6435 PQclear(res);
6436
6437 destroyPQExpBuffer(query);
6438}
6439
6440/*
6441 * getCollations:
6442 * get information about all collations in the system catalogs
6443 */
6444void
6446{
6447 PGresult *res;
6448 int ntups;
6449 int i;
6450 PQExpBuffer query;
6452 int i_tableoid;
6453 int i_oid;
6454 int i_collname;
6455 int i_collnamespace;
6456 int i_collowner;
6457 int i_collencoding;
6458
6459 query = createPQExpBuffer();
6460
6461 /*
6462 * find all collations, including builtin collations; we filter out
6463 * system-defined collations at dump-out time.
6464 */
6465
6466 appendPQExpBufferStr(query, "SELECT tableoid, oid, collname, "
6467 "collnamespace, "
6468 "collowner, "
6469 "collencoding "
6470 "FROM pg_collation");
6471
6472 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6473
6474 ntups = PQntuples(res);
6475
/* NOTE(review): allocation of the collinfo array was elided in this rendering. */
6477
6478 i_tableoid = PQfnumber(res, "tableoid");
6479 i_oid = PQfnumber(res, "oid");
6480 i_collname = PQfnumber(res, "collname");
6481 i_collnamespace = PQfnumber(res, "collnamespace");
6482 i_collowner = PQfnumber(res, "collowner");
6483 i_collencoding = PQfnumber(res, "collencoding");
6484
6485 for (i = 0; i < ntups; i++)
6486 {
6487 collinfo[i].dobj.objType = DO_COLLATION;
6488 collinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6489 collinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6490 AssignDumpId(&collinfo[i].dobj);
6491 collinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_collname));
6492 collinfo[i].dobj.namespace =
6494 collinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_collowner));
6495 collinfo[i].collencoding = atoi(PQgetvalue(res, i, i_collencoding));
6496
6497 /* Decide whether we want to dump it */
6499 }
6500
6501 PQclear(res);
6502
6503 destroyPQExpBuffer(query);
6504}
6505
6506/*
6507 * getConversions:
6508 * get information about all conversions in the system catalogs
6509 */
6510void
6512{
6513 PGresult *res;
6514 int ntups;
6515 int i;
6516 PQExpBuffer query;
6518 int i_tableoid;
6519 int i_oid;
6520 int i_conname;
6521 int i_connamespace;
6522 int i_conowner;
6523
6524 query = createPQExpBuffer();
6525
6526 /*
6527 * find all conversions, including builtin conversions; we filter out
6528 * system-defined conversions at dump-out time.
6529 */
6530
6531 appendPQExpBufferStr(query, "SELECT tableoid, oid, conname, "
6532 "connamespace, "
6533 "conowner "
6534 "FROM pg_conversion");
6535
6536 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6537
6538 ntups = PQntuples(res);
6539
/* NOTE(review): allocation of the convinfo array was elided in this rendering. */
6541
6542 i_tableoid = PQfnumber(res, "tableoid");
6543 i_oid = PQfnumber(res, "oid");
6544 i_conname = PQfnumber(res, "conname");
6545 i_connamespace = PQfnumber(res, "connamespace");
6546 i_conowner = PQfnumber(res, "conowner");
6547
6548 for (i = 0; i < ntups; i++)
6549 {
6550 convinfo[i].dobj.objType = DO_CONVERSION;
6551 convinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6552 convinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6553 AssignDumpId(&convinfo[i].dobj);
6554 convinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_conname));
6555 convinfo[i].dobj.namespace =
6557 convinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_conowner));
6558
6559 /* Decide whether we want to dump it */
6561 }
6562
6563 PQclear(res);
6564
6565 destroyPQExpBuffer(query);
6566}
6567
6568/*
6569 * getAccessMethods:
6570 * get information about all user-defined access methods
6571 */
6572void
6574{
6575 PGresult *res;
6576 int ntups;
6577 int i;
6578 PQExpBuffer query;
6580 int i_tableoid;
6581 int i_oid;
6582 int i_amname;
6583 int i_amhandler;
6584 int i_amtype;
6585
6586 query = createPQExpBuffer();
6587
6588 /*
6589 * Select all access methods from pg_am table. v9.6 introduced CREATE
6590 * ACCESS METHOD, so earlier versions usually have only built-in access
6591 * methods. v9.6 also changed the access method API, replacing dozens of
6592 * pg_am columns with amhandler. Even if a user created an access method
6593 * by "INSERT INTO pg_am", we have no way to translate pre-v9.6 pg_am
6594 * columns to a v9.6+ CREATE ACCESS METHOD. Hence, before v9.6, read
6595 * pg_am just to facilitate findAccessMethodByOid() providing the
6596 * OID-to-name mapping.
6597 */
6598 appendPQExpBufferStr(query, "SELECT tableoid, oid, amname, ");
6599 if (fout->remoteVersion >= 90600)
6601 "amtype, "
6602 "amhandler::pg_catalog.regproc AS amhandler ");
6603 else
/* Pre-9.6 has no amtype/amhandler; supply harmless placeholder values. */
6605 "'i'::pg_catalog.\"char\" AS amtype, "
6606 "'-'::pg_catalog.regproc AS amhandler ");
6607 appendPQExpBufferStr(query, "FROM pg_am");
6608
6609 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6610
6611 ntups = PQntuples(res);
6612
/* NOTE(review): allocation of the aminfo array was elided in this rendering. */
6614
6615 i_tableoid = PQfnumber(res, "tableoid");
6616 i_oid = PQfnumber(res, "oid");
6617 i_amname = PQfnumber(res, "amname");
6618 i_amhandler = PQfnumber(res, "amhandler");
6619 i_amtype = PQfnumber(res, "amtype");
6620
6621 for (i = 0; i < ntups; i++)
6622 {
6623 aminfo[i].dobj.objType = DO_ACCESS_METHOD;
6624 aminfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6625 aminfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6626 AssignDumpId(&aminfo[i].dobj);
6627 aminfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_amname));
6628 aminfo[i].dobj.namespace = NULL;
6629 aminfo[i].amhandler = pg_strdup(PQgetvalue(res, i, i_amhandler));
6630 aminfo[i].amtype = *(PQgetvalue(res, i, i_amtype));
6631
6632 /* Decide whether we want to dump it */
6634 }
6635
6636 PQclear(res);
6637
6638 destroyPQExpBuffer(query);
6639}
6640
6641
6642/*
6643 * getOpclasses:
6644 * get information about all opclasses in the system catalogs
6645 */
6646void
6648{
6649 PGresult *res;
6650 int ntups;
6651 int i;
6654 int i_tableoid;
6655 int i_oid;
6656 int i_opcmethod;
6657 int i_opcname;
6658 int i_opcnamespace;
6659 int i_opcowner;
6660
6661 /*
6662 * find all opclasses, including builtin opclasses; we filter out
6663 * system-defined opclasses at dump-out time.
6664 */
6665
6666 appendPQExpBufferStr(query, "SELECT tableoid, oid, opcmethod, opcname, "
6667 "opcnamespace, "
6668 "opcowner "
6669 "FROM pg_opclass");
6670
6671 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6672
6673 ntups = PQntuples(res);
6674
/* NOTE(review): allocation of the opcinfo array was elided in this rendering. */
6676
6677 i_tableoid = PQfnumber(res, "tableoid");
6678 i_oid = PQfnumber(res, "oid");
6679 i_opcmethod = PQfnumber(res, "opcmethod");
6680 i_opcname = PQfnumber(res, "opcname");
6681 i_opcnamespace = PQfnumber(res, "opcnamespace");
6682 i_opcowner = PQfnumber(res, "opcowner");
6683
6684 for (i = 0; i < ntups; i++)
6685 {
6686 opcinfo[i].dobj.objType = DO_OPCLASS;
6687 opcinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6688 opcinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6689 AssignDumpId(&opcinfo[i].dobj);
6690 opcinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opcname));
6691 opcinfo[i].dobj.namespace =
6693 opcinfo[i].opcmethod = atooid(PQgetvalue(res, i, i_opcmethod));
6694 opcinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_opcowner));
6695
6696 /* Decide whether we want to dump it */
6698 }
6699
6700 PQclear(res);
6701
6702 destroyPQExpBuffer(query);
6703}
6704
6705/*
6706 * getOpfamilies:
6707 * get information about all opfamilies in the system catalogs
6708 */
6709void
6711{
6712 PGresult *res;
6713 int ntups;
6714 int i;
6715 PQExpBuffer query;
6717 int i_tableoid;
6718 int i_oid;
6719 int i_opfmethod;
6720 int i_opfname;
6721 int i_opfnamespace;
6722 int i_opfowner;
6723
6724 query = createPQExpBuffer();
6725
6726 /*
6727 * find all opfamilies, including builtin opfamilies; we filter out
6728 * system-defined opfamilies at dump-out time.
6729 */
6730
6731 appendPQExpBufferStr(query, "SELECT tableoid, oid, opfmethod, opfname, "
6732 "opfnamespace, "
6733 "opfowner "
6734 "FROM pg_opfamily");
6735
6736 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6737
6738 ntups = PQntuples(res);
6739
/* NOTE(review): allocation of the opfinfo array was elided in this rendering. */
6741
6742 i_tableoid = PQfnumber(res, "tableoid");
6743 i_oid = PQfnumber(res, "oid");
6744 i_opfname = PQfnumber(res, "opfname");
6745 i_opfmethod = PQfnumber(res, "opfmethod");
6746 i_opfnamespace = PQfnumber(res, "opfnamespace");
6747 i_opfowner = PQfnumber(res, "opfowner");
6748
6749 for (i = 0; i < ntups; i++)
6750 {
6751 opfinfo[i].dobj.objType = DO_OPFAMILY;
6752 opfinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6753 opfinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6754 AssignDumpId(&opfinfo[i].dobj);
6755 opfinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opfname));
6756 opfinfo[i].dobj.namespace =
6758 opfinfo[i].opfmethod = atooid(PQgetvalue(res, i, i_opfmethod));
6759 opfinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_opfowner));
6760
6761 /* Decide whether we want to dump it */
6763 }
6764
6765 PQclear(res);
6766
6767 destroyPQExpBuffer(query);
6768}
6769
6770/*
6771 * getAggregates:
6772 * get information about all user-defined aggregates in the system catalogs
6773 */
6774void
6776{
6777 DumpOptions *dopt = fout->dopt;
6778 PGresult *res;
6779 int ntups;
6780 int i;
6783 int i_tableoid;
6784 int i_oid;
6785 int i_aggname;
6786 int i_aggnamespace;
6787 int i_pronargs;
6788 int i_proargtypes;
6789 int i_proowner;
6790 int i_aggacl;
6791 int i_acldefault;
6792
6793 /*
6794 * Find all interesting aggregates. See comment in getFuncs() for the
6795 * rationale behind the filtering logic.
6796 */
6797 if (fout->remoteVersion >= 90600)
6798 {
6799 const char *agg_check;
6800
6801 agg_check = (fout->remoteVersion >= 110000 ? "p.prokind = 'a'"
6802 : "p.proisagg");
6803
6804 appendPQExpBuffer(query, "SELECT p.tableoid, p.oid, "
6805 "p.proname AS aggname, "
6806 "p.pronamespace AS aggnamespace, "
6807 "p.pronargs, p.proargtypes, "
6808 "p.proowner, "
6809 "p.proacl AS aggacl, "
6810 "acldefault('f', p.proowner) AS acldefault "
6811 "FROM pg_proc p "
6812 "LEFT JOIN pg_init_privs pip ON "
6813 "(p.oid = pip.objoid "
6814 "AND pip.classoid = 'pg_proc'::regclass "
6815 "AND pip.objsubid = 0) "
6816 "WHERE %s AND ("
6817 "p.pronamespace != "
6818 "(SELECT oid FROM pg_namespace "
6819 "WHERE nspname = 'pg_catalog') OR "
6820 "p.proacl IS DISTINCT FROM pip.initprivs",
6821 agg_check);
6822 if (dopt->binary_upgrade)
6824 " OR EXISTS(SELECT 1 FROM pg_depend WHERE "
6825 "classid = 'pg_proc'::regclass AND "
6826 "objid = p.oid AND "
6827 "refclassid = 'pg_extension'::regclass AND "
6828 "deptype = 'e')");
6829 appendPQExpBufferChar(query, ')');
6830 }
6831 else
6832 {
6833 appendPQExpBufferStr(query, "SELECT tableoid, oid, proname AS aggname, "
6834 "pronamespace AS aggnamespace, "
6835 "pronargs, proargtypes, "
6836 "proowner, "
6837 "proacl AS aggacl, "
6838 "acldefault('f', proowner) AS acldefault "
6839 "FROM pg_proc p "
6840 "WHERE proisagg AND ("
6841 "pronamespace != "
6842 "(SELECT oid FROM pg_namespace "
6843 "WHERE nspname = 'pg_catalog')");
6844 if (dopt->binary_upgrade)
6846 " OR EXISTS(SELECT 1 FROM pg_depend WHERE "
6847 "classid = 'pg_proc'::regclass AND "
6848 "objid = p.oid AND "
6849 "refclassid = 'pg_extension'::regclass AND "
6850 "deptype = 'e')");
6851 appendPQExpBufferChar(query, ')');
6852 }
6853
6854 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6855
6856 ntups = PQntuples(res);
6857
6859
6860 i_tableoid = PQfnumber(res, "tableoid");
6861 i_oid = PQfnumber(res, "oid");
6862 i_aggname = PQfnumber(res, "aggname");
6863 i_aggnamespace = PQfnumber(res, "aggnamespace");
6864 i_pronargs = PQfnumber(res, "pronargs");
6865 i_proargtypes = PQfnumber(res, "proargtypes");
6866 i_proowner = PQfnumber(res, "proowner");
6867 i_aggacl = PQfnumber(res, "aggacl");
6868 i_acldefault = PQfnumber(res, "acldefault");
6869
6870 for (i = 0; i < ntups; i++)
6871 {
6872 agginfo[i].aggfn.dobj.objType = DO_AGG;
6873 agginfo[i].aggfn.dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6874 agginfo[i].aggfn.dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6875 AssignDumpId(&agginfo[i].aggfn.dobj);
6876 agginfo[i].aggfn.dobj.name = pg_strdup(PQgetvalue(res, i, i_aggname));
6877 agginfo[i].aggfn.dobj.namespace =
6879 agginfo[i].aggfn.dacl.acl = pg_strdup(PQgetvalue(res, i, i_aggacl));
6880 agginfo[i].aggfn.dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
6881 agginfo[i].aggfn.dacl.privtype = 0;
6882 agginfo[i].aggfn.dacl.initprivs = NULL;
6883 agginfo[i].aggfn.rolname = getRoleName(PQgetvalue(res, i, i_proowner));
6884 agginfo[i].aggfn.lang = InvalidOid; /* not currently interesting */
6885 agginfo[i].aggfn.prorettype = InvalidOid; /* not saved */
6886 agginfo[i].aggfn.nargs = atoi(PQgetvalue(res, i, i_pronargs));
6887 if (agginfo[i].aggfn.nargs == 0)
6888 agginfo[i].aggfn.argtypes = NULL;
6889 else
6890 {
6891 agginfo[i].aggfn.argtypes = pg_malloc_array(Oid, agginfo[i].aggfn.nargs);
6893 agginfo[i].aggfn.argtypes,
6894 agginfo[i].aggfn.nargs);
6895 }
6896 agginfo[i].aggfn.postponed_def = false; /* might get set during sort */
6897
6898 /* Decide whether we want to dump it */
6899 selectDumpableObject(&(agginfo[i].aggfn.dobj), fout);
6900
6901 /* Mark whether aggregate has an ACL */
6902 if (!PQgetisnull(res, i, i_aggacl))
6903 agginfo[i].aggfn.dobj.components |= DUMP_COMPONENT_ACL;
6904 }
6905
6906 PQclear(res);
6907
6908 destroyPQExpBuffer(query);
6909}
6910
6911/*
6912 * getFuncs:
6913 * get information about all user-defined functions in the system catalogs
6914 */
6915void
6917{
6918 DumpOptions *dopt = fout->dopt;
6919 PGresult *res;
6920 int ntups;
6921 int i;
6923 FuncInfo *finfo;
6924 int i_tableoid;
6925 int i_oid;
6926 int i_proname;
6927 int i_pronamespace;
6928 int i_proowner;
6929 int i_prolang;
6930 int i_pronargs;
6931 int i_proargtypes;
6932 int i_prorettype;
6933 int i_proacl;
6934 int i_acldefault;
6935
6936 /*
6937 * Find all interesting functions. This is a bit complicated:
6938 *
6939 * 1. Always exclude aggregates; those are handled elsewhere.
6940 *
6941 * 2. Always exclude functions that are internally dependent on something
6942 * else, since presumably those will be created as a result of creating
6943 * the something else. This currently acts only to suppress constructor
6944 * functions for range types. Note this is OK only because the
6945 * constructors don't have any dependencies the range type doesn't have;
6946 * otherwise we might not get creation ordering correct.
6947 *
6948 * 3. Otherwise, we normally exclude functions in pg_catalog. However, if
6949 * they're members of extensions and we are in binary-upgrade mode then
6950 * include them, since we want to dump extension members individually in
6951 * that mode. Also, if they are used by casts or transforms then we need
6952 * to gather the information about them, though they won't be dumped if
6953 * they are built-in. Also, in 9.6 and up, include functions in
6954 * pg_catalog if they have an ACL different from what's shown in
6955 * pg_init_privs (so we have to join to pg_init_privs; annoying).
6956 */
6957 if (fout->remoteVersion >= 90600)
6958 {
6959 const char *not_agg_check;
6960
6961 not_agg_check = (fout->remoteVersion >= 110000 ? "p.prokind <> 'a'"
6962 : "NOT p.proisagg");
6963
6964 appendPQExpBuffer(query,
6965 "SELECT p.tableoid, p.oid, p.proname, p.prolang, "
6966 "p.pronargs, p.proargtypes, p.prorettype, "
6967 "p.proacl, "
6968 "acldefault('f', p.proowner) AS acldefault, "
6969 "p.pronamespace, "
6970 "p.proowner "
6971 "FROM pg_proc p "
6972 "LEFT JOIN pg_init_privs pip ON "
6973 "(p.oid = pip.objoid "
6974 "AND pip.classoid = 'pg_proc'::regclass "
6975 "AND pip.objsubid = 0) "
6976 "WHERE %s"
6977 "\n AND NOT EXISTS (SELECT 1 FROM pg_depend "
6978 "WHERE classid = 'pg_proc'::regclass AND "
6979 "objid = p.oid AND deptype = 'i')"
6980 "\n AND ("
6981 "\n pronamespace != "
6982 "(SELECT oid FROM pg_namespace "
6983 "WHERE nspname = 'pg_catalog')"
6984 "\n OR EXISTS (SELECT 1 FROM pg_cast"
6985 "\n WHERE pg_cast.oid > %u "
6986 "\n AND p.oid = pg_cast.castfunc)"
6987 "\n OR EXISTS (SELECT 1 FROM pg_transform"
6988 "\n WHERE pg_transform.oid > %u AND "
6989 "\n (p.oid = pg_transform.trffromsql"
6990 "\n OR p.oid = pg_transform.trftosql))",
6994 if (dopt->binary_upgrade)
6996 "\n OR EXISTS(SELECT 1 FROM pg_depend WHERE "
6997 "classid = 'pg_proc'::regclass AND "
6998 "objid = p.oid AND "
6999 "refclassid = 'pg_extension'::regclass AND "
7000 "deptype = 'e')");
7002 "\n OR p.proacl IS DISTINCT FROM pip.initprivs");
7003 appendPQExpBufferChar(query, ')');
7004 }
7005 else
7006 {
7007 appendPQExpBuffer(query,
7008 "SELECT tableoid, oid, proname, prolang, "
7009 "pronargs, proargtypes, prorettype, proacl, "
7010 "acldefault('f', proowner) AS acldefault, "
7011 "pronamespace, "
7012 "proowner "
7013 "FROM pg_proc p "
7014 "WHERE NOT proisagg"
7015 "\n AND NOT EXISTS (SELECT 1 FROM pg_depend "
7016 "WHERE classid = 'pg_proc'::regclass AND "
7017 "objid = p.oid AND deptype = 'i')"
7018 "\n AND ("
7019 "\n pronamespace != "
7020 "(SELECT oid FROM pg_namespace "
7021 "WHERE nspname = 'pg_catalog')"
7022 "\n OR EXISTS (SELECT 1 FROM pg_cast"
7023 "\n WHERE pg_cast.oid > '%u'::oid"
7024 "\n AND p.oid = pg_cast.castfunc)",
7026
7027 if (fout->remoteVersion >= 90500)
7028 appendPQExpBuffer(query,
7029 "\n OR EXISTS (SELECT 1 FROM pg_transform"
7030 "\n WHERE pg_transform.oid > '%u'::oid"
7031 "\n AND (p.oid = pg_transform.trffromsql"
7032 "\n OR p.oid = pg_transform.trftosql))",
7034
7035 if (dopt->binary_upgrade)
7037 "\n OR EXISTS(SELECT 1 FROM pg_depend WHERE "
7038 "classid = 'pg_proc'::regclass AND "
7039 "objid = p.oid AND "
7040 "refclassid = 'pg_extension'::regclass AND "
7041 "deptype = 'e')");
7042 appendPQExpBufferChar(query, ')');
7043 }
7044
7045 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
7046
7047 ntups = PQntuples(res);
7048
7049 finfo = pg_malloc0_array(FuncInfo, ntups);
7050
7051 i_tableoid = PQfnumber(res, "tableoid");
7052 i_oid = PQfnumber(res, "oid");
7053 i_proname = PQfnumber(res, "proname");
7054 i_pronamespace = PQfnumber(res, "pronamespace");
7055 i_proowner = PQfnumber(res, "proowner");
7056 i_prolang = PQfnumber(res, "prolang");
7057 i_pronargs = PQfnumber(res, "pronargs");
7058 i_proargtypes = PQfnumber(res, "proargtypes");
7059 i_prorettype = PQfnumber(res, "prorettype");
7060 i_proacl = PQfnumber(res, "proacl");
7061 i_acldefault = PQfnumber(res, "acldefault");
7062
7063 for (i = 0; i < ntups; i++)
7064 {
7065 finfo[i].dobj.objType = DO_FUNC;
7066 finfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
7067 finfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
7068 AssignDumpId(&finfo[i].dobj);
7069 finfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_proname));
7070 finfo[i].dobj.namespace =
7072 finfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_proacl));
7074 finfo[i].dacl.privtype = 0;
7075 finfo[i].dacl.initprivs = NULL;
7076 finfo[i].rolname = getRoleName(PQgetvalue(res, i, i_proowner));
7077 finfo[i].lang = atooid(PQgetvalue(res, i, i_prolang));
7078 finfo[i].prorettype = atooid(PQgetvalue(res, i, i_prorettype));
7079 finfo[i].nargs = atoi(PQgetvalue(res, i, i_pronargs));
7080 if (finfo[i].nargs == 0)
7081 finfo[i].argtypes = NULL;
7082 else
7083 {
7084 finfo[i].argtypes = pg_malloc_array(Oid, finfo[i].nargs);
7086 finfo[i].argtypes, finfo[i].nargs);
7087 }
7088 finfo[i].postponed_def = false; /* might get set during sort */
7089
7090 /* Decide whether we want to dump it */
7091 selectDumpableObject(&(finfo[i].dobj), fout);
7092
7093 /* Mark whether function has an ACL */
7094 if (!PQgetisnull(res, i, i_proacl))
7096 }
7097
7098 PQclear(res);
7099
7100 destroyPQExpBuffer(query);
7101}
7102
7103/*
7104 * getRelationStatistics
7105 * register the statistics object as a dependent of the relation.
7106 *
7107 * reltuples is passed as a string to avoid complexities in converting from/to
7108 * floating point.
7109 */
7110static RelStatsInfo *
7112 char *reltuples, int32 relallvisible,
7113 int32 relallfrozen, char relkind,
7114 char **indAttNames, int nindAttNames)
7115{
7116 if (!fout->dopt->dumpStatistics)
7117 return NULL;
7118
7119 if ((relkind == RELKIND_RELATION) ||
7120 (relkind == RELKIND_PARTITIONED_TABLE) ||
7121 (relkind == RELKIND_INDEX) ||
7122 (relkind == RELKIND_PARTITIONED_INDEX) ||
7123 (relkind == RELKIND_MATVIEW ||
7124 relkind == RELKIND_FOREIGN_TABLE))
7125 {
7127 DumpableObject *dobj = &info->dobj;
7128
7129 dobj->objType = DO_REL_STATS;
7130 dobj->catId.tableoid = 0;
7131 dobj->catId.oid = 0;
7132 AssignDumpId(dobj);
7134 dobj->dependencies[0] = rel->dumpId;
7135 dobj->nDeps = 1;
7136 dobj->allocDeps = 1;
7138 dobj->name = pg_strdup(rel->name);
7139 dobj->namespace = rel->namespace;
7140 info->relpages = relpages;
7141 info->reltuples = pstrdup(reltuples);
7142 info->relallvisible = relallvisible;
7143 info->relallfrozen = relallfrozen;
7144 info->relkind = relkind;
7145 info->indAttNames = indAttNames;
7146 info->nindAttNames = nindAttNames;
7147
7148 /*
7149 * Ordinarily, stats go in SECTION_DATA for tables and
7150 * SECTION_POST_DATA for indexes.
7151 *
7152 * However, the section may be updated later for materialized view
7153 * stats. REFRESH MATERIALIZED VIEW replaces the storage and resets
7154 * the stats, so the stats must be restored after the data. Also, the
7155 * materialized view definition may be postponed to SECTION_POST_DATA
7156 * (see repairMatViewBoundaryMultiLoop()).
7157 */
7158 switch (info->relkind)
7159 {
7160 case RELKIND_RELATION:
7162 case RELKIND_MATVIEW:
7164 info->section = SECTION_DATA;
7165 break;
7166 case RELKIND_INDEX:
7168 info->section = SECTION_POST_DATA;
7169 break;
7170 default:
7171 pg_fatal("cannot dump statistics for relation kind \"%c\"",
7172 info->relkind);
7173 }
7174
7175 return info;
7176 }
7177 return NULL;
7178}
7179
7180/*
7181 * getTables
7182 * read all the tables (no indexes) in the system catalogs,
7183 * and return them as an array of TableInfo structures
7184 *
7185 * *numTables is set to the number of tables read in
7186 */
7187TableInfo *
7189{
7190 DumpOptions *dopt = fout->dopt;
7191 PGresult *res;
7192 int ntups;
7193 int i;
7195 TableInfo *tblinfo;
7196 int i_reltableoid;
7197 int i_reloid;
7198 int i_relname;
7199 int i_relnamespace;
7200 int i_relkind;
7201 int i_reltype;
7202 int i_relowner;
7203 int i_relchecks;
7204 int i_relhasindex;
7205 int i_relhasrules;
7206 int i_relpages;
7207 int i_reltuples;
7208 int i_relallvisible;
7209 int i_relallfrozen;
7210 int i_toastpages;
7211 int i_owning_tab;
7212 int i_owning_col;
7213 int i_reltablespace;
7214 int i_relhasoids;
7215 int i_relhastriggers;
7216 int i_relpersistence;
7217 int i_relispopulated;
7218 int i_relreplident;
7219 int i_relrowsec;
7220 int i_relforcerowsec;
7221 int i_relfrozenxid;
7222 int i_toastfrozenxid;
7223 int i_toastoid;
7224 int i_relminmxid;
7225 int i_toastminmxid;
7226 int i_reloptions;
7227 int i_checkoption;
7229 int i_reloftype;
7230 int i_foreignserver;
7231 int i_amname;
7233 int i_relacl;
7234 int i_acldefault;
7235 int i_ispartition;
7236
7237 /*
7238 * Find all the tables and table-like objects.
7239 *
7240 * We must fetch all tables in this phase because otherwise we cannot
7241 * correctly identify inherited columns, owned sequences, etc.
7242 *
7243 * We include system catalogs, so that we can work if a user table is
7244 * defined to inherit from a system catalog (pretty weird, but...)
7245 *
7246 * Note: in this phase we should collect only a minimal amount of
7247 * information about each table, basically just enough to decide if it is
7248 * interesting. In particular, since we do not yet have lock on any user
7249 * table, we MUST NOT invoke any server-side data collection functions
7250 * (for instance, pg_get_partkeydef()). Those are likely to fail or give
7251 * wrong answers if any concurrent DDL is happening.
7252 */
7253
7255 "SELECT c.tableoid, c.oid, c.relname, "
7256 "c.relnamespace, c.relkind, c.reltype, "
7257 "c.relowner, "
7258 "c.relchecks, "
7259 "c.relhasindex, c.relhasrules, c.relpages, "
7260 "c.reltuples, c.relallvisible, ");
7261
7262 if (fout->remoteVersion >= 180000)
7263 appendPQExpBufferStr(query, "c.relallfrozen, ");
7264 else
7265 appendPQExpBufferStr(query, "0 AS relallfrozen, ");
7266
7268 "c.relhastriggers, c.relpersistence, "
7269 "c.reloftype, "
7270 "c.relacl, "
7271 "acldefault(CASE WHEN c.relkind = " CppAsString2(RELKIND_SEQUENCE)
7272 " THEN 's'::\"char\" ELSE 'r'::\"char\" END, c.relowner) AS acldefault, "
7273 "CASE WHEN c.relkind = " CppAsString2(RELKIND_FOREIGN_TABLE) " THEN "
7274 "(SELECT ftserver FROM pg_catalog.pg_foreign_table WHERE ftrelid = c.oid) "
7275 "ELSE 0 END AS foreignserver, "
7276 "c.relfrozenxid, tc.relfrozenxid AS tfrozenxid, "
7277 "tc.oid AS toid, "
7278 "tc.relpages AS toastpages, "
7279 "tc.reloptions AS toast_reloptions, "
7280 "d.refobjid AS owning_tab, "
7281 "d.refobjsubid AS owning_col, "
7282 "tsp.spcname AS reltablespace, ");
7283
7284 if (fout->remoteVersion >= 120000)
7286 "false AS relhasoids, ");
7287 else
7289 "c.relhasoids, ");
7290
7291 if (fout->remoteVersion >= 90300)
7293 "c.relispopulated, ");
7294 else
7296 "'t' as relispopulated, ");
7297
7298 if (fout->remoteVersion >= 90400)
7300 "c.relreplident, ");
7301 else
7303 "'d' AS relreplident, ");
7304
7305 if (fout->remoteVersion >= 90500)
7307 "c.relrowsecurity, c.relforcerowsecurity, ");
7308 else
7310 "false AS relrowsecurity, "
7311 "false AS relforcerowsecurity, ");
7312
7313 if (fout->remoteVersion >= 90300)
7315 "c.relminmxid, tc.relminmxid AS tminmxid, ");
7316 else
7318 "0 AS relminmxid, 0 AS tminmxid, ");
7319
7320 if (fout->remoteVersion >= 90300)
7322 "array_remove(array_remove(c.reloptions,'check_option=local'),'check_option=cascaded') AS reloptions, "
7323 "CASE WHEN 'check_option=local' = ANY (c.reloptions) THEN 'LOCAL'::text "
7324 "WHEN 'check_option=cascaded' = ANY (c.reloptions) THEN 'CASCADED'::text ELSE NULL END AS checkoption, ");
7325 else
7327 "c.reloptions, NULL AS checkoption, ");
7328
7329 if (fout->remoteVersion >= 90600)
7331 "am.amname, ");
7332 else
7334 "NULL AS amname, ");
7335
7336 if (fout->remoteVersion >= 90600)
7338 "(d.deptype = 'i') IS TRUE AS is_identity_sequence, ");
7339 else
7341 "false AS is_identity_sequence, ");
7342
7343 if (fout->remoteVersion >= 100000)
7345 "c.relispartition AS ispartition ");
7346 else
7348 "false AS ispartition ");
7349
7350 /*
7351 * Left join to pg_depend to pick up dependency info linking sequences to
7352 * their owning column, if any (note this dependency is AUTO except for
7353 * identity sequences, where it's INTERNAL). Also join to pg_tablespace to
7354 * collect the spcname.
7355 */
7357 "\nFROM pg_class c\n"
7358 "LEFT JOIN pg_depend d ON "
7359 "(c.relkind = " CppAsString2(RELKIND_SEQUENCE) " AND "
7360 "d.classid = 'pg_class'::regclass AND d.objid = c.oid AND "
7361 "d.objsubid = 0 AND "
7362 "d.refclassid = 'pg_class'::regclass AND d.deptype IN ('a', 'i'))\n"
7363 "LEFT JOIN pg_tablespace tsp ON (tsp.oid = c.reltablespace)\n");
7364
7365 /*
7366 * In 9.6 and up, left join to pg_am to pick up the amname.
7367 */
7368 if (fout->remoteVersion >= 90600)
7370 "LEFT JOIN pg_am am ON (c.relam = am.oid)\n");
7371
7372 /*
7373 * We purposefully ignore toast OIDs for partitioned tables; the reason is
7374 * that versions 10 and 11 have them, but later versions do not, so
7375 * emitting them causes the upgrade to fail.
7376 */
7378 "LEFT JOIN pg_class tc ON (c.reltoastrelid = tc.oid"
7379 " AND tc.relkind = " CppAsString2(RELKIND_TOASTVALUE)
7380 " AND c.relkind <> " CppAsString2(RELKIND_PARTITIONED_TABLE) ")\n");
7381
7382 /*
7383 * Restrict to interesting relkinds (in particular, not indexes). Not all
7384 * relkinds are possible in older servers, but it's not worth the trouble
7385 * to emit a version-dependent list.
7386 *
7387 * Composite-type table entries won't be dumped as such, but we have to
7388 * make a DumpableObject for them so that we can track dependencies of the
7389 * composite type (pg_depend entries for columns of the composite type
7390 * link to the pg_class entry not the pg_type entry).
7391 */
7393 "WHERE c.relkind IN ("
7401 "ORDER BY c.oid");
7402
7403 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
7404
7405 ntups = PQntuples(res);
7406
7407 *numTables = ntups;
7408
7409 /*
7410 * Extract data from result and lock dumpable tables. We do the locking
7411 * before anything else, to minimize the window wherein a table could
7412 * disappear under us.
7413 *
7414 * Note that we have to save info about all tables here, even when dumping
7415 * only one, because we don't yet know which tables might be inheritance
7416 * ancestors of the target table.
7417 */
7418 tblinfo = pg_malloc0_array(TableInfo, ntups);
7419
7420 i_reltableoid = PQfnumber(res, "tableoid");
7421 i_reloid = PQfnumber(res, "oid");
7422 i_relname = PQfnumber(res, "relname");
7423 i_relnamespace = PQfnumber(res, "relnamespace");
7424 i_relkind = PQfnumber(res, "relkind");
7425 i_reltype = PQfnumber(res, "reltype");
7426 i_relowner = PQfnumber(res, "relowner");
7427 i_relchecks = PQfnumber(res, "relchecks");
7428 i_relhasindex = PQfnumber(res, "relhasindex");
7429 i_relhasrules = PQfnumber(res, "relhasrules");
7430 i_relpages = PQfnumber(res, "relpages");
7431 i_reltuples = PQfnumber(res, "reltuples");
7432 i_relallvisible = PQfnumber(res, "relallvisible");
7433 i_relallfrozen = PQfnumber(res, "relallfrozen");
7434 i_toastpages = PQfnumber(res, "toastpages");
7435 i_owning_tab = PQfnumber(res, "owning_tab");
7436 i_owning_col = PQfnumber(res, "owning_col");
7437 i_reltablespace = PQfnumber(res, "reltablespace");
7438 i_relhasoids = PQfnumber(res, "relhasoids");
7439 i_relhastriggers = PQfnumber(res, "relhastriggers");
7440 i_relpersistence = PQfnumber(res, "relpersistence");
7441 i_relispopulated = PQfnumber(res, "relispopulated");
7442 i_relreplident = PQfnumber(res, "relreplident");
7443 i_relrowsec = PQfnumber(res, "relrowsecurity");
7444 i_relforcerowsec = PQfnumber(res, "relforcerowsecurity");
7445 i_relfrozenxid = PQfnumber(res, "relfrozenxid");
7446 i_toastfrozenxid = PQfnumber(res, "tfrozenxid");
7447 i_toastoid = PQfnumber(res, "toid");
7448 i_relminmxid = PQfnumber(res, "relminmxid");
7449 i_toastminmxid = PQfnumber(res, "tminmxid");
7450 i_reloptions = PQfnumber(res, "reloptions");
7451 i_checkoption = PQfnumber(res, "checkoption");
7452 i_toastreloptions = PQfnumber(res, "toast_reloptions");
7453 i_reloftype = PQfnumber(res, "reloftype");
7454 i_foreignserver = PQfnumber(res, "foreignserver");
7455 i_amname = PQfnumber(res, "amname");
7456 i_is_identity_sequence = PQfnumber(res, "is_identity_sequence");
7457 i_relacl = PQfnumber(res, "relacl");
7458 i_acldefault = PQfnumber(res, "acldefault");
7459 i_ispartition = PQfnumber(res, "ispartition");
7460
7461 if (dopt->lockWaitTimeout)
7462 {
7463 /*
7464 * Arrange to fail instead of waiting forever for a table lock.
7465 *
7466 * NB: this coding assumes that the only queries issued within the
7467 * following loop are LOCK TABLEs; else the timeout may be undesirably
7468 * applied to other things too.
7469 */
7470 resetPQExpBuffer(query);
7471 appendPQExpBufferStr(query, "SET statement_timeout = ");
7473 ExecuteSqlStatement(fout, query->data);
7474 }
7475
7476 resetPQExpBuffer(query);
7477
7478 for (i = 0; i < ntups; i++)
7479 {
7480 int32 relallvisible = atoi(PQgetvalue(res, i, i_relallvisible));
7481 int32 relallfrozen = atoi(PQgetvalue(res, i, i_relallfrozen));
7482
7483 tblinfo[i].dobj.objType = DO_TABLE;
7484 tblinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_reltableoid));
7485 tblinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_reloid));
7486 AssignDumpId(&tblinfo[i].dobj);
7487 tblinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_relname));
7488 tblinfo[i].dobj.namespace =
7490 tblinfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_relacl));
7491 tblinfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
7492 tblinfo[i].dacl.privtype = 0;
7493 tblinfo[i].dacl.initprivs = NULL;
7494 tblinfo[i].relkind = *(PQgetvalue(res, i, i_relkind));
7495 tblinfo[i].reltype = atooid(PQgetvalue(res, i, i_reltype));
7496 tblinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_relowner));
7497 tblinfo[i].ncheck = atoi(PQgetvalue(res, i, i_relchecks));
7498 tblinfo[i].hasindex = (strcmp(PQgetvalue(res, i, i_relhasindex), "t") == 0);
7499 tblinfo[i].hasrules = (strcmp(PQgetvalue(res, i, i_relhasrules), "t") == 0);
7500 tblinfo[i].relpages = atoi(PQgetvalue(res, i, i_relpages));
7501 if (PQgetisnull(res, i, i_toastpages))
7502 tblinfo[i].toastpages = 0;
7503 else
7504 tblinfo[i].toastpages = atoi(PQgetvalue(res, i, i_toastpages));
7505 if (PQgetisnull(res, i, i_owning_tab))
7506 {
7507 tblinfo[i].owning_tab = InvalidOid;
7508 tblinfo[i].owning_col = 0;
7509 }
7510 else
7511 {
7512 tblinfo[i].owning_tab = atooid(PQgetvalue(res, i, i_owning_tab));
7513 tblinfo[i].owning_col = atoi(PQgetvalue(res, i, i_owning_col));
7514 }
7516 tblinfo[i].hasoids = (strcmp(PQgetvalue(res, i, i_relhasoids), "t") == 0);
7517 tblinfo[i].hastriggers = (strcmp(PQgetvalue(res, i, i_relhastriggers), "t") == 0);
7518 tblinfo[i].relpersistence = *(PQgetvalue(res, i, i_relpersistence));
7519 tblinfo[i].relispopulated = (strcmp(PQgetvalue(res, i, i_relispopulated), "t") == 0);
7520 tblinfo[i].relreplident = *(PQgetvalue(res, i, i_relreplident));
7521 tblinfo[i].rowsec = (strcmp(PQgetvalue(res, i, i_relrowsec), "t") == 0);
7522 tblinfo[i].forcerowsec = (strcmp(PQgetvalue(res, i, i_relforcerowsec), "t") == 0);
7523 tblinfo[i].frozenxid = atooid(PQgetvalue(res, i, i_relfrozenxid));
7525 tblinfo[i].toast_oid = atooid(PQgetvalue(res, i, i_toastoid));
7526 tblinfo[i].minmxid = atooid(PQgetvalue(res, i, i_relminmxid));
7527 tblinfo[i].toast_minmxid = atooid(PQgetvalue(res, i, i_toastminmxid));
7528 tblinfo[i].reloptions = pg_strdup(PQgetvalue(res, i, i_reloptions));
7529 if (PQgetisnull(res, i, i_checkoption))
7530 tblinfo[i].checkoption = NULL;
7531 else
7532 tblinfo[i].checkoption = pg_strdup(PQgetvalue(res, i, i_checkoption));
7534 tblinfo[i].reloftype = atooid(PQgetvalue(res, i, i_reloftype));
7536 if (PQgetisnull(res, i, i_amname))
7537 tblinfo[i].amname = NULL;
7538 else
7539 tblinfo[i].amname = pg_strdup(PQgetvalue(res, i, i_amname));
7540 tblinfo[i].is_identity_sequence = (strcmp(PQgetvalue(res, i, i_is_identity_sequence), "t") == 0);
7541 tblinfo[i].ispartition = (strcmp(PQgetvalue(res, i, i_ispartition), "t") == 0);
7542
7543 /* other fields were zeroed above */
7544
7545 /*
7546 * Decide whether we want to dump this table.
7547 */
7548 if (tblinfo[i].relkind == RELKIND_COMPOSITE_TYPE)
7549 tblinfo[i].dobj.dump = DUMP_COMPONENT_NONE;
7550 else
7551 selectDumpableTable(&tblinfo[i], fout);
7552
7553 /*
7554 * Now, consider the table "interesting" if we need to dump its
7555 * definition, data or its statistics. Later on, we'll skip a lot of
7556 * data collection for uninteresting tables.
7557 *
7558 * Note: the "interesting" flag will also be set by flagInhTables for
7559 * parents of interesting tables, so that we collect necessary
7560 * inheritance info even when the parents are not themselves being
7561 * dumped. This is the main reason why we need an "interesting" flag
7562 * that's separate from the components-to-dump bitmask.
7563 */
7564 tblinfo[i].interesting = (tblinfo[i].dobj.dump &
7568
7569 tblinfo[i].dummy_view = false; /* might get set during sort */
7570 tblinfo[i].postponed_def = false; /* might get set during sort */
7571
7572 /* Tables have data */
7574
7575 /* Mark whether table has an ACL */
7576 if (!PQgetisnull(res, i, i_relacl))
7577 tblinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
7578 tblinfo[i].hascolumnACLs = false; /* may get set later */
7579
7580 /* Add statistics */
7581 if (tblinfo[i].interesting)
7582 {
7583 RelStatsInfo *stats;
7584
7585 stats = getRelationStatistics(fout, &tblinfo[i].dobj,
7586 tblinfo[i].relpages,
7587 PQgetvalue(res, i, i_reltuples),
7588 relallvisible, relallfrozen,
7589 tblinfo[i].relkind, NULL, 0);
7590 if (tblinfo[i].relkind == RELKIND_MATVIEW)
7591 tblinfo[i].stats = stats;
7592 }
7593
7594 /*
7595 * Read-lock target tables to make sure they aren't DROPPED or altered
7596 * in schema before we get around to dumping them.
7597 *
7598 * Note that we don't explicitly lock parents of the target tables; we
7599 * assume our lock on the child is enough to prevent schema
7600 * alterations to parent tables.
7601 *
7602 * NOTE: it'd be kinda nice to lock other relations too, not only
7603 * plain or partitioned tables, but the backend doesn't presently
7604 * allow that.
7605 *
7606 * We only need to lock the table for certain components; see
7607 * pg_dump.h
7608 */
7609 if ((tblinfo[i].dobj.dump & DUMP_COMPONENTS_REQUIRING_LOCK) &&
7610 (tblinfo[i].relkind == RELKIND_RELATION ||
7611 tblinfo[i].relkind == RELKIND_PARTITIONED_TABLE))
7612 {
7613 /*
7614 * Tables are locked in batches. When dumping from a remote
7615 * server this can save a significant amount of time by reducing
7616 * the number of round trips.
7617 */
7618 if (query->len == 0)
7619 appendPQExpBuffer(query, "LOCK TABLE %s",
7620 fmtQualifiedDumpable(&tblinfo[i]));
7621 else
7622 {
7623 appendPQExpBuffer(query, ", %s",
7624 fmtQualifiedDumpable(&tblinfo[i]));
7625
7626 /* Arbitrarily end a batch when query length reaches 100K. */
7627 if (query->len >= 100000)
7628 {
7629 /* Lock another batch of tables. */
7630 appendPQExpBufferStr(query, " IN ACCESS SHARE MODE");
7631 ExecuteSqlStatement(fout, query->data);
7632 resetPQExpBuffer(query);
7633 }
7634 }
7635 }
7636 }
7637
7638 if (query->len != 0)
7639 {
7640 /* Lock the tables in the last batch. */
7641 appendPQExpBufferStr(query, " IN ACCESS SHARE MODE");
7642 ExecuteSqlStatement(fout, query->data);
7643 }
7644
7645 if (dopt->lockWaitTimeout)
7646 {
7647 ExecuteSqlStatement(fout, "SET statement_timeout = 0");
7648 }
7649
7650 PQclear(res);
7651
7652 destroyPQExpBuffer(query);
7653
7654 return tblinfo;
7655}
7656
7657/*
7658 * getOwnedSeqs
7659 * identify owned sequences and mark them as dumpable if owning table is
7660 *
7661 * We used to do this in getTables(), but it's better to do it after the
7662 * index used by findTableByOid() has been set up.
7663 */
7664void
/*
 * NOTE(review): the extraction dropped line 7665 here (the parameter list of
 * getOwnedSeqs); presumably it names fout, tblinfo[] and numTables, since
 * the body reads tblinfo[i] and numTables — verify against upstream.
 */
7666{
7667 int i;
7668
7669 /*
7670 * Force sequences that are "owned" by table columns to be dumped whenever
7671 * their owning table is being dumped.
7672 */
7673 for (i = 0; i < numTables; i++)
7674 {
7675 TableInfo *seqinfo = &tblinfo[i];
7676 TableInfo *owning_tab;
7677
7678 if (!OidIsValid(seqinfo->owning_tab))
7679 continue; /* not an owned sequence */
7680
7681 owning_tab = findTableByOid(seqinfo->owning_tab);
7682 if (owning_tab == NULL)
7683 pg_fatal("failed sanity check, parent table with OID %u of sequence with OID %u not found",
7684 seqinfo->owning_tab, seqinfo->dobj.catId.oid);
7685
7686 /*
7687 * For an identity sequence, dump exactly the same components for the
7688 * sequence as for the owning table. This is important because we
7689 * treat the identity sequence as an integral part of the table. For
7690 * example, there is not any DDL command that allows creation of such
7691 * a sequence independently of the table.
7692 *
7693 * For other owned sequences such as serial sequences, we need to dump
7694 * the components that are being dumped for the table and any
7695 * components that the sequence is explicitly marked with.
7696 *
7697 * We can't simply use the set of components which are being dumped
7698 * for the table as the table might be in an extension (and only the
7699 * non-extension components, eg: ACLs if changed, security labels, and
7700 * policies, are being dumped) while the sequence is not (and
7701 * therefore the definition and other components should also be
7702 * dumped).
7703 *
7704 * If the sequence is part of the extension then it should be properly
7705 * marked by checkExtensionMembership() and this will be a no-op as
7706 * the table will be equivalently marked.
7707 */
7708 if (seqinfo->is_identity_sequence)
7709 seqinfo->dobj.dump = owning_tab->dobj.dump;
7710 else
7711 seqinfo->dobj.dump |= owning_tab->dobj.dump;
7712
7713 /* Make sure that necessary data is available if we're dumping it */
7714 if (seqinfo->dobj.dump != DUMP_COMPONENT_NONE)
7715 {
7716 seqinfo->interesting = true;
7717 owning_tab->interesting = true;
7718 }
7719 }
7720}
7721
7722/*
7723 * getInherits
7724 * read all the inheritance information
7725 * from the system catalogs return them in the InhInfo* structure
7726 *
7727 * numInherits is set to the number of pairs read in
7728 */
7729InhInfo *
/*
 * NOTE(review): extraction dropped line 7730 (the parameter list; the body
 * uses fout and the numInherits out-parameter) — verify against upstream.
 */
7731{
7732 PGresult *res;
7733 int ntups;
7734 int i;
/*
 * NOTE(review): lines 7735-7736 were dropped here; they must declare the
 * 'query' PQExpBuffer and the 'inhinfo' result array used below — verify.
 */
7737
7738 int i_inhrelid;
7739 int i_inhparent;
7740
7741 /* find all the inheritance information */
7742 appendPQExpBufferStr(query, "SELECT inhrelid, inhparent FROM pg_inherits");
7743
7744 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
7745
7746 ntups = PQntuples(res);
7747
7748 *numInherits = ntups;
7749
/* NOTE(review): dropped line 7750 presumably allocates inhinfo[ntups] — verify. */
7751
7752 i_inhrelid = PQfnumber(res, "inhrelid");
7753 i_inhparent = PQfnumber(res, "inhparent");
7754
/* Copy each (child, parent) OID pair out of the result set. */
7755 for (i = 0; i < ntups; i++)
7756 {
7757 inhinfo[i].inhrelid = atooid(PQgetvalue(res, i, i_inhrelid));
7758 inhinfo[i].inhparent = atooid(PQgetvalue(res, i, i_inhparent));
7759 }
7760
7761 PQclear(res);
7762
7763 destroyPQExpBuffer(query);
7764
7765 return inhinfo;
7766}
7767
7768/*
7769 * getPartitioningInfo
7770 * get information about partitioning
7771 *
7772 * For the most part, we only collect partitioning info about tables we
7773 * intend to dump. However, this function has to consider all partitioned
7774 * tables in the database, because we need to know about parents of partitions
7775 * we are going to dump even if the parents themselves won't be dumped.
7776 *
7777 * Specifically, what we need to know is whether each partitioned table
7778 * has an "unsafe" partitioning scheme that requires us to force
7779 * load-via-partition-root mode for its children. Currently the only case
7780 * for which we force that is hash partitioning on enum columns, since the
7781 * hash codes depend on enum value OIDs which won't be replicated across
7782 * dump-and-reload. There are other cases in which load-via-partition-root
7783 * might be necessary, but we expect users to cope with them.
7784 */
7785void
/* NOTE(review): dropped line 7786 is the parameter list (body uses fout) — verify. */
7787{
7788 PQExpBuffer query;
7789 PGresult *res;
7790 int ntups;
7791
7792 /* hash partitioning didn't exist before v11 */
7793 if (fout->remoteVersion < 110000)
7794 return;
7795 /* needn't bother if not dumping data */
7796 if (!fout->dopt->dumpData)
7797 return;
7798
7799 query = createPQExpBuffer();
7800
7801 /*
7802 * Unsafe partitioning schemes are exactly those for which hash enum_ops
7803 * appears among the partition opclasses. We needn't check partstrat.
7804 *
7805 * Note that this query may well retrieve info about tables we aren't
7806 * going to dump and hence have no lock on. That's okay since we need not
7807 * invoke any unsafe server-side functions.
7808 */
/* NOTE(review): dropped line 7809 is the appendPQExpBufferStr(query, ...) call head — verify. */
7810 "SELECT partrelid FROM pg_partitioned_table WHERE\n"
7811 "(SELECT c.oid FROM pg_opclass c JOIN pg_am a "
7812 "ON c.opcmethod = a.oid\n"
7813 "WHERE opcname = 'enum_ops' "
7814 "AND opcnamespace = 'pg_catalog'::regnamespace "
7815 "AND amname = 'hash') = ANY(partclass)");
7816
7817 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
7818
7819 ntups = PQntuples(res);
7820
7821 for (int i = 0; i < ntups; i++)
7822 {
7823 Oid tabrelid = atooid(PQgetvalue(res, i, 0));
/*
 * NOTE(review): dropped lines 7824/7826 must declare tbinfo and set it via
 * findTableByOid(tabrelid), since it is tested just below — verify.
 */
7825
7827 if (tbinfo == NULL)
7828 pg_fatal("failed sanity check, table OID %u appearing in pg_partitioned_table not found",
7829 tabrelid);
7830 tbinfo->unsafe_partitions = true;
7831 }
7832
7833 PQclear(res);
7834
7835 destroyPQExpBuffer(query);
7836}
7837
7838/*
7839 * getIndexes
7840 * get information about every index on a dumpable table
7841 *
7842 * Note: index data is not returned directly to the caller, but it
7843 * does get entered into the DumpableObject tables.
7844 */
7845void
/*
 * NOTE(review): this rendering of the file dropped a number of lines inside
 * this function (visible as gaps in the embedded line numbers, e.g. 7846,
 * 7848-7849, 7853, 7857, 7859-7862, 7864, 7867-7869, 7872-7875, 7878-7882,
 * 7892, 7908, 7911, 7913, 7923, 7941, 8062, 8071-8072, 8108, 8141, 8143,
 * 8147, 8164, 8166, 8204). They carry the signature, local declarations
 * (query/tbloids buffers, tbinfo, indxinfo, relstats, the remaining i_*
 * column indexes), appendPQExpBuffer* call heads, and allocations —
 * restore them from the upstream source before compiling.
 */
7847{
7850 PGresult *res;
7851 int ntups;
7852 int curtblindx;
7854 int i_tableoid,
7855 i_oid,
7856 i_indrelid,
7858 i_relpages,
7863 i_indexdef,
7865 i_indnatts,
7866 i_indkey,
7870 i_contype,
7871 i_conname,
7876 i_conoid,
7877 i_condef,
7883
7884 /*
7885 * We want to perform just one query against pg_index. However, we
7886 * mustn't try to select every row of the catalog and then sort it out on
7887 * the client side, because some of the server-side functions we need
7888 * would be unsafe to apply to tables we don't have lock on. Hence, we
7889 * build an array of the OIDs of tables we care about (and now have lock
7890 * on!), and use a WHERE clause to constrain which rows are selected.
7891 */
7893 for (int i = 0; i < numTables; i++)
7894 {
7895 TableInfo *tbinfo = &tblinfo[i];
7896
7897 if (!tbinfo->hasindex)
7898 continue;
7899
7900 /*
7901 * We can ignore indexes of uninteresting tables.
7902 */
7903 if (!tbinfo->interesting)
7904 continue;
7905
7906 /* OK, we need info for this table */
7907 if (tbloids->len > 1) /* do we have more than the '{'? */
7909 appendPQExpBuffer(tbloids, "%u", tbinfo->dobj.catId.oid);
7910 }
7912
7914 "SELECT t.tableoid, t.oid, i.indrelid, "
7915 "t.relname AS indexname, "
7916 "t.relpages, t.reltuples, t.relallvisible, ");
7917
7918 if (fout->remoteVersion >= 180000)
7919 appendPQExpBufferStr(query, "t.relallfrozen, ");
7920 else
7921 appendPQExpBufferStr(query, "0 AS relallfrozen, ");
7922
7924 "pg_catalog.pg_get_indexdef(i.indexrelid) AS indexdef, "
7925 "i.indkey, i.indisclustered, "
7926 "c.contype, c.conname, "
7927 "c.condeferrable, c.condeferred, "
7928 "c.tableoid AS contableoid, "
7929 "c.oid AS conoid, "
7930 "pg_catalog.pg_get_constraintdef(c.oid, false) AS condef, "
7931 "CASE WHEN i.indexprs IS NOT NULL THEN "
7932 "(SELECT pg_catalog.array_agg(attname ORDER BY attnum)"
7933 " FROM pg_catalog.pg_attribute "
7934 " WHERE attrelid = i.indexrelid) "
7935 "ELSE NULL END AS indattnames, "
7936 "(SELECT spcname FROM pg_catalog.pg_tablespace s WHERE s.oid = t.reltablespace) AS tablespace, "
7937 "t.reloptions AS indreloptions, ");
7938
7939
7940 if (fout->remoteVersion >= 90400)
7942 "i.indisreplident, ");
7943 else
7945 "false AS indisreplident, ");
7946
7947 if (fout->remoteVersion >= 110000)
7949 "inh.inhparent AS parentidx, "
7950 "i.indnkeyatts AS indnkeyatts, "
7951 "i.indnatts AS indnatts, "
7952 "(SELECT pg_catalog.array_agg(attnum ORDER BY attnum) "
7953 " FROM pg_catalog.pg_attribute "
7954 " WHERE attrelid = i.indexrelid AND "
7955 " attstattarget >= 0) AS indstatcols, "
7956 "(SELECT pg_catalog.array_agg(attstattarget ORDER BY attnum) "
7957 " FROM pg_catalog.pg_attribute "
7958 " WHERE attrelid = i.indexrelid AND "
7959 " attstattarget >= 0) AS indstatvals, ");
7960 else
7962 "0 AS parentidx, "
7963 "i.indnatts AS indnkeyatts, "
7964 "i.indnatts AS indnatts, "
7965 "'' AS indstatcols, "
7966 "'' AS indstatvals, ");
7967
7968 if (fout->remoteVersion >= 150000)
7970 "i.indnullsnotdistinct, ");
7971 else
7973 "false AS indnullsnotdistinct, ");
7974
7975 if (fout->remoteVersion >= 180000)
7977 "c.conperiod ");
7978 else
7980 "NULL AS conperiod ");
7981
7982 /*
7983 * The point of the messy-looking outer join is to find a constraint that
7984 * is related by an internal dependency link to the index. If we find one,
7985 * create a CONSTRAINT entry linked to the INDEX entry. We assume an
7986 * index won't have more than one internal dependency.
7987 *
7988 * Note: the check on conrelid is redundant, but useful because that
7989 * column is indexed while conindid is not.
7990 */
7991 if (fout->remoteVersion >= 110000)
7992 {
7993 appendPQExpBuffer(query,
7994 "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
7995 "JOIN pg_catalog.pg_index i ON (src.tbloid = i.indrelid) "
7996 "JOIN pg_catalog.pg_class t ON (t.oid = i.indexrelid) "
7997 "JOIN pg_catalog.pg_class t2 ON (t2.oid = i.indrelid) "
7998 "LEFT JOIN pg_catalog.pg_constraint c "
7999 "ON (i.indrelid = c.conrelid AND "
8000 "i.indexrelid = c.conindid AND "
8001 "c.contype IN ('p','u','x')) "
8002 "LEFT JOIN pg_catalog.pg_inherits inh "
8003 "ON (inh.inhrelid = indexrelid) "
8004 "WHERE (i.indisvalid OR t2.relkind = 'p') "
8005 "AND i.indisready "
8006 "ORDER BY i.indrelid, indexname",
8007 tbloids->data);
8008 }
8009 else
8010 {
8011 /*
8012 * the test on indisready is necessary in 9.2, and harmless in
8013 * earlier/later versions
8014 */
8015 appendPQExpBuffer(query,
8016 "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
8017 "JOIN pg_catalog.pg_index i ON (src.tbloid = i.indrelid) "
8018 "JOIN pg_catalog.pg_class t ON (t.oid = i.indexrelid) "
8019 "LEFT JOIN pg_catalog.pg_constraint c "
8020 "ON (i.indrelid = c.conrelid AND "
8021 "i.indexrelid = c.conindid AND "
8022 "c.contype IN ('p','u','x')) "
8023 "WHERE i.indisvalid AND i.indisready "
8024 "ORDER BY i.indrelid, indexname",
8025 tbloids->data);
8026 }
8027
8028 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
8029
8030 ntups = PQntuples(res);
8031
8032 i_tableoid = PQfnumber(res, "tableoid");
8033 i_oid = PQfnumber(res, "oid");
8034 i_indrelid = PQfnumber(res, "indrelid");
8035 i_indexname = PQfnumber(res, "indexname");
8036 i_relpages = PQfnumber(res, "relpages");
8037 i_reltuples = PQfnumber(res, "reltuples");
8038 i_relallvisible = PQfnumber(res, "relallvisible");
8039 i_relallfrozen = PQfnumber(res, "relallfrozen");
8040 i_parentidx = PQfnumber(res, "parentidx");
8041 i_indexdef = PQfnumber(res, "indexdef");
8042 i_indnkeyatts = PQfnumber(res, "indnkeyatts");
8043 i_indnatts = PQfnumber(res, "indnatts");
8044 i_indkey = PQfnumber(res, "indkey");
8045 i_indisclustered = PQfnumber(res, "indisclustered");
8046 i_indisreplident = PQfnumber(res, "indisreplident");
8047 i_indnullsnotdistinct = PQfnumber(res, "indnullsnotdistinct");
8048 i_contype = PQfnumber(res, "contype");
8049 i_conname = PQfnumber(res, "conname");
8050 i_condeferrable = PQfnumber(res, "condeferrable");
8051 i_condeferred = PQfnumber(res, "condeferred");
8052 i_conperiod = PQfnumber(res, "conperiod");
8053 i_contableoid = PQfnumber(res, "contableoid");
8054 i_conoid = PQfnumber(res, "conoid");
8055 i_condef = PQfnumber(res, "condef");
8056 i_indattnames = PQfnumber(res, "indattnames");
8057 i_tablespace = PQfnumber(res, "tablespace");
8058 i_indreloptions = PQfnumber(res, "indreloptions");
8059 i_indstatcols = PQfnumber(res, "indstatcols");
8060 i_indstatvals = PQfnumber(res, "indstatvals");
8061
8063
8064 /*
8065 * Outer loop iterates once per table, not once per row. Incrementing of
8066 * j is handled by the inner loop.
8067 */
8068 curtblindx = -1;
8069 for (int j = 0; j < ntups;)
8070 {
8073 char **indAttNames = NULL;
8074 int nindAttNames = 0;
8075 int numinds;
8076
8077 /* Count rows for this table */
8078 for (numinds = 1; numinds < ntups - j; numinds++)
8079 if (atooid(PQgetvalue(res, j + numinds, i_indrelid)) != indrelid)
8080 break;
8081
8082 /*
8083 * Locate the associated TableInfo; we rely on tblinfo[] being in OID
8084 * order.
8085 */
8086 while (++curtblindx < numTables)
8087 {
8088 tbinfo = &tblinfo[curtblindx];
8089 if (tbinfo->dobj.catId.oid == indrelid)
8090 break;
8091 }
8092 if (curtblindx >= numTables)
8093 pg_fatal("unrecognized table OID %u", indrelid);
8094 /* cross-check that we only got requested tables */
8095 if (!tbinfo->hasindex ||
8096 !tbinfo->interesting)
8097 pg_fatal("unexpected index data for table \"%s\"",
8098 tbinfo->dobj.name);
8099
8100 /* Save data for this table */
8101 tbinfo->indexes = indxinfo + j;
8102 tbinfo->numIndexes = numinds;
8103
8104 for (int c = 0; c < numinds; c++, j++)
8105 {
8106 char contype;
8107 char indexkind;
8109 int32 relpages = atoi(PQgetvalue(res, j, i_relpages));
8110 int32 relallvisible = atoi(PQgetvalue(res, j, i_relallvisible));
8111 int32 relallfrozen = atoi(PQgetvalue(res, j, i_relallfrozen));
8112
8113 indxinfo[j].dobj.objType = DO_INDEX;
8114 indxinfo[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_tableoid));
8115 indxinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
8116 AssignDumpId(&indxinfo[j].dobj);
8117 indxinfo[j].dobj.dump = tbinfo->dobj.dump;
8118 indxinfo[j].dobj.name = pg_strdup(PQgetvalue(res, j, i_indexname));
8119 indxinfo[j].dobj.namespace = tbinfo->dobj.namespace;
8120 indxinfo[j].indextable = tbinfo;
8121 indxinfo[j].indexdef = pg_strdup(PQgetvalue(res, j, i_indexdef));
8122 indxinfo[j].indnkeyattrs = atoi(PQgetvalue(res, j, i_indnkeyatts));
8123 indxinfo[j].indnattrs = atoi(PQgetvalue(res, j, i_indnatts));
8124 indxinfo[j].tablespace = pg_strdup(PQgetvalue(res, j, i_tablespace));
8125 indxinfo[j].indreloptions = pg_strdup(PQgetvalue(res, j, i_indreloptions));
8126 indxinfo[j].indstatcols = pg_strdup(PQgetvalue(res, j, i_indstatcols));
8127 indxinfo[j].indstatvals = pg_strdup(PQgetvalue(res, j, i_indstatvals));
8128 indxinfo[j].indkeys = pg_malloc_array(Oid, indxinfo[j].indnattrs);
8130 indxinfo[j].indkeys, indxinfo[j].indnattrs);
8131 indxinfo[j].indisclustered = (PQgetvalue(res, j, i_indisclustered)[0] == 't');
8132 indxinfo[j].indisreplident = (PQgetvalue(res, j, i_indisreplident)[0] == 't');
8133 indxinfo[j].indnullsnotdistinct = (PQgetvalue(res, j, i_indnullsnotdistinct)[0] == 't');
8134 indxinfo[j].parentidx = atooid(PQgetvalue(res, j, i_parentidx));
8135 indxinfo[j].partattaches = (SimplePtrList)
8136 {
8137 NULL, NULL
8138 };
8139
8140 if (indxinfo[j].parentidx == 0)
8142 else
8144
8145 if (!PQgetisnull(res, j, i_indattnames))
8146 {
8148 &indAttNames, &nindAttNames))
8149 pg_fatal("could not parse %s array", "indattnames");
8150 }
8151
8152 relstats = getRelationStatistics(fout, &indxinfo[j].dobj, relpages,
8153 PQgetvalue(res, j, i_reltuples),
8154 relallvisible, relallfrozen, indexkind,
8155 indAttNames, nindAttNames);
8156
8157 contype = *(PQgetvalue(res, j, i_contype));
8158 if (contype == 'p' || contype == 'u' || contype == 'x')
8159 {
8160 /*
8161 * If we found a constraint matching the index, create an
8162 * entry for it.
8163 */
8165
8167 constrinfo->dobj.objType = DO_CONSTRAINT;
8168 constrinfo->dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_contableoid));
8169 constrinfo->dobj.catId.oid = atooid(PQgetvalue(res, j, i_conoid));
8170 AssignDumpId(&constrinfo->dobj);
8171 constrinfo->dobj.dump = tbinfo->dobj.dump;
8172 constrinfo->dobj.name = pg_strdup(PQgetvalue(res, j, i_conname));
8173 constrinfo->dobj.namespace = tbinfo->dobj.namespace;
8174 constrinfo->contable = tbinfo;
8175 constrinfo->condomain = NULL;
8176 constrinfo->contype = contype;
8177 if (contype == 'x')
8178 constrinfo->condef = pg_strdup(PQgetvalue(res, j, i_condef));
8179 else
8180 constrinfo->condef = NULL;
8181 constrinfo->confrelid = InvalidOid;
8182 constrinfo->conindex = indxinfo[j].dobj.dumpId;
8183 constrinfo->condeferrable = *(PQgetvalue(res, j, i_condeferrable)) == 't';
8184 constrinfo->condeferred = *(PQgetvalue(res, j, i_condeferred)) == 't';
8185 constrinfo->conperiod = *(PQgetvalue(res, j, i_conperiod)) == 't';
8186 constrinfo->conislocal = true;
8187 constrinfo->separate = true;
8188
8189 indxinfo[j].indexconstraint = constrinfo->dobj.dumpId;
8190 if (relstats != NULL)
8191 addObjectDependency(&relstats->dobj, constrinfo->dobj.dumpId);
8192 }
8193 else
8194 {
8195 /* Plain secondary index */
8196 indxinfo[j].indexconstraint = 0;
8197 }
8198 }
8199 }
8200
8201 PQclear(res);
8202
8203 destroyPQExpBuffer(query);
8205}
8206
8207/*
8208 * getExtendedStatistics
8209 * get information about extended-statistics objects.
8210 *
8211 * Note: extended statistics data is not returned directly to the caller, but
8212 * it does get entered into the DumpableObject tables.
8213 */
8214void
/*
 * NOTE(review): extraction dropped lines 8215 (parameter list; the body uses
 * fout), 8219 (declaration of the statsextinfo array), 8257 (its allocation),
 * 8267/8270 (the lookup calls feeding dobj.namespace and stattable), and
 * 8277 (the "decide whether to dump" call) — restore from upstream.
 */
8216{
8217 PQExpBuffer query;
8218 PGresult *res;
8220 int ntups;
8221 int i_tableoid;
8222 int i_oid;
8223 int i_stxname;
8224 int i_stxnamespace;
8225 int i_stxowner;
8226 int i_stxrelid;
8227 int i_stattarget;
8228 int i;
8229
8230 /* Extended statistics were new in v10 */
8231 if (fout->remoteVersion < 100000)
8232 return;
8233
8234 query = createPQExpBuffer();
8235
/* stxstattarget was added in v13; substitute NULL for older servers. */
8236 if (fout->remoteVersion < 130000)
8237 appendPQExpBufferStr(query, "SELECT tableoid, oid, stxname, "
8238 "stxnamespace, stxowner, stxrelid, NULL AS stxstattarget "
8239 "FROM pg_catalog.pg_statistic_ext");
8240 else
8241 appendPQExpBufferStr(query, "SELECT tableoid, oid, stxname, "
8242 "stxnamespace, stxowner, stxrelid, stxstattarget "
8243 "FROM pg_catalog.pg_statistic_ext");
8244
8245 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
8246
8247 ntups = PQntuples(res);
8248
8249 i_tableoid = PQfnumber(res, "tableoid");
8250 i_oid = PQfnumber(res, "oid");
8251 i_stxname = PQfnumber(res, "stxname");
8252 i_stxnamespace = PQfnumber(res, "stxnamespace");
8253 i_stxowner = PQfnumber(res, "stxowner");
8254 i_stxrelid = PQfnumber(res, "stxrelid");
8255 i_stattarget = PQfnumber(res, "stxstattarget");
8256
8258
8259 for (i = 0; i < ntups; i++)
8260 {
8261 statsextinfo[i].dobj.objType = DO_STATSEXT;
8262 statsextinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
8263 statsextinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
8264 AssignDumpId(&statsextinfo[i].dobj);
8265 statsextinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_stxname));
8266 statsextinfo[i].dobj.namespace =
8268 statsextinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_stxowner));
8269 statsextinfo[i].stattable =
/* A NULL stxstattarget (pre-v13, or unset) maps to -1, meaning "default". */
8271 if (PQgetisnull(res, i, i_stattarget))
8272 statsextinfo[i].stattarget = -1;
8273 else
8274 statsextinfo[i].stattarget = atoi(PQgetvalue(res, i, i_stattarget));
8275
8276 /* Decide whether we want to dump it */
8278
8279 if (fout->dopt->dumpStatistics)
8280 statsextinfo[i].dobj.components |= DUMP_COMPONENT_STATISTICS;
8281 }
8282
8283 PQclear(res);
8284 destroyPQExpBuffer(query);
8285}
8286
8287/*
8288 * getConstraints
8289 *
8290 * Get info about constraints on dumpable tables.
8291 *
8292 * Currently handles foreign keys only.
8293 * Unique and primary key constraints are handled with indexes,
8294 * while check constraints are processed in getTableAttrs().
8295 */
8296void
/*
 * NOTE(review): extraction dropped lines 8297 (parameter list; the body uses
 * fout, tblinfo[], numTables), 8299-8300 (query/tbloids buffer declarations),
 * 8304-8305 (tbinfo and constrinfo declarations), 8310 (i_confrelid),
 * 8322/8335 (tbloids '{'/'}' delimiters), 8337/8351/8353 (appendPQExpBuffer*
 * call heads), 8368 (constrinfo allocation), 8374 (reftable declaration),
 * 8424 (refidx declaration), 8431 (the addConstrChildIdxDeps call), and
 * 8441 (destroyPQExpBuffer(tbloids)) — restore from upstream.
 */
8298{
8301 PGresult *res;
8302 int ntups;
8303 int curtblindx;
8306 int i_contableoid,
8307 i_conoid,
8308 i_conrelid,
8309 i_conname,
8311 i_conindid,
8312 i_condef;
8313
8314 /*
8315 * We want to perform just one query against pg_constraint. However, we
8316 * mustn't try to select every row of the catalog and then sort it out on
8317 * the client side, because some of the server-side functions we need
8318 * would be unsafe to apply to tables we don't have lock on. Hence, we
8319 * build an array of the OIDs of tables we care about (and now have lock
8320 * on!), and use a WHERE clause to constrain which rows are selected.
8321 */
8323 for (int i = 0; i < numTables; i++)
8324 {
8325 TableInfo *tinfo = &tblinfo[i];
8326
8327 if (!(tinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
8328 continue;
8329
8330 /* OK, we need info for this table */
8331 if (tbloids->len > 1) /* do we have more than the '{'? */
8333 appendPQExpBuffer(tbloids, "%u", tinfo->dobj.catId.oid);
8334 }
8336
8338 "SELECT c.tableoid, c.oid, "
8339 "conrelid, conname, confrelid, ");
8340 if (fout->remoteVersion >= 110000)
8341 appendPQExpBufferStr(query, "conindid, ");
8342 else
8343 appendPQExpBufferStr(query, "0 AS conindid, ");
8344 appendPQExpBuffer(query,
8345 "pg_catalog.pg_get_constraintdef(c.oid) AS condef\n"
8346 "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
8347 "JOIN pg_catalog.pg_constraint c ON (src.tbloid = c.conrelid)\n"
8348 "WHERE contype = 'f' ",
8349 tbloids->data);
8350 if (fout->remoteVersion >= 110000)
8352 "AND conparentid = 0 ");
8354 "ORDER BY conrelid, conname");
8355
8356 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
8357
8358 ntups = PQntuples(res);
8359
8360 i_contableoid = PQfnumber(res, "tableoid");
8361 i_conoid = PQfnumber(res, "oid");
8362 i_conrelid = PQfnumber(res, "conrelid");
8363 i_conname = PQfnumber(res, "conname");
8364 i_confrelid = PQfnumber(res, "confrelid");
8365 i_conindid = PQfnumber(res, "conindid");
8366 i_condef = PQfnumber(res, "condef");
8367
8369
8370 curtblindx = -1;
8371 for (int j = 0; j < ntups; j++)
8372 {
8373 Oid conrelid = atooid(PQgetvalue(res, j, i_conrelid));
8375
8376 /*
8377 * Locate the associated TableInfo; we rely on tblinfo[] being in OID
8378 * order.
8379 */
8380 if (tbinfo == NULL || tbinfo->dobj.catId.oid != conrelid)
8381 {
8382 while (++curtblindx < numTables)
8383 {
8384 tbinfo = &tblinfo[curtblindx];
8385 if (tbinfo->dobj.catId.oid == conrelid)
8386 break;
8387 }
8388 if (curtblindx >= numTables)
8389 pg_fatal("unrecognized table OID %u", conrelid);
8390 }
8391
8392 constrinfo[j].dobj.objType = DO_FK_CONSTRAINT;
8393 constrinfo[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_contableoid));
8394 constrinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_conoid));
8395 AssignDumpId(&constrinfo[j].dobj);
8396 constrinfo[j].dobj.name = pg_strdup(PQgetvalue(res, j, i_conname));
8397 constrinfo[j].dobj.namespace = tbinfo->dobj.namespace;
8398 constrinfo[j].contable = tbinfo;
8399 constrinfo[j].condomain = NULL;
8400 constrinfo[j].contype = 'f';
8401 constrinfo[j].condef = pg_strdup(PQgetvalue(res, j, i_condef));
8402 constrinfo[j].confrelid = atooid(PQgetvalue(res, j, i_confrelid));
8403 constrinfo[j].conindex = 0;
8404 constrinfo[j].condeferrable = false;
8405 constrinfo[j].condeferred = false;
8406 constrinfo[j].conislocal = true;
8407 constrinfo[j].separate = true;
8408
8409 /*
8410 * Restoring an FK that points to a partitioned table requires that
8411 * all partition indexes have been attached beforehand. Ensure that
8412 * happens by making the constraint depend on each index partition
8413 * attach object.
8414 */
8415 reftable = findTableByOid(constrinfo[j].confrelid);
8416 if (reftable && reftable->relkind == RELKIND_PARTITIONED_TABLE)
8417 {
8418 Oid indexOid = atooid(PQgetvalue(res, j, i_conindid));
8419
8420 if (indexOid != InvalidOid)
8421 {
8422 for (int k = 0; k < reftable->numIndexes; k++)
8423 {
8425
8426 /* not our index? */
8427 if (reftable->indexes[k].dobj.catId.oid != indexOid)
8428 continue;
8429
8430 refidx = &reftable->indexes[k];
8432 break;
8433 }
8434 }
8435 }
8436 }
8437
8438 PQclear(res);
8439
8440 destroyPQExpBuffer(query);
8442}
8443
8444/*
8445 * addConstrChildIdxDeps
8446 *
8447 * Recursive subroutine for getConstraints
8448 *
8449 * Given an object representing a foreign key constraint and an index on the
8450 * partitioned table it references, mark the constraint object as dependent
8451 * on the DO_INDEX_ATTACH object of each index partition, recursively
8452 * drilling down to their partitions if any. This ensures that the FK is not
8453 * restored until the index is fully marked valid.
8454 */
8455static void
/*
 * NOTE(review): extraction dropped line 8456 (the parameter list; the body
 * uses dobj and refidx), line 8460 (presumably an Assert on dobj), and line
 * 8464 (the declaration initializing 'attach' from cell->ptr) — verify
 * against upstream.
 */
8457{
8458 SimplePtrListCell *cell;
8459
8461
/* Walk this index's list of partition-attach objects. */
8462 for (cell = refidx->partattaches.head; cell; cell = cell->next)
8463 {
8465
8466 addObjectDependency(dobj, attach->dobj.dumpId);
8467
/* Recurse if the attached partition index itself has partitions. */
8468 if (attach->partitionIdx->partattaches.head != NULL)
8469 addConstrChildIdxDeps(dobj, attach->partitionIdx);
8470 }
8471}
8472
8473/*
8474 * getDomainConstraints
8475 *
8476 * Get info about constraints on a domain.
8477 */
8478static void
/*
 * NOTE(review): extraction dropped lines 8479 (parameter list; the body uses
 * fout and tyinfo), 8481-8482 (declarations incl. the 'query' buffer and
 * 'constrinfo' array), 8488 (i_convalidated in the index list), 8492 (the
 * "is the prepared statement set up yet?" test guarding this block), 8511
 * (marking it prepared), and 8529 (the constrinfo allocation) — restore
 * from upstream.
 */
8480{
8483 PGresult *res;
8484 int i_tableoid,
8485 i_oid,
8486 i_conname,
8487 i_consrc,
8489 i_contype;
8490 int ntups;
8491
8493 {
8494 /*
8495 * Set up query for constraint-specific details. For servers 17 and
8496 * up, domains have constraints of type 'n' as well as 'c', otherwise
8497 * just the latter.
8498 */
8499 appendPQExpBuffer(query,
8500 "PREPARE getDomainConstraints(pg_catalog.oid) AS\n"
8501 "SELECT tableoid, oid, conname, "
8502 "pg_catalog.pg_get_constraintdef(oid) AS consrc, "
8503 "convalidated, contype "
8504 "FROM pg_catalog.pg_constraint "
8505 "WHERE contypid = $1 AND contype IN (%s) "
8506 "ORDER BY conname",
8507 fout->remoteVersion < 170000 ? "'c'" : "'c', 'n'");
8508
8509 ExecuteSqlStatement(fout, query->data);
8510
8512 }
8513
8514 printfPQExpBuffer(query,
8515 "EXECUTE getDomainConstraints('%u')",
8516 tyinfo->dobj.catId.oid);
8517
8518 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
8519
8520 ntups = PQntuples(res);
8521
8522 i_tableoid = PQfnumber(res, "tableoid");
8523 i_oid = PQfnumber(res, "oid");
8524 i_conname = PQfnumber(res, "conname");
8525 i_consrc = PQfnumber(res, "consrc");
8526 i_convalidated = PQfnumber(res, "convalidated");
8527 i_contype = PQfnumber(res, "contype");
8528
8530 tyinfo->domChecks = constrinfo;
8531
8532 /* 'i' tracks result rows; 'j' counts CHECK constraints */
8533 for (int i = 0, j = 0; i < ntups; i++)
8534 {
8535 bool validated = PQgetvalue(res, i, i_convalidated)[0] == 't';
8536 char contype = (PQgetvalue(res, i, i_contype))[0];
8537 ConstraintInfo *constraint;
8538
8539 if (contype == CONSTRAINT_CHECK)
8540 {
8541 constraint = &constrinfo[j++];
8542 tyinfo->nDomChecks++;
8543 }
8544 else
8545 {
8546 Assert(contype == CONSTRAINT_NOTNULL);
8547 Assert(tyinfo->notnull == NULL);
8548 /* use last item in array for the not-null constraint */
8549 tyinfo->notnull = &(constrinfo[ntups - 1]);
8550 constraint = tyinfo->notnull;
8551 }
8552
8553 constraint->dobj.objType = DO_CONSTRAINT;
8554 constraint->dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
8555 constraint->dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
8556 AssignDumpId(&(constraint->dobj));
8557 constraint->dobj.name = pg_strdup(PQgetvalue(res, i, i_conname));
8558 constraint->dobj.namespace = tyinfo->dobj.namespace;
8559 constraint->contable = NULL;
8560 constraint->condomain = tyinfo;
8561 constraint->contype = contype;
8562 constraint->condef = pg_strdup(PQgetvalue(res, i, i_consrc));
8563 constraint->confrelid = InvalidOid;
8564 constraint->conindex = 0;
8565 constraint->condeferrable = false;
8566 constraint->condeferred = false;
8567 constraint->conislocal = true;
8568
/* Unvalidated constraints are dumped separately, after the domain. */
8569 constraint->separate = !validated;
8570
8571 /*
8572 * Make the domain depend on the constraint, ensuring it won't be
8573 * output till any constraint dependencies are OK. If the constraint
8574 * has not been validated, it's going to be dumped after the domain
8575 * anyway, so this doesn't matter.
8576 */
8577 if (validated)
8578 addObjectDependency(&tyinfo->dobj, constraint->dobj.dumpId);
8579 }
8580
8581 PQclear(res);
8582
8583 destroyPQExpBuffer(query);
8584}
8585
8586/*
8587 * getRules
8588 * get basic information about every rule in the system
8589 */
8590void
/*
 * NOTE(review): extraction dropped lines 8591 (parameter list; the body uses
 * fout), 8596-8597 (declarations of the 'query' buffer and the 'ruleinfo'
 * array), 8617 (the ruleinfo allocation), 8629 (the declaration initializing
 * 'ruletableoid' from the result row), and 8666 (the addObjectDependency
 * call head in the else branch) — restore from upstream.
 */
8592{
8593 PGresult *res;
8594 int ntups;
8595 int i;
8598 int i_tableoid;
8599 int i_oid;
8600 int i_rulename;
8601 int i_ruletable;
8602 int i_ev_type;
8603 int i_is_instead;
8604 int i_ev_enabled;
8605
8606 appendPQExpBufferStr(query, "SELECT "
8607 "tableoid, oid, rulename, "
8608 "ev_class AS ruletable, ev_type, is_instead, "
8609 "ev_enabled "
8610 "FROM pg_rewrite "
8611 "ORDER BY oid");
8612
8613 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
8614
8615 ntups = PQntuples(res);
8616
8618
8619 i_tableoid = PQfnumber(res, "tableoid");
8620 i_oid = PQfnumber(res, "oid");
8621 i_rulename = PQfnumber(res, "rulename");
8622 i_ruletable = PQfnumber(res, "ruletable");
8623 i_ev_type = PQfnumber(res, "ev_type");
8624 i_is_instead = PQfnumber(res, "is_instead");
8625 i_ev_enabled = PQfnumber(res, "ev_enabled");
8626
8627 for (i = 0; i < ntups; i++)
8628 {
8630
8631 ruleinfo[i].dobj.objType = DO_RULE;
8632 ruleinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
8633 ruleinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
8634 AssignDumpId(&ruleinfo[i].dobj);
8635 ruleinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_rulename));
8637 ruleinfo[i].ruletable = findTableByOid(ruletableoid);
8638 if (ruleinfo[i].ruletable == NULL)
8639 pg_fatal("failed sanity check, parent table with OID %u of pg_rewrite entry with OID %u not found",
8640 ruletableoid, ruleinfo[i].dobj.catId.oid);
8641 ruleinfo[i].dobj.namespace = ruleinfo[i].ruletable->dobj.namespace;
8642 ruleinfo[i].dobj.dump = ruleinfo[i].ruletable->dobj.dump;
8643 ruleinfo[i].ev_type = *(PQgetvalue(res, i, i_ev_type));
8644 ruleinfo[i].is_instead = *(PQgetvalue(res, i, i_is_instead)) == 't';
8645 ruleinfo[i].ev_enabled = *(PQgetvalue(res, i, i_ev_enabled));
8646 if (ruleinfo[i].ruletable)
8647 {
8648 /*
8649 * If the table is a view or materialized view, force its ON
8650 * SELECT rule to be sorted before the view itself --- this
8651 * ensures that any dependencies for the rule affect the table's
8652 * positioning. Other rules are forced to appear after their
8653 * table.
8654 */
8655 if ((ruleinfo[i].ruletable->relkind == RELKIND_VIEW ||
8656 ruleinfo[i].ruletable->relkind == RELKIND_MATVIEW) &&
8657 ruleinfo[i].ev_type == '1' && ruleinfo[i].is_instead)
8658 {
8659 addObjectDependency(&ruleinfo[i].ruletable->dobj,
8660 ruleinfo[i].dobj.dumpId);
8661 /* We'll merge the rule into CREATE VIEW, if possible */
8662 ruleinfo[i].separate = false;
8663 }
8664 else
8665 {
8667 ruleinfo[i].ruletable->dobj.dumpId);
8668 ruleinfo[i].separate = true;
8669 }
8670 }
8671 else
8672 ruleinfo[i].separate = true;
8673 }
8674
8675 PQclear(res);
8676
8677 destroyPQExpBuffer(query);
8678}
8679
8680/*
8681 * getTriggers
8682 * get information about every trigger on a dumpable table
8683 *
8684 * Note: trigger data is not returned directly to the caller, but it
8685 * does get entered into the DumpableObject tables.
8686 */
8687void
8689{
8692 PGresult *res;
8693 int ntups;
8694 int curtblindx;
8696 int i_tableoid,
8697 i_oid,
8698 i_tgrelid,
8699 i_tgname,
8702 i_tgdef;
8703
8704 /*
8705 * We want to perform just one query against pg_trigger. However, we
8706 * mustn't try to select every row of the catalog and then sort it out on
8707 * the client side, because some of the server-side functions we need
8708 * would be unsafe to apply to tables we don't have lock on. Hence, we
8709 * build an array of the OIDs of tables we care about (and now have lock
8710 * on!), and use a WHERE clause to constrain which rows are selected.
8711 */
8713 for (int i = 0; i < numTables; i++)
8714 {
8715 TableInfo *tbinfo = &tblinfo[i];
8716
8717 if (!tbinfo->hastriggers ||
8718 !(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
8719 continue;
8720
8721 /* OK, we need info for this table */
8722 if (tbloids->len > 1) /* do we have more than the '{'? */
8724 appendPQExpBuffer(tbloids, "%u", tbinfo->dobj.catId.oid);
8725 }
8727
8728 if (fout->remoteVersion >= 150000)
8729 {
8730 /*
8731 * NB: think not to use pretty=true in pg_get_triggerdef. It could
8732 * result in non-forward-compatible dumps of WHEN clauses due to
8733 * under-parenthesization.
8734 *
8735 * NB: We need to see partition triggers in case the tgenabled flag
8736 * has been changed from the parent.
8737 */
8738 appendPQExpBuffer(query,
8739 "SELECT t.tgrelid, t.tgname, "
8740 "pg_catalog.pg_get_triggerdef(t.oid, false) AS tgdef, "
8741 "t.tgenabled, t.tableoid, t.oid, "
8742 "t.tgparentid <> 0 AS tgispartition\n"
8743 "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
8744 "JOIN pg_catalog.pg_trigger t ON (src.tbloid = t.tgrelid) "
8745 "LEFT JOIN pg_catalog.pg_trigger u ON (u.oid = t.tgparentid) "
8746 "WHERE ((NOT t.tgisinternal AND t.tgparentid = 0) "
8747 "OR t.tgenabled != u.tgenabled) "
8748 "ORDER BY t.tgrelid, t.tgname",
8749 tbloids->data);
8750 }
8751 else if (fout->remoteVersion >= 130000)
8752 {
8753 /*
8754 * NB: think not to use pretty=true in pg_get_triggerdef. It could
8755 * result in non-forward-compatible dumps of WHEN clauses due to
8756 * under-parenthesization.
8757 *
8758 * NB: We need to see tgisinternal triggers in partitions, in case the
8759 * tgenabled flag has been changed from the parent.
8760 */
8761 appendPQExpBuffer(query,
8762 "SELECT t.tgrelid, t.tgname, "
8763 "pg_catalog.pg_get_triggerdef(t.oid, false) AS tgdef, "
8764 "t.tgenabled, t.tableoid, t.oid, t.tgisinternal as tgispartition\n"
8765 "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
8766 "JOIN pg_catalog.pg_trigger t ON (src.tbloid = t.tgrelid) "
8767 "LEFT JOIN pg_catalog.pg_trigger u ON (u.oid = t.tgparentid) "
8768 "WHERE (NOT t.tgisinternal OR t.tgenabled != u.tgenabled) "
8769 "ORDER BY t.tgrelid, t.tgname",
8770 tbloids->data);
8771 }
8772 else if (fout->remoteVersion >= 110000)
8773 {
8774 /*
8775 * NB: We need to see tgisinternal triggers in partitions, in case the
8776 * tgenabled flag has been changed from the parent. No tgparentid in
8777 * version 11-12, so we have to match them via pg_depend.
8778 *
8779 * See above about pretty=true in pg_get_triggerdef.
8780 */
8781 appendPQExpBuffer(query,
8782 "SELECT t.tgrelid, t.tgname, "
8783 "pg_catalog.pg_get_triggerdef(t.oid, false) AS tgdef, "
8784 "t.tgenabled, t.tableoid, t.oid, t.tgisinternal as tgispartition "
8785 "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
8786 "JOIN pg_catalog.pg_trigger t ON (src.tbloid = t.tgrelid) "
8787 "LEFT JOIN pg_catalog.pg_depend AS d ON "
8788 " d.classid = 'pg_catalog.pg_trigger'::pg_catalog.regclass AND "
8789 " d.refclassid = 'pg_catalog.pg_trigger'::pg_catalog.regclass AND "
8790 " d.objid = t.oid "
8791 "LEFT JOIN pg_catalog.pg_trigger AS pt ON pt.oid = refobjid "
8792 "WHERE (NOT t.tgisinternal OR t.tgenabled != pt.tgenabled) "
8793 "ORDER BY t.tgrelid, t.tgname",
8794 tbloids->data);
8795 }
8796 else
8797 {
8798 /* See above about pretty=true in pg_get_triggerdef */
8799 appendPQExpBuffer(query,
8800 "SELECT t.tgrelid, t.tgname, "
8801 "pg_catalog.pg_get_triggerdef(t.oid, false) AS tgdef, "
8802 "t.tgenabled, false as tgispartition, "
8803 "t.tableoid, t.oid "
8804 "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
8805 "JOIN pg_catalog.pg_trigger t ON (src.tbloid = t.tgrelid) "
8806 "WHERE NOT tgisinternal "
8807 "ORDER BY t.tgrelid, t.tgname",
8808 tbloids->data);
8809 }
8810
8811 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
8812
8813 ntups = PQntuples(res);
8814
8815 i_tableoid = PQfnumber(res, "tableoid");
8816 i_oid = PQfnumber(res, "oid");
8817 i_tgrelid = PQfnumber(res, "tgrelid");
8818 i_tgname = PQfnumber(res, "tgname");
8819 i_tgenabled = PQfnumber(res, "tgenabled");
8820 i_tgispartition = PQfnumber(res, "tgispartition");
8821 i_tgdef = PQfnumber(res, "tgdef");
8822
8824
8825 /*
8826 * Outer loop iterates once per table, not once per row. Incrementing of
8827 * j is handled by the inner loop.
8828 */
8829 curtblindx = -1;
8830 for (int j = 0; j < ntups;)
8831 {
8834 int numtrigs;
8835
8836 /* Count rows for this table */
8837 for (numtrigs = 1; numtrigs < ntups - j; numtrigs++)
8838 if (atooid(PQgetvalue(res, j + numtrigs, i_tgrelid)) != tgrelid)
8839 break;
8840
8841 /*
8842 * Locate the associated TableInfo; we rely on tblinfo[] being in OID
8843 * order.
8844 */
8845 while (++curtblindx < numTables)
8846 {
8847 tbinfo = &tblinfo[curtblindx];
8848 if (tbinfo->dobj.catId.oid == tgrelid)
8849 break;
8850 }
8851 if (curtblindx >= numTables)
8852 pg_fatal("unrecognized table OID %u", tgrelid);
8853
8854 /* Save data for this table */
8855 tbinfo->triggers = tginfo + j;
8856 tbinfo->numTriggers = numtrigs;
8857
8858 for (int c = 0; c < numtrigs; c++, j++)
8859 {
8860 tginfo[j].dobj.objType = DO_TRIGGER;
8861 tginfo[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_tableoid));
8862 tginfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
8863 AssignDumpId(&tginfo[j].dobj);
8864 tginfo[j].dobj.name = pg_strdup(PQgetvalue(res, j, i_tgname));
8865 tginfo[j].dobj.namespace = tbinfo->dobj.namespace;
8866 tginfo[j].tgtable = tbinfo;
8867 tginfo[j].tgenabled = *(PQgetvalue(res, j, i_tgenabled));
8868 tginfo[j].tgispartition = *(PQgetvalue(res, j, i_tgispartition)) == 't';
8869 tginfo[j].tgdef = pg_strdup(PQgetvalue(res, j, i_tgdef));
8870 }
8871 }
8872
8873 PQclear(res);
8874
8875 destroyPQExpBuffer(query);
8877}
8878
8879/*
8880 * getEventTriggers
8881 * get information about event triggers
8882 */
8883void
8885{
8886 int i;
8887 PQExpBuffer query;
8888 PGresult *res;
8890 int i_tableoid,
8891 i_oid,
8892 i_evtname,
8893 i_evtevent,
8894 i_evtowner,
8895 i_evttags,
8896 i_evtfname,
8898 int ntups;
8899
8900 /* Before 9.3, there are no event triggers */
8901 if (fout->remoteVersion < 90300)
8902 return;
8903
8904 query = createPQExpBuffer();
8905
8907 "SELECT e.tableoid, e.oid, evtname, evtenabled, "
8908 "evtevent, evtowner, "
8909 "array_to_string(array("
8910 "select quote_literal(x) "
8911 " from unnest(evttags) as t(x)), ', ') as evttags, "
8912 "e.evtfoid::regproc as evtfname "
8913 "FROM pg_event_trigger e "
8914 "ORDER BY e.oid");
8915
8916 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
8917
8918 ntups = PQntuples(res);
8919
8921
8922 i_tableoid = PQfnumber(res, "tableoid");
8923 i_oid = PQfnumber(res, "oid");
8924 i_evtname = PQfnumber(res, "evtname");
8925 i_evtevent = PQfnumber(res, "evtevent");
8926 i_evtowner = PQfnumber(res, "evtowner");
8927 i_evttags = PQfnumber(res, "evttags");
8928 i_evtfname = PQfnumber(res, "evtfname");
8929 i_evtenabled = PQfnumber(res, "evtenabled");
8930
8931 for (i = 0; i < ntups; i++)
8932 {
8933 evtinfo[i].dobj.objType = DO_EVENT_TRIGGER;
8934 evtinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
8935 evtinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
8936 AssignDumpId(&evtinfo[i].dobj);
8937 evtinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_evtname));
8938 evtinfo[i].evtname = pg_strdup(PQgetvalue(res, i, i_evtname));
8939 evtinfo[i].evtevent = pg_strdup(PQgetvalue(res, i, i_evtevent));
8940 evtinfo[i].evtowner = getRoleName(PQgetvalue(res, i, i_evtowner));
8941 evtinfo[i].evttags = pg_strdup(PQgetvalue(res, i, i_evttags));
8942 evtinfo[i].evtfname = pg_strdup(PQgetvalue(res, i, i_evtfname));
8943 evtinfo[i].evtenabled = *(PQgetvalue(res, i, i_evtenabled));
8944
8945 /* Decide whether we want to dump it */
8947 }
8948
8949 PQclear(res);
8950
8951 destroyPQExpBuffer(query);
8952}
8953
8954/*
8955 * getProcLangs
8956 * get basic information about every procedural language in the system
8957 *
8958 * NB: this must run after getFuncs() because we assume we can do
8959 * findFuncByOid().
8960 */
8961void
8963{
8964 PGresult *res;
8965 int ntups;
8966 int i;
8969 int i_tableoid;
8970 int i_oid;
8971 int i_lanname;
8972 int i_lanpltrusted;
8973 int i_lanplcallfoid;
8974 int i_laninline;
8975 int i_lanvalidator;
8976 int i_lanacl;
8977 int i_acldefault;
8978 int i_lanowner;
8979
8980 appendPQExpBufferStr(query, "SELECT tableoid, oid, "
8981 "lanname, lanpltrusted, lanplcallfoid, "
8982 "laninline, lanvalidator, "
8983 "lanacl, "
8984 "acldefault('l', lanowner) AS acldefault, "
8985 "lanowner "
8986 "FROM pg_language "
8987 "WHERE lanispl "
8988 "ORDER BY oid");
8989
8990 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
8991
8992 ntups = PQntuples(res);
8993
8995
8996 i_tableoid = PQfnumber(res, "tableoid");
8997 i_oid = PQfnumber(res, "oid");
8998 i_lanname = PQfnumber(res, "lanname");
8999 i_lanpltrusted = PQfnumber(res, "lanpltrusted");
9000 i_lanplcallfoid = PQfnumber(res, "lanplcallfoid");
9001 i_laninline = PQfnumber(res, "laninline");
9002 i_lanvalidator = PQfnumber(res, "lanvalidator");
9003 i_lanacl = PQfnumber(res, "lanacl");
9004 i_acldefault = PQfnumber(res, "acldefault");
9005 i_lanowner = PQfnumber(res, "lanowner");
9006
9007 for (i = 0; i < ntups; i++)
9008 {
9009 planginfo[i].dobj.objType = DO_PROCLANG;
9010 planginfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
9011 planginfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
9012 AssignDumpId(&planginfo[i].dobj);
9013
9014 planginfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_lanname));
9015 planginfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_lanacl));
9016 planginfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
9017 planginfo[i].dacl.privtype = 0;
9018 planginfo[i].dacl.initprivs = NULL;
9019 planginfo[i].lanpltrusted = *(PQgetvalue(res, i, i_lanpltrusted)) == 't';
9020 planginfo[i].lanplcallfoid = atooid(PQgetvalue(res, i, i_lanplcallfoid));
9021 planginfo[i].laninline = atooid(PQgetvalue(res, i, i_laninline));
9022 planginfo[i].lanvalidator = atooid(PQgetvalue(res, i, i_lanvalidator));
9023 planginfo[i].lanowner = getRoleName(PQgetvalue(res, i, i_lanowner));
9024
9025 /* Decide whether we want to dump it */
9027
9028 /* Mark whether language has an ACL */
9029 if (!PQgetisnull(res, i, i_lanacl))
9030 planginfo[i].dobj.components |= DUMP_COMPONENT_ACL;
9031 }
9032
9033 PQclear(res);
9034
9035 destroyPQExpBuffer(query);
9036}
9037
9038/*
9039 * getCasts
9040 * get basic information about most casts in the system
9041 *
9042 * Skip casts from a range to its multirange, since we'll create those
9043 * automatically.
9044 */
9045void
9047{
9048 PGresult *res;
9049 int ntups;
9050 int i;
9053 int i_tableoid;
9054 int i_oid;
9055 int i_castsource;
9056 int i_casttarget;
9057 int i_castfunc;
9058 int i_castcontext;
9059 int i_castmethod;
9060
9061 if (fout->remoteVersion >= 140000)
9062 {
9063 appendPQExpBufferStr(query, "SELECT tableoid, oid, "
9064 "castsource, casttarget, castfunc, castcontext, "
9065 "castmethod "
9066 "FROM pg_cast c "
9067 "WHERE NOT EXISTS ( "
9068 "SELECT 1 FROM pg_range r "
9069 "WHERE c.castsource = r.rngtypid "
9070 "AND c.casttarget = r.rngmultitypid "
9071 ") "
9072 "ORDER BY 3,4");
9073 }
9074 else
9075 {
9076 appendPQExpBufferStr(query, "SELECT tableoid, oid, "
9077 "castsource, casttarget, castfunc, castcontext, "
9078 "castmethod "
9079 "FROM pg_cast ORDER BY 3,4");
9080 }
9081
9082 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
9083
9084 ntups = PQntuples(res);
9085
9087
9088 i_tableoid = PQfnumber(res, "tableoid");
9089 i_oid = PQfnumber(res, "oid");
9090 i_castsource = PQfnumber(res, "castsource");
9091 i_casttarget = PQfnumber(res, "casttarget");
9092 i_castfunc = PQfnumber(res, "castfunc");
9093 i_castcontext = PQfnumber(res, "castcontext");
9094 i_castmethod = PQfnumber(res, "castmethod");
9095
9096 for (i = 0; i < ntups; i++)
9097 {
9101
9103 castinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
9104 castinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
9105 AssignDumpId(&castinfo[i].dobj);
9106 castinfo[i].castsource = atooid(PQgetvalue(res, i, i_castsource));
9107 castinfo[i].casttarget = atooid(PQgetvalue(res, i, i_casttarget));
9108 castinfo[i].castfunc = atooid(PQgetvalue(res, i, i_castfunc));
9109 castinfo[i].castcontext = *(PQgetvalue(res, i, i_castcontext));
9110 castinfo[i].castmethod = *(PQgetvalue(res, i, i_castmethod));
9111
9112 /*
9113 * Try to name cast as concatenation of typnames. This is only used
9114 * for purposes of sorting. If we fail to find either type, the name
9115 * will be an empty string.
9116 */
9118 sTypeInfo = findTypeByOid(castinfo[i].castsource);
9119 tTypeInfo = findTypeByOid(castinfo[i].casttarget);
9120 if (sTypeInfo && tTypeInfo)
9121 appendPQExpBuffer(&namebuf, "%s %s",
9122 sTypeInfo->dobj.name, tTypeInfo->dobj.name);
9123 castinfo[i].dobj.name = namebuf.data;
9124
9125 /* Decide whether we want to dump it */
9127 }
9128
9129 PQclear(res);
9130
9131 destroyPQExpBuffer(query);
9132}
9133
9134static char *
9136{
9137 PQExpBuffer query;
9138 PGresult *res;
9139 char *lanname;
9140
9141 query = createPQExpBuffer();
9142 appendPQExpBuffer(query, "SELECT lanname FROM pg_language WHERE oid = %u", langid);
9143 res = ExecuteSqlQueryForSingleRow(fout, query->data);
9144 lanname = pg_strdup(fmtId(PQgetvalue(res, 0, 0)));
9145 destroyPQExpBuffer(query);
9146 PQclear(res);
9147
9148 return lanname;
9149}
9150
9151/*
9152 * getTransforms
9153 * get basic information about every transform in the system
9154 */
9155void
9157{
9158 PGresult *res;
9159 int ntups;
9160 int i;
9161 PQExpBuffer query;
9163 int i_tableoid;
9164 int i_oid;
9165 int i_trftype;
9166 int i_trflang;
9167 int i_trffromsql;
9168 int i_trftosql;
9169
9170 /* Transforms didn't exist pre-9.5 */
9171 if (fout->remoteVersion < 90500)
9172 return;
9173
9174 query = createPQExpBuffer();
9175
9176 appendPQExpBufferStr(query, "SELECT tableoid, oid, "
9177 "trftype, trflang, trffromsql::oid, trftosql::oid "
9178 "FROM pg_transform "
9179 "ORDER BY 3,4");
9180
9181 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
9182
9183 ntups = PQntuples(res);
9184
9186
9187 i_tableoid = PQfnumber(res, "tableoid");
9188 i_oid = PQfnumber(res, "oid");
9189 i_trftype = PQfnumber(res, "trftype");
9190 i_trflang = PQfnumber(res, "trflang");
9191 i_trffromsql = PQfnumber(res, "trffromsql");
9192 i_trftosql = PQfnumber(res, "trftosql");
9193
9194 for (i = 0; i < ntups; i++)
9195 {
9198 char *lanname;
9199
9201 transforminfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
9202 transforminfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
9204 transforminfo[i].trftype = atooid(PQgetvalue(res, i, i_trftype));
9205 transforminfo[i].trflang = atooid(PQgetvalue(res, i, i_trflang));
9206 transforminfo[i].trffromsql = atooid(PQgetvalue(res, i, i_trffromsql));
9207 transforminfo[i].trftosql = atooid(PQgetvalue(res, i, i_trftosql));
9208
9209 /*
9210 * Try to name transform as concatenation of type and language name.
9211 * This is only used for purposes of sorting. If we fail to find
9212 * either, the name will be an empty string.
9213 */
9217 if (typeInfo && lanname)
9218 appendPQExpBuffer(&namebuf, "%s %s",
9219 typeInfo->dobj.name, lanname);
9220 transforminfo[i].dobj.name = namebuf.data;
9221 free(lanname);
9222
9223 /* Decide whether we want to dump it */
9225 }
9226
9227 PQclear(res);
9228
9229 destroyPQExpBuffer(query);
9230}
9231
9232/*
9233 * getTableAttrs -
9234 * for each interesting table, read info about its attributes
9235 * (names, types, default values, CHECK constraints, etc)
9236 *
9237 * modifies tblinfo
9238 */
9239void
9241{
9242 DumpOptions *dopt = fout->dopt;
9247 PGresult *res;
9248 int ntups;
9249 int curtblindx;
9250 int i_attrelid;
9251 int i_attnum;
9252 int i_attname;
9253 int i_atttypname;
9254 int i_attstattarget;
9255 int i_attstorage;
9256 int i_typstorage;
9257 int i_attidentity;
9258 int i_attgenerated;
9259 int i_attisdropped;
9260 int i_attlen;
9261 int i_attalign;
9262 int i_attislocal;
9263 int i_notnull_name;
9268 int i_attoptions;
9269 int i_attcollation;
9270 int i_attcompression;
9271 int i_attfdwoptions;
9272 int i_attmissingval;
9273 int i_atthasdef;
9274
9275 /*
9276 * We want to perform just one query against pg_attribute, and then just
9277 * one against pg_attrdef (for DEFAULTs) and two against pg_constraint
9278 * (for CHECK constraints and for NOT NULL constraints). However, we
9279 * mustn't try to select every row of those catalogs and then sort it out
9280 * on the client side, because some of the server-side functions we need
9281 * would be unsafe to apply to tables we don't have lock on. Hence, we
9282 * build an array of the OIDs of tables we care about (and now have lock
9283 * on!), and use a WHERE clause to constrain which rows are selected.
9284 */
9287 for (int i = 0; i < numTables; i++)
9288 {
9289 TableInfo *tbinfo = &tblinfo[i];
9290
9291 /* Don't bother to collect info for sequences */
9292 if (tbinfo->relkind == RELKIND_SEQUENCE)
9293 continue;
9294
9295 /*
9296 * Don't bother with uninteresting tables, either. For binary
9297 * upgrades, this is bypassed for pg_largeobject_metadata and
9298 * pg_shdepend so that the columns names are collected for the
9299 * corresponding COPY commands. Restoring the data for those catalogs
9300 * is faster than restoring the equivalent set of large object
9301 * commands.
9302 */
9303 if (!tbinfo->interesting &&
9304 !(fout->dopt->binary_upgrade &&
9305 (tbinfo->dobj.catId.oid == LargeObjectMetadataRelationId ||
9306 tbinfo->dobj.catId.oid == SharedDependRelationId)))
9307 continue;
9308
9309 /* OK, we need info for this table */
9310 if (tbloids->len > 1) /* do we have more than the '{'? */
9312 appendPQExpBuffer(tbloids, "%u", tbinfo->dobj.catId.oid);
9313
9314 if (tbinfo->ncheck > 0)
9315 {
9316 /* Also make a list of the ones with check constraints */
9317 if (checkoids->len > 1) /* do we have more than the '{'? */
9319 appendPQExpBuffer(checkoids, "%u", tbinfo->dobj.catId.oid);
9320 }
9321 }
9324
9325 /*
9326 * Find all the user attributes and their types.
9327 *
9328 * Since we only want to dump COLLATE clauses for attributes whose
9329 * collation is different from their type's default, we use a CASE here to
9330 * suppress uninteresting attcollations cheaply.
9331 */
9333 "SELECT\n"
9334 "a.attrelid,\n"
9335 "a.attnum,\n"
9336 "a.attname,\n"
9337 "a.attstattarget,\n"
9338 "a.attstorage,\n"
9339 "t.typstorage,\n"
9340 "a.atthasdef,\n"
9341 "a.attisdropped,\n"
9342 "a.attlen,\n"
9343 "a.attalign,\n"
9344 "a.attislocal,\n"
9345 "pg_catalog.format_type(t.oid, a.atttypmod) AS atttypname,\n"
9346 "array_to_string(a.attoptions, ', ') AS attoptions,\n"
9347 "CASE WHEN a.attcollation <> t.typcollation "
9348 "THEN a.attcollation ELSE 0 END AS attcollation,\n"
9349 "pg_catalog.array_to_string(ARRAY("
9350 "SELECT pg_catalog.quote_ident(option_name) || "
9351 "' ' || pg_catalog.quote_literal(option_value) "
9352 "FROM pg_catalog.pg_options_to_table(attfdwoptions) "
9353 "ORDER BY option_name"
9354 "), E',\n ') AS attfdwoptions,\n");
9355
9356 /*
9357 * Find out any NOT NULL markings for each column. In 18 and up we read
9358 * pg_constraint to obtain the constraint name, and for valid constraints
9359 * also pg_description to obtain its comment. notnull_noinherit is set
9360 * according to the NO INHERIT property. For versions prior to 18, we
9361 * store an empty string as the name when a constraint is marked as
9362 * attnotnull (this cues dumpTableSchema to print the NOT NULL clause
9363 * without a name); also, such cases are never NO INHERIT.
9364 *
9365 * For invalid constraints, we need to store their OIDs for processing
9366 * elsewhere, so we bring the pg_constraint.oid value when the constraint
9367 * is invalid, and NULL otherwise. Their comments are handled not here
9368 * but by collectComments, because they're their own dumpable object.
9369 *
9370 * We track in notnull_islocal whether the constraint was defined directly
9371 * in this table or via an ancestor, for binary upgrade. flagInhAttrs
9372 * might modify this later.
9373 */
9374 if (fout->remoteVersion >= 180000)
9376 "co.conname AS notnull_name,\n"
9377 "CASE WHEN co.convalidated THEN pt.description"
9378 " ELSE NULL END AS notnull_comment,\n"
9379 "CASE WHEN NOT co.convalidated THEN co.oid "
9380 "ELSE NULL END AS notnull_invalidoid,\n"
9381 "co.connoinherit AS notnull_noinherit,\n"
9382 "co.conislocal AS notnull_islocal,\n");
9383 else
9385 "CASE WHEN a.attnotnull THEN '' ELSE NULL END AS notnull_name,\n"
9386 "NULL AS notnull_comment,\n"
9387 "NULL AS notnull_invalidoid,\n"
9388 "false AS notnull_noinherit,\n"
9389 "CASE WHEN a.attislocal THEN true\n"
9390 " WHEN a.attnotnull AND NOT a.attislocal THEN true\n"
9391 " ELSE false\n"
9392 "END AS notnull_islocal,\n");
9393
9394 if (fout->remoteVersion >= 140000)
9396 "a.attcompression AS attcompression,\n");
9397 else
9399 "'' AS attcompression,\n");
9400
9401 if (fout->remoteVersion >= 100000)
9403 "a.attidentity,\n");
9404 else
9406 "'' AS attidentity,\n");
9407
9408 if (fout->remoteVersion >= 110000)
9410 "CASE WHEN a.atthasmissing AND NOT a.attisdropped "
9411 "THEN a.attmissingval ELSE null END AS attmissingval,\n");
9412 else
9414 "NULL AS attmissingval,\n");
9415
9416 if (fout->remoteVersion >= 120000)
9418 "a.attgenerated\n");
9419 else
9421 "'' AS attgenerated\n");
9422
9423 /* need left join to pg_type to not fail on dropped columns ... */
9425 "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
9426 "JOIN pg_catalog.pg_attribute a ON (src.tbloid = a.attrelid) "
9427 "LEFT JOIN pg_catalog.pg_type t "
9428 "ON (a.atttypid = t.oid)\n",
9429 tbloids->data);
9430
9431 /*
9432 * In versions 18 and up, we need pg_constraint for explicit NOT NULL
9433 * entries and pg_description to get their comments.
9434 */
9435 if (fout->remoteVersion >= 180000)
9437 " LEFT JOIN pg_catalog.pg_constraint co ON "
9438 "(a.attrelid = co.conrelid\n"
9439 " AND co.contype = 'n' AND "
9440 "co.conkey = array[a.attnum])\n"
9441 " LEFT JOIN pg_catalog.pg_description pt ON "
9442 "(pt.classoid = co.tableoid AND pt.objoid = co.oid)\n");
9443
9445 "WHERE a.attnum > 0::pg_catalog.int2\n");
9446
9447 /*
9448 * For binary upgrades from <v12, be sure to pick up
9449 * pg_largeobject_metadata's oid column.
9450 */
9451 if (fout->dopt->binary_upgrade && fout->remoteVersion < 120000)
9453 "OR (a.attnum = -2::pg_catalog.int2 AND src.tbloid = "
9455
9457 "ORDER BY a.attrelid, a.attnum");
9458
9460
9461 ntups = PQntuples(res);
9462
9463 i_attrelid = PQfnumber(res, "attrelid");
9464 i_attnum = PQfnumber(res, "attnum");
9465 i_attname = PQfnumber(res, "attname");
9466 i_atttypname = PQfnumber(res, "atttypname");
9467 i_attstattarget = PQfnumber(res, "attstattarget");
9468 i_attstorage = PQfnumber(res, "attstorage");
9469 i_typstorage = PQfnumber(res, "typstorage");
9470 i_attidentity = PQfnumber(res, "attidentity");
9471 i_attgenerated = PQfnumber(res, "attgenerated");
9472 i_attisdropped = PQfnumber(res, "attisdropped");
9473 i_attlen = PQfnumber(res, "attlen");
9474 i_attalign = PQfnumber(res, "attalign");
9475 i_attislocal = PQfnumber(res, "attislocal");
9476 i_notnull_name = PQfnumber(res, "notnull_name");
9477 i_notnull_comment = PQfnumber(res, "notnull_comment");
9478 i_notnull_invalidoid = PQfnumber(res, "notnull_invalidoid");
9479 i_notnull_noinherit = PQfnumber(res, "notnull_noinherit");
9480 i_notnull_islocal = PQfnumber(res, "notnull_islocal");
9481 i_attoptions = PQfnumber(res, "attoptions");
9482 i_attcollation = PQfnumber(res, "attcollation");
9483 i_attcompression = PQfnumber(res, "attcompression");
9484 i_attfdwoptions = PQfnumber(res, "attfdwoptions");
9485 i_attmissingval = PQfnumber(res, "attmissingval");
9486 i_atthasdef = PQfnumber(res, "atthasdef");
9487
9488 /* Within the next loop, we'll accumulate OIDs of tables with defaults */
9491
9492 /*
9493 * Outer loop iterates once per table, not once per row. Incrementing of
9494 * r is handled by the inner loop.
9495 */
9496 curtblindx = -1;
9497 for (int r = 0; r < ntups;)
9498 {
9499 Oid attrelid = atooid(PQgetvalue(res, r, i_attrelid));
9501 int numatts;
9502 bool hasdefaults;
9503
9504 /* Count rows for this table */
9505 for (numatts = 1; numatts < ntups - r; numatts++)
9506 if (atooid(PQgetvalue(res, r + numatts, i_attrelid)) != attrelid)
9507 break;
9508
9509 /*
9510 * Locate the associated TableInfo; we rely on tblinfo[] being in OID
9511 * order.
9512 */
9513 while (++curtblindx < numTables)
9514 {
9515 tbinfo = &tblinfo[curtblindx];
9516 if (tbinfo->dobj.catId.oid == attrelid)
9517 break;
9518 }
9519 if (curtblindx >= numTables)
9520 pg_fatal("unrecognized table OID %u", attrelid);
9521 /* cross-check that we only got requested tables */
9522 if (tbinfo->relkind == RELKIND_SEQUENCE ||
9523 (!tbinfo->interesting &&
9524 !(fout->dopt->binary_upgrade &&
9525 (tbinfo->dobj.catId.oid == LargeObjectMetadataRelationId ||
9526 tbinfo->dobj.catId.oid == SharedDependRelationId))))
9527 pg_fatal("unexpected column data for table \"%s\"",
9528 tbinfo->dobj.name);
9529
9530 /* Save data for this table */
9531 tbinfo->numatts = numatts;
9532 tbinfo->attnames = pg_malloc_array(char *, numatts);
9533 tbinfo->atttypnames = pg_malloc_array(char *, numatts);
9534 tbinfo->attstattarget = pg_malloc_array(int, numatts);
9535 tbinfo->attstorage = pg_malloc_array(char, numatts);
9536 tbinfo->typstorage = pg_malloc_array(char, numatts);
9537 tbinfo->attidentity = pg_malloc_array(char, numatts);
9538 tbinfo->attgenerated = pg_malloc_array(char, numatts);
9539 tbinfo->attisdropped = pg_malloc_array(bool, numatts);
9540 tbinfo->attlen = pg_malloc_array(int, numatts);
9541 tbinfo->attalign = pg_malloc_array(char, numatts);
9542 tbinfo->attislocal = pg_malloc_array(bool, numatts);
9543 tbinfo->attoptions = pg_malloc_array(char *, numatts);
9544 tbinfo->attcollation = pg_malloc_array(Oid, numatts);
9545 tbinfo->attcompression = pg_malloc_array(char, numatts);
9546 tbinfo->attfdwoptions = pg_malloc_array(char *, numatts);
9547 tbinfo->attmissingval = pg_malloc_array(char *, numatts);
9548 tbinfo->notnull_constrs = pg_malloc_array(char *, numatts);
9549 tbinfo->notnull_comment = pg_malloc_array(char *, numatts);
9550 tbinfo->notnull_invalid = pg_malloc_array(bool, numatts);
9551 tbinfo->notnull_noinh = pg_malloc_array(bool, numatts);
9552 tbinfo->notnull_islocal = pg_malloc_array(bool, numatts);
9553 tbinfo->attrdefs = pg_malloc_array(AttrDefInfo *, numatts);
9554 hasdefaults = false;
9555
9556 for (int j = 0; j < numatts; j++, r++)
9557 {
9558 if (j + 1 != atoi(PQgetvalue(res, r, i_attnum)) &&
9559 !(fout->dopt->binary_upgrade && fout->remoteVersion < 120000 &&
9560 tbinfo->dobj.catId.oid == LargeObjectMetadataRelationId))
9561 pg_fatal("invalid column numbering in table \"%s\"",
9562 tbinfo->dobj.name);
9563 tbinfo->attnames[j] = pg_strdup(PQgetvalue(res, r, i_attname));
9564 tbinfo->atttypnames[j] = pg_strdup(PQgetvalue(res, r, i_atttypname));
9565 if (PQgetisnull(res, r, i_attstattarget))
9566 tbinfo->attstattarget[j] = -1;
9567 else
9568 tbinfo->attstattarget[j] = atoi(PQgetvalue(res, r, i_attstattarget));
9569 tbinfo->attstorage[j] = *(PQgetvalue(res, r, i_attstorage));
9570 tbinfo->typstorage[j] = *(PQgetvalue(res, r, i_typstorage));
9571 tbinfo->attidentity[j] = *(PQgetvalue(res, r, i_attidentity));
9572 tbinfo->attgenerated[j] = *(PQgetvalue(res, r, i_attgenerated));
9573 tbinfo->needs_override = tbinfo->needs_override || (tbinfo->attidentity[j] == ATTRIBUTE_IDENTITY_ALWAYS);
9574 tbinfo->attisdropped[j] = (PQgetvalue(res, r, i_attisdropped)[0] == 't');
9575 tbinfo->attlen[j] = atoi(PQgetvalue(res, r, i_attlen));
9576 tbinfo->attalign[j] = *(PQgetvalue(res, r, i_attalign));
9577 tbinfo->attislocal[j] = (PQgetvalue(res, r, i_attislocal)[0] == 't');
9578
9579 /* Handle not-null constraint name and flags */
9581 tbinfo, j,
9588
9589 tbinfo->notnull_comment[j] = PQgetisnull(res, r, i_notnull_comment) ?
9591 tbinfo->attoptions[j] = pg_strdup(PQgetvalue(res, r, i_attoptions));
9592 tbinfo->attcollation[j] = atooid(PQgetvalue(res, r, i_attcollation));
9593 tbinfo->attcompression[j] = *(PQgetvalue(res, r, i_attcompression));
9594 tbinfo->attfdwoptions[j] = pg_strdup(PQgetvalue(res, r, i_attfdwoptions));
9595 tbinfo->attmissingval[j] = pg_strdup(PQgetvalue(res, r, i_attmissingval));
9596 tbinfo->attrdefs[j] = NULL; /* fix below */
9597 if (PQgetvalue(res, r, i_atthasdef)[0] == 't')
9598 hasdefaults = true;
9599 }
9600
9601 if (hasdefaults)
9602 {
9603 /* Collect OIDs of interesting tables that have defaults */
9604 if (tbloids->len > 1) /* do we have more than the '{'? */
9606 appendPQExpBuffer(tbloids, "%u", tbinfo->dobj.catId.oid);
9607 }
9608 }
9609
9610 /* If invalidnotnulloids has any data, finalize it */
9611 if (invalidnotnulloids != NULL)
9613
9614 PQclear(res);
9615
9616 /*
9617 * Now get info about column defaults. This is skipped for a data-only
9618 * dump, as it is only needed for table schemas.
9619 */
9620 if (dopt->dumpSchema && tbloids->len > 1)
9621 {
9622 AttrDefInfo *attrdefs;
9623 int numDefaults;
9625
9626 pg_log_info("finding table default expressions");
9627
9629
9630 printfPQExpBuffer(q, "SELECT a.tableoid, a.oid, adrelid, adnum, "
9631 "pg_catalog.pg_get_expr(adbin, adrelid) AS adsrc\n"
9632 "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
9633 "JOIN pg_catalog.pg_attrdef a ON (src.tbloid = a.adrelid)\n"
9634 "ORDER BY a.adrelid, a.adnum",
9635 tbloids->data);
9636
9638
9639 numDefaults = PQntuples(res);
9641
9642 curtblindx = -1;
9643 for (int j = 0; j < numDefaults; j++)
9644 {
9645 Oid adtableoid = atooid(PQgetvalue(res, j, 0));
9646 Oid adoid = atooid(PQgetvalue(res, j, 1));
9647 Oid adrelid = atooid(PQgetvalue(res, j, 2));
9648 int adnum = atoi(PQgetvalue(res, j, 3));
9649 char *adsrc = PQgetvalue(res, j, 4);
9650
9651 /*
9652 * Locate the associated TableInfo; we rely on tblinfo[] being in
9653 * OID order.
9654 */
9655 if (tbinfo == NULL || tbinfo->dobj.catId.oid != adrelid)
9656 {
9657 while (++curtblindx < numTables)
9658 {
9659 tbinfo = &tblinfo[curtblindx];
9660 if (tbinfo->dobj.catId.oid == adrelid)
9661 break;
9662 }
9663 if (curtblindx >= numTables)
9664 pg_fatal("unrecognized table OID %u", adrelid);
9665 }
9666
9667 if (adnum <= 0 || adnum > tbinfo->numatts)
9668 pg_fatal("invalid adnum value %d for table \"%s\"",
9669 adnum, tbinfo->dobj.name);
9670
9671 /*
9672 * dropped columns shouldn't have defaults, but just in case,
9673 * ignore 'em
9674 */
9675 if (tbinfo->attisdropped[adnum - 1])
9676 continue;
9677
9678 attrdefs[j].dobj.objType = DO_ATTRDEF;
9679 attrdefs[j].dobj.catId.tableoid = adtableoid;
9680 attrdefs[j].dobj.catId.oid = adoid;
9681 AssignDumpId(&attrdefs[j].dobj);
9682 attrdefs[j].adtable = tbinfo;
9683 attrdefs[j].adnum = adnum;
9684 attrdefs[j].adef_expr = pg_strdup(adsrc);
9685
9686 attrdefs[j].dobj.name = pg_strdup(tbinfo->dobj.name);
9687 attrdefs[j].dobj.namespace = tbinfo->dobj.namespace;
9688
9689 attrdefs[j].dobj.dump = tbinfo->dobj.dump;
9690
9691 /*
9692 * Figure out whether the default/generation expression should be
9693 * dumped as part of the main CREATE TABLE (or similar) command or
9694 * as a separate ALTER TABLE (or similar) command. The preference
9695 * is to put it into the CREATE command, but in some cases that's
9696 * not possible.
9697 */
9698 if (tbinfo->attgenerated[adnum - 1])
9699 {
9700 /*
9701 * Column generation expressions cannot be dumped separately,
9702 * because there is no syntax for it. By setting separate to
9703 * false here we prevent the "default" from being processed as
9704 * its own dumpable object. Later, flagInhAttrs() will mark
9705 * it as not to be dumped at all, if possible (that is, if it
9706 * can be inherited from a parent).
9707 */
9708 attrdefs[j].separate = false;
9709 }
9710 else if (tbinfo->relkind == RELKIND_VIEW)
9711 {
9712 /*
9713 * Defaults on a VIEW must always be dumped as separate ALTER
9714 * TABLE commands.
9715 */
9716 attrdefs[j].separate = true;
9717 }
9718 else if (!shouldPrintColumn(dopt, tbinfo, adnum - 1))
9719 {
9720 /* column will be suppressed, print default separately */
9721 attrdefs[j].separate = true;
9722 }
9723 else
9724 {
9725 attrdefs[j].separate = false;
9726 }
9727
9728 if (!attrdefs[j].separate)
9729 {
9730 /*
9731 * Mark the default as needing to appear before the table, so
9732 * that any dependencies it has must be emitted before the
9733 * CREATE TABLE. If this is not possible, we'll change to
9734 * "separate" mode while sorting dependencies.
9735 */
9737 attrdefs[j].dobj.dumpId);
9738 }
9739
9740 tbinfo->attrdefs[adnum - 1] = &attrdefs[j];
9741 }
9742
9743 PQclear(res);
9744 }
9745
9746 /*
9747 * Get info about NOT NULL NOT VALID constraints. This is skipped for a
9748 * data-only dump, as it is only needed for table schemas.
9749 */
9750 if (dopt->dumpSchema && invalidnotnulloids)
9751 {
9753 int numConstrs;
9754 int i_tableoid;
9755 int i_oid;
9756 int i_conrelid;
9757 int i_conname;
9758 int i_consrc;
9759 int i_conislocal;
9760
9761 pg_log_info("finding invalid not-null constraints");
9762
9765 "SELECT c.tableoid, c.oid, conrelid, conname, "
9766 "pg_catalog.pg_get_constraintdef(c.oid) AS consrc, "
9767 "conislocal, convalidated "
9768 "FROM unnest('%s'::pg_catalog.oid[]) AS src(conoid)\n"
9769 "JOIN pg_catalog.pg_constraint c ON (src.conoid = c.oid)\n"
9770 "ORDER BY c.conrelid, c.conname",
9772
9774
9775 numConstrs = PQntuples(res);
9777
9778 i_tableoid = PQfnumber(res, "tableoid");
9779 i_oid = PQfnumber(res, "oid");
9780 i_conrelid = PQfnumber(res, "conrelid");
9781 i_conname = PQfnumber(res, "conname");
9782 i_consrc = PQfnumber(res, "consrc");
9783 i_conislocal = PQfnumber(res, "conislocal");
9784
9785 /* As above, this loop iterates once per table, not once per row */
9786 curtblindx = -1;
9787 for (int j = 0; j < numConstrs;)
9788 {
9789 Oid conrelid = atooid(PQgetvalue(res, j, i_conrelid));
9791 int numcons;
9792
9793 /* Count rows for this table */
9794 for (numcons = 1; numcons < numConstrs - j; numcons++)
9795 if (atooid(PQgetvalue(res, j + numcons, i_conrelid)) != conrelid)
9796 break;
9797
9798 /*
9799 * Locate the associated TableInfo; we rely on tblinfo[] being in
9800 * OID order.
9801 */
9802 while (++curtblindx < numTables)
9803 {
9804 tbinfo = &tblinfo[curtblindx];
9805 if (tbinfo->dobj.catId.oid == conrelid)
9806 break;
9807 }
9808 if (curtblindx >= numTables)
9809 pg_fatal("unrecognized table OID %u", conrelid);
9810
9811 for (int c = 0; c < numcons; c++, j++)
9812 {
9813 constrs[j].dobj.objType = DO_CONSTRAINT;
9814 constrs[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_tableoid));
9815 constrs[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
9816 AssignDumpId(&constrs[j].dobj);
9817 constrs[j].dobj.name = pg_strdup(PQgetvalue(res, j, i_conname));
9818 constrs[j].dobj.namespace = tbinfo->dobj.namespace;
9819 constrs[j].contable = tbinfo;
9820 constrs[j].condomain = NULL;
9821 constrs[j].contype = 'n';
9822 constrs[j].condef = pg_strdup(PQgetvalue(res, j, i_consrc));
9823 constrs[j].confrelid = InvalidOid;
9824 constrs[j].conindex = 0;
9825 constrs[j].condeferrable = false;
9826 constrs[j].condeferred = false;
9827 constrs[j].conislocal = (PQgetvalue(res, j, i_conislocal)[0] == 't');
9828
9829 /*
9830 * All invalid not-null constraints must be dumped separately,
9831 * because CREATE TABLE would not create them as invalid, and
9832 * also because they must be created after potentially
9833 * violating data has been loaded.
9834 */
9835 constrs[j].separate = true;
9836
9837 constrs[j].dobj.dump = tbinfo->dobj.dump;
9838 }
9839 }
9840 PQclear(res);
9841 }
9842
9843 /*
9844 * Get info about table CHECK constraints. This is skipped for a
9845 * data-only dump, as it is only needed for table schemas.
9846 */
9847 if (dopt->dumpSchema && checkoids->len > 2)
9848 {
9850 int numConstrs;
9851 int i_tableoid;
9852 int i_oid;
9853 int i_conrelid;
9854 int i_conname;
9855 int i_consrc;
9856 int i_conislocal;
9857 int i_convalidated;
9858
9859 pg_log_info("finding table check constraints");
9860
9863 "SELECT c.tableoid, c.oid, conrelid, conname, "
9864 "pg_catalog.pg_get_constraintdef(c.oid) AS consrc, "
9865 "conislocal, convalidated "
9866 "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
9867 "JOIN pg_catalog.pg_constraint c ON (src.tbloid = c.conrelid)\n"
9868 "WHERE contype = 'c' "
9869 "ORDER BY c.conrelid, c.conname",
9870 checkoids->data);
9871
9873
9874 numConstrs = PQntuples(res);
9876
9877 i_tableoid = PQfnumber(res, "tableoid");
9878 i_oid = PQfnumber(res, "oid");
9879 i_conrelid = PQfnumber(res, "conrelid");
9880 i_conname = PQfnumber(res, "conname");
9881 i_consrc = PQfnumber(res, "consrc");
9882 i_conislocal = PQfnumber(res, "conislocal");
9883 i_convalidated = PQfnumber(res, "convalidated");
9884
9885 /* As above, this loop iterates once per table, not once per row */
9886 curtblindx = -1;
9887 for (int j = 0; j < numConstrs;)
9888 {
9889 Oid conrelid = atooid(PQgetvalue(res, j, i_conrelid));
9891 int numcons;
9892
9893 /* Count rows for this table */
9894 for (numcons = 1; numcons < numConstrs - j; numcons++)
9895 if (atooid(PQgetvalue(res, j + numcons, i_conrelid)) != conrelid)
9896 break;
9897
9898 /*
9899 * Locate the associated TableInfo; we rely on tblinfo[] being in
9900 * OID order.
9901 */
9902 while (++curtblindx < numTables)
9903 {
9904 tbinfo = &tblinfo[curtblindx];
9905 if (tbinfo->dobj.catId.oid == conrelid)
9906 break;
9907 }
9908 if (curtblindx >= numTables)
9909 pg_fatal("unrecognized table OID %u", conrelid);
9910
9911 if (numcons != tbinfo->ncheck)
9912 {
9913 pg_log_error(ngettext("expected %d check constraint on table \"%s\" but found %d",
9914 "expected %d check constraints on table \"%s\" but found %d",
9915 tbinfo->ncheck),
9916 tbinfo->ncheck, tbinfo->dobj.name, numcons);
9917 pg_log_error_hint("The system catalogs might be corrupted.");
9918 exit_nicely(1);
9919 }
9920
9921 tbinfo->checkexprs = constrs + j;
9922
9923 for (int c = 0; c < numcons; c++, j++)
9924 {
9925 bool validated = PQgetvalue(res, j, i_convalidated)[0] == 't';
9926
9927 constrs[j].dobj.objType = DO_CONSTRAINT;
9928 constrs[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_tableoid));
9929 constrs[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
9930 AssignDumpId(&constrs[j].dobj);
9931 constrs[j].dobj.name = pg_strdup(PQgetvalue(res, j, i_conname));
9932 constrs[j].dobj.namespace = tbinfo->dobj.namespace;
9933 constrs[j].contable = tbinfo;
9934 constrs[j].condomain = NULL;
9935 constrs[j].contype = 'c';
9936 constrs[j].condef = pg_strdup(PQgetvalue(res, j, i_consrc));
9937 constrs[j].confrelid = InvalidOid;
9938 constrs[j].conindex = 0;
9939 constrs[j].condeferrable = false;
9940 constrs[j].condeferred = false;
9941 constrs[j].conislocal = (PQgetvalue(res, j, i_conislocal)[0] == 't');
9942
9943 /*
9944 * An unvalidated constraint needs to be dumped separately, so
9945 * that potentially-violating existing data is loaded before
9946 * the constraint.
9947 */
9948 constrs[j].separate = !validated;
9949
9950 constrs[j].dobj.dump = tbinfo->dobj.dump;
9951
9952 /*
9953 * Mark the constraint as needing to appear before the table
9954 * --- this is so that any other dependencies of the
9955 * constraint will be emitted before we try to create the
9956 * table. If the constraint is to be dumped separately, it
9957 * will be dumped after data is loaded anyway, so don't do it.
9958 * (There's an automatic dependency in the opposite direction
9959 * anyway, so don't need to add one manually here.)
9960 */
9961 if (!constrs[j].separate)
9963 constrs[j].dobj.dumpId);
9964
9965 /*
9966 * We will detect later whether the constraint must be split
9967 * out from the table definition.
9968 */
9969 }
9970 }
9971
9972 PQclear(res);
9973 }
9974
9978}
9979
9980/*
9981 * Based on the getTableAttrs query's row corresponding to one column, set
9982 * the name and flags to handle a not-null constraint for that column in
9983 * the tbinfo struct.
9984 *
9985 * Result row 'r' is for tbinfo's attribute 'j'.
9986 *
9987 * There are four possibilities:
9988 * 1) the column has no not-null constraints. In that case, ->notnull_constrs
9989 * (the constraint name) remains NULL.
9990 * 2) The column has a constraint with no name (this is the case when
9991 * constraints come from pre-18 servers). In this case, ->notnull_constrs
9992 * is set to the empty string; dumpTableSchema will print just "NOT NULL".
9993 * 3) The column has an invalid not-null constraint. This must be treated
9994 * as a separate object (because it must be created after the table data
9995 * is loaded). So we add its OID to invalidnotnulloids for processing
9996 * elsewhere and do nothing further with it here. We distinguish this
9997 * case because the "notnull_invalidoid" column has been set to a non-NULL
9998 * value, which is the constraint OID. Valid constraints have a null OID.
9999 * 4) The column has a constraint with a known name; in that case
10000 * notnull_constrs carries that name and dumpTableSchema will print
10001 * "CONSTRAINT the_name NOT NULL". However, if the name is the default
10002 * (table_column_not_null) and there's no comment on the constraint,
10003 * there's no need to print that name in the dump, so notnull_constrs
10004 * is set to the empty string and it behaves as case 2.
10005 *
10006 * In a child table that inherits from a parent already containing NOT NULL
10007 * constraints and the columns in the child don't have their own NOT NULL
10008 * declarations, we suppress printing constraints in the child: the
10009 * constraints are acquired at the point where the child is attached to the
10010 * parent. This is tracked in ->notnull_islocal; for servers pre-18 this is
 * set not here but in flagInhAttrs. That flag is also used when the
 * constraint was validated in a child but all its parents have it as NOT
 * VALID.
10014 *
10015 * Any of these constraints might have the NO INHERIT bit. If so we set
10016 * ->notnull_noinh and NO INHERIT will be printed by dumpTableSchema.
10017 *
10018 * In case 4 above, the name comparison is a bit of a hack; it actually fails
10019 * to do the right thing in all but the trivial case. However, the downside
10020 * of getting it wrong is simply that the name is printed rather than
10021 * suppressed, so it's not a big deal.
10022 *
10023 * invalidnotnulloids is expected to be given as NULL; if any invalid not-null
10024 * constraints are found, it is initialized and filled with the array of
10025 * OIDs of such constraints, for later processing.
10026 */
10027static void
10029 TableInfo *tbinfo, int j,
10030 int i_notnull_name,
10036{
10037 DumpOptions *dopt = fout->dopt;
10038
10039 /*
10040 * If this not-null constraint is not valid, list its OID in
10041 * invalidnotnulloids and do nothing further. It'll be processed
10042 * elsewhere later.
10043 *
10044 * Because invalid not-null constraints are rare, we don't want to malloc
10045 * invalidnotnulloids until we're sure we're going it need it, which
10046 * happens here.
10047 */
10048 if (!PQgetisnull(res, r, i_notnull_invalidoid))
10049 {
10050 char *constroid = PQgetvalue(res, r, i_notnull_invalidoid);
10051
10052 if (*invalidnotnulloids == NULL)
10053 {
10057 }
10058 else
10060
10061 /*
10062 * Track when a parent constraint is invalid for the cases where a
10063 * child constraint has been validated independenly.
10064 */
10065 tbinfo->notnull_invalid[j] = true;
10066
10067 /* nothing else to do */
10068 tbinfo->notnull_constrs[j] = NULL;
10069 return;
10070 }
10071
10072 /*
10073 * notnull_noinh is straight from the query result. notnull_islocal also,
10074 * though flagInhAttrs may change that one later.
10075 */
10076 tbinfo->notnull_noinh[j] = PQgetvalue(res, r, i_notnull_noinherit)[0] == 't';
10077 tbinfo->notnull_islocal[j] = PQgetvalue(res, r, i_notnull_islocal)[0] == 't';
10078 tbinfo->notnull_invalid[j] = false;
10079
10080 /*
10081 * Determine a constraint name to use. If the column is not marked not-
10082 * null, we set NULL which cues ... to do nothing. An empty string says
10083 * to print an unnamed NOT NULL, and anything else is a constraint name to
10084 * use.
10085 */
10086 if (fout->remoteVersion < 180000)
10087 {
10088 /*
10089 * < 18 doesn't have not-null names, so an unnamed constraint is
10090 * sufficient.
10091 */
10092 if (PQgetisnull(res, r, i_notnull_name))
10093 tbinfo->notnull_constrs[j] = NULL;
10094 else
10095 tbinfo->notnull_constrs[j] = "";
10096 }
10097 else
10098 {
10099 if (PQgetisnull(res, r, i_notnull_name))
10100 tbinfo->notnull_constrs[j] = NULL;
10101 else
10102 {
10103 /*
10104 * In binary upgrade of inheritance child tables, must have a
10105 * constraint name that we can UPDATE later; same if there's a
10106 * comment on the constraint.
10107 */
10108 if ((dopt->binary_upgrade &&
10109 !tbinfo->ispartition &&
10110 !tbinfo->notnull_islocal) ||
10112 {
10113 tbinfo->notnull_constrs[j] =
10115 }
10116 else
10117 {
10118 char *default_name;
10119
10120 /* XXX should match ChooseConstraintName better */
10121 default_name = psprintf("%s_%s_not_null", tbinfo->dobj.name,
10122 tbinfo->attnames[j]);
10123 if (strcmp(default_name,
10124 PQgetvalue(res, r, i_notnull_name)) == 0)
10125 tbinfo->notnull_constrs[j] = "";
10126 else
10127 {
10128 tbinfo->notnull_constrs[j] =
10130 }
10132 }
10133 }
10134 }
10135}
10136
10137/*
10138 * Test whether a column should be printed as part of table's CREATE TABLE.
10139 * Column number is zero-based.
10140 *
10141 * Normally this is always true, but it's false for dropped columns, as well
10142 * as those that were inherited without any local definition. (If we print
10143 * such a column it will mistakenly get pg_attribute.attislocal set to true.)
10144 * For partitions, it's always true, because we want the partitions to be
10145 * created independently and ATTACH PARTITION used afterwards.
10146 *
10147 * In binary_upgrade mode, we must print all columns and fix the attislocal/
10148 * attisdropped state later, so as to keep control of the physical column
10149 * order.
10150 *
10151 * This function exists because there are scattered nonobvious places that
10152 * must be kept in sync with this decision.
10153 */
10154bool
10155shouldPrintColumn(const DumpOptions *dopt, const TableInfo *tbinfo, int colno)
10156{
10157 if (dopt->binary_upgrade)
10158 return true;
10159 if (tbinfo->attisdropped[colno])
10160 return false;
10161 return (tbinfo->attislocal[colno] || tbinfo->ispartition);
10162}
10163
10164
10165/*
10166 * getTSParsers:
10167 * get information about all text search parsers in the system catalogs
10168 */
10169void
10171{
10172 PGresult *res;
10173 int ntups;
10174 int i;
10175 PQExpBuffer query;
10177 int i_tableoid;
10178 int i_oid;
10179 int i_prsname;
10180 int i_prsnamespace;
10181 int i_prsstart;
10182 int i_prstoken;
10183 int i_prsend;
10184 int i_prsheadline;
10185 int i_prslextype;
10186
10187 query = createPQExpBuffer();
10188
10189 /*
10190 * find all text search objects, including builtin ones; we filter out
10191 * system-defined objects at dump-out time.
10192 */
10193
10194 appendPQExpBufferStr(query, "SELECT tableoid, oid, prsname, prsnamespace, "
10195 "prsstart::oid, prstoken::oid, "
10196 "prsend::oid, prsheadline::oid, prslextype::oid "
10197 "FROM pg_ts_parser");
10198
10199 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10200
10201 ntups = PQntuples(res);
10202
10204
10205 i_tableoid = PQfnumber(res, "tableoid");
10206 i_oid = PQfnumber(res, "oid");
10207 i_prsname = PQfnumber(res, "prsname");
10208 i_prsnamespace = PQfnumber(res, "prsnamespace");
10209 i_prsstart = PQfnumber(res, "prsstart");
10210 i_prstoken = PQfnumber(res, "prstoken");
10211 i_prsend = PQfnumber(res, "prsend");
10212 i_prsheadline = PQfnumber(res, "prsheadline");
10213 i_prslextype = PQfnumber(res, "prslextype");
10214
10215 for (i = 0; i < ntups; i++)
10216 {
10217 prsinfo[i].dobj.objType = DO_TSPARSER;
10218 prsinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
10219 prsinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
10220 AssignDumpId(&prsinfo[i].dobj);
10221 prsinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_prsname));
10222 prsinfo[i].dobj.namespace =
10224 prsinfo[i].prsstart = atooid(PQgetvalue(res, i, i_prsstart));
10225 prsinfo[i].prstoken = atooid(PQgetvalue(res, i, i_prstoken));
10226 prsinfo[i].prsend = atooid(PQgetvalue(res, i, i_prsend));
10227 prsinfo[i].prsheadline = atooid(PQgetvalue(res, i, i_prsheadline));
10228 prsinfo[i].prslextype = atooid(PQgetvalue(res, i, i_prslextype));
10229
10230 /* Decide whether we want to dump it */
10232 }
10233
10234 PQclear(res);
10235
10236 destroyPQExpBuffer(query);
10237}
10238
10239/*
10240 * getTSDictionaries:
10241 * get information about all text search dictionaries in the system catalogs
10242 */
10243void
10245{
10246 PGresult *res;
10247 int ntups;
10248 int i;
10249 PQExpBuffer query;
10251 int i_tableoid;
10252 int i_oid;
10253 int i_dictname;
10254 int i_dictnamespace;
10255 int i_dictowner;
10256 int i_dicttemplate;
10257 int i_dictinitoption;
10258
10259 query = createPQExpBuffer();
10260
10261 appendPQExpBufferStr(query, "SELECT tableoid, oid, dictname, "
10262 "dictnamespace, dictowner, "
10263 "dicttemplate, dictinitoption "
10264 "FROM pg_ts_dict");
10265
10266 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10267
10268 ntups = PQntuples(res);
10269
10271
10272 i_tableoid = PQfnumber(res, "tableoid");
10273 i_oid = PQfnumber(res, "oid");
10274 i_dictname = PQfnumber(res, "dictname");
10275 i_dictnamespace = PQfnumber(res, "dictnamespace");
10276 i_dictowner = PQfnumber(res, "dictowner");
10277 i_dictinitoption = PQfnumber(res, "dictinitoption");
10278 i_dicttemplate = PQfnumber(res, "dicttemplate");
10279
10280 for (i = 0; i < ntups; i++)
10281 {
10282 dictinfo[i].dobj.objType = DO_TSDICT;
10283 dictinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
10284 dictinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
10285 AssignDumpId(&dictinfo[i].dobj);
10286 dictinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_dictname));
10287 dictinfo[i].dobj.namespace =
10289 dictinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_dictowner));
10290 dictinfo[i].dicttemplate = atooid(PQgetvalue(res, i, i_dicttemplate));
10291 if (PQgetisnull(res, i, i_dictinitoption))
10292 dictinfo[i].dictinitoption = NULL;
10293 else
10294 dictinfo[i].dictinitoption = pg_strdup(PQgetvalue(res, i, i_dictinitoption));
10295
10296 /* Decide whether we want to dump it */
10298 }
10299
10300 PQclear(res);
10301
10302 destroyPQExpBuffer(query);
10303}
10304
10305/*
10306 * getTSTemplates:
10307 * get information about all text search templates in the system catalogs
10308 */
10309void
10311{
10312 PGresult *res;
10313 int ntups;
10314 int i;
10315 PQExpBuffer query;
10317 int i_tableoid;
10318 int i_oid;
10319 int i_tmplname;
10320 int i_tmplnamespace;
10321 int i_tmplinit;
10322 int i_tmpllexize;
10323
10324 query = createPQExpBuffer();
10325
10326 appendPQExpBufferStr(query, "SELECT tableoid, oid, tmplname, "
10327 "tmplnamespace, tmplinit::oid, tmpllexize::oid "
10328 "FROM pg_ts_template");
10329
10330 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10331
10332 ntups = PQntuples(res);
10333
10335
10336 i_tableoid = PQfnumber(res, "tableoid");
10337 i_oid = PQfnumber(res, "oid");
10338 i_tmplname = PQfnumber(res, "tmplname");
10339 i_tmplnamespace = PQfnumber(res, "tmplnamespace");
10340 i_tmplinit = PQfnumber(res, "tmplinit");
10341 i_tmpllexize = PQfnumber(res, "tmpllexize");
10342
10343 for (i = 0; i < ntups; i++)
10344 {
10345 tmplinfo[i].dobj.objType = DO_TSTEMPLATE;
10346 tmplinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
10347 tmplinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
10348 AssignDumpId(&tmplinfo[i].dobj);
10349 tmplinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_tmplname));
10350 tmplinfo[i].dobj.namespace =
10352 tmplinfo[i].tmplinit = atooid(PQgetvalue(res, i, i_tmplinit));
10353 tmplinfo[i].tmpllexize = atooid(PQgetvalue(res, i, i_tmpllexize));
10354
10355 /* Decide whether we want to dump it */
10357 }
10358
10359 PQclear(res);
10360
10361 destroyPQExpBuffer(query);
10362}
10363
10364/*
10365 * getTSConfigurations:
10366 * get information about all text search configurations
10367 */
10368void
10370{
10371 PGresult *res;
10372 int ntups;
10373 int i;
10374 PQExpBuffer query;
10376 int i_tableoid;
10377 int i_oid;
10378 int i_cfgname;
10379 int i_cfgnamespace;
10380 int i_cfgowner;
10381 int i_cfgparser;
10382
10383 query = createPQExpBuffer();
10384
10385 appendPQExpBufferStr(query, "SELECT tableoid, oid, cfgname, "
10386 "cfgnamespace, cfgowner, cfgparser "
10387 "FROM pg_ts_config");
10388
10389 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10390
10391 ntups = PQntuples(res);
10392
10394
10395 i_tableoid = PQfnumber(res, "tableoid");
10396 i_oid = PQfnumber(res, "oid");
10397 i_cfgname = PQfnumber(res, "cfgname");
10398 i_cfgnamespace = PQfnumber(res, "cfgnamespace");
10399 i_cfgowner = PQfnumber(res, "cfgowner");
10400 i_cfgparser = PQfnumber(res, "cfgparser");
10401
10402 for (i = 0; i < ntups; i++)
10403 {
10404 cfginfo[i].dobj.objType = DO_TSCONFIG;
10405 cfginfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
10406 cfginfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
10407 AssignDumpId(&cfginfo[i].dobj);
10408 cfginfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_cfgname));
10409 cfginfo[i].dobj.namespace =
10411 cfginfo[i].rolname = getRoleName(PQgetvalue(res, i, i_cfgowner));
10412 cfginfo[i].cfgparser = atooid(PQgetvalue(res, i, i_cfgparser));
10413
10414 /* Decide whether we want to dump it */
10416 }
10417
10418 PQclear(res);
10419
10420 destroyPQExpBuffer(query);
10421}
10422
10423/*
10424 * getForeignDataWrappers:
10425 * get information about all foreign-data wrappers in the system catalogs
10426 */
10427void
10429{
10430 PGresult *res;
10431 int ntups;
10432 int i;
10433 PQExpBuffer query;
10435 int i_tableoid;
10436 int i_oid;
10437 int i_fdwname;
10438 int i_fdwowner;
10439 int i_fdwhandler;
10440 int i_fdwvalidator;
10441 int i_fdwacl;
10442 int i_acldefault;
10443 int i_fdwoptions;
10444
10445 query = createPQExpBuffer();
10446
10447 appendPQExpBufferStr(query, "SELECT tableoid, oid, fdwname, "
10448 "fdwowner, "
10449 "fdwhandler::pg_catalog.regproc, "
10450 "fdwvalidator::pg_catalog.regproc, "
10451 "fdwacl, "
10452 "acldefault('F', fdwowner) AS acldefault, "
10453 "array_to_string(ARRAY("
10454 "SELECT quote_ident(option_name) || ' ' || "
10455 "quote_literal(option_value) "
10456 "FROM pg_options_to_table(fdwoptions) "
10457 "ORDER BY option_name"
10458 "), E',\n ') AS fdwoptions "
10459 "FROM pg_foreign_data_wrapper");
10460
10461 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10462
10463 ntups = PQntuples(res);
10464
10466
10467 i_tableoid = PQfnumber(res, "tableoid");
10468 i_oid = PQfnumber(res, "oid");
10469 i_fdwname = PQfnumber(res, "fdwname");
10470 i_fdwowner = PQfnumber(res, "fdwowner");
10471 i_fdwhandler = PQfnumber(res, "fdwhandler");
10472 i_fdwvalidator = PQfnumber(res, "fdwvalidator");
10473 i_fdwacl = PQfnumber(res, "fdwacl");
10474 i_acldefault = PQfnumber(res, "acldefault");
10475 i_fdwoptions = PQfnumber(res, "fdwoptions");
10476
10477 for (i = 0; i < ntups; i++)
10478 {
10479 fdwinfo[i].dobj.objType = DO_FDW;
10480 fdwinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
10481 fdwinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
10482 AssignDumpId(&fdwinfo[i].dobj);
10483 fdwinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_fdwname));
10484 fdwinfo[i].dobj.namespace = NULL;
10485 fdwinfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_fdwacl));
10486 fdwinfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
10487 fdwinfo[i].dacl.privtype = 0;
10488 fdwinfo[i].dacl.initprivs = NULL;
10489 fdwinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_fdwowner));
10490 fdwinfo[i].fdwhandler = pg_strdup(PQgetvalue(res, i, i_fdwhandler));
10491 fdwinfo[i].fdwvalidator = pg_strdup(PQgetvalue(res, i, i_fdwvalidator));
10492 fdwinfo[i].fdwoptions = pg_strdup(PQgetvalue(res, i, i_fdwoptions));
10493
10494 /* Decide whether we want to dump it */
10496
10497 /* Mark whether FDW has an ACL */
10498 if (!PQgetisnull(res, i, i_fdwacl))
10499 fdwinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
10500 }
10501
10502 PQclear(res);
10503
10504 destroyPQExpBuffer(query);
10505}
10506
10507/*
10508 * getForeignServers:
10509 * get information about all foreign servers in the system catalogs
10510 */
10511void
10513{
10514 PGresult *res;
10515 int ntups;
10516 int i;
10517 PQExpBuffer query;
10519 int i_tableoid;
10520 int i_oid;
10521 int i_srvname;
10522 int i_srvowner;
10523 int i_srvfdw;
10524 int i_srvtype;
10525 int i_srvversion;
10526 int i_srvacl;
10527 int i_acldefault;
10528 int i_srvoptions;
10529
10530 query = createPQExpBuffer();
10531
10532 appendPQExpBufferStr(query, "SELECT tableoid, oid, srvname, "
10533 "srvowner, "
10534 "srvfdw, srvtype, srvversion, srvacl, "
10535 "acldefault('S', srvowner) AS acldefault, "
10536 "array_to_string(ARRAY("
10537 "SELECT quote_ident(option_name) || ' ' || "
10538 "quote_literal(option_value) "
10539 "FROM pg_options_to_table(srvoptions) "
10540 "ORDER BY option_name"
10541 "), E',\n ') AS srvoptions "
10542 "FROM pg_foreign_server");
10543
10544 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10545
10546 ntups = PQntuples(res);
10547
10549
10550 i_tableoid = PQfnumber(res, "tableoid");
10551 i_oid = PQfnumber(res, "oid");
10552 i_srvname = PQfnumber(res, "srvname");
10553 i_srvowner = PQfnumber(res, "srvowner");
10554 i_srvfdw = PQfnumber(res, "srvfdw");
10555 i_srvtype = PQfnumber(res, "srvtype");
10556 i_srvversion = PQfnumber(res, "srvversion");
10557 i_srvacl = PQfnumber(res, "srvacl");
10558 i_acldefault = PQfnumber(res, "acldefault");
10559 i_srvoptions = PQfnumber(res, "srvoptions");
10560
10561 for (i = 0; i < ntups; i++)
10562 {
10563 srvinfo[i].dobj.objType = DO_FOREIGN_SERVER;
10564 srvinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
10565 srvinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
10566 AssignDumpId(&srvinfo[i].dobj);
10567 srvinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_srvname));
10568 srvinfo[i].dobj.namespace = NULL;
10569 srvinfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_srvacl));
10570 srvinfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
10571 srvinfo[i].dacl.privtype = 0;
10572 srvinfo[i].dacl.initprivs = NULL;
10573 srvinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_srvowner));
10574 srvinfo[i].srvfdw = atooid(PQgetvalue(res, i, i_srvfdw));
10575 srvinfo[i].srvtype = pg_strdup(PQgetvalue(res, i, i_srvtype));
10576 srvinfo[i].srvversion = pg_strdup(PQgetvalue(res, i, i_srvversion));
10577 srvinfo[i].srvoptions = pg_strdup(PQgetvalue(res, i, i_srvoptions));
10578
10579 /* Decide whether we want to dump it */
10581
10582 /* Servers have user mappings */
10583 srvinfo[i].dobj.components |= DUMP_COMPONENT_USERMAP;
10584
10585 /* Mark whether server has an ACL */
10586 if (!PQgetisnull(res, i, i_srvacl))
10587 srvinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
10588 }
10589
10590 PQclear(res);
10591
10592 destroyPQExpBuffer(query);
10593}
10594
10595/*
10596 * getDefaultACLs:
10597 * get information about all default ACL information in the system catalogs
10598 */
10599void
10601{
10602 DumpOptions *dopt = fout->dopt;
10604 PQExpBuffer query;
10605 PGresult *res;
10606 int i_oid;
10607 int i_tableoid;
10608 int i_defaclrole;
10610 int i_defaclobjtype;
10611 int i_defaclacl;
10612 int i_acldefault;
10613 int i,
10614 ntups;
10615
10616 query = createPQExpBuffer();
10617
10618 /*
10619 * Global entries (with defaclnamespace=0) replace the hard-wired default
10620 * ACL for their object type. We should dump them as deltas from the
10621 * default ACL, since that will be used as a starting point for
10622 * interpreting the ALTER DEFAULT PRIVILEGES commands. On the other hand,
10623 * non-global entries can only add privileges not revoke them. We must
10624 * dump those as-is (i.e., as deltas from an empty ACL).
10625 *
10626 * We can use defaclobjtype as the object type for acldefault(), except
10627 * for the case of 'S' (DEFACLOBJ_SEQUENCE) which must be converted to
10628 * 's'.
10629 */
10631 "SELECT oid, tableoid, "
10632 "defaclrole, "
10633 "defaclnamespace, "
10634 "defaclobjtype, "
10635 "defaclacl, "
10636 "CASE WHEN defaclnamespace = 0 THEN "
10637 "acldefault(CASE WHEN defaclobjtype = 'S' "
10638 "THEN 's'::\"char\" ELSE defaclobjtype END, "
10639 "defaclrole) ELSE '{}' END AS acldefault "
10640 "FROM pg_default_acl");
10641
10642 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10643
10644 ntups = PQntuples(res);
10645
10647
10648 i_oid = PQfnumber(res, "oid");
10649 i_tableoid = PQfnumber(res, "tableoid");
10650 i_defaclrole = PQfnumber(res, "defaclrole");
10651 i_defaclnamespace = PQfnumber(res, "defaclnamespace");
10652 i_defaclobjtype = PQfnumber(res, "defaclobjtype");
10653 i_defaclacl = PQfnumber(res, "defaclacl");
10654 i_acldefault = PQfnumber(res, "acldefault");
10655
10656 for (i = 0; i < ntups; i++)
10657 {
10659
10660 daclinfo[i].dobj.objType = DO_DEFAULT_ACL;
10661 daclinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
10662 daclinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
10663 AssignDumpId(&daclinfo[i].dobj);
10664 /* cheesy ... is it worth coming up with a better object name? */
10665 daclinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_defaclobjtype));
10666
10667 if (nspid != InvalidOid)
10668 daclinfo[i].dobj.namespace = findNamespace(nspid);
10669 else
10670 daclinfo[i].dobj.namespace = NULL;
10671
10672 daclinfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_defaclacl));
10673 daclinfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
10674 daclinfo[i].dacl.privtype = 0;
10675 daclinfo[i].dacl.initprivs = NULL;
10676 daclinfo[i].defaclrole = getRoleName(PQgetvalue(res, i, i_defaclrole));
10677 daclinfo[i].defaclobjtype = *(PQgetvalue(res, i, i_defaclobjtype));
10678
10679 /* Default ACLs are ACLs, of course */
10680 daclinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
10681
10682 /* Decide whether we want to dump it */
10684 }
10685
10686 PQclear(res);
10687
10688 destroyPQExpBuffer(query);
10689}
10690
10691/*
10692 * getRoleName -- look up the name of a role, given its OID
10693 *
10694 * In current usage, we don't expect failures, so error out for a bad OID.
10695 */
10696static const char *
10698{
10699 Oid roleoid = atooid(roleoid_str);
10700
10701 /*
10702 * Do binary search to find the appropriate item.
10703 */
10704 if (nrolenames > 0)
10705 {
10706 RoleNameItem *low = &rolenames[0];
10707 RoleNameItem *high = &rolenames[nrolenames - 1];
10708
10709 while (low <= high)
10710 {
10711 RoleNameItem *middle = low + (high - low) / 2;
10712
10713 if (roleoid < middle->roleoid)
10714 high = middle - 1;
10715 else if (roleoid > middle->roleoid)
10716 low = middle + 1;
10717 else
10718 return middle->rolename; /* found a match */
10719 }
10720 }
10721
10722 pg_fatal("role with OID %u does not exist", roleoid);
10723 return NULL; /* keep compiler quiet */
10724}
10725
10726/*
10727 * collectRoleNames --
10728 *
10729 * Construct a table of all known roles.
10730 * The table is sorted by OID for speed in lookup.
10731 */
10732static void
10734{
10735 PGresult *res;
10736 const char *query;
10737 int i;
10738
10739 query = "SELECT oid, rolname FROM pg_catalog.pg_roles ORDER BY 1";
10740
10741 res = ExecuteSqlQuery(fout, query, PGRES_TUPLES_OK);
10742
10743 nrolenames = PQntuples(res);
10744
10746
10747 for (i = 0; i < nrolenames; i++)
10748 {
10749 rolenames[i].roleoid = atooid(PQgetvalue(res, i, 0));
10751 }
10752
10753 PQclear(res);
10754}
10755
10756/*
10757 * getAdditionalACLs
10758 *
10759 * We have now created all the DumpableObjects, and collected the ACL data
10760 * that appears in the directly-associated catalog entries. However, there's
10761 * more ACL-related info to collect. If any of a table's columns have ACLs,
10762 * we must set the TableInfo's DUMP_COMPONENT_ACL components flag, as well as
10763 * its hascolumnACLs flag (we won't store the ACLs themselves here, though).
10764 * Also, in versions having the pg_init_privs catalog, read that and load the
10765 * information into the relevant DumpableObjects.
10766 */
10767static void
/* NOTE(review): extraction gap — the signature line is missing here; per the
 * header comment above, this is getAdditionalACLs and it queries via fout. */
10769{
/* NOTE(review): extraction gap — the declaration of the PQExpBuffer 'query'
 * used below appears to be missing from this dump. */
 10771 PGresult *res;
 10772 int ntups,
 10773 i;
 10774
 10775 /* Check for per-column ACLs */
/* NOTE(review): extraction gap — the statement that loads this SQL text into
 * 'query' (e.g. a printfPQExpBuffer call) is missing; only its string
 * arguments survive below. */
 10777 "SELECT DISTINCT attrelid FROM pg_attribute "
 10778 "WHERE attacl IS NOT NULL");
 10779
 10780 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
 10781
 10782 ntups = PQntuples(res);
 10783 for (i = 0; i < ntups; i++)
 10784 {
 10785 Oid relid = atooid(PQgetvalue(res, i, 0));
 10786 TableInfo *tblinfo;
 10787
 10788 tblinfo = findTableByOid(relid);
 10789 /* OK to ignore tables we haven't got a DumpableObject for */
 10790 if (tblinfo)
 10791 {
/* NOTE(review): extraction gap — a statement setting the table's
 * DUMP_COMPONENT_ACL flag appears to be missing here (embedded line 10792). */
 10793 tblinfo->hascolumnACLs = true;
 10794 }
 10795 }
 10796 PQclear(res);
 10797
 10798 /* Fetch initial-privileges data */
 10799 if (fout->remoteVersion >= 90600)
 10800 {
 10801 printfPQExpBuffer(query,
 10802 "SELECT objoid, classoid, objsubid, privtype, initprivs "
 10803 "FROM pg_init_privs");
 10804
 10805 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
 10806
 10807 ntups = PQntuples(res);
 10808 for (i = 0; i < ntups; i++)
 10809 {
 10810 Oid objoid = atooid(PQgetvalue(res, i, 0));
 10811 Oid classoid = atooid(PQgetvalue(res, i, 1));
 10812 int objsubid = atoi(PQgetvalue(res, i, 2));
 10813 char privtype = *(PQgetvalue(res, i, 3));
 10814 char *initprivs = PQgetvalue(res, i, 4);
 10815 CatalogId objId;
 10816 DumpableObject *dobj;
 10817
 10818 objId.tableoid = classoid;
 10819 objId.oid = objoid;
 10820 dobj = findObjectByCatalogId(objId);
 10821 /* OK to ignore entries we haven't got a DumpableObject for */
 10822 if (dobj)
 10823 {
 10824 /* Cope with sub-object initprivs */
 10825 if (objsubid != 0)
 10826 {
 10827 if (dobj->objType == DO_TABLE)
 10828 {
 10829 /* For a column initprivs, set the table's ACL flags */
/* NOTE(review): extraction gap — a statement (embedded line 10830) setting a
 * dump-component flag on the table appears to be missing here. */
 10831 ((TableInfo *) dobj)->hascolumnACLs = true;
 10832 }
 10833 else
 10834 pg_log_warning("unsupported pg_init_privs entry: %u %u %d",
 10835 classoid, objoid, objsubid);
 10836 continue;
 10837 }
 10838
 10839 /*
 10840 * We ignore any pg_init_privs.initprivs entry for the public
 10841 * schema, as explained in getNamespaces().
 10842 */
 10843 if (dobj->objType == DO_NAMESPACE &&
 10844 strcmp(dobj->name, "public") == 0)
 10845 continue;
 10846
 10847 /* Else it had better be of a type we think has ACLs */
 10848 if (dobj->objType == DO_NAMESPACE ||
 10849 dobj->objType == DO_TYPE ||
 10850 dobj->objType == DO_FUNC ||
 10851 dobj->objType == DO_AGG ||
 10852 dobj->objType == DO_TABLE ||
 10853 dobj->objType == DO_PROCLANG ||
 10854 dobj->objType == DO_FDW ||
 10855 dobj->objType == DO_FOREIGN_SERVER)
 10856 {
/* NOTE(review): extraction gap — the declaration/cast assigning 'daobj'
 * (embedded line 10857) is missing; it is used immediately below. */
 10858
 10859 daobj->dacl.privtype = privtype;
 10860 daobj->dacl.initprivs = pstrdup(initprivs);
 10861 }
 10862 else
 10863 pg_log_warning("unsupported pg_init_privs entry: %u %u %d",
 10864 classoid, objoid, objsubid);
 10865 }
 10866 }
 10867 PQclear(res);
 10868 }
 10869
 10870 destroyPQExpBuffer(query);
 10871}
10872
10873/*
10874 * dumpCommentExtended --
10875 *
10876 * This routine is used to dump any comments associated with the
10877 * object handed to this routine. The routine takes the object type
10878 * and object name (ready to print, except for schema decoration), plus
10879 * the namespace and owner of the object (for labeling the ArchiveEntry),
10880 * plus catalog ID and subid which are the lookup key for pg_description,
10881 * plus the dump ID for the object (for setting a dependency).
10882 * If a matching pg_description entry is found, it is dumped.
10883 *
10884 * Note: in some cases, such as comments for triggers and rules, the "type"
10885 * string really looks like, e.g., "TRIGGER name ON". This is a bit of a hack
10886 * but it doesn't seem worth complicating the API for all callers to make
10887 * it cleaner.
10888 *
10889 * Note: although this routine takes a dumpId for dependency purposes,
10890 * that purpose is just to mark the dependency in the emitted dump file
10891 * for possible future use by pg_restore. We do NOT use it for determining
10892 * ordering of the comment in the dump file, because this routine is called
10893 * after dependency sorting occurs. This routine should be called just after
10894 * calling ArchiveEntry() for the specified object.
10895 */
10896static void
/* NOTE(review): extraction gap — the first signature line (function name plus
 * the Archive *fout and const char *type parameters, per the calls below) is
 * missing; the remaining parameter lines follow. */
 10898 const char *name, const char *namespace,
 10899 const char *owner, CatalogId catalogId,
 10900 int subid, DumpId dumpId,
 10901 const char *initdb_comment)
 10902{
 10903 DumpOptions *dopt = fout->dopt;
/* NOTE(review): extraction gap — the declaration of the CommentItem pointer
 * 'comments' (embedded line 10904) is missing; it is used below. */
 10905 int ncomments;
 10906
 10907 /* do nothing, if --no-comments is supplied */
 10908 if (dopt->no_comments)
 10909 return;
 10910
 10911 /* Comments are schema not data ... except LO comments are data */
 10912 if (strcmp(type, "LARGE OBJECT") != 0)
 10913 {
 10914 if (!dopt->dumpSchema)
 10915 return;
 10916 }
 10917 else
 10918 {
 10919 /* We do dump LO comments in binary-upgrade mode */
 10920 if (!dopt->dumpData && !dopt->binary_upgrade)
 10921 return;
 10922 }
 10923
 10924 /* Search for comments associated with catalogId, using table */
 10925 ncomments = findComments(catalogId.tableoid, catalogId.oid,
 10926 &comments);
 10927
 10928 /* Is there one matching the subid? */
 10929 while (ncomments > 0)
 10930 {
 10931 if (comments->objsubid == subid)
 10932 break;
 10933 comments++;
 10934 ncomments--;
 10935 }
 10936
 10937 if (initdb_comment != NULL)
 10938 {
 10939 static CommentItem empty_comment = {.descr = ""};
 10940
 10941 /*
 10942 * initdb creates this object with a comment. Skip dumping the
 10943 * initdb-provided comment, which would complicate matters for
 10944 * non-superuser use of pg_dump. When the DBA has removed initdb's
 10945 * comment, replicate that.
 10946 */
 10947 if (ncomments == 0)
 10948 {
/* NOTE(review): extraction gap — the assignment pointing 'comments' at
 * &empty_comment (embedded line 10949) appears to be missing; setting
 * ncomments = 1 below only makes sense with it. */
 10950 ncomments = 1;
 10951 }
 10952 else if (strcmp(comments->descr, initdb_comment) == 0)
 10953 ncomments = 0;
 10954 }
 10955
 10956 /* If a comment exists, build COMMENT ON statement */
 10957 if (ncomments > 0)
 10958 {
/* NOTE(review): extraction gap — the declarations/createPQExpBuffer() calls
 * for 'query' and 'tag' (embedded lines 10959-10960) are missing here. */
 10961
 10962 appendPQExpBuffer(query, "COMMENT ON %s ", type);
 10963 if (namespace && *namespace)
 10964 appendPQExpBuffer(query, "%s.", fmtId(namespace));
 10965 appendPQExpBuffer(query, "%s IS ", name);
/* NOTE(review): extraction gap — the call appending the comment text itself
 * (embedded line 10966, presumably appendStringLiteralAH) is missing. */
 10967 appendPQExpBufferStr(query, ";\n");
 10968
 10969 appendPQExpBuffer(tag, "%s %s", type, name);
 10970
 10971 /*
 10972 * We mark comments as SECTION_NONE because they really belong in the
 10973 * same section as their parent, whether that is pre-data or
 10974 * post-data.
 10975 */
/* NOTE(review): extraction gap — the ArchiveEntry(...) call head (embedded
 * line 10976) is missing; its ARCHIVE_OPTS arguments follow. */
 10977 ARCHIVE_OPTS(.tag = tag->data,
 10978 .namespace = namespace,
 10979 .owner = owner,
 10980 .description = "COMMENT",
 10981 .section = SECTION_NONE,
 10982 .createStmt = query->data,
 10983 .deps = &dumpId,
 10984 .nDeps = 1));
 10985
 10986 destroyPQExpBuffer(query);
 10987 destroyPQExpBuffer(tag);
 10988 }
 10989}
10990
10991/*
10992 * dumpComment --
10993 *
10994 * Typical simplification of the above function.
10995 */
10996static inline void
/* NOTE(review): extraction gap — first signature line (function name plus the
 * Archive *fout and const char *type parameters) is missing. This is the
 * common-case wrapper: identical to dumpCommentExtended but with no
 * initdb_comment (NULL). */
 10998 const char *name, const char *namespace,
 10999 const char *owner, CatalogId catalogId,
 11000 int subid, DumpId dumpId)
 11001{
 11002 dumpCommentExtended(fout, type, name, namespace, owner,
 11003 catalogId, subid, dumpId, NULL);
 11004}
11005
11006/*
11007 * appendNamedArgument --
11008 *
11009 * Convenience routine for constructing parameters of the form:
11010 * 'paraname', 'value'::type
11011 */
11012static void
11013appendNamedArgument(PQExpBuffer out, Archive *fout, const char *argname,
 11014 const char *argtype, const char *argval)
 11015{
 11016 appendPQExpBufferStr(out, ",\n\t");
 11017
 11018 appendStringLiteralAH(out, argname, fout);
 11019 appendPQExpBufferStr(out, ", ");
 11020
/* NOTE(review): extraction gap — the call that appends 'argval' as a string
 * literal (embedded line 11021) appears to be missing; without it the
 * 'value'::type output described in the header comment is incomplete. */
 11022 appendPQExpBuffer(out, "::%s", argtype);
 11023}
11024
11025/*
11026 * fetchAttributeStats --
11027 *
11028 * Fetch next batch of attribute statistics for dumpRelationStats_dumper().
11029 */
11030static PGresult *
/* NOTE(review): extraction gap — the signature line is missing; per the
 * header comment this is fetchAttributeStats, and the body reads 'fout'
 * and an ArchiveHandle 'AH'. */
 11032{
/* NOTE(review): extraction gap — declarations for 'AH', the 'nspnames'/
 * 'relnames' PQExpBuffers and 'max_rels' (embedded lines 11033-11035, 11040)
 * are missing; all are used below. */
 11036 int count = 0;
 11037 PGresult *res = NULL;
 11038 static TocEntry *te;
 11039 static bool restarted;
 11041
 11042 /*
 11043 * Our query for retrieving statistics for multiple relations uses WITH
 11044 * ORDINALITY and multi-argument UNNEST(), both of which were introduced
 11045 * in v9.4. For older versions, we resort to gathering statistics for a
 11046 * single relation at a time.
 11047 */
 11048 if (fout->remoteVersion < 90400)
 11049 max_rels = 1;
 11050
 11051 /* If we're just starting, set our TOC pointer. */
 11052 if (!te)
 11053 te = AH->toc->next;
 11054
 11055 /*
 11056 * We can't easily avoid a second TOC scan for the tar format because it
 11057 * writes restore.sql separately, which means we must execute the queries
 11058 * twice. This feels risky, but there is no known reason it should
 11059 * generate different output than the first pass. Even if it does, the
 11060 * worst-case scenario is that restore.sql might have different statistics
 11061 * data than the archive.
 11062 */
 11063 if (!restarted && te == AH->toc && AH->format == archTar)
 11064 {
 11065 te = AH->toc->next;
 11066 restarted = true;
 11067 }
 11068
/* NOTE(review): extraction gap — the createPQExpBuffer() calls initializing
 * the name-array buffers (embedded lines 11069-11070) are missing. */
 11071
 11072 /*
 11073 * Scan the TOC for the next set of relevant stats entries. We assume
 11074 * that statistics are dumped in the order they are listed in the TOC.
 11075 * This is perhaps not the sturdiest assumption, so we verify it matches
 11076 * reality in dumpRelationStats_dumper().
 11077 */
 11078 for (; te != AH->toc && count < max_rels; te = te->next)
 11079 {
 11080 if ((te->reqs & REQ_STATS) != 0 &&
 11081 strcmp(te->desc, "STATISTICS DATA") == 0)
 11082 {
 11083 appendPGArray(nspnames, te->namespace);
/* NOTE(review): extraction gap — the matching appendPGArray for the relation
 * name (embedded line 11084) is missing. */
 11085 count++;
 11086 }
 11087 }
 11088
/* NOTE(review): extraction gap — embedded lines 11089-11090 are missing. */
 11091
 11092 /* Execute the query for the next batch of relations. */
 11093 if (count > 0)
 11094 {
/* NOTE(review): extraction gap — the declaration/creation of 'query'
 * (embedded line 11095) is missing. */
 11096
 11097 appendPQExpBufferStr(query, "EXECUTE getAttributeStats(");
/* NOTE(review): extraction gaps — the calls appending the two array literals
 * (embedded lines 11098 and 11100) are missing between the casts below. */
 11099 appendPQExpBufferStr(query, "::pg_catalog.name[],");
 11101 appendPQExpBufferStr(query, "::pg_catalog.name[])");
 11102 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
 11103 destroyPQExpBuffer(query);
 11104 }
 11105
/* NOTE(review): extraction gap — the destroyPQExpBuffer calls for the name
 * buffers (embedded lines 11106-11107) are missing. */
 11108 return res;
 11109}
11110
11111/*
11112 * dumpRelationStats_dumper --
11113 *
11114 * Generate command to import stats into the relation on the new database.
11115 * This routine is called by the Archiver when it wants the statistics to be
11116 * dumped.
11117 */
11118static char *
/* NOTE(review): extraction gap — the signature line is missing; per the
 * header comment this is dumpRelationStats_dumper, taking the Archive 'fout',
 * a TocEntry 'te', and the 'userArg' used below. */
 11120{
 11121 const RelStatsInfo *rsinfo = userArg;
 11122 static PGresult *res;
 11123 static int rownum;
 11124 PQExpBuffer query;
/* NOTE(review): extraction gap — the declaration of 'out_data' (embedded
 * line 11125) is missing; 'out' points at it. */
 11126 PQExpBuffer out = &out_data;
 11127 int i_schemaname;
 11128 int i_tablename;
 11129 int i_attname;
 11130 int i_inherited;
 11131 int i_null_frac;
 11132 int i_avg_width;
 11133 int i_n_distinct;
/* NOTE(review): extraction gaps — declarations for the most_common_vals/
 * freqs/histogram_bounds column indexes (embedded lines 11134-11136) and for
 * the elems/range-statistics column indexes (11138-11143) are missing; they
 * are all assigned and used below. */
 11137 int i_correlation;
 11144 static TocEntry *expected_te;
 11145
 11146 /*
 11147 * fetchAttributeStats() assumes that the statistics are dumped in the
 11148 * order they are listed in the TOC. We verify that here for safety.
 11149 */
 11150 if (!expected_te)
 11151 expected_te = ((ArchiveHandle *) fout)->toc;
 11152
/* NOTE(review): extraction gaps — the loop advancing expected_te (embedded
 * lines 11153 and 11156) is only partially present here. */
 11154 while ((expected_te->reqs & REQ_STATS) == 0 ||
 11155 strcmp(expected_te->desc, "STATISTICS DATA") != 0)
 11157
 11158 if (te != expected_te)
 11159 pg_fatal("statistics dumped out of order (current: %d %s %s, expected: %d %s %s)",
 11160 te->dumpId, te->desc, te->tag,
 11161 expected_te->dumpId, expected_te->desc, expected_te->tag);
 11162
 11163 query = createPQExpBuffer();
/* NOTE(review): extraction gap — the condition guarding one-time preparation
 * of the statement (embedded line 11164) is missing. */
 11165 {
/* NOTE(review): extraction gap — the appendPQExpBufferStr(query, ...) call
 * head (embedded line 11166) is missing; its string arguments follow. */
 11167 "PREPARE getAttributeStats(pg_catalog.name[], pg_catalog.name[]) AS\n"
 11168 "SELECT s.schemaname, s.tablename, s.attname, s.inherited, "
 11169 "s.null_frac, s.avg_width, s.n_distinct, "
 11170 "s.most_common_vals, s.most_common_freqs, "
 11171 "s.histogram_bounds, s.correlation, "
 11172 "s.most_common_elems, s.most_common_elem_freqs, "
 11173 "s.elem_count_histogram, ");
 11174
 11175 if (fout->remoteVersion >= 170000)
/* NOTE(review): extraction gaps — the append call heads at embedded lines
 * 11176 and 11181 are missing; only their string arguments remain. */
 11177 "s.range_length_histogram, "
 11178 "s.range_empty_frac, "
 11179 "s.range_bounds_histogram ");
 11180 else
 11182 "NULL AS range_length_histogram,"
 11183 "NULL AS range_empty_frac,"
 11184 "NULL AS range_bounds_histogram ");
 11185
 11186 /*
 11187 * The results must be in the order of the relations supplied in the
 11188 * parameters to ensure we remain in sync as we walk through the TOC.
 11189 * The redundant filter clause on s.tablename = ANY(...) seems
 11190 * sufficient to convince the planner to use
 11191 * pg_class_relname_nsp_index, which avoids a full scan of pg_stats.
 11192 * This may not work for all versions.
 11193 *
 11194 * Our query for retrieving statistics for multiple relations uses
 11195 * WITH ORDINALITY and multi-argument UNNEST(), both of which were
 11196 * introduced in v9.4. For older versions, we resort to gathering
 11197 * statistics for a single relation at a time.
 11198 */
 11199 if (fout->remoteVersion >= 90400)
/* NOTE(review): extraction gaps — the append call heads at embedded lines
 * 11200 and 11208 are missing; only their string arguments remain. */
 11201 "FROM pg_catalog.pg_stats s "
 11202 "JOIN unnest($1, $2) WITH ORDINALITY AS u (schemaname, tablename, ord) "
 11203 "ON s.schemaname = u.schemaname "
 11204 "AND s.tablename = u.tablename "
 11205 "WHERE s.tablename = ANY($2) "
 11206 "ORDER BY u.ord, s.attname, s.inherited");
 11207 else
 11209 "FROM pg_catalog.pg_stats s "
 11210 "WHERE s.schemaname = $1[1] "
 11211 "AND s.tablename = $2[1] "
 11212 "ORDER BY s.attname, s.inherited");
 11213
 11214 ExecuteSqlStatement(fout, query->data);
 11215
/* NOTE(review): extraction gap — the statement recording that the prepared
 * statement now exists (embedded line 11216) is missing. */
 11217 resetPQExpBuffer(query);
 11218 }
 11219
 11220 initPQExpBuffer(out);
 11221
 11222 /* restore relation stats */
 11223 appendPQExpBufferStr(out, "SELECT * FROM pg_catalog.pg_restore_relation_stats(\n");
 11224 appendPQExpBuffer(out, "\t'version', '%d'::integer,\n",
/* NOTE(review): extraction gap — the version argument of this call (embedded
 * line 11225) is missing. */
 11226 appendPQExpBufferStr(out, "\t'schemaname', ");
 11227 appendStringLiteralAH(out, rsinfo->dobj.namespace->dobj.name, fout);
 11228 appendPQExpBufferStr(out, ",\n");
 11229 appendPQExpBufferStr(out, "\t'relname', ");
 11230 appendStringLiteralAH(out, rsinfo->dobj.name, fout);
 11231 appendPQExpBufferStr(out, ",\n");
 11232 appendPQExpBuffer(out, "\t'relpages', '%d'::integer,\n", rsinfo->relpages);
 11233
 11234 /*
 11235 * Before v14, a reltuples value of 0 was ambiguous: it could either mean
 11236 * the relation is empty, or it could mean that it hadn't yet been
 11237 * vacuumed or analyzed. (Newer versions use -1 for the latter case.)
 11238 * This ambiguity allegedly can cause the planner to choose inefficient
 11239 * plans after restoring to v18 or newer. To deal with this, let's just
 11240 * set reltuples to -1 in that case.
 11241 */
 11242 if (fout->remoteVersion < 140000 && strcmp("0", rsinfo->reltuples) == 0)
 11243 appendPQExpBufferStr(out, "\t'reltuples', '-1'::real,\n");
 11244 else
 11245 appendPQExpBuffer(out, "\t'reltuples', '%s'::real,\n", rsinfo->reltuples);
 11246
 11247 appendPQExpBuffer(out, "\t'relallvisible', '%d'::integer",
 11248 rsinfo->relallvisible);
 11249
 11250 if (fout->remoteVersion >= 180000)
 11251 appendPQExpBuffer(out, ",\n\t'relallfrozen', '%d'::integer", rsinfo->relallfrozen);
 11252
 11253 appendPQExpBufferStr(out, "\n);\n");
 11254
 11255 /* Fetch the next batch of attribute statistics if needed. */
 11256 if (rownum >= PQntuples(res))
 11257 {
 11258 PQclear(res);
/* NOTE(review): extraction gap — the call assigning res = fetchAttributeStats
 * (embedded line 11259) is missing. */
 11260 rownum = 0;
 11261 }
 11262
 11263 i_schemaname = PQfnumber(res, "schemaname");
 11264 i_tablename = PQfnumber(res, "tablename");
 11265 i_attname = PQfnumber(res, "attname");
 11266 i_inherited = PQfnumber(res, "inherited");
 11267 i_null_frac = PQfnumber(res, "null_frac");
 11268 i_avg_width = PQfnumber(res, "avg_width");
 11269 i_n_distinct = PQfnumber(res, "n_distinct");
 11270 i_most_common_vals = PQfnumber(res, "most_common_vals");
 11271 i_most_common_freqs = PQfnumber(res, "most_common_freqs");
 11272 i_histogram_bounds = PQfnumber(res, "histogram_bounds");
 11273 i_correlation = PQfnumber(res, "correlation");
 11274 i_most_common_elems = PQfnumber(res, "most_common_elems");
 11275 i_most_common_elem_freqs = PQfnumber(res, "most_common_elem_freqs");
 11276 i_elem_count_histogram = PQfnumber(res, "elem_count_histogram");
 11277 i_range_length_histogram = PQfnumber(res, "range_length_histogram");
 11278 i_range_empty_frac = PQfnumber(res, "range_empty_frac");
 11279 i_range_bounds_histogram = PQfnumber(res, "range_bounds_histogram");
 11280
 11281 /* restore attribute stats */
 11282 for (; rownum < PQntuples(res); rownum++)
 11283 {
 11284 const char *attname;
 11285
 11286 /* Stop if the next stat row in our cache isn't for this relation. */
 11287 if (strcmp(te->tag, PQgetvalue(res, rownum, i_tablename)) != 0 ||
 11288 strcmp(te->namespace, PQgetvalue(res, rownum, i_schemaname)) != 0)
 11289 break;
 11290
 11291 appendPQExpBufferStr(out, "SELECT * FROM pg_catalog.pg_restore_attribute_stats(\n");
 11292 appendPQExpBuffer(out, "\t'version', '%d'::integer,\n",
/* NOTE(review): extraction gap — the version argument of this call (embedded
 * line 11293) is missing. */
 11294 appendPQExpBufferStr(out, "\t'schemaname', ");
 11295 appendStringLiteralAH(out, rsinfo->dobj.namespace->dobj.name, fout);
 11296 appendPQExpBufferStr(out, ",\n\t'relname', ");
 11297 appendStringLiteralAH(out, rsinfo->dobj.name, fout);
 11298
 11299 if (PQgetisnull(res, rownum, i_attname))
 11300 pg_fatal("unexpected null attname");
 11301 attname = PQgetvalue(res, rownum, i_attname);
 11302
 11303 /*
 11304 * Indexes look up attname in indAttNames to derive attnum, all others
 11305 * use attname directly. We must specify attnum for indexes, since
 11306 * their attnames are not necessarily stable across dump/reload.
 11307 */
 11308 if (rsinfo->nindAttNames == 0)
 11309 {
 11310 appendPQExpBufferStr(out, ",\n\t'attname', ");
/* NOTE(review): extraction gap — the call appending 'attname' as a string
 * literal (embedded line 11311) is missing. */
 11312 }
 11313 else
 11314 {
 11315 bool found = false;
 11316
 11317 for (int i = 0; i < rsinfo->nindAttNames; i++)
 11318 {
 11319 if (strcmp(attname, rsinfo->indAttNames[i]) == 0)
 11320 {
 11321 appendPQExpBuffer(out, ",\n\t'attnum', '%d'::smallint",
 11322 i + 1);
 11323 found = true;
 11324 break;
 11325 }
 11326 }
 11327
 11328 if (!found)
 11329 pg_fatal("could not find index attname \"%s\"", attname);
 11330 }
 11331
 11332 if (!PQgetisnull(res, rownum, i_inherited))
 11333 appendNamedArgument(out, fout, "inherited", "boolean",
 11334 PQgetvalue(res, rownum, i_inherited));
 11335 if (!PQgetisnull(res, rownum, i_null_frac))
 11336 appendNamedArgument(out, fout, "null_frac", "real",
 11337 PQgetvalue(res, rownum, i_null_frac));
 11338 if (!PQgetisnull(res, rownum, i_avg_width))
 11339 appendNamedArgument(out, fout, "avg_width", "integer",
 11340 PQgetvalue(res, rownum, i_avg_width));
 11341 if (!PQgetisnull(res, rownum, i_n_distinct))
 11342 appendNamedArgument(out, fout, "n_distinct", "real",
 11343 PQgetvalue(res, rownum, i_n_distinct));
 11344 if (!PQgetisnull(res, rownum, i_most_common_vals))
 11345 appendNamedArgument(out, fout, "most_common_vals", "text",
 11346 PQgetvalue(res, rownum, i_most_common_vals));
 11347 if (!PQgetisnull(res, rownum, i_most_common_freqs))
 11348 appendNamedArgument(out, fout, "most_common_freqs", "real[]",
 11349 PQgetvalue(res, rownum, i_most_common_freqs));
 11350 if (!PQgetisnull(res, rownum, i_histogram_bounds))
 11351 appendNamedArgument(out, fout, "histogram_bounds", "text",
 11352 PQgetvalue(res, rownum, i_histogram_bounds));
 11353 if (!PQgetisnull(res, rownum, i_correlation))
 11354 appendNamedArgument(out, fout, "correlation", "real",
 11355 PQgetvalue(res, rownum, i_correlation));
 11356 if (!PQgetisnull(res, rownum, i_most_common_elems))
 11357 appendNamedArgument(out, fout, "most_common_elems", "text",
 11358 PQgetvalue(res, rownum, i_most_common_elems));
 11359 if (!PQgetisnull(res, rownum, i_most_common_elem_freqs))
 11360 appendNamedArgument(out, fout, "most_common_elem_freqs", "real[]",
 11361 PQgetvalue(res, rownum, i_most_common_elem_freqs));
 11362 if (!PQgetisnull(res, rownum, i_elem_count_histogram))
 11363 appendNamedArgument(out, fout, "elem_count_histogram", "real[]",
 11364 PQgetvalue(res, rownum, i_elem_count_histogram));
 11365 if (fout->remoteVersion >= 170000)
 11366 {
 11367 if (!PQgetisnull(res, rownum, i_range_length_histogram))
 11368 appendNamedArgument(out, fout, "range_length_histogram", "text",
 11369 PQgetvalue(res, rownum, i_range_length_histogram));
 11370 if (!PQgetisnull(res, rownum, i_range_empty_frac))
 11371 appendNamedArgument(out, fout, "range_empty_frac", "real",
 11372 PQgetvalue(res, rownum, i_range_empty_frac));
 11373 if (!PQgetisnull(res, rownum, i_range_bounds_histogram))
 11374 appendNamedArgument(out, fout, "range_bounds_histogram", "text",
 11375 PQgetvalue(res, rownum, i_range_bounds_histogram));
 11376 }
 11377 appendPQExpBufferStr(out, "\n);\n");
 11378 }
 11379
 11380 destroyPQExpBuffer(query);
 11381 return out->data;
 11382}
11383
11384/*
11385 * dumpRelationStats --
11386 *
11387 * Make an ArchiveEntry for the relation statistics. The Archiver will take
11388 * care of gathering the statistics and generating the restore commands when
11389 * they are needed.
11390 */
11391static void
/* NOTE(review): extraction gap — the signature line is missing; per the
 * header comment this is dumpRelationStats(Archive *fout, const RelStatsInfo
 * *rsinfo). */
 11393{
 11394 const DumpableObject *dobj = &rsinfo->dobj;
 11395
 11396 /* nothing to do if we are not dumping statistics */
 11397 if (!fout->dopt->dumpStatistics)
 11398 return;
 11399
/* NOTE(review): extraction gap — the ArchiveEntry(...) call head (embedded
 * line 11400) is missing; its ARCHIVE_OPTS arguments follow. The actual
 * statistics text is produced lazily by dumpRelationStats_dumper. */
 11401 ARCHIVE_OPTS(.tag = dobj->name,
 11402 .namespace = dobj->namespace->dobj.name,
 11403 .description = "STATISTICS DATA",
 11404 .section = rsinfo->section,
 11405 .defnFn = dumpRelationStats_dumper,
 11406 .defnArg = rsinfo,
 11407 .deps = dobj->dependencies,
 11408 .nDeps = dobj->nDeps));
 11409}
11410
11411/*
11412 * dumpTableComment --
11413 *
11414 * As above, but dump comments for both the specified table (or view)
11415 * and its columns.
11416 */
11417static void
/* NOTE(review): extraction gap — the first signature line (function name plus
 * the Archive *fout and TableInfo *tbinfo parameters used below) is
 * missing. */
 11419 const char *reltypename)
 11420{
 11421 DumpOptions *dopt = fout->dopt;
/* NOTE(review): extraction gap — the declaration of the CommentItem pointer
 * 'comments' (embedded line 11422) is missing; it is used below. */
 11423 int ncomments;
 11424 PQExpBuffer query;
 11425 PQExpBuffer tag;
 11426
 11427 /* do nothing, if --no-comments is supplied */
 11428 if (dopt->no_comments)
 11429 return;
 11430
 11431 /* Comments are SCHEMA not data */
 11432 if (!dopt->dumpSchema)
 11433 return;
 11434
 11435 /* Search for comments associated with relation, using table */
 11436 ncomments = findComments(tbinfo->dobj.catId.tableoid,
 11437 tbinfo->dobj.catId.oid,
 11438 &comments);
 11439
 11440 /* If comments exist, build COMMENT ON statements */
 11441 if (ncomments <= 0)
 11442 return;
 11443
 11444 query = createPQExpBuffer();
 11445 tag = createPQExpBuffer();
 11446
 11447 while (ncomments > 0)
 11448 {
 11449 const char *descr = comments->descr;
 11450 int objsubid = comments->objsubid;
 11451
/* objsubid == 0 is the comment on the relation itself; positive objsubid
 * selects the column with that attribute number. */
 11452 if (objsubid == 0)
 11453 {
 11454 resetPQExpBuffer(tag);
 11455 appendPQExpBuffer(tag, "%s %s", reltypename,
 11456 fmtId(tbinfo->dobj.name));
 11457
 11458 resetPQExpBuffer(query);
 11459 appendPQExpBuffer(query, "COMMENT ON %s %s IS ", reltypename,
/* NOTE(review): extraction gap — the argument formatting the qualified
 * relation name (embedded line 11460) is missing. */
 11461 appendStringLiteralAH(query, descr, fout);
 11462 appendPQExpBufferStr(query, ";\n");
 11463
/* NOTE(review): extraction gap — the ArchiveEntry(...) call head (embedded
 * line 11464) is missing; its ARCHIVE_OPTS arguments follow. */
 11465 ARCHIVE_OPTS(.tag = tag->data,
 11466 .namespace = tbinfo->dobj.namespace->dobj.name,
 11467 .owner = tbinfo->rolname,
 11468 .description = "COMMENT",
 11469 .section = SECTION_NONE,
 11470 .createStmt = query->data,
 11471 .deps = &(tbinfo->dobj.dumpId),
 11472 .nDeps = 1));
 11473 }
 11474 else if (objsubid > 0 && objsubid <= tbinfo->numatts)
 11475 {
 11476 resetPQExpBuffer(tag);
 11477 appendPQExpBuffer(tag, "COLUMN %s.",
 11478 fmtId(tbinfo->dobj.name));
 11479 appendPQExpBufferStr(tag, fmtId(tbinfo->attnames[objsubid - 1]));
 11480
 11481 resetPQExpBuffer(query);
 11482 appendPQExpBuffer(query, "COMMENT ON COLUMN %s.",
/* NOTE(review): extraction gap — the argument formatting the qualified
 * relation name (embedded line 11483) is missing. */
 11484 appendPQExpBuffer(query, "%s IS ",
 11485 fmtId(tbinfo->attnames[objsubid - 1]));
 11486 appendStringLiteralAH(query, descr, fout);
 11487 appendPQExpBufferStr(query, ";\n");
 11488
/* NOTE(review): extraction gap — the ArchiveEntry(...) call head (embedded
 * line 11489) is missing; its ARCHIVE_OPTS arguments follow. */
 11490 ARCHIVE_OPTS(.tag = tag->data,
 11491 .namespace = tbinfo->dobj.namespace->dobj.name,
 11492 .owner = tbinfo->rolname,
 11493 .description = "COMMENT",
 11494 .section = SECTION_NONE,
 11495 .createStmt = query->data,
 11496 .deps = &(tbinfo->dobj.dumpId),
 11497 .nDeps = 1));
 11498 }
 11499
 11500 comments++;
 11501 ncomments--;
 11502 }
 11503
 11504 destroyPQExpBuffer(query);
 11505 destroyPQExpBuffer(tag);
 11506}
11507
11508/*
11509 * findComments --
11510 *
11511 * Find the comment(s), if any, associated with the given object. All the
11512 * objsubid values associated with the given classoid/objoid are found with
11513 * one search.
11514 */
11515static int
/* NOTE(review): extraction gap — the signature line is missing; per the
 * callers above, the parameters are the classoid/objoid lookup key and an
 * output pointer 'items'. Returns the number of matching entries. */
 11517{
/* NOTE(review): extraction gap — the declaration of 'middle' (embedded line
 * 11518) is missing; it is assigned in the search loop below. */
 11519 CommentItem *low;
 11520 CommentItem *high;
 11521 int nmatch;
 11522
 11523 /*
 11524 * Do binary search to find some item matching the object.
 11525 */
 11526 low = &comments[0];
 11527 high = &comments[ncomments - 1];
 11528 while (low <= high)
 11529 {
 11530 middle = low + (high - low) / 2;
 11531
/* Keys are compared classoid-major, objoid-minor, matching the sort order
 * established by collectComments(). */
 11532 if (classoid < middle->classoid)
 11533 high = middle - 1;
 11534 else if (classoid > middle->classoid)
 11535 low = middle + 1;
 11536 else if (objoid < middle->objoid)
 11537 high = middle - 1;
 11538 else if (objoid > middle->objoid)
 11539 low = middle + 1;
 11540 else
 11541 break; /* found a match */
 11542 }
 11543
 11544 if (low > high) /* no matches */
 11545 {
 11546 *items = NULL;
 11547 return 0;
 11548 }
 11549
 11550 /*
 11551 * Now determine how many items match the object. The search loop
 11552 * invariant still holds: only items between low and high inclusive could
 11553 * match.
 11554 */
/* Expand leftward from the hit to the first entry with the same key... */
 11555 nmatch = 1;
 11556 while (middle > low)
 11557 {
 11558 if (classoid != middle[-1].classoid ||
 11559 objoid != middle[-1].objoid)
 11560 break;
 11561 middle--;
 11562 nmatch++;
 11563 }
 11564
 11565 *items = middle;
 11566
/* ...then rightward, counting the rest of the run of equal keys. */
 11567 middle += nmatch;
 11568 while (middle <= high)
 11569 {
 11570 if (classoid != middle->classoid ||
 11571 objoid != middle->objoid)
 11572 break;
 11573 middle++;
 11574 nmatch++;
 11575 }
 11576
 11577 return nmatch;
 11578}
11579
11580/*
11581 * collectComments --
11582 *
11583 * Construct a table of all comments available for database objects;
11584 * also set the has-comment component flag for each relevant object.
11585 *
11586 * We used to do per-object queries for the comments, but it's much faster
11587 * to pull them all over at once, and on most databases the memory cost
11588 * isn't high.
11589 *
11590 * The table is sorted by classoid/objid/objsubid for speed in lookup.
11591 */
11592static void
/* NOTE(review): extraction gap — the signature line is missing; per the
 * header comment this is collectComments, querying through 'fout'. */
 11594{
 11595 PGresult *res;
 11596 PQExpBuffer query;
 11597 int i_description;
 11598 int i_classoid;
 11599 int i_objoid;
 11600 int i_objsubid;
 11601 int ntups;
 11602 int i;
 11603 DumpableObject *dobj;
 11604
 11605 query = createPQExpBuffer();
 11606
/* The ORDER BY matches findComments()'s binary-search key order. */
 11607 appendPQExpBufferStr(query, "SELECT description, classoid, objoid, objsubid "
 11608 "FROM pg_catalog.pg_description "
 11609 "ORDER BY classoid, objoid, objsubid");
 11610
 11611 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
 11612
 11613 /* Construct lookup table containing OIDs in numeric form */
 11614
 11615 i_description = PQfnumber(res, "description");
 11616 i_classoid = PQfnumber(res, "classoid");
 11617 i_objoid = PQfnumber(res, "objoid");
 11618 i_objsubid = PQfnumber(res, "objsubid");
 11619
 11620 ntups = PQntuples(res);
 11621
/* NOTE(review): extraction gap — the allocation of the 'comments' array
 * (embedded line 11622) is missing; entries are stored into it below. */
 11623 ncomments = 0;
 11624 dobj = NULL;
 11625
 11626 for (i = 0; i < ntups; i++)
 11627 {
 11628 CatalogId objId;
 11629 int subid;
 11630
 11631 objId.tableoid = atooid(PQgetvalue(res, i, i_classoid));
 11632 objId.oid = atooid(PQgetvalue(res, i, i_objoid));
 11633 subid = atoi(PQgetvalue(res, i, i_objsubid));
 11634
 11635 /* We needn't remember comments that don't match any dumpable object */
/* Reuse the previous lookup when consecutive rows share the same object —
 * cheap cache exploiting the sorted result. */
 11636 if (dobj == NULL ||
 11637 dobj->catId.tableoid != objId.tableoid ||
 11638 dobj->catId.oid != objId.oid)
 11639 dobj = findObjectByCatalogId(objId);
 11640 if (dobj == NULL)
 11641 continue;
 11642
 11643 /*
 11644 * Comments on columns of composite types are linked to the type's
 11645 * pg_class entry, but we need to set the DUMP_COMPONENT_COMMENT flag
 11646 * in the type's own DumpableObject.
 11647 */
 11648 if (subid != 0 && dobj->objType == DO_TABLE &&
 11649 ((TableInfo *) dobj)->relkind == RELKIND_COMPOSITE_TYPE)
 11650 {
/* NOTE(review): extraction gap — the declaration of 'cTypeInfo' (embedded
 * line 11651) is missing; it is assigned immediately below. */
 11652
 11653 cTypeInfo = findTypeByOid(((TableInfo *) dobj)->reltype);
 11654 if (cTypeInfo)
 11655 cTypeInfo->dobj.components |= DUMP_COMPONENT_COMMENT;
 11656 }
 11657 else
 11658 dobj->components |= DUMP_COMPONENT_COMMENT;
 11659
/* NOTE(review): extraction gap — the statements storing descr and classoid
 * into comments[ncomments] (embedded lines 11660-11661) are missing. */
 11662 comments[ncomments].objoid = objId.oid;
 11663 comments[ncomments].objsubid = subid;
 11664 ncomments++;
 11665 }
 11666
 11667 PQclear(res);
 11668 destroyPQExpBuffer(query);
 11669}
11670
11671/*
11672 * dumpDumpableObject
11673 *
11674 * This routine and its subsidiaries are responsible for creating
11675 * ArchiveEntries (TOC objects) for each object to be dumped.
11676 */
11677static void
/* NOTE(review): extraction gap — the signature line is missing; per the
 * header comment this is dumpDumpableObject(fout, dobj), the central dispatch
 * that routes each DumpableObject to its type-specific dump routine. */
 11679{
 11680 /*
 11681 * Clear any dump-request bits for components that don't exist for this
 11682 * object. (This makes it safe to initially use DUMP_COMPONENT_ALL as the
 11683 * request for every kind of object.)
 11684 */
 11685 dobj->dump &= dobj->components;
 11686
 11687 /* Now, short-circuit if there's nothing to be done here. */
 11688 if (dobj->dump == 0)
 11689 return;
 11690
 11691 switch (dobj->objType)
 11692 {
 11693 case DO_NAMESPACE:
 11694 dumpNamespace(fout, (const NamespaceInfo *) dobj);
 11695 break;
 11696 case DO_EXTENSION:
 11697 dumpExtension(fout, (const ExtensionInfo *) dobj);
 11698 break;
 11699 case DO_TYPE:
 11700 dumpType(fout, (const TypeInfo *) dobj);
 11701 break;
 11702 case DO_SHELL_TYPE:
 11703 dumpShellType(fout, (const ShellTypeInfo *) dobj);
 11704 break;
 11705 case DO_FUNC:
 11706 dumpFunc(fout, (const FuncInfo *) dobj);
 11707 break;
 11708 case DO_AGG:
 11709 dumpAgg(fout, (const AggInfo *) dobj);
 11710 break;
 11711 case DO_OPERATOR:
 11712 dumpOpr(fout, (const OprInfo *) dobj);
 11713 break;
 11714 case DO_ACCESS_METHOD:
 11715 dumpAccessMethod(fout, (const AccessMethodInfo *) dobj);
 11716 break;
 11717 case DO_OPCLASS:
 11718 dumpOpclass(fout, (const OpclassInfo *) dobj);
 11719 break;
 11720 case DO_OPFAMILY:
 11721 dumpOpfamily(fout, (const OpfamilyInfo *) dobj);
 11722 break;
 11723 case DO_COLLATION:
 11724 dumpCollation(fout, (const CollInfo *) dobj);
 11725 break;
 11726 case DO_CONVERSION:
 11727 dumpConversion(fout, (const ConvInfo *) dobj);
 11728 break;
 11729 case DO_TABLE:
 11730 dumpTable(fout, (const TableInfo *) dobj);
 11731 break;
 11732 case DO_TABLE_ATTACH:
 11733 dumpTableAttach(fout, (const TableAttachInfo *) dobj);
 11734 break;
 11735 case DO_ATTRDEF:
 11736 dumpAttrDef(fout, (const AttrDefInfo *) dobj);
 11737 break;
 11738 case DO_INDEX:
 11739 dumpIndex(fout, (const IndxInfo *) dobj);
 11740 break;
 11741 case DO_INDEX_ATTACH:
 11742 dumpIndexAttach(fout, (const IndexAttachInfo *) dobj);
 11743 break;
 11744 case DO_STATSEXT:
 11745 dumpStatisticsExt(fout, (const StatsExtInfo *) dobj);
 11746 dumpStatisticsExtStats(fout, (const StatsExtInfo *) dobj);
 11747 break;
 11748 case DO_REFRESH_MATVIEW:
 11749 refreshMatViewData(fout, (const TableDataInfo *) dobj);
 11750 break;
 11751 case DO_RULE:
 11752 dumpRule(fout, (const RuleInfo *) dobj);
 11753 break;
 11754 case DO_TRIGGER:
 11755 dumpTrigger(fout, (const TriggerInfo *) dobj);
 11756 break;
 11757 case DO_EVENT_TRIGGER:
 11758 dumpEventTrigger(fout, (const EventTriggerInfo *) dobj);
 11759 break;
 11760 case DO_CONSTRAINT:
 11761 dumpConstraint(fout, (const ConstraintInfo *) dobj);
 11762 break;
 11763 case DO_FK_CONSTRAINT:
 11764 dumpConstraint(fout, (const ConstraintInfo *) dobj);
 11765 break;
 11766 case DO_PROCLANG:
 11767 dumpProcLang(fout, (const ProcLangInfo *) dobj);
 11768 break;
 11769 case DO_CAST:
 11770 dumpCast(fout, (const CastInfo *) dobj);
 11771 break;
 11772 case DO_TRANSFORM:
 11773 dumpTransform(fout, (const TransformInfo *) dobj);
 11774 break;
 11775 case DO_SEQUENCE_SET:
 11776 dumpSequenceData(fout, (const TableDataInfo *) dobj);
 11777 break;
 11778 case DO_TABLE_DATA:
 11779 dumpTableData(fout, (const TableDataInfo *) dobj);
 11780 break;
 11781 case DO_DUMMY_TYPE:
 11782 /* table rowtypes and array types are never dumped separately */
 11783 break;
 11784 case DO_TSPARSER:
 11785 dumpTSParser(fout, (const TSParserInfo *) dobj);
 11786 break;
 11787 case DO_TSDICT:
 11788 dumpTSDictionary(fout, (const TSDictInfo *) dobj);
 11789 break;
 11790 case DO_TSTEMPLATE:
 11791 dumpTSTemplate(fout, (const TSTemplateInfo *) dobj);
 11792 break;
 11793 case DO_TSCONFIG:
 11794 dumpTSConfig(fout, (const TSConfigInfo *) dobj);
 11795 break;
 11796 case DO_FDW:
 11797 dumpForeignDataWrapper(fout, (const FdwInfo *) dobj);
 11798 break;
 11799 case DO_FOREIGN_SERVER:
 11800 dumpForeignServer(fout, (const ForeignServerInfo *) dobj);
 11801 break;
 11802 case DO_DEFAULT_ACL:
 11803 dumpDefaultACL(fout, (const DefaultACLInfo *) dobj);
 11804 break;
 11805 case DO_LARGE_OBJECT:
 11806 dumpLO(fout, (const LoInfo *) dobj);
 11807 break;
/* NOTE(review): extraction gap — the case label at embedded line 11808
 * (presumably the large-object-data case, given the BLOBS handling below) is
 * missing. */
 11809 if (dobj->dump & DUMP_COMPONENT_DATA)
 11810 {
 11811 LoInfo *loinfo;
 11812 TocEntry *te;
 11813
 11814 loinfo = (LoInfo *) findObjectByDumpId(dobj->dependencies[0]);
 11815 if (loinfo == NULL)
 11816 pg_fatal("missing metadata for large objects \"%s\"",
 11817 dobj->name);
 11818
 11819 te = ArchiveEntry(fout, dobj->catId, dobj->dumpId,
 11820 ARCHIVE_OPTS(.tag = dobj->name,
 11821 .owner = loinfo->rolname,
 11822 .description = "BLOBS",
 11823 .section = SECTION_DATA,
 11824 .deps = dobj->dependencies,
 11825 .nDeps = dobj->nDeps,
 11826 .dumpFn = dumpLOs,
 11827 .dumpArg = loinfo));
 11828
 11829 /*
 11830 * Set the TocEntry's dataLength in case we are doing a
 11831 * parallel dump and want to order dump jobs by table size.
 11832 * (We need some size estimate for every TocEntry with a
 11833 * DataDumper function.) We don't currently have any cheap
 11834 * way to estimate the size of LOs, but fortunately it doesn't
 11835 * matter too much as long as we get large batches of LOs
 11836 * processed reasonably early. Assume 8K per blob.
 11837 */
 11838 te->dataLength = loinfo->numlos * (pgoff_t) 8192;
 11839 }
 11840 break;
 11841 case DO_POLICY:
 11842 dumpPolicy(fout, (const PolicyInfo *) dobj);
 11843 break;
 11844 case DO_PUBLICATION:
 11845 dumpPublication(fout, (const PublicationInfo *) dobj);
 11846 break;
 11847 case DO_PUBLICATION_REL:
/* NOTE(review): extraction gap — the call dumped for this case (embedded
 * line 11848) is missing. */
 11849 break;
/* NOTE(review): extraction gap — the case label and call head for the
 * publication-schema case (embedded lines 11850-11851) are missing; only the
 * cast argument remains. */
 11852 (const PublicationSchemaInfo *) dobj);
 11853 break;
 11854 case DO_SUBSCRIPTION:
 11855 dumpSubscription(fout, (const SubscriptionInfo *) dobj);
 11856 break;
/* NOTE(review): extraction gap — the case label for the subscription-table
 * case (embedded line 11857) is missing. */
 11858 dumpSubscriptionTable(fout, (const SubRelInfo *) dobj);
 11859 break;
 11860 case DO_REL_STATS:
 11861 dumpRelationStats(fout, (const RelStatsInfo *) dobj);
 11862 break;
/* NOTE(review): extraction gap — the case labels for never-dumped object
 * types (embedded lines 11863-11864) are missing above this comment. */
 11865 /* never dumped, nothing to do */
 11866 break;
 11867 }
 11868}
11869
11870/*
11871 * dumpNamespace
11872 * writes out to fout the queries to recreate a user-defined namespace
11873 */
/*
 * NOTE(review): this extract is missing several source lines — the parameter
 * list following "static void" (upstream: Archive *fout, const NamespaceInfo
 * *nspinfo), the declaration and createPQExpBuffer() initialization of the
 * "delq" buffer, and a few appendPQExpBuffer*/binary-upgrade call heads.
 * Verify against upstream src/bin/pg_dump/pg_dump.c before editing.
 */
11874static void
11876{
11877 DumpOptions *dopt = fout->dopt;
11878 PQExpBuffer q;
11880 char *qnspname;
11881
11882 /* Do nothing if not dumping schema */
11883 if (!dopt->dumpSchema)
11884 return;
11885
11886 q = createPQExpBuffer();
11888
 /* Identifier-quoted schema name; duplicated so it outlives fmtId()'s buffer */
11889 qnspname = pg_strdup(fmtId(nspinfo->dobj.name));
11890
11891 if (nspinfo->create)
11892 {
11893 appendPQExpBuffer(delq, "DROP SCHEMA %s;\n", qnspname);
11894 appendPQExpBuffer(q, "CREATE SCHEMA %s;\n", qnspname);
11895 }
11896 else
11897 {
11898 /* see selectDumpableNamespace() */
11900 "-- *not* dropping schema, since initdb creates it\n");
11902 "-- *not* creating schema, since initdb creates it\n");
11903 }
11904
11905 if (dopt->binary_upgrade)
11907 "SCHEMA", qnspname, NULL);
11908
 /* Register CREATE/DROP statements in the archive TOC */
11909 if (nspinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
11910 ArchiveEntry(fout, nspinfo->dobj.catId, nspinfo->dobj.dumpId,
11911 ARCHIVE_OPTS(.tag = nspinfo->dobj.name,
11912 .owner = nspinfo->rolname,
11913 .description = "SCHEMA",
11914 .section = SECTION_PRE_DATA,
11915 .createStmt = q->data,
11916 .dropStmt = delq->data));
11917
11918 /* Dump Schema Comments and Security Labels */
11919 if (nspinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
11920 {
11921 const char *initdb_comment = NULL;
11922
 /* the public schema's initdb-supplied comment gets special handling */
11923 if (!nspinfo->create && strcmp(qnspname, "public") == 0)
11924 initdb_comment = "standard public schema";
11926 NULL, nspinfo->rolname,
11927 nspinfo->dobj.catId, 0, nspinfo->dobj.dumpId,
11929 }
11930
11931 if (nspinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
11932 dumpSecLabel(fout, "SCHEMA", qnspname,
11933 NULL, nspinfo->rolname,
11934 nspinfo->dobj.catId, 0, nspinfo->dobj.dumpId);
11935
11936 if (nspinfo->dobj.dump & DUMP_COMPONENT_ACL)
11937 dumpACL(fout, nspinfo->dobj.dumpId, InvalidDumpId, "SCHEMA",
11938 qnspname, NULL, NULL,
11939 NULL, nspinfo->rolname, &nspinfo->dacl);
11940
11941 free(qnspname);
11942
11945}
11946
11947/*
11948 * dumpExtension
11949 * writes out to fout the queries to recreate an extension
11950 */
/*
 * NOTE(review): extract is missing lines — the parameter list (upstream:
 * Archive *fout, const ExtensionInfo *extinfo), the "delq" buffer declaration
 * and initialization, the DumpableObject *extobj declaration inside the
 * dependency loop, and a few append-call heads. Verify against upstream
 * src/bin/pg_dump/pg_dump.c before editing.
 */
11951static void
11953{
11954 DumpOptions *dopt = fout->dopt;
11955 PQExpBuffer q;
11957 char *qextname;
11958
11959 /* Do nothing if not dumping schema */
11960 if (!dopt->dumpSchema)
11961 return;
11962
11963 q = createPQExpBuffer();
11965
11966 qextname = pg_strdup(fmtId(extinfo->dobj.name));
11967
11968 appendPQExpBuffer(delq, "DROP EXTENSION %s;\n", qextname);
11969
11970 if (!dopt->binary_upgrade)
11971 {
11972 /*
11973 * In a regular dump, we simply create the extension, intentionally
11974 * not specifying a version, so that the destination installation's
11975 * default version is used.
11976 *
11977 * Use of IF NOT EXISTS here is unlike our behavior for other object
11978 * types; but there are various scenarios in which it's convenient to
11979 * manually create the desired extension before restoring, so we
11980 * prefer to allow it to exist already.
11981 */
11982 appendPQExpBuffer(q, "CREATE EXTENSION IF NOT EXISTS %s WITH SCHEMA %s;\n",
11983 qextname, fmtId(extinfo->namespace));
11984 }
11985 else
11986 {
11987 /*
11988 * In binary-upgrade mode, it's critical to reproduce the state of the
11989 * database exactly, so our procedure is to create an empty extension,
11990 * restore all the contained objects normally, and add them to the
11991 * extension one by one. This function performs just the first of
11992 * those steps. binary_upgrade_extension_member() takes care of
11993 * adding member objects as they're created.
11994 */
11995 int i;
11996 int n;
11997
11998 appendPQExpBufferStr(q, "-- For binary upgrade, create an empty extension and insert objects into it\n");
11999
12000 /*
12001 * We unconditionally create the extension, so we must drop it if it
12002 * exists. This could happen if the user deleted 'plpgsql' and then
12003 * readded it, causing its oid to be greater than g_last_builtin_oid.
12004 */
12005 appendPQExpBuffer(q, "DROP EXTENSION IF EXISTS %s;\n", qextname);
12006
12008 "SELECT pg_catalog.binary_upgrade_create_empty_extension(");
12009 appendStringLiteralAH(q, extinfo->dobj.name, fout);
12010 appendPQExpBufferStr(q, ", ");
12011 appendStringLiteralAH(q, extinfo->namespace, fout);
12012 appendPQExpBufferStr(q, ", ");
12013 appendPQExpBuffer(q, "%s, ", extinfo->relocatable ? "true" : "false");
12014 appendStringLiteralAH(q, extinfo->extversion, fout);
12015 appendPQExpBufferStr(q, ", ");
12016
12017 /*
12018 * Note that we're pushing extconfig (an OID array) back into
12019 * pg_extension exactly as-is. This is OK because pg_class OIDs are
12020 * preserved in binary upgrade.
12021 */
 /* length > 2 means the array literal is non-empty (more than "{}") */
12022 if (strlen(extinfo->extconfig) > 2)
12023 appendStringLiteralAH(q, extinfo->extconfig, fout);
12024 else
12025 appendPQExpBufferStr(q, "NULL");
12026 appendPQExpBufferStr(q, ", ");
12027 if (strlen(extinfo->extcondition) > 2)
12028 appendStringLiteralAH(q, extinfo->extcondition, fout);
12029 else
12030 appendPQExpBufferStr(q, "NULL");
12031 appendPQExpBufferStr(q, ", ");
 /* emit names of extensions this one depends on, as a text array */
12032 appendPQExpBufferStr(q, "ARRAY[");
12033 n = 0;
12034 for (i = 0; i < extinfo->dobj.nDeps; i++)
12035 {
12037
12038 extobj = findObjectByDumpId(extinfo->dobj.dependencies[i]);
12039 if (extobj && extobj->objType == DO_EXTENSION)
12040 {
12041 if (n++ > 0)
12042 appendPQExpBufferChar(q, ',');
12044 }
12045 }
12046 appendPQExpBufferStr(q, "]::pg_catalog.text[]");
12047 appendPQExpBufferStr(q, ");\n");
12048 }
12049
12050 if (extinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
12051 ArchiveEntry(fout, extinfo->dobj.catId, extinfo->dobj.dumpId,
12052 ARCHIVE_OPTS(.tag = extinfo->dobj.name,
12053 .description = "EXTENSION",
12054 .section = SECTION_PRE_DATA,
12055 .createStmt = q->data,
12056 .dropStmt = delq->data));
12057
12058 /* Dump Extension Comments */
12059 if (extinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
12060 dumpComment(fout, "EXTENSION", qextname,
12061 NULL, "",
12062 extinfo->dobj.catId, 0, extinfo->dobj.dumpId);
12063
12064 free(qextname);
12065
12068}
12069
12070/*
12071 * dumpType
12072 * writes out to fout the queries to recreate a user-defined type
12073 */
/*
 * NOTE(review): extract is missing lines — the parameter list (upstream:
 * Archive *fout, const TypeInfo *tyinfo) and the per-typtype dispatch call
 * lines after each condition (dumpBaseType, dumpDomain, dumpCompositeType,
 * dumpEnumType, dumpRangeType, dumpUndefinedType in upstream). Verify
 * against upstream src/bin/pg_dump/pg_dump.c before editing.
 */
12074static void
12076{
12077 DumpOptions *dopt = fout->dopt;
12078
12079 /* Do nothing if not dumping schema */
12080 if (!dopt->dumpSchema)
12081 return;
12082
12083 /* Dump out in proper style */
12084 if (tyinfo->typtype == TYPTYPE_BASE)
12086 else if (tyinfo->typtype == TYPTYPE_DOMAIN)
12088 else if (tyinfo->typtype == TYPTYPE_COMPOSITE)
12090 else if (tyinfo->typtype == TYPTYPE_ENUM)
12092 else if (tyinfo->typtype == TYPTYPE_RANGE)
12094 else if (tyinfo->typtype == TYPTYPE_PSEUDO && !tyinfo->isDefined)
12096 else
12097 pg_log_warning("typtype of data type \"%s\" appears to be invalid",
12098 tyinfo->dobj.name);
12099}
12100
12101/*
12102 * dumpEnumType
12103 * writes out to fout the queries to recreate a user-defined enum type
12104 */
/*
 * NOTE(review): extract is missing lines — the parameter list (upstream:
 * Archive *fout, const TypeInfo *tyinfo), the q/delq/query buffer
 * declarations and creation, the is_prepared[] guard around the PREPARE
 * block, and several append-call heads. Verify against upstream
 * src/bin/pg_dump/pg_dump.c before editing.
 */
12105static void
12107{
12108 DumpOptions *dopt = fout->dopt;
12112 PGresult *res;
12113 int num,
12114 i;
12115 Oid enum_oid;
12116 char *qtypname;
12117 char *qualtypname;
12118 char *label;
12119 int i_enumlabel;
12120 int i_oid;
12121
12123 {
12124 /* Set up query for enum-specific details */
12126 "PREPARE dumpEnumType(pg_catalog.oid) AS\n"
12127 "SELECT oid, enumlabel "
12128 "FROM pg_catalog.pg_enum "
12129 "WHERE enumtypid = $1 "
12130 "ORDER BY enumsortorder");
12131
12132 ExecuteSqlStatement(fout, query->data);
12133
12135 }
12136
12137 printfPQExpBuffer(query,
12138 "EXECUTE dumpEnumType('%u')",
12139 tyinfo->dobj.catId.oid);
12140
12141 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
12142
12143 num = PQntuples(res);
12144
12145 qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
12147
12148 /*
12149 * CASCADE shouldn't be required here as for normal types since the I/O
12150 * functions are generic and do not get dropped.
12151 */
12152 appendPQExpBuffer(delq, "DROP TYPE %s;\n", qualtypname);
12153
12154 if (dopt->binary_upgrade)
12156 tyinfo->dobj.catId.oid,
12157 false, false);
12158
12159 appendPQExpBuffer(q, "CREATE TYPE %s AS ENUM (",
12160 qualtypname);
12161
12162 if (!dopt->binary_upgrade)
12163 {
12164 i_enumlabel = PQfnumber(res, "enumlabel");
12165
12166 /* Labels with server-assigned oids */
12167 for (i = 0; i < num; i++)
12168 {
12169 label = PQgetvalue(res, i, i_enumlabel);
12170 if (i > 0)
12171 appendPQExpBufferChar(q, ',');
12172 appendPQExpBufferStr(q, "\n ");
12174 }
12175 }
12176
12177 appendPQExpBufferStr(q, "\n);\n");
12178
12179 if (dopt->binary_upgrade)
12180 {
12181 i_oid = PQfnumber(res, "oid");
12182 i_enumlabel = PQfnumber(res, "enumlabel");
12183
12184 /* Labels with dump-assigned (preserved) oids */
12185 for (i = 0; i < num; i++)
12186 {
12187 enum_oid = atooid(PQgetvalue(res, i, i_oid));
12188 label = PQgetvalue(res, i, i_enumlabel);
12189
12190 if (i == 0)
12191 appendPQExpBufferStr(q, "\n-- For binary upgrade, must preserve pg_enum oids\n");
12193 "SELECT pg_catalog.binary_upgrade_set_next_pg_enum_oid('%u'::pg_catalog.oid);\n",
12194 enum_oid);
12195 appendPQExpBuffer(q, "ALTER TYPE %s ADD VALUE ", qualtypname);
12197 appendPQExpBufferStr(q, ";\n\n");
12198 }
12199 }
12200
12201 if (dopt->binary_upgrade)
12203 "TYPE", qtypname,
12204 tyinfo->dobj.namespace->dobj.name);
12205
12206 if (tyinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
12207 ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
12208 ARCHIVE_OPTS(.tag = tyinfo->dobj.name,
12209 .namespace = tyinfo->dobj.namespace->dobj.name,
12210 .owner = tyinfo->rolname,
12211 .description = "TYPE",
12212 .section = SECTION_PRE_DATA,
12213 .createStmt = q->data,
12214 .dropStmt = delq->data));
12215
12216 /* Dump Type Comments and Security Labels */
12217 if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
12218 dumpComment(fout, "TYPE", qtypname,
12219 tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12220 tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12221
12222 if (tyinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
12223 dumpSecLabel(fout, "TYPE", qtypname,
12224 tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12225 tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12226
12227 if (tyinfo->dobj.dump & DUMP_COMPONENT_ACL)
12228 dumpACL(fout, tyinfo->dobj.dumpId, InvalidDumpId, "TYPE",
12229 qtypname, NULL,
12230 tyinfo->dobj.namespace->dobj.name,
12231 NULL, tyinfo->rolname, &tyinfo->dacl);
12232
12233 PQclear(res);
12236 destroyPQExpBuffer(query);
12237 free(qtypname);
12239}
12240
12241/*
12242 * dumpRangeType
12243 * writes out to fout the queries to recreate a user-defined range type
12244 */
/*
 * NOTE(review): extract is missing lines — the parameter list (upstream:
 * Archive *fout, const TypeInfo *tyinfo), the q/delq/query buffer setup,
 * the Oid collationOid declaration, the CollInfo *coll lookup line, and
 * several append-call heads. Verify against upstream
 * src/bin/pg_dump/pg_dump.c before editing.
 */
12245static void
12247{
12248 DumpOptions *dopt = fout->dopt;
12252 PGresult *res;
12254 char *qtypname;
12255 char *qualtypname;
12256 char *procname;
12257
12259 {
12260 /* Set up query for range-specific details */
12262 "PREPARE dumpRangeType(pg_catalog.oid) AS\n");
12263
12265 "SELECT ");
12266
 /* rngmultitypid only exists in servers >= v14 (multirange support) */
12267 if (fout->remoteVersion >= 140000)
12269 "pg_catalog.format_type(rngmultitypid, NULL) AS rngmultitype, ");
12270 else
12272 "NULL AS rngmultitype, ");
12273
12275 "pg_catalog.format_type(rngsubtype, NULL) AS rngsubtype, "
12276 "opc.opcname AS opcname, "
12277 "(SELECT nspname FROM pg_catalog.pg_namespace nsp "
12278 " WHERE nsp.oid = opc.opcnamespace) AS opcnsp, "
12279 "opc.opcdefault, "
12280 "CASE WHEN rngcollation = st.typcollation THEN 0 "
12281 " ELSE rngcollation END AS collation, "
12282 "rngcanonical, rngsubdiff "
12283 "FROM pg_catalog.pg_range r, pg_catalog.pg_type st, "
12284 " pg_catalog.pg_opclass opc "
12285 "WHERE st.oid = rngsubtype AND opc.oid = rngsubopc AND "
12286 "rngtypid = $1");
12287
12288 ExecuteSqlStatement(fout, query->data);
12289
12291 }
12292
12293 printfPQExpBuffer(query,
12294 "EXECUTE dumpRangeType('%u')",
12295 tyinfo->dobj.catId.oid);
12296
12297 res = ExecuteSqlQueryForSingleRow(fout, query->data);
12298
12299 qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
12301
12302 /*
12303 * CASCADE shouldn't be required here as for normal types since the I/O
12304 * functions are generic and do not get dropped.
12305 */
12306 appendPQExpBuffer(delq, "DROP TYPE %s;\n", qualtypname);
12307
12308 if (dopt->binary_upgrade)
12310 tyinfo->dobj.catId.oid,
12311 false, true);
12312
12313 appendPQExpBuffer(q, "CREATE TYPE %s AS RANGE (",
12314 qualtypname);
12315
12316 appendPQExpBuffer(q, "\n subtype = %s",
12317 PQgetvalue(res, 0, PQfnumber(res, "rngsubtype")));
12318
12319 if (!PQgetisnull(res, 0, PQfnumber(res, "rngmultitype")))
12320 appendPQExpBuffer(q, ",\n multirange_type_name = %s",
12321 PQgetvalue(res, 0, PQfnumber(res, "rngmultitype")));
12322
12323 /* print subtype_opclass only if not default for subtype */
12324 if (PQgetvalue(res, 0, PQfnumber(res, "opcdefault"))[0] != 't')
12325 {
12326 char *opcname = PQgetvalue(res, 0, PQfnumber(res, "opcname"));
12327 char *nspname = PQgetvalue(res, 0, PQfnumber(res, "opcnsp"));
12328
12329 appendPQExpBuffer(q, ",\n subtype_opclass = %s.",
12330 fmtId(nspname));
12332 }
12333
 /* query returned 0 when collation matches the subtype's default */
12334 collationOid = atooid(PQgetvalue(res, 0, PQfnumber(res, "collation")));
12336 {
12338
12339 if (coll)
12340 appendPQExpBuffer(q, ",\n collation = %s",
12342 }
12343
 /* "-" is regproc's spelling of InvalidOid, i.e. no function set */
12344 procname = PQgetvalue(res, 0, PQfnumber(res, "rngcanonical"));
12345 if (strcmp(procname, "-") != 0)
12346 appendPQExpBuffer(q, ",\n canonical = %s", procname);
12347
12348 procname = PQgetvalue(res, 0, PQfnumber(res, "rngsubdiff"));
12349 if (strcmp(procname, "-") != 0)
12350 appendPQExpBuffer(q, ",\n subtype_diff = %s", procname);
12351
12352 appendPQExpBufferStr(q, "\n);\n");
12353
12354 if (dopt->binary_upgrade)
12356 "TYPE", qtypname,
12357 tyinfo->dobj.namespace->dobj.name);
12358
12359 if (tyinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
12360 ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
12361 ARCHIVE_OPTS(.tag = tyinfo->dobj.name,
12362 .namespace = tyinfo->dobj.namespace->dobj.name,
12363 .owner = tyinfo->rolname,
12364 .description = "TYPE",
12365 .section = SECTION_PRE_DATA,
12366 .createStmt = q->data,
12367 .dropStmt = delq->data));
12368
12369 /* Dump Type Comments and Security Labels */
12370 if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
12371 dumpComment(fout, "TYPE", qtypname,
12372 tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12373 tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12374
12375 if (tyinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
12376 dumpSecLabel(fout, "TYPE", qtypname,
12377 tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12378 tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12379
12380 if (tyinfo->dobj.dump & DUMP_COMPONENT_ACL)
12381 dumpACL(fout, tyinfo->dobj.dumpId, InvalidDumpId, "TYPE",
12382 qtypname, NULL,
12383 tyinfo->dobj.namespace->dobj.name,
12384 NULL, tyinfo->rolname, &tyinfo->dacl);
12385
12386 PQclear(res);
12389 destroyPQExpBuffer(query);
12390 free(qtypname);
12392}
12393
12394/*
12395 * dumpUndefinedType
12396 * writes out to fout the queries to recreate a !typisdefined type
12397 *
12398 * This is a shell type, but we use different terminology to distinguish
12399 * this case from where we have to emit a shell type definition to break
12400 * circular dependencies. An undefined type shouldn't ever have anything
12401 * depending on it.
12402 */
/*
 * NOTE(review): extract is missing lines — the parameter list (upstream:
 * Archive *fout, const TypeInfo *tyinfo), the q/delq buffer declarations
 * and creation, the qualtypname assignment, and the buffer-destroy calls
 * at the end. Verify against upstream src/bin/pg_dump/pg_dump.c before
 * editing.
 */
12403static void
12405{
12406 DumpOptions *dopt = fout->dopt;
12409 char *qtypname;
12410 char *qualtypname;
12411
12412 qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
12414
12415 appendPQExpBuffer(delq, "DROP TYPE %s;\n", qualtypname);
12416
12417 if (dopt->binary_upgrade)
12419 tyinfo->dobj.catId.oid,
12420 false, false);
12421
 /* CREATE TYPE with no attribute list makes a shell (undefined) type */
12422 appendPQExpBuffer(q, "CREATE TYPE %s;\n",
12423 qualtypname);
12424
12425 if (dopt->binary_upgrade)
12427 "TYPE", qtypname,
12428 tyinfo->dobj.namespace->dobj.name);
12429
12430 if (tyinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
12431 ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
12432 ARCHIVE_OPTS(.tag = tyinfo->dobj.name,
12433 .namespace = tyinfo->dobj.namespace->dobj.name,
12434 .owner = tyinfo->rolname,
12435 .description = "TYPE",
12436 .section = SECTION_PRE_DATA,
12437 .createStmt = q->data,
12438 .dropStmt = delq->data));
12439
12440 /* Dump Type Comments and Security Labels */
12441 if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
12442 dumpComment(fout, "TYPE", qtypname,
12443 tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12444 tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12445
12446 if (tyinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
12447 dumpSecLabel(fout, "TYPE", qtypname,
12448 tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12449 tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12450
12451 if (tyinfo->dobj.dump & DUMP_COMPONENT_ACL)
12452 dumpACL(fout, tyinfo->dobj.dumpId, InvalidDumpId, "TYPE",
12453 qtypname, NULL,
12454 tyinfo->dobj.namespace->dobj.name,
12455 NULL, tyinfo->rolname, &tyinfo->dacl);
12456
12459 free(qtypname);
12461}
12462
12463/*
12464 * dumpBaseType
12465 * writes out to fout the queries to recreate a user-defined base type
12466 */
/*
 * NOTE(review): extract is missing lines — the parameter list (upstream:
 * Archive *fout, const TypeInfo *tyinfo), the q/delq/query buffer setup,
 * several Oid locals (typreceiveoid, typsendoid, typmodinoid, typmodoutoid,
 * typanalyzeoid, typsubscriptoid), the OidIsValid() guards before the
 * RECEIVE/SEND/TYPMOD_IN/TYPMOD_OUT/ANALYZE/SUBSCRIPT clauses, and the
 * DEFAULT literal-vs-expression branch bodies. Verify against upstream
 * src/bin/pg_dump/pg_dump.c before editing.
 */
12467static void
12469{
12470 DumpOptions *dopt = fout->dopt;
12474 PGresult *res;
12475 char *qtypname;
12476 char *qualtypname;
12477 char *typlen;
12478 char *typinput;
12479 char *typoutput;
12480 char *typreceive;
12481 char *typsend;
12482 char *typmodin;
12483 char *typmodout;
12484 char *typanalyze;
12485 char *typsubscript;
12492 char *typcategory;
12493 char *typispreferred;
12494 char *typdelim;
12495 char *typbyval;
12496 char *typalign;
12497 char *typstorage;
12498 char *typcollatable;
12499 char *typdefault;
12500 bool typdefault_is_literal = false;
12501
12503 {
12504 /* Set up query for type-specific details */
12506 "PREPARE dumpBaseType(pg_catalog.oid) AS\n"
12507 "SELECT typlen, "
12508 "typinput, typoutput, typreceive, typsend, "
12509 "typreceive::pg_catalog.oid AS typreceiveoid, "
12510 "typsend::pg_catalog.oid AS typsendoid, "
12511 "typanalyze, "
12512 "typanalyze::pg_catalog.oid AS typanalyzeoid, "
12513 "typdelim, typbyval, typalign, typstorage, "
12514 "typmodin, typmodout, "
12515 "typmodin::pg_catalog.oid AS typmodinoid, "
12516 "typmodout::pg_catalog.oid AS typmodoutoid, "
12517 "typcategory, typispreferred, "
12518 "(typcollation <> 0) AS typcollatable, "
12519 "pg_catalog.pg_get_expr(typdefaultbin, 0) AS typdefaultbin, typdefault, ");
12520
 /* typsubscript only exists in servers >= v14 */
12521 if (fout->remoteVersion >= 140000)
12523 "typsubscript, "
12524 "typsubscript::pg_catalog.oid AS typsubscriptoid ");
12525 else
12527 "'-' AS typsubscript, 0 AS typsubscriptoid ");
12528
12529 appendPQExpBufferStr(query, "FROM pg_catalog.pg_type "
12530 "WHERE oid = $1");
12531
12532 ExecuteSqlStatement(fout, query->data);
12533
12535 }
12536
12537 printfPQExpBuffer(query,
12538 "EXECUTE dumpBaseType('%u')",
12539 tyinfo->dobj.catId.oid);
12540
12541 res = ExecuteSqlQueryForSingleRow(fout, query->data);
12542
12543 typlen = PQgetvalue(res, 0, PQfnumber(res, "typlen"));
12544 typinput = PQgetvalue(res, 0, PQfnumber(res, "typinput"));
12545 typoutput = PQgetvalue(res, 0, PQfnumber(res, "typoutput"));
12546 typreceive = PQgetvalue(res, 0, PQfnumber(res, "typreceive"));
12547 typsend = PQgetvalue(res, 0, PQfnumber(res, "typsend"));
12548 typmodin = PQgetvalue(res, 0, PQfnumber(res, "typmodin"));
12549 typmodout = PQgetvalue(res, 0, PQfnumber(res, "typmodout"));
12550 typanalyze = PQgetvalue(res, 0, PQfnumber(res, "typanalyze"));
12551 typsubscript = PQgetvalue(res, 0, PQfnumber(res, "typsubscript"));
12552 typreceiveoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typreceiveoid")));
12553 typsendoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typsendoid")));
12554 typmodinoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typmodinoid")));
12555 typmodoutoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typmodoutoid")));
12556 typanalyzeoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typanalyzeoid")));
12557 typsubscriptoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typsubscriptoid")));
12558 typcategory = PQgetvalue(res, 0, PQfnumber(res, "typcategory"));
12559 typispreferred = PQgetvalue(res, 0, PQfnumber(res, "typispreferred"));
12560 typdelim = PQgetvalue(res, 0, PQfnumber(res, "typdelim"));
12561 typbyval = PQgetvalue(res, 0, PQfnumber(res, "typbyval"));
12562 typalign = PQgetvalue(res, 0, PQfnumber(res, "typalign"));
12563 typstorage = PQgetvalue(res, 0, PQfnumber(res, "typstorage"));
12564 typcollatable = PQgetvalue(res, 0, PQfnumber(res, "typcollatable"));
 /* prefer the deparsed expression; fall back to the literal default */
12565 if (!PQgetisnull(res, 0, PQfnumber(res, "typdefaultbin")))
12566 typdefault = PQgetvalue(res, 0, PQfnumber(res, "typdefaultbin"));
12567 else if (!PQgetisnull(res, 0, PQfnumber(res, "typdefault")))
12568 {
12569 typdefault = PQgetvalue(res, 0, PQfnumber(res, "typdefault"));
12570 typdefault_is_literal = true; /* it needs quotes */
12571 }
12572 else
12573 typdefault = NULL;
12574
12575 qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
12577
12578 /*
12579 * The reason we include CASCADE is that the circular dependency between
12580 * the type and its I/O functions makes it impossible to drop the type any
12581 * other way.
12582 */
12583 appendPQExpBuffer(delq, "DROP TYPE %s CASCADE;\n", qualtypname);
12584
12585 /*
12586 * We might already have a shell type, but setting pg_type_oid is
12587 * harmless, and in any case we'd better set the array type OID.
12588 */
12589 if (dopt->binary_upgrade)
12591 tyinfo->dobj.catId.oid,
12592 false, false);
12593
12595 "CREATE TYPE %s (\n"
12596 " INTERNALLENGTH = %s",
12598 (strcmp(typlen, "-1") == 0) ? "variable" : typlen);
12599
12600 /* regproc result is sufficiently quoted already */
12601 appendPQExpBuffer(q, ",\n INPUT = %s", typinput);
12602 appendPQExpBuffer(q, ",\n OUTPUT = %s", typoutput);
12604 appendPQExpBuffer(q, ",\n RECEIVE = %s", typreceive);
12606 appendPQExpBuffer(q, ",\n SEND = %s", typsend);
12608 appendPQExpBuffer(q, ",\n TYPMOD_IN = %s", typmodin);
12610 appendPQExpBuffer(q, ",\n TYPMOD_OUT = %s", typmodout);
12612 appendPQExpBuffer(q, ",\n ANALYZE = %s", typanalyze);
12613
12614 if (strcmp(typcollatable, "t") == 0)
12615 appendPQExpBufferStr(q, ",\n COLLATABLE = true");
12616
12617 if (typdefault != NULL)
12618 {
12619 appendPQExpBufferStr(q, ",\n DEFAULT = ");
12622 else
12624 }
12625
12627 appendPQExpBuffer(q, ",\n SUBSCRIPT = %s", typsubscript);
12628
12629 if (OidIsValid(tyinfo->typelem))
12630 appendPQExpBuffer(q, ",\n ELEMENT = %s",
12632 zeroIsError));
12633
 /* 'U' (user) is the default category, so only emit others */
12634 if (strcmp(typcategory, "U") != 0)
12635 {
12636 appendPQExpBufferStr(q, ",\n CATEGORY = ");
12638 }
12639
12640 if (strcmp(typispreferred, "t") == 0)
12641 appendPQExpBufferStr(q, ",\n PREFERRED = true");
12642
 /* comma is the default delimiter, so only emit others */
12643 if (typdelim && strcmp(typdelim, ",") != 0)
12644 {
12645 appendPQExpBufferStr(q, ",\n DELIMITER = ");
12646 appendStringLiteralAH(q, typdelim, fout);
12647 }
12648
12649 if (*typalign == TYPALIGN_CHAR)
12650 appendPQExpBufferStr(q, ",\n ALIGNMENT = char");
12651 else if (*typalign == TYPALIGN_SHORT)
12652 appendPQExpBufferStr(q, ",\n ALIGNMENT = int2");
12653 else if (*typalign == TYPALIGN_INT)
12654 appendPQExpBufferStr(q, ",\n ALIGNMENT = int4");
12655 else if (*typalign == TYPALIGN_DOUBLE)
12656 appendPQExpBufferStr(q, ",\n ALIGNMENT = double");
12657
12658 if (*typstorage == TYPSTORAGE_PLAIN)
12659 appendPQExpBufferStr(q, ",\n STORAGE = plain");
12660 else if (*typstorage == TYPSTORAGE_EXTERNAL)
12661 appendPQExpBufferStr(q, ",\n STORAGE = external");
12662 else if (*typstorage == TYPSTORAGE_EXTENDED)
12663 appendPQExpBufferStr(q, ",\n STORAGE = extended");
12664 else if (*typstorage == TYPSTORAGE_MAIN)
12665 appendPQExpBufferStr(q, ",\n STORAGE = main");
12666
12667 if (strcmp(typbyval, "t") == 0)
12668 appendPQExpBufferStr(q, ",\n PASSEDBYVALUE");
12669
12670 appendPQExpBufferStr(q, "\n);\n");
12671
12672 if (dopt->binary_upgrade)
12674 "TYPE", qtypname,
12675 tyinfo->dobj.namespace->dobj.name);
12676
12677 if (tyinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
12678 ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
12679 ARCHIVE_OPTS(.tag = tyinfo->dobj.name,
12680 .namespace = tyinfo->dobj.namespace->dobj.name,
12681 .owner = tyinfo->rolname,
12682 .description = "TYPE",
12683 .section = SECTION_PRE_DATA,
12684 .createStmt = q->data,
12685 .dropStmt = delq->data));
12686
12687 /* Dump Type Comments and Security Labels */
12688 if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
12689 dumpComment(fout, "TYPE", qtypname,
12690 tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12691 tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12692
12693 if (tyinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
12694 dumpSecLabel(fout, "TYPE", qtypname,
12695 tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12696 tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12697
12698 if (tyinfo->dobj.dump & DUMP_COMPONENT_ACL)
12699 dumpACL(fout, tyinfo->dobj.dumpId, InvalidDumpId, "TYPE",
12700 qtypname, NULL,
12701 tyinfo->dobj.namespace->dobj.name,
12702 NULL, tyinfo->rolname, &tyinfo->dacl);
12703
12704 PQclear(res);
12707 destroyPQExpBuffer(query);
12708 free(qtypname);
12710}
12711
12712/*
12713 * dumpDomain
12714 * writes out to fout the queries to recreate a user-defined domain
12715 */
12716static void
12718{
12719 DumpOptions *dopt = fout->dopt;
12723 PGresult *res;
12724 int i;
12725 char *qtypname;
12726 char *qualtypname;
12727 char *typnotnull;
12728 char *typdefn;
12729 char *typdefault;
12730 Oid typcollation;
12731 bool typdefault_is_literal = false;
12732
12734 {
12735 /* Set up query for domain-specific details */
12737 "PREPARE dumpDomain(pg_catalog.oid) AS\n");
12738
12739 appendPQExpBufferStr(query, "SELECT t.typnotnull, "
12740 "pg_catalog.format_type(t.typbasetype, t.typtypmod) AS typdefn, "
12741 "pg_catalog.pg_get_expr(t.typdefaultbin, 'pg_catalog.pg_type'::pg_catalog.regclass) AS typdefaultbin, "
12742 "t.typdefault, "
12743 "CASE WHEN t.typcollation <> u.typcollation "
12744 "THEN t.typcollation ELSE 0 END AS typcollation "
12745 "FROM pg_catalog.pg_type t "
12746 "LEFT JOIN pg_catalog.pg_type u ON (t.typbasetype = u.oid) "
12747 "WHERE t.oid = $1");
12748
12749 ExecuteSqlStatement(fout, query->data);
12750
12752 }
12753
12754 printfPQExpBuffer(query,
12755 "EXECUTE dumpDomain('%u')",
12756 tyinfo->dobj.catId.oid);
12757
12758 res = ExecuteSqlQueryForSingleRow(fout, query->data);
12759
12760 typnotnull = PQgetvalue(res, 0, PQfnumber(res, "typnotnull"));
12761 typdefn = PQgetvalue(res, 0, PQfnumber(res, "typdefn"));
12762 if (!PQgetisnull(res, 0, PQfnumber(res, "typdefaultbin")))
12763 typdefault = PQgetvalue(res, 0, PQfnumber(res, "typdefaultbin"));
12764 else if (!PQgetisnull(res, 0, PQfnumber(res, "typdefault")))
12765 {
12766 typdefault = PQgetvalue(res, 0, PQfnumber(res, "typdefault"));
12767 typdefault_is_literal = true; /* it needs quotes */
12768 }
12769 else
12770 typdefault = NULL;
12771 typcollation = atooid(PQgetvalue(res, 0, PQfnumber(res, "typcollation")));
12772
12773 if (dopt->binary_upgrade)
12775 tyinfo->dobj.catId.oid,
12776 true, /* force array type */
12777 false); /* force multirange type */
12778
12779 qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
12781
12783 "CREATE DOMAIN %s AS %s",
12785 typdefn);
12786
12787 /* Print collation only if different from base type's collation */
12788 if (OidIsValid(typcollation))
12789 {
12790 CollInfo *coll;
12791
12792 coll = findCollationByOid(typcollation);
12793 if (coll)
12794 appendPQExpBuffer(q, " COLLATE %s", fmtQualifiedDumpable(coll));
12795 }
12796
12797 /*
12798 * Print a not-null constraint if there's one. In servers older than 17
12799 * these don't have names, so just print it unadorned; in newer ones they
12800 * do, but most of the time it's going to be the standard generated one,
12801 * so omit the name in that case also.
12802 */
12803 if (typnotnull[0] == 't')
12804 {
12805 if (fout->remoteVersion < 170000 || tyinfo->notnull == NULL)
12806 appendPQExpBufferStr(q, " NOT NULL");
12807 else
12808 {
12809 ConstraintInfo *notnull = tyinfo->notnull;
12810
12811 if (!notnull->separate)
12812 {
12813 char *default_name;
12814
12815 /* XXX should match ChooseConstraintName better */
12816 default_name = psprintf("%s_not_null", tyinfo->dobj.name);
12817
12818 if (strcmp(default_name, notnull->dobj.name) == 0)
12819 appendPQExpBufferStr(q, " NOT NULL");
12820 else
12821 appendPQExpBuffer(q, " CONSTRAINT %s %s",
12822 fmtId(notnull->dobj.name), notnull->condef);
12824 }
12825 }
12826 }
12827
12828 if (typdefault != NULL)
12829 {
12830 appendPQExpBufferStr(q, " DEFAULT ");
12833 else
12835 }
12836
12837 PQclear(res);
12838
12839 /*
12840 * Add any CHECK constraints for the domain
12841 */
12842 for (i = 0; i < tyinfo->nDomChecks; i++)
12843 {
12844 ConstraintInfo *domcheck = &(tyinfo->domChecks[i]);
12845
12846 if (!domcheck->separate && domcheck->contype == 'c')
12847 appendPQExpBuffer(q, "\n\tCONSTRAINT %s %s",
12848 fmtId(domcheck->dobj.name), domcheck->condef);
12849 }
12850
12851 appendPQExpBufferStr(q, ";\n");
12852
12853 appendPQExpBuffer(delq, "DROP DOMAIN %s;\n", qualtypname);
12854
12855 if (dopt->binary_upgrade)
12857 "DOMAIN", qtypname,
12858 tyinfo->dobj.namespace->dobj.name);
12859
12860 if (tyinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
12861 ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
12862 ARCHIVE_OPTS(.tag = tyinfo->dobj.name,
12863 .namespace = tyinfo->dobj.namespace->dobj.name,
12864 .owner = tyinfo->rolname,
12865 .description = "DOMAIN",
12866 .section = SECTION_PRE_DATA,
12867 .createStmt = q->data,
12868 .dropStmt = delq->data));
12869
12870 /* Dump Domain Comments and Security Labels */
12871 if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
12872 dumpComment(fout, "DOMAIN", qtypname,
12873 tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12874 tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12875
12876 if (tyinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
12877 dumpSecLabel(fout, "DOMAIN", qtypname,
12878 tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12879 tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12880
12881 if (tyinfo->dobj.dump & DUMP_COMPONENT_ACL)
12882 dumpACL(fout, tyinfo->dobj.dumpId, InvalidDumpId, "TYPE",
12883 qtypname, NULL,
12884 tyinfo->dobj.namespace->dobj.name,
12885 NULL, tyinfo->rolname, &tyinfo->dacl);
12886
12887 /* Dump any per-constraint comments */
12888 for (i = 0; i < tyinfo->nDomChecks; i++)
12889 {
12890 ConstraintInfo *domcheck = &(tyinfo->domChecks[i]);
12892
12893 /* but only if the constraint itself was dumped here */
12894 if (domcheck->separate)
12895 continue;
12896
12898 appendPQExpBuffer(conprefix, "CONSTRAINT %s ON DOMAIN",
12899 fmtId(domcheck->dobj.name));
12900
12901 if (domcheck->dobj.dump & DUMP_COMPONENT_COMMENT)
12903 tyinfo->dobj.namespace->dobj.name,
12904 tyinfo->rolname,
12905 domcheck->dobj.catId, 0, tyinfo->dobj.dumpId);
12906
12908 }
12909
12910 /*
12911 * And a comment on the not-null constraint, if there's one -- but only if
12912 * the constraint itself was dumped here
12913 */
12914 if (tyinfo->notnull != NULL && !tyinfo->notnull->separate)
12915 {
12917
12918 appendPQExpBuffer(conprefix, "CONSTRAINT %s ON DOMAIN",
12919 fmtId(tyinfo->notnull->dobj.name));
12920
12921 if (tyinfo->notnull->dobj.dump & DUMP_COMPONENT_COMMENT)
12923 tyinfo->dobj.namespace->dobj.name,
12924 tyinfo->rolname,
12925 tyinfo->notnull->dobj.catId, 0, tyinfo->dobj.dumpId);
12927 }
12928
12931 destroyPQExpBuffer(query);
12932 free(qtypname);
12934}
12935
12936/*
12937 * dumpCompositeType
12938 * writes out to fout the queries to recreate a user-defined stand-alone
12939 * composite type
12940 */
/*
 * NOTE(review): this excerpt elides several statements (the function
 * signature, some PQExpBuffer declarations, and a few helper calls whose
 * argument lines remain below); confirm against the complete pg_dump.c
 * before modifying.
 */
12941static void
12943{
12944 DumpOptions *dopt = fout->dopt;
12946 PQExpBuffer dropped = createPQExpBuffer();
12949 PGresult *res;
12950 char *qtypname;
12951 char *qualtypname;
12952 int ntups;
12953 int i_attname;
12954 int i_atttypdefn;
12955 int i_attlen;
12956 int i_attalign;
12957 int i_attisdropped;
12958 int i_attcollation;
12959 int i;
12960 int actual_atts;
12961
/* Prepare the per-type detail query once per session, then EXECUTE it below. */
12963 {
12964 /*
12965 * Set up query for type-specific details.
12966 *
12967 * Since we only want to dump COLLATE clauses for attributes whose
12968 * collation is different from their type's default, we use a CASE
12969 * here to suppress uninteresting attcollations cheaply. atttypid
12970 * will be 0 for dropped columns; collation does not matter for those.
12971 */
12973 "PREPARE dumpCompositeType(pg_catalog.oid) AS\n"
12974 "SELECT a.attname, a.attnum, "
12975 "pg_catalog.format_type(a.atttypid, a.atttypmod) AS atttypdefn, "
12976 "a.attlen, a.attalign, a.attisdropped, "
12977 "CASE WHEN a.attcollation <> at.typcollation "
12978 "THEN a.attcollation ELSE 0 END AS attcollation "
12979 "FROM pg_catalog.pg_type ct "
12980 "JOIN pg_catalog.pg_attribute a ON a.attrelid = ct.typrelid "
12981 "LEFT JOIN pg_catalog.pg_type at ON at.oid = a.atttypid "
12982 "WHERE ct.oid = $1 "
12983 "ORDER BY a.attnum");
12984
12985 ExecuteSqlStatement(fout, query->data);
12986
12988 }
12989
12990 printfPQExpBuffer(query,
12991 "EXECUTE dumpCompositeType('%u')",
12992 tyinfo->dobj.catId.oid);
12993
12994 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
12995
12996 ntups = PQntuples(res);
12997
12998 i_attname = PQfnumber(res, "attname");
12999 i_atttypdefn = PQfnumber(res, "atttypdefn");
13000 i_attlen = PQfnumber(res, "attlen");
13001 i_attalign = PQfnumber(res, "attalign");
13002 i_attisdropped = PQfnumber(res, "attisdropped");
13003 i_attcollation = PQfnumber(res, "attcollation");
13004
/*
 * NOTE(review): presumably the OID-preservation helper for binary upgrade
 * (its call line is elided; only its trailing arguments are visible) --
 * confirm against upstream.
 */
13005 if (dopt->binary_upgrade)
13006 {
13008 tyinfo->dobj.catId.oid,
13009 false, false);
13011 }
13012
13013 qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
13015
13016 appendPQExpBuffer(q, "CREATE TYPE %s AS (",
13017 qualtypname);
13018
/* Build the parenthesized attribute list of the CREATE TYPE command. */
13019 actual_atts = 0;
13020 for (i = 0; i < ntups; i++)
13021 {
13022 char *attname;
13023 char *atttypdefn;
13024 char *attlen;
13025 char *attalign;
13026 bool attisdropped;
13027 Oid attcollation;
13028
13029 attname = PQgetvalue(res, i, i_attname);
13031 attlen = PQgetvalue(res, i, i_attlen);
13033 attisdropped = (PQgetvalue(res, i, i_attisdropped)[0] == 't');
13034 attcollation = atooid(PQgetvalue(res, i, i_attcollation));
13035
/* Dropped columns are emitted only in binary-upgrade mode (see else branch). */
13036 if (attisdropped && !dopt->binary_upgrade)
13037 continue;
13038
13039 /* Format properly if not first attr */
13040 if (actual_atts++ > 0)
13041 appendPQExpBufferChar(q, ',');
13042 appendPQExpBufferStr(q, "\n\t");
13043
13044 if (!attisdropped)
13045 {
13047
13048 /* Add collation if not default for the column type */
13049 if (OidIsValid(attcollation))
13050 {
13051 CollInfo *coll;
13052
13053 coll = findCollationByOid(attcollation);
13054 if (coll)
13055 appendPQExpBuffer(q, " COLLATE %s",
13057 }
13058 }
13059 else
13060 {
13061 /*
13062 * This is a dropped attribute and we're in binary_upgrade mode.
13063 * Insert a placeholder for it in the CREATE TYPE command, and set
13064 * length and alignment with direct UPDATE to the catalogs
13065 * afterwards. See similar code in dumpTableSchema().
13066 */
13067 appendPQExpBuffer(q, "%s INTEGER /* dummy */", fmtId(attname));
13068
13069 /* stash separately for insertion after the CREATE TYPE */
13070 appendPQExpBufferStr(dropped,
13071 "\n-- For binary upgrade, recreate dropped column.\n");
13072 appendPQExpBuffer(dropped, "UPDATE pg_catalog.pg_attribute\n"
13073 "SET attlen = %s, "
13074 "attalign = '%s', attbyval = false\n"
13075 "WHERE attname = ", attlen, attalign);
13077 appendPQExpBufferStr(dropped, "\n AND attrelid = ");
13079 appendPQExpBufferStr(dropped, "::pg_catalog.regclass;\n");
13080
13081 appendPQExpBuffer(dropped, "ALTER TYPE %s ",
13082 qualtypname);
13083 appendPQExpBuffer(dropped, "DROP ATTRIBUTE %s;\n",
13084 fmtId(attname));
13085 }
13086 }
13087 appendPQExpBufferStr(q, "\n);\n");
/* Append the deferred binary-upgrade catalog fixups after the CREATE TYPE. */
13088 appendPQExpBufferStr(q, dropped->data);
13089
13090 appendPQExpBuffer(delq, "DROP TYPE %s;\n", qualtypname);
13091
13092 if (dopt->binary_upgrade)
13094 "TYPE", qtypname,
13095 tyinfo->dobj.namespace->dobj.name);
13096
13097 if (tyinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
13098 ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
13099 ARCHIVE_OPTS(.tag = tyinfo->dobj.name,
13100 .namespace = tyinfo->dobj.namespace->dobj.name,
13101 .owner = tyinfo->rolname,
13102 .description = "TYPE",
13103 .section = SECTION_PRE_DATA,
13104 .createStmt = q->data,
13105 .dropStmt = delq->data));
13106
13107
13108 /* Dump Type Comments and Security Labels */
13109 if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
13110 dumpComment(fout, "TYPE", qtypname,
13111 tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
13112 tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
13113
13114 if (tyinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
13115 dumpSecLabel(fout, "TYPE", qtypname,
13116 tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
13117 tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
13118
13119 if (tyinfo->dobj.dump & DUMP_COMPONENT_ACL)
13120 dumpACL(fout, tyinfo->dobj.dumpId, InvalidDumpId, "TYPE",
13121 qtypname, NULL,
13122 tyinfo->dobj.namespace->dobj.name,
13123 NULL, tyinfo->rolname, &tyinfo->dacl);
13124
13125 /* Dump any per-column comments */
13126 if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
13128
13129 PQclear(res);
13131 destroyPQExpBuffer(dropped);
13133 destroyPQExpBuffer(query);
13134 free(qtypname);
13136}
13137
13138/*
13139 * dumpCompositeTypeColComments
13140 * writes out to fout the queries to recreate comments on the columns of
13141 * a user-defined stand-alone composite type.
13142 *
13143 * The caller has already made a query to collect the names and attnums
13144 * of the type's columns, so we just pass that result into here rather
13145 * than reading them again.
13146 */
/*
 * NOTE(review): the signature line and the comment-lookup call are elided
 * in this excerpt (only the trailing "&comments" argument is visible);
 * confirm against the complete pg_dump.c before modifying.
 */
13147static void
13149 PGresult *res)
13150{
13152 int ncomments;
13153 PQExpBuffer query;
13154 PQExpBuffer target;
13155 int i;
13156 int ntups;
13157 int i_attname;
13158 int i_attnum;
13159 int i_attisdropped;
13160
13161 /* do nothing, if --no-comments is supplied */
13162 if (fout->dopt->no_comments)
13163 return;
13164
13165 /* Search for comments associated with type's pg_class OID */
13167 &comments);
13168
13169 /* If no comments exist, we're done */
13170 if (ncomments <= 0)
13171 return;
13172
13173 /* Build COMMENT ON statements */
13174 query = createPQExpBuffer();
13175 target = createPQExpBuffer();
13176
13177 ntups = PQntuples(res);
13178 i_attnum = PQfnumber(res, "attnum");
13179 i_attname = PQfnumber(res, "attname");
13180 i_attisdropped = PQfnumber(res, "attisdropped");
/*
 * Walk the comment items; each item's objsubid identifies a column number,
 * which we match against the attnum column of the caller-provided result,
 * skipping dropped columns.
 */
13181 while (ncomments > 0)
13182 {
13183 const char *attname;
13184
13185 attname = NULL;
13186 for (i = 0; i < ntups; i++)
13187 {
13188 if (atoi(PQgetvalue(res, i, i_attnum)) == comments->objsubid &&
13189 PQgetvalue(res, i, i_attisdropped)[0] != 't')
13190 {
13191 attname = PQgetvalue(res, i, i_attname);
13192 break;
13193 }
13194 }
13195 if (attname) /* just in case we don't find it */
13196 {
13197 const char *descr = comments->descr;
13198
13199 resetPQExpBuffer(target);
13200 appendPQExpBuffer(target, "COLUMN %s.",
13201 fmtId(tyinfo->dobj.name));
13203
13204 resetPQExpBuffer(query);
13205 appendPQExpBuffer(query, "COMMENT ON COLUMN %s.",
13207 appendPQExpBuffer(query, "%s IS ", fmtId(attname));
13208 appendStringLiteralAH(query, descr, fout);
13209 appendPQExpBufferStr(query, ";\n");
13210
/* Each comment becomes its own TOC entry, dependent on the type's dump ID. */
13212 ARCHIVE_OPTS(.tag = target->data,
13213 .namespace = tyinfo->dobj.namespace->dobj.name,
13214 .owner = tyinfo->rolname,
13215 .description = "COMMENT",
13216 .section = SECTION_NONE,
13217 .createStmt = query->data,
13218 .deps = &(tyinfo->dobj.dumpId),
13219 .nDeps = 1));
13220 }
13221
13222 comments++;
13223 ncomments--;
13224 }
13225
13226 destroyPQExpBuffer(query);
13227 destroyPQExpBuffer(target);
13228}
13229
13230/*
13231 * dumpShellType
13232 * writes out to fout the queries to create a shell type
13233 *
13234 * We dump a shell definition in advance of the I/O functions for the type.
13235 */
/*
 * NOTE(review): the signature line and a binary-upgrade helper call are
 * elided in this excerpt; confirm against the complete pg_dump.c.
 */
13236static void
13238{
13239 DumpOptions *dopt = fout->dopt;
13240 PQExpBuffer q;
13241
13242 /* Do nothing if not dumping schema */
13243 if (!dopt->dumpSchema)
13244 return;
13245
13246 q = createPQExpBuffer();
13247
13248 /*
13249 * Note the lack of a DROP command for the shell type; any required DROP
13250 * is driven off the base type entry, instead. This interacts with
13251 * _printTocEntry()'s use of the presence of a DROP command to decide
13252 * whether an entry needs an ALTER OWNER command. We don't want to alter
13253 * the shell type's owner immediately on creation; that should happen only
13254 * after it's filled in, otherwise the backend complains.
13255 */
13256
13257 if (dopt->binary_upgrade)
13259 stinfo->baseType->dobj.catId.oid,
13260 false, false);
13261
/* "CREATE TYPE name;" with no AS clause creates the shell (placeholder) type. */
13262 appendPQExpBuffer(q, "CREATE TYPE %s;\n",
13264
13265 if (stinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
13266 ArchiveEntry(fout, stinfo->dobj.catId, stinfo->dobj.dumpId,
13267 ARCHIVE_OPTS(.tag = stinfo->dobj.name,
13268 .namespace = stinfo->dobj.namespace->dobj.name,
13269 .owner = stinfo->baseType->rolname,
13270 .description = "SHELL TYPE",
13271 .section = SECTION_PRE_DATA,
13272 .createStmt = q->data));
13273
13275}
13276
13277/*
13278 * dumpProcLang
13279 * writes out to fout the queries to recreate a user-defined
13280 * procedural language
13281 */
/*
 * NOTE(review): the signature line, several FuncInfo/PQExpBuffer
 * declarations, and some append-call lines are elided in this excerpt;
 * confirm against the complete pg_dump.c before modifying.
 */
13282static void
13284{
13285 DumpOptions *dopt = fout->dopt;
13288 bool useParams;
13289 char *qlanname;
13293
13294 /* Do nothing if not dumping schema */
13295 if (!dopt->dumpSchema)
13296 return;
13297
13298 /*
13299 * Try to find the support function(s). It is not an error if we don't
13300 * find them --- if the functions are in the pg_catalog schema, as is
13301 * standard in 8.1 and up, then we won't have loaded them. (In this case
13302 * we will emit a parameterless CREATE LANGUAGE command, which will
13303 * require PL template knowledge in the backend to reload.)
13304 */
13305
13306 funcInfo = findFuncByOid(plang->lanplcallfoid);
13307 if (funcInfo != NULL && !funcInfo->dobj.dump)
13308 funcInfo = NULL; /* treat not-dumped same as not-found */
13309
13310 if (OidIsValid(plang->laninline))
13311 {
13312 inlineInfo = findFuncByOid(plang->laninline);
13313 if (inlineInfo != NULL && !inlineInfo->dobj.dump)
13314 inlineInfo = NULL;
13315 }
13316
13317 if (OidIsValid(plang->lanvalidator))
13318 {
13319 validatorInfo = findFuncByOid(plang->lanvalidator);
13320 if (validatorInfo != NULL && !validatorInfo->dobj.dump)
13322 }
13323
13324 /*
13325 * If the functions are dumpable then emit a complete CREATE LANGUAGE with
13326 * parameters. Otherwise, we'll write a parameterless command, which will
13327 * be interpreted as CREATE EXTENSION.
13328 */
13329 useParams = (funcInfo != NULL &&
13330 (inlineInfo != NULL || !OidIsValid(plang->laninline)) &&
13331 (validatorInfo != NULL || !OidIsValid(plang->lanvalidator)));
13332
13335
13336 qlanname = pg_strdup(fmtId(plang->dobj.name));
13337
13338 appendPQExpBuffer(delqry, "DROP PROCEDURAL LANGUAGE %s;\n",
13339 qlanname);
13340
13341 if (useParams)
13342 {
13343 appendPQExpBuffer(defqry, "CREATE %sPROCEDURAL LANGUAGE %s",
13344 plang->lanpltrusted ? "TRUSTED " : "",
13345 qlanname);
/*
 * HANDLER / INLINE / VALIDATOR clauses follow; the function-signature
 * arguments on these append calls are elided in this view.
 */
13346 appendPQExpBuffer(defqry, " HANDLER %s",
13348 if (OidIsValid(plang->laninline))
13349 appendPQExpBuffer(defqry, " INLINE %s",
13351 if (OidIsValid(plang->lanvalidator))
13352 appendPQExpBuffer(defqry, " VALIDATOR %s",
13354 }
13355 else
13356 {
13357 /*
13358 * If not dumping parameters, then use CREATE OR REPLACE so that the
13359 * command will not fail if the language is preinstalled in the target
13360 * database.
13361 *
13362 * Modern servers will interpret this as CREATE EXTENSION IF NOT
13363 * EXISTS; perhaps we should emit that instead? But it might just add
13364 * confusion.
13365 */
13366 appendPQExpBuffer(defqry, "CREATE OR REPLACE PROCEDURAL LANGUAGE %s",
13367 qlanname);
13368 }
13370
13371 if (dopt->binary_upgrade)
13373 "LANGUAGE", qlanname, NULL);
13374
13375 if (plang->dobj.dump & DUMP_COMPONENT_DEFINITION)
13376 ArchiveEntry(fout, plang->dobj.catId, plang->dobj.dumpId,
13377 ARCHIVE_OPTS(.tag = plang->dobj.name,
13378 .owner = plang->lanowner,
13379 .description = "PROCEDURAL LANGUAGE",
13380 .section = SECTION_PRE_DATA,
13381 .createStmt = defqry->data,
13382 .dropStmt = delqry->data,
13383 ));
13384
13385 /* Dump Proc Lang Comments and Security Labels */
13386 if (plang->dobj.dump & DUMP_COMPONENT_COMMENT)
13387 dumpComment(fout, "LANGUAGE", qlanname,
13388 NULL, plang->lanowner,
13389 plang->dobj.catId, 0, plang->dobj.dumpId);
13390
13391 if (plang->dobj.dump & DUMP_COMPONENT_SECLABEL)
13392 dumpSecLabel(fout, "LANGUAGE", qlanname,
13393 NULL, plang->lanowner,
13394 plang->dobj.catId, 0, plang->dobj.dumpId);
13395
/* ACLs only matter for TRUSTED languages; untrusted ones have no grantable usage. */
13396 if (plang->lanpltrusted && plang->dobj.dump & DUMP_COMPONENT_ACL)
13397 dumpACL(fout, plang->dobj.dumpId, InvalidDumpId, "LANGUAGE",
13398 qlanname, NULL, NULL,
13399 NULL, plang->lanowner, &plang->dacl);
13400
13401 free(qlanname);
13402
13405}
13406
13407/*
13408 * format_function_arguments: generate function name and argument list
13409 *
13410 * This is used when we can rely on pg_get_function_arguments to format
13411 * the argument list. Note, however, that pg_get_function_arguments
13412 * does not special-case zero-argument aggregates.
13413 */
/*
 * NOTE(review): the PQExpBufferData declaration/initialization lines are
 * elided in this excerpt. The returned string is the buffer's data pointer;
 * presumably the caller must free it -- confirm against upstream.
 */
13414static char *
13415format_function_arguments(const FuncInfo *finfo, const char *funcargs, bool is_agg)
13416{
13418
/* Zero-argument aggregates are spelled "name(*)", not "name()". */
13421 if (is_agg && finfo->nargs == 0)
13422 appendPQExpBufferStr(&fn, "(*)");
13423 else
13424 appendPQExpBuffer(&fn, "(%s)", funcargs);
13425 return fn.data;
13426}
13427
13428/*
13429 * format_function_signature: generate function name and argument list
13430 *
13431 * Only a minimal list of input argument types is generated; this is
13432 * sufficient to reference the function, but not to define it.
13433 *
13434 * If honor_quotes is false then the function name is never quoted.
13435 * This is appropriate for use in TOC tags, but not in SQL commands.
13436 */
/*
 * NOTE(review): the signature line, buffer initialization, and the
 * per-argument type-name append call are elided in this excerpt; confirm
 * against the complete pg_dump.c.
 */
13437static char *
13439{
13441 int j;
13442
13444 if (honor_quotes)
13445 appendPQExpBuffer(&fn, "%s(", fmtId(finfo->dobj.name));
13446 else
13447 appendPQExpBuffer(&fn, "%s(", finfo->dobj.name);
/* Emit a comma-separated list of the input argument type names. */
13448 for (j = 0; j < finfo->nargs; j++)
13449 {
13450 if (j > 0)
13451 appendPQExpBufferStr(&fn, ", ");
13452
13455 zeroIsError));
13456 }
13458 return fn.data;
13459}
13460
13461
13462/*
13463 * dumpFunc:
13464 * dump out one function
13465 */
/*
 * NOTE(review): this excerpt elides many lines (the signature, several
 * PQExpBuffer declarations, parts of the AS-clause construction, and some
 * helper calls); confirm against the complete pg_dump.c before modifying.
 */
13466static void
13468{
13469 DumpOptions *dopt = fout->dopt;
13470 PQExpBuffer query;
13471 PQExpBuffer q;
13474 PGresult *res;
13475 char *funcsig; /* identity signature */
13476 char *funcfullsig = NULL; /* full signature */
13477 char *funcsig_tag;
13478 char *qual_funcsig;
13479 char *proretset;
13480 char *prosrc;
13481 char *probin;
13482 char *prosqlbody;
13483 char *funcargs;
13484 char *funciargs;
13485 char *funcresult;
13486 char *protrftypes;
13487 char *prokind;
13488 char *provolatile;
13489 char *proisstrict;
13490 char *prosecdef;
13491 char *proleakproof;
13492 char *proconfig;
13493 char *procost;
13494 char *prorows;
13495 char *prosupport;
13496 char *proparallel;
13497 char *lanname;
13498 char **configitems = NULL;
13499 int nconfigitems = 0;
13500 const char *keyword;
13501
13502 /* Do nothing if not dumping schema */
13503 if (!dopt->dumpSchema)
13504 return;
13505
13506 query = createPQExpBuffer();
13507 q = createPQExpBuffer();
13510
/*
 * Prepare the per-function detail query once per session, with
 * version-dependent column lists for older servers.
 */
13512 {
13513 /* Set up query for function-specific details */
13515 "PREPARE dumpFunc(pg_catalog.oid) AS\n");
13516
13518 "SELECT\n"
13519 "proretset,\n"
13520 "prosrc,\n"
13521 "probin,\n"
13522 "provolatile,\n"
13523 "proisstrict,\n"
13524 "prosecdef,\n"
13525 "lanname,\n"
13526 "proconfig,\n"
13527 "procost,\n"
13528 "prorows,\n"
13529 "pg_catalog.pg_get_function_arguments(p.oid) AS funcargs,\n"
13530 "pg_catalog.pg_get_function_identity_arguments(p.oid) AS funciargs,\n"
13531 "pg_catalog.pg_get_function_result(p.oid) AS funcresult,\n"
13532 "proleakproof,\n");
13533
13534 if (fout->remoteVersion >= 90500)
13536 "array_to_string(protrftypes, ' ') AS protrftypes,\n");
13537 else
13539 "NULL AS protrftypes,\n");
13540
13541 if (fout->remoteVersion >= 90600)
13543 "proparallel,\n");
13544 else
13546 "'u' AS proparallel,\n");
13547
13548 if (fout->remoteVersion >= 110000)
13550 "prokind,\n");
13551 else
13553 "CASE WHEN proiswindow THEN 'w' ELSE 'f' END AS prokind,\n");
13554
13555 if (fout->remoteVersion >= 120000)
13557 "prosupport,\n");
13558 else
13560 "'-' AS prosupport,\n");
13561
13562 if (fout->remoteVersion >= 140000)
13564 "pg_get_function_sqlbody(p.oid) AS prosqlbody\n");
13565 else
13567 "NULL AS prosqlbody\n");
13568
13570 "FROM pg_catalog.pg_proc p, pg_catalog.pg_language l\n"
13571 "WHERE p.oid = $1 "
13572 "AND l.oid = p.prolang");
13573
13574 ExecuteSqlStatement(fout, query->data);
13575
13577 }
13578
13579 printfPQExpBuffer(query,
13580 "EXECUTE dumpFunc('%u')",
13581 finfo->dobj.catId.oid);
13582
13583 res = ExecuteSqlQueryForSingleRow(fout, query->data);
13584
/* A non-null prosqlbody (SQL-standard body) supersedes prosrc/probin. */
13585 proretset = PQgetvalue(res, 0, PQfnumber(res, "proretset"));
13586 if (PQgetisnull(res, 0, PQfnumber(res, "prosqlbody")))
13587 {
13588 prosrc = PQgetvalue(res, 0, PQfnumber(res, "prosrc"));
13589 probin = PQgetvalue(res, 0, PQfnumber(res, "probin"));
13590 prosqlbody = NULL;
13591 }
13592 else
13593 {
13594 prosrc = NULL;
13595 probin = NULL;
13596 prosqlbody = PQgetvalue(res, 0, PQfnumber(res, "prosqlbody"));
13597 }
13598 funcargs = PQgetvalue(res, 0, PQfnumber(res, "funcargs"));
13599 funciargs = PQgetvalue(res, 0, PQfnumber(res, "funciargs"));
13600 funcresult = PQgetvalue(res, 0, PQfnumber(res, "funcresult"));
13601 protrftypes = PQgetvalue(res, 0, PQfnumber(res, "protrftypes"));
13602 prokind = PQgetvalue(res, 0, PQfnumber(res, "prokind"));
13603 provolatile = PQgetvalue(res, 0, PQfnumber(res, "provolatile"));
13604 proisstrict = PQgetvalue(res, 0, PQfnumber(res, "proisstrict"));
13605 prosecdef = PQgetvalue(res, 0, PQfnumber(res, "prosecdef"));
13606 proleakproof = PQgetvalue(res, 0, PQfnumber(res, "proleakproof"));
13607 proconfig = PQgetvalue(res, 0, PQfnumber(res, "proconfig"));
13608 procost = PQgetvalue(res, 0, PQfnumber(res, "procost"));
13609 prorows = PQgetvalue(res, 0, PQfnumber(res, "prorows"));
13610 prosupport = PQgetvalue(res, 0, PQfnumber(res, "prosupport"));
13611 proparallel = PQgetvalue(res, 0, PQfnumber(res, "proparallel"));
13612 lanname = PQgetvalue(res, 0, PQfnumber(res, "lanname"));
13613
13614 /*
13615 * See backend/commands/functioncmds.c for details of how the 'AS' clause
13616 * is used.
13617 */
13618 if (prosqlbody)
13619 {
13621 }
13622 else if (probin[0] != '\0')
13623 {
13626 if (prosrc[0] != '\0')
13627 {
13629
13630 /*
13631 * where we have bin, use dollar quoting if allowed and src
13632 * contains quote or backslash; else use regular quoting.
13633 */
13634 if (dopt->disable_dollar_quoting ||
13635 (strchr(prosrc, '\'') == NULL && strchr(prosrc, '\\') == NULL))
13637 else
13639 }
13640 }
13641 else
13642 {
13644 /* with no bin, dollar quote src unconditionally if allowed */
13645 if (dopt->disable_dollar_quoting)
13647 else
13649 }
13650
/* Split the proconfig array into individual "name=value" SET items. */
13651 if (*proconfig)
13652 {
13654 pg_fatal("could not parse %s array", "proconfig");
13655 }
13656 else
13657 {
13658 configitems = NULL;
13659 nconfigitems = 0;
13660 }
13661
13664
13666
13667 qual_funcsig = psprintf("%s.%s",
13668 fmtId(finfo->dobj.namespace->dobj.name),
13669 funcsig);
13670
13671 if (prokind[0] == PROKIND_PROCEDURE)
13672 keyword = "PROCEDURE";
13673 else
13674 keyword = "FUNCTION"; /* works for window functions too */
13675
13676 appendPQExpBuffer(delqry, "DROP %s %s;\n",
13677 keyword, qual_funcsig);
13678
13679 appendPQExpBuffer(q, "CREATE %s %s.%s",
13680 keyword,
13681 fmtId(finfo->dobj.namespace->dobj.name),
13683 funcsig);
13684
13685 if (prokind[0] == PROKIND_PROCEDURE)
13686 /* no result type to output */ ;
13687 else if (funcresult)
13688 appendPQExpBuffer(q, " RETURNS %s", funcresult);
13689 else
13690 appendPQExpBuffer(q, " RETURNS %s%s",
13691 (proretset[0] == 't') ? "SETOF " : "",
13693 zeroIsError));
13694
13695 appendPQExpBuffer(q, "\n LANGUAGE %s", fmtId(lanname));
13696
/* protrftypes is a space-separated list of type OIDs for TRANSFORM FOR TYPE. */
13697 if (*protrftypes)
13698 {
13700 int i;
13701
13702 appendPQExpBufferStr(q, " TRANSFORM ");
13704 for (i = 0; typeids[i]; i++)
13705 {
13706 if (i != 0)
13707 appendPQExpBufferStr(q, ", ");
13708 appendPQExpBuffer(q, "FOR TYPE %s",
13710 }
13711
13712 free(typeids);
13713 }
13714
13715 if (prokind[0] == PROKIND_WINDOW)
13716 appendPQExpBufferStr(q, " WINDOW");
13717
13719 {
13721 appendPQExpBufferStr(q, " IMMUTABLE");
13722 else if (provolatile[0] == PROVOLATILE_STABLE)
13723 appendPQExpBufferStr(q, " STABLE");
13724 else if (provolatile[0] != PROVOLATILE_VOLATILE)
13725 pg_fatal("unrecognized provolatile value for function \"%s\"",
13726 finfo->dobj.name);
13727 }
13728
13729 if (proisstrict[0] == 't')
13730 appendPQExpBufferStr(q, " STRICT");
13731
13732 if (prosecdef[0] == 't')
13733 appendPQExpBufferStr(q, " SECURITY DEFINER");
13734
13735 if (proleakproof[0] == 't')
13736 appendPQExpBufferStr(q, " LEAKPROOF");
13737
13738 /*
13739 * COST and ROWS are emitted only if present and not default, so as not to
13740 * break backwards-compatibility of the dump without need. Keep this code
13741 * in sync with the defaults in functioncmds.c.
13742 */
13743 if (strcmp(procost, "0") != 0)
13744 {
13745 if (strcmp(lanname, "internal") == 0 || strcmp(lanname, "c") == 0)
13746 {
13747 /* default cost is 1 */
13748 if (strcmp(procost, "1") != 0)
13749 appendPQExpBuffer(q, " COST %s", procost);
13750 }
13751 else
13752 {
13753 /* default cost is 100 */
13754 if (strcmp(procost, "100") != 0)
13755 appendPQExpBuffer(q, " COST %s", procost);
13756 }
13757 }
13758 if (proretset[0] == 't' &&
13759 strcmp(prorows, "0") != 0 && strcmp(prorows, "1000") != 0)
13760 appendPQExpBuffer(q, " ROWS %s", prorows);
13761
13762 if (strcmp(prosupport, "-") != 0)
13763 {
13764 /* We rely on regprocout to provide quoting and qualification */
13765 appendPQExpBuffer(q, " SUPPORT %s", prosupport);
13766 }
13767
13769 {
13770 if (proparallel[0] == PROPARALLEL_SAFE)
13771 appendPQExpBufferStr(q, " PARALLEL SAFE");
13772 else if (proparallel[0] == PROPARALLEL_RESTRICTED)
13773 appendPQExpBufferStr(q, " PARALLEL RESTRICTED");
13774 else if (proparallel[0] != PROPARALLEL_UNSAFE)
13775 pg_fatal("unrecognized proparallel value for function \"%s\"",
13776 finfo->dobj.name);
13777 }
13778
13779 for (int i = 0; i < nconfigitems; i++)
13780 {
13781 /* we feel free to scribble on configitems[] here */
13782 char *configitem = configitems[i];
13783 char *pos;
13784
13785 pos = strchr(configitem, '=');
13786 if (pos == NULL)
13787 continue;
13788 *pos++ = '\0';
13789 appendPQExpBuffer(q, "\n SET %s TO ", fmtId(configitem));
13790
13791 /*
13792 * Variables that are marked GUC_LIST_QUOTE were already fully quoted
13793 * by flatten_set_variable_args() before they were put into the
13794 * proconfig array. However, because the quoting rules used there
13795 * aren't exactly like SQL's, we have to break the list value apart
13796 * and then quote the elements as string literals. (The elements may
13797 * be double-quoted as-is, but we can't just feed them to the SQL
13798 * parser; it would do the wrong thing with elements that are
13799 * zero-length or longer than NAMEDATALEN.) Also, we need a special
13800 * case for empty lists.
13801 *
13802 * Variables that are not so marked should just be emitted as simple
13803 * string literals. If the variable is not known to
13804 * variable_is_guc_list_quote(), we'll do that; this makes it unsafe
13805 * to use GUC_LIST_QUOTE for extension variables.
13806 */
13808 {
13809 char **namelist;
13810 char **nameptr;
13811
13812 /* Parse string into list of identifiers */
13813 /* this shouldn't fail really */
13814 if (SplitGUCList(pos, ',', &namelist))
13815 {
13816 /* Special case: represent an empty list as NULL */
13817 if (*namelist == NULL)
13818 appendPQExpBufferStr(q, "NULL");
13819 for (nameptr = namelist; *nameptr; nameptr++)
13820 {
13821 if (nameptr != namelist)
13822 appendPQExpBufferStr(q, ", ");
13824 }
13825 }
13827 }
13828 else
13829 appendStringLiteralAH(q, pos, fout);
13830 }
13831
13832 appendPQExpBuffer(q, "\n %s;\n", asPart->data);
13833
13835 "pg_catalog.pg_proc", keyword,
13836 qual_funcsig);
13837
13838 if (dopt->binary_upgrade)
13840 keyword, funcsig,
13841 finfo->dobj.namespace->dobj.name);
13842
13843 if (finfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
13844 ArchiveEntry(fout, finfo->dobj.catId, finfo->dobj.dumpId,
13846 .namespace = finfo->dobj.namespace->dobj.name,
13847 .owner = finfo->rolname,
13848 .description = keyword,
13849 .section = finfo->postponed_def ?
13851 .createStmt = q->data,
13852 .dropStmt = delqry->data));
13853
13854 /* Dump Function Comments and Security Labels */
13855 if (finfo->dobj.dump & DUMP_COMPONENT_COMMENT)
13856 dumpComment(fout, keyword, funcsig,
13857 finfo->dobj.namespace->dobj.name, finfo->rolname,
13858 finfo->dobj.catId, 0, finfo->dobj.dumpId);
13859
13860 if (finfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
13861 dumpSecLabel(fout, keyword, funcsig,
13862 finfo->dobj.namespace->dobj.name, finfo->rolname,
13863 finfo->dobj.catId, 0, finfo->dobj.dumpId);
13864
13865 if (finfo->dobj.dump & DUMP_COMPONENT_ACL)
13866 dumpACL(fout, finfo->dobj.dumpId, InvalidDumpId, keyword,
13867 funcsig, NULL,
13868 finfo->dobj.namespace->dobj.name,
13869 NULL, finfo->rolname, &finfo->dacl);
13870
13871 PQclear(res);
13872
13873 destroyPQExpBuffer(query);
13877 free(funcsig);
13882}
13883
13884
13885/*
13886 * Dump a user-defined cast
13887 */
/*
 * NOTE(review): the signature line, several buffer/FuncInfo declarations,
 * the source/target type-name lookups, and the case labels of the
 * castmethod switch are elided in this excerpt; confirm against the
 * complete pg_dump.c before modifying.
 */
13888static void
13890{
13891 DumpOptions *dopt = fout->dopt;
13897 const char *sourceType;
13898 const char *targetType;
13899
13900 /* Do nothing if not dumping schema */
13901 if (!dopt->dumpSchema)
13902 return;
13903
13904 /* Cannot dump if we don't have the cast function's info */
13905 if (OidIsValid(cast->castfunc))
13906 {
13907 funcInfo = findFuncByOid(cast->castfunc);
13908 if (funcInfo == NULL)
13909 pg_fatal("could not find function definition for function with OID %u",
13910 cast->castfunc);
13911 }
13912
13917
13920 appendPQExpBuffer(delqry, "DROP CAST (%s AS %s);\n",
13922
13923 appendPQExpBuffer(defqry, "CREATE CAST (%s AS %s) ",
13925
/* Emit the conversion clause matching the pg_cast.castmethod value. */
13926 switch (cast->castmethod)
13927 {
13929 appendPQExpBufferStr(defqry, "WITHOUT FUNCTION");
13930 break;
13932 appendPQExpBufferStr(defqry, "WITH INOUT");
13933 break;
13935 if (funcInfo)
13936 {
13938
13939 /*
13940 * Always qualify the function name (format_function_signature
13941 * won't qualify it).
13942 */
13943 appendPQExpBuffer(defqry, "WITH FUNCTION %s.%s",
13944 fmtId(funcInfo->dobj.namespace->dobj.name), fsig);
13945 free(fsig);
13946 }
13947 else
13948 pg_log_warning("bogus value in pg_cast.castfunc or pg_cast.castmethod field");
13949 break;
13950 default:
13951 pg_log_warning("bogus value in pg_cast.castmethod field");
13952 }
13953
/* castcontext: 'a' = assignment, 'i' = implicit; anything else is explicit-only. */
13954 if (cast->castcontext == 'a')
13955 appendPQExpBufferStr(defqry, " AS ASSIGNMENT");
13956 else if (cast->castcontext == 'i')
13957 appendPQExpBufferStr(defqry, " AS IMPLICIT");
13959
13960 appendPQExpBuffer(labelq, "CAST (%s AS %s)",
13962
13963 appendPQExpBuffer(castargs, "(%s AS %s)",
13965
13966 if (dopt->binary_upgrade)
13968 "CAST", castargs->data, NULL);
13969
13970 if (cast->dobj.dump & DUMP_COMPONENT_DEFINITION)
13971 ArchiveEntry(fout, cast->dobj.catId, cast->dobj.dumpId,
13972 ARCHIVE_OPTS(.tag = labelq->data,
13973 .description = "CAST",
13974 .section = SECTION_PRE_DATA,
13975 .createStmt = defqry->data,
13976 .dropStmt = delqry->data));
13977
13978 /* Dump Cast Comments */
13979 if (cast->dobj.dump & DUMP_COMPONENT_COMMENT)
13980 dumpComment(fout, "CAST", castargs->data,
13981 NULL, "",
13982 cast->dobj.catId, 0, cast->dobj.dumpId);
13983
13988}
13989
13990/*
13991 * Dump a transform
13992 */
/*
 * NOTE(review): the signature line, buffer/FuncInfo declarations, the
 * transform-type lookup, and the buffer cleanup lines are elided in this
 * excerpt; confirm against the complete pg_dump.c before modifying.
 */
13993static void
13995{
13996 DumpOptions *dopt = fout->dopt;
14003 char *lanname;
14004 const char *transformType;
14005
14006 /* Do nothing if not dumping schema */
14007 if (!dopt->dumpSchema)
14008 return;
14009
14010 /* Cannot dump if we don't have the transform functions' info */
14011 if (OidIsValid(transform->trffromsql))
14012 {
14014 if (fromsqlFuncInfo == NULL)
14015 pg_fatal("could not find function definition for function with OID %u",
14016 transform->trffromsql);
14017 }
14018 if (OidIsValid(transform->trftosql))
14019 {
14020 tosqlFuncInfo = findFuncByOid(transform->trftosql);
14021 if (tosqlFuncInfo == NULL)
14022 pg_fatal("could not find function definition for function with OID %u",
14023 transform->trftosql);
14024 }
14025
14030
14031 lanname = get_language_name(fout, transform->trflang);
14033
14034 appendPQExpBuffer(delqry, "DROP TRANSFORM FOR %s LANGUAGE %s;\n",
14036
14037 appendPQExpBuffer(defqry, "CREATE TRANSFORM FOR %s LANGUAGE %s (",
14039
/* A valid transform has at least one of FROM SQL / TO SQL functions. */
14040 if (!transform->trffromsql && !transform->trftosql)
14041 pg_log_warning("bogus transform definition, at least one of trffromsql and trftosql should be nonzero")
14042
14043 if (transform->trffromsql)
14044 {
14045 if (fromsqlFuncInfo)
14046 {
14048
14049 /*
14050 * Always qualify the function name (format_function_signature
14051 * won't qualify it).
14052 */
14053 appendPQExpBuffer(defqry, "FROM SQL WITH FUNCTION %s.%s",
14054 fmtId(fromsqlFuncInfo->dobj.namespace->dobj.name), fsig);
14055 free(fsig);
14056 }
14057 else
14058 pg_log_warning("bogus value in pg_transform.trffromsql field");
14059 }
14060
14061 if (transform->trftosql)
14062 {
14063 if (transform->trffromsql)
14065
14066 if (tosqlFuncInfo)
14067 {
14069
14070 /*
14071 * Always qualify the function name (format_function_signature
14072 * won't qualify it).
14073 */
14074 appendPQExpBuffer(defqry, "TO SQL WITH FUNCTION %s.%s",
14075 fmtId(tosqlFuncInfo->dobj.namespace->dobj.name), fsig);
14076 free(fsig);
14077 }
14078 else
14079 pg_log_warning("bogus value in pg_transform.trftosql field");
14080 }
14081
14083
14084 appendPQExpBuffer(labelq, "TRANSFORM FOR %s LANGUAGE %s",
14086
14087 appendPQExpBuffer(transformargs, "FOR %s LANGUAGE %s",
14089
14090 if (dopt->binary_upgrade)
14092 "TRANSFORM", transformargs->data, NULL);
14093
14094 if (transform->dobj.dump & DUMP_COMPONENT_DEFINITION)
14095 ArchiveEntry(fout, transform->dobj.catId, transform->dobj.dumpId,
14096 ARCHIVE_OPTS(.tag = labelq->data,
14097 .description = "TRANSFORM",
14098 .section = SECTION_PRE_DATA,
14099 .createStmt = defqry->data,
14100 .dropStmt = delqry->data,
14101 .deps = transform->dobj.dependencies,
14102 .nDeps = transform->dobj.nDeps));
14103
14104 /* Dump Transform Comments */
14105 if (transform->dobj.dump & DUMP_COMPONENT_COMMENT)
14106 dumpComment(fout, "TRANSFORM", transformargs->data,
14107 NULL, "",
14108 transform->dobj.catId, 0, transform->dobj.dumpId);
14109
14110 free(lanname);
14115}
14116
14117
14118/*
14119 * dumpOpr
14120 * write out a single operator definition
14121 */
/*
 * NOTE(review): this listing lost lines during extraction (the embedded
 * doxygen numbers jump, e.g. 14127 -> 14130, 14172 -> 14173); the elided
 * lines include the function declarator, the oprid/delq buffer handling,
 * and several convertRegProcReference()/getFormattedOperatorName() calls.
 * Tokens below are preserved exactly as found — reconcile against the
 * canonical pg_dump.c before building.
 */
14122static void
14124{
14125 DumpOptions *dopt = fout->dopt;
14126 PQExpBuffer query;
14127 PQExpBuffer q;
14130 PQExpBuffer details;
14131 PGresult *res;
14132 int i_oprkind;
14133 int i_oprcode;
14134 int i_oprleft;
14135 int i_oprright;
14136 int i_oprcom;
14137 int i_oprnegate;
14138 int i_oprrest;
14139 int i_oprjoin;
14140 int i_oprcanmerge;
14141 int i_oprcanhash;
14142 char *oprkind;
14143 char *oprcode;
14144 char *oprleft;
14145 char *oprright;
14146 char *oprcom;
14147 char *oprnegate;
14148 char *oprrest;
14149 char *oprjoin;
14150 char *oprcanmerge;
14151 char *oprcanhash;
14152 char *oprregproc;
14153 char *oprref;
14154
14155 /* Do nothing if not dumping schema */
14156 if (!dopt->dumpSchema)
14157 return;
14158
14159 /*
14160 * some operators are invalid because they were the result of user
14161 * defining operators before commutators exist
14162 */
14163 if (!OidIsValid(oprinfo->oprcode))
14164 return;
14165
14166 query = createPQExpBuffer();
14167 q = createPQExpBuffer();
14170 details = createPQExpBuffer();
14171
/* Prepare the per-operator detail query once per session, then EXECUTE it below. */
14173 {
14174 /* Set up query for operator-specific details */
14176 "PREPARE dumpOpr(pg_catalog.oid) AS\n"
14177 "SELECT oprkind, "
14178 "oprcode::pg_catalog.regprocedure, "
14179 "oprleft::pg_catalog.regtype, "
14180 "oprright::pg_catalog.regtype, "
14181 "oprcom, "
14182 "oprnegate, "
14183 "oprrest::pg_catalog.regprocedure, "
14184 "oprjoin::pg_catalog.regprocedure, "
14185 "oprcanmerge, oprcanhash "
14186 "FROM pg_catalog.pg_operator "
14187 "WHERE oid = $1");
14188
14189 ExecuteSqlStatement(fout, query->data);
14190
14192 }
14193
14194 printfPQExpBuffer(query,
14195 "EXECUTE dumpOpr('%u')",
14196 oprinfo->dobj.catId.oid);
14197
14198 res = ExecuteSqlQueryForSingleRow(fout, query->data);
14199
14200 i_oprkind = PQfnumber(res, "oprkind");
14201 i_oprcode = PQfnumber(res, "oprcode");
14202 i_oprleft = PQfnumber(res, "oprleft");
14203 i_oprright = PQfnumber(res, "oprright");
14204 i_oprcom = PQfnumber(res, "oprcom");
14205 i_oprnegate = PQfnumber(res, "oprnegate");
14206 i_oprrest = PQfnumber(res, "oprrest");
14207 i_oprjoin = PQfnumber(res, "oprjoin");
14208 i_oprcanmerge = PQfnumber(res, "oprcanmerge");
14209 i_oprcanhash = PQfnumber(res, "oprcanhash");
14210
14211 oprkind = PQgetvalue(res, 0, i_oprkind);
14212 oprcode = PQgetvalue(res, 0, i_oprcode);
14213 oprleft = PQgetvalue(res, 0, i_oprleft);
14214 oprright = PQgetvalue(res, 0, i_oprright);
14215 oprcom = PQgetvalue(res, 0, i_oprcom);
14216 oprnegate = PQgetvalue(res, 0, i_oprnegate);
14217 oprrest = PQgetvalue(res, 0, i_oprrest);
14218 oprjoin = PQgetvalue(res, 0, i_oprjoin);
14221
14222 /* In PG14 upwards postfix operator support does not exist anymore. */
14223 if (strcmp(oprkind, "r") == 0)
14224 pg_log_warning("postfix operators are not supported anymore (operator \"%s\")",
14225 oprcode);
14226
/* FUNCTION clause: the regprocedure display is stripped of its argument list. */
14228 if (oprregproc)
14229 {
14230 appendPQExpBuffer(details, " FUNCTION = %s", oprregproc);
14232 }
14233
14234 appendPQExpBuffer(oprid, "%s (",
14235 oprinfo->dobj.name);
14236
14237 /*
14238 * right unary means there's a left arg and left unary means there's a
14239 * right arg. (Although the "r" case is dead code for PG14 and later,
14240 * continue to support it in case we're dumping from an old server.)
14241 */
14242 if (strcmp(oprkind, "r") == 0 ||
14243 strcmp(oprkind, "b") == 0)
14244 {
14245 appendPQExpBuffer(details, ",\n LEFTARG = %s", oprleft);
14246 appendPQExpBufferStr(oprid, oprleft);
14247 }
14248 else
14249 appendPQExpBufferStr(oprid, "NONE");
14250
14251 if (strcmp(oprkind, "l") == 0 ||
14252 strcmp(oprkind, "b") == 0)
14253 {
14254 appendPQExpBuffer(details, ",\n RIGHTARG = %s", oprright);
14255 appendPQExpBuffer(oprid, ", %s)", oprright);
14256 }
14257 else
14258 appendPQExpBufferStr(oprid, ", NONE)");
14259
/* COMMUTATOR / NEGATOR references are freshly allocated; freed after use. */
14261 if (oprref)
14262 {
14263 appendPQExpBuffer(details, ",\n COMMUTATOR = %s", oprref);
14264 free(oprref);
14265 }
14266
14268 if (oprref)
14269 {
14270 appendPQExpBuffer(details, ",\n NEGATOR = %s", oprref);
14271 free(oprref);
14272 }
14273
14274 if (strcmp(oprcanmerge, "t") == 0)
14275 appendPQExpBufferStr(details, ",\n MERGES");
14276
14277 if (strcmp(oprcanhash, "t") == 0)
14278 appendPQExpBufferStr(details, ",\n HASHES");
14279
14281 if (oprregproc)
14282 {
14283 appendPQExpBuffer(details, ",\n RESTRICT = %s", oprregproc);
14285 }
14286
14288 if (oprregproc)
14289 {
14290 appendPQExpBuffer(details, ",\n JOIN = %s", oprregproc);
14292 }
14293
14294 appendPQExpBuffer(delq, "DROP OPERATOR %s.%s;\n",
14295 fmtId(oprinfo->dobj.namespace->dobj.name),
14296 oprid->data);
14297
14298 appendPQExpBuffer(q, "CREATE OPERATOR %s.%s (\n%s\n);\n",
14299 fmtId(oprinfo->dobj.namespace->dobj.name),
14300 oprinfo->dobj.name, details->data);
14301
14302 if (dopt->binary_upgrade)
14304 "OPERATOR", oprid->data,
14305 oprinfo->dobj.namespace->dobj.name);
14306
14307 if (oprinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
14308 ArchiveEntry(fout, oprinfo->dobj.catId, oprinfo->dobj.dumpId,
14309 ARCHIVE_OPTS(.tag = oprinfo->dobj.name,
14310 .namespace = oprinfo->dobj.namespace->dobj.name,
14311 .owner = oprinfo->rolname,
14312 .description = "OPERATOR",
14313 .section = SECTION_PRE_DATA,
14314 .createStmt = q->data,
14315 .dropStmt = delq->data));
14316
14317 /* Dump Operator Comments */
14318 if (oprinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
14319 dumpComment(fout, "OPERATOR", oprid->data,
14320 oprinfo->dobj.namespace->dobj.name, oprinfo->rolname,
14321 oprinfo->dobj.catId, 0, oprinfo->dobj.dumpId);
14322
14323 PQclear(res);
14324
14325 destroyPQExpBuffer(query);
14329 destroyPQExpBuffer(details);
14330}
14331
/*
 * Convert a function reference obtained from pg_operator
 *
 * Returns an allocated string of what to print, or NULL if the function
 * reference is InvalidOid.  The returned string is expected to be free'd
 * by the caller.
 *
 * The input is a REGPROCEDURE display; we have to strip the argument-types
 * part.
 */
static char *
convertRegProcReference(const char *proc)
{
	char	   *name;
	char	   *paren;
	bool		inquote;

	/* In all cases "-" means a null reference */
	if (strcmp(proc, "-") == 0)
		return NULL;

	name = pg_strdup(proc);
	/* find non-double-quoted left paren */
	inquote = false;
	for (paren = name; *paren; paren++)
	{
		if (*paren == '(' && !inquote)
		{
			*paren = '\0';
			break;
		}
		if (*paren == '"')
			inquote = !inquote;
	}
	return name;
}
14367
14368/*
14369 * getFormattedOperatorName - retrieve the operator name for the
14370 * given operator OID (presented in string form).
14371 *
14372 * Returns an allocated string, or NULL if the given OID is invalid.
14373 * Caller is responsible for free'ing result string.
14374 *
14375 * What we produce has the format "OPERATOR(schema.oprname)". This is only
14376 * useful in commands where the operator's argument types can be inferred from
14377 * context. We always schema-qualify the name, though. The predecessor to
14378 * this code tried to skip the schema qualification if possible, but that led
14379 * to wrong results in corner cases, such as if an operator and its negator
14380 * are in different schemas.
14381 */
14382static char *
14384{
14386
14387 /* In all cases "0" means a null reference */
14388 if (strcmp(oproid, "0") == 0)
14389 return NULL;
14390
14392 if (oprInfo == NULL)
14393 {
14394 pg_log_warning("could not find operator with OID %s",
14395 oproid);
14396 return NULL;
14397 }
14398
14399 return psprintf("OPERATOR(%s.%s)",
14400 fmtId(oprInfo->dobj.namespace->dobj.name),
14401 oprInfo->dobj.name);
14402}
14403
14404/*
14405 * Convert a function OID obtained from pg_ts_parser or pg_ts_template
14406 *
14407 * It is sufficient to use REGPROC rather than REGPROCEDURE, since the
14408 * argument lists of these functions are predetermined. Note that the
14409 * caller should ensure we are in the proper schema, because the results
14410 * are search path dependent!
14411 */
14412static char *
14414{
14415 char *result;
14416 char query[128];
14417 PGresult *res;
14418
14419 snprintf(query, sizeof(query),
14420 "SELECT '%u'::pg_catalog.regproc", funcOid);
14421 res = ExecuteSqlQueryForSingleRow(fout, query);
14422
14423 result = pg_strdup(PQgetvalue(res, 0, 0));
14424
14425 PQclear(res);
14426
14427 return result;
14428}
14429
14430/*
14431 * dumpAccessMethod
14432 * write out a single access method definition
14433 */
14434static void
14436{
14437 DumpOptions *dopt = fout->dopt;
14438 PQExpBuffer q;
14440 char *qamname;
14441
14442 /* Do nothing if not dumping schema */
14443 if (!dopt->dumpSchema)
14444 return;
14445
14446 q = createPQExpBuffer();
14448
14449 qamname = pg_strdup(fmtId(aminfo->dobj.name));
14450
14451 appendPQExpBuffer(q, "CREATE ACCESS METHOD %s ", qamname);
14452
14453 switch (aminfo->amtype)
14454 {
14455 case AMTYPE_INDEX:
14456 appendPQExpBufferStr(q, "TYPE INDEX ");
14457 break;
14458 case AMTYPE_TABLE:
14459 appendPQExpBufferStr(q, "TYPE TABLE ");
14460 break;
14461 default:
14462 pg_log_warning("invalid type \"%c\" of access method \"%s\"",
14463 aminfo->amtype, qamname);
14466 free(qamname);
14467 return;
14468 }
14469
14470 appendPQExpBuffer(q, "HANDLER %s;\n", aminfo->amhandler);
14471
14472 appendPQExpBuffer(delq, "DROP ACCESS METHOD %s;\n",
14473 qamname);
14474
14475 if (dopt->binary_upgrade)
14477 "ACCESS METHOD", qamname, NULL);
14478
14479 if (aminfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
14480 ArchiveEntry(fout, aminfo->dobj.catId, aminfo->dobj.dumpId,
14481 ARCHIVE_OPTS(.tag = aminfo->dobj.name,
14482 .description = "ACCESS METHOD",
14483 .section = SECTION_PRE_DATA,
14484 .createStmt = q->data,
14485 .dropStmt = delq->data));
14486
14487 /* Dump Access Method Comments */
14488 if (aminfo->dobj.dump & DUMP_COMPONENT_COMMENT)
14489 dumpComment(fout, "ACCESS METHOD", qamname,
14490 NULL, "",
14491 aminfo->dobj.catId, 0, aminfo->dobj.dumpId);
14492
14495 free(qamname);
14496}
14497
14498/*
14499 * dumpOpclass
14500 * write out a single operator class definition
14501 */
/*
 * NOTE(review): extraction dropped lines here (doxygen numbers jump, e.g.
 * 14525 -> 14527, 14588 -> 14590); the elided lines include the function
 * declarator, the delq/nameusing buffer handling, and several fmtId()/
 * fmtQualifiedDumpable() arguments.  Tokens below are preserved exactly as
 * found — reconcile against the canonical pg_dump.c before building.
 */
14502static void
14504{
14505 DumpOptions *dopt = fout->dopt;
14506 PQExpBuffer query;
14507 PQExpBuffer q;
14510 PGresult *res;
14511 int ntups;
14512 int i_opcintype;
14513 int i_opckeytype;
14514 int i_opcdefault;
14515 int i_opcfamily;
14516 int i_opcfamilyname;
14517 int i_opcfamilynsp;
14518 int i_amname;
14519 int i_amopstrategy;
14520 int i_amopopr;
14521 int i_sortfamily;
14522 int i_sortfamilynsp;
14523 int i_amprocnum;
14524 int i_amproc;
14525 int i_amproclefttype;
14527 char *opcintype;
14528 char *opckeytype;
14529 char *opcdefault;
14530 char *opcfamily;
14531 char *opcfamilyname;
14532 char *opcfamilynsp;
14533 char *amname;
14534 char *amopstrategy;
14535 char *amopopr;
14536 char *sortfamily;
14537 char *sortfamilynsp;
14538 char *amprocnum;
14539 char *amproc;
14540 char *amproclefttype;
14541 char *amprocrighttype;
14542 bool needComma;
14543 int i;
14544
14545 /* Do nothing if not dumping schema */
14546 if (!dopt->dumpSchema)
14547 return;
14548
14549 query = createPQExpBuffer();
14550 q = createPQExpBuffer();
14553
14554 /* Get additional fields from the pg_opclass row */
14555 appendPQExpBuffer(query, "SELECT opcintype::pg_catalog.regtype, "
14556 "opckeytype::pg_catalog.regtype, "
14557 "opcdefault, opcfamily, "
14558 "opfname AS opcfamilyname, "
14559 "nspname AS opcfamilynsp, "
14560 "(SELECT amname FROM pg_catalog.pg_am WHERE oid = opcmethod) AS amname "
14561 "FROM pg_catalog.pg_opclass c "
14562 "LEFT JOIN pg_catalog.pg_opfamily f ON f.oid = opcfamily "
14563 "LEFT JOIN pg_catalog.pg_namespace n ON n.oid = opfnamespace "
14564 "WHERE c.oid = '%u'::pg_catalog.oid",
14565 opcinfo->dobj.catId.oid);
14566
14567 res = ExecuteSqlQueryForSingleRow(fout, query->data);
14568
14569 i_opcintype = PQfnumber(res, "opcintype");
14570 i_opckeytype = PQfnumber(res, "opckeytype");
14571 i_opcdefault = PQfnumber(res, "opcdefault");
14572 i_opcfamily = PQfnumber(res, "opcfamily");
14573 i_opcfamilyname = PQfnumber(res, "opcfamilyname");
14574 i_opcfamilynsp = PQfnumber(res, "opcfamilynsp");
14575 i_amname = PQfnumber(res, "amname");
14576
14577 /* opcintype may still be needed after we PQclear res */
14578 opcintype = pg_strdup(PQgetvalue(res, 0, i_opcintype));
14581 /* opcfamily will still be needed after we PQclear res */
14582 opcfamily = pg_strdup(PQgetvalue(res, 0, i_opcfamily));
14585 /* amname will still be needed after we PQclear res */
14586 amname = pg_strdup(PQgetvalue(res, 0, i_amname));
14587
14588 appendPQExpBuffer(delq, "DROP OPERATOR CLASS %s",
14590 appendPQExpBuffer(delq, " USING %s;\n",
14591 fmtId(amname));
14592
14593 /* Build the fixed portion of the CREATE command */
14594 appendPQExpBuffer(q, "CREATE OPERATOR CLASS %s\n ",
14596 if (strcmp(opcdefault, "t") == 0)
14597 appendPQExpBufferStr(q, "DEFAULT ");
14598 appendPQExpBuffer(q, "FOR TYPE %s USING %s",
14599 opcintype,
14600 fmtId(amname));
14601 if (strlen(opcfamilyname) > 0)
14602 {
14603 appendPQExpBufferStr(q, " FAMILY ");
14606 }
14607 appendPQExpBufferStr(q, " AS\n ");
14608
14609 needComma = false;
14610
14611 if (strcmp(opckeytype, "-") != 0)
14612 {
14613 appendPQExpBuffer(q, "STORAGE %s",
14614 opckeytype);
14615 needComma = true;
14616 }
14617
14618 PQclear(res);
14619
14620 /*
14621 * Now fetch and print the OPERATOR entries (pg_amop rows).
14622 *
14623 * Print only those opfamily members that are tied to the opclass by
14624 * pg_depend entries.
14625 */
14626 resetPQExpBuffer(query);
14627 appendPQExpBuffer(query, "SELECT amopstrategy, "
14628 "amopopr::pg_catalog.regoperator, "
14629 "opfname AS sortfamily, "
14630 "nspname AS sortfamilynsp "
14631 "FROM pg_catalog.pg_amop ao JOIN pg_catalog.pg_depend ON "
14632 "(classid = 'pg_catalog.pg_amop'::pg_catalog.regclass AND objid = ao.oid) "
14633 "LEFT JOIN pg_catalog.pg_opfamily f ON f.oid = amopsortfamily "
14634 "LEFT JOIN pg_catalog.pg_namespace n ON n.oid = opfnamespace "
14635 "WHERE refclassid = 'pg_catalog.pg_opclass'::pg_catalog.regclass "
14636 "AND refobjid = '%u'::pg_catalog.oid "
14637 "AND amopfamily = '%s'::pg_catalog.oid "
14638 "ORDER BY amopstrategy",
14639 opcinfo->dobj.catId.oid,
14640 opcfamily);
14641
14642 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
14643
14644 ntups = PQntuples(res);
14645
14646 i_amopstrategy = PQfnumber(res, "amopstrategy");
14647 i_amopopr = PQfnumber(res, "amopopr");
14648 i_sortfamily = PQfnumber(res, "sortfamily");
14649 i_sortfamilynsp = PQfnumber(res, "sortfamilynsp");
14650
14651 for (i = 0; i < ntups; i++)
14652 {
14654 amopopr = PQgetvalue(res, i, i_amopopr);
14655 sortfamily = PQgetvalue(res, i, i_sortfamily);
14657
14658 if (needComma)
14659 appendPQExpBufferStr(q, " ,\n ");
14660
14661 appendPQExpBuffer(q, "OPERATOR %s %s",
14663
14664 if (strlen(sortfamily) > 0)
14665 {
14666 appendPQExpBufferStr(q, " FOR ORDER BY ");
14668 appendPQExpBufferStr(q, fmtId(sortfamily));
14669 }
14670
14671 needComma = true;
14672 }
14673
14674 PQclear(res);
14675
14676 /*
14677 * Now fetch and print the FUNCTION entries (pg_amproc rows).
14678 *
14679 * Print only those opfamily members that are tied to the opclass by
14680 * pg_depend entries.
14681 *
14682 * We print the amproclefttype/amprocrighttype even though in most cases
14683 * the backend could deduce the right values, because of the corner case
14684 * of a btree sort support function for a cross-type comparison.
14685 */
14686 resetPQExpBuffer(query);
14687
14688 appendPQExpBuffer(query, "SELECT amprocnum, "
14689 "amproc::pg_catalog.regprocedure, "
14690 "amproclefttype::pg_catalog.regtype, "
14691 "amprocrighttype::pg_catalog.regtype "
14692 "FROM pg_catalog.pg_amproc ap, pg_catalog.pg_depend "
14693 "WHERE refclassid = 'pg_catalog.pg_opclass'::pg_catalog.regclass "
14694 "AND refobjid = '%u'::pg_catalog.oid "
14695 "AND classid = 'pg_catalog.pg_amproc'::pg_catalog.regclass "
14696 "AND objid = ap.oid "
14697 "ORDER BY amprocnum",
14698 opcinfo->dobj.catId.oid);
14699
14700 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
14701
14702 ntups = PQntuples(res);
14703
14704 i_amprocnum = PQfnumber(res, "amprocnum");
14705 i_amproc = PQfnumber(res, "amproc");
14706 i_amproclefttype = PQfnumber(res, "amproclefttype");
14707 i_amprocrighttype = PQfnumber(res, "amprocrighttype");
14708
14709 for (i = 0; i < ntups; i++)
14710 {
14712 amproc = PQgetvalue(res, i, i_amproc);
14715
14716 if (needComma)
14717 appendPQExpBufferStr(q, " ,\n ");
14718
14719 appendPQExpBuffer(q, "FUNCTION %s", amprocnum);
14720
14723
14724 appendPQExpBuffer(q, " %s", amproc);
14725
14726 needComma = true;
14727 }
14728
14729 PQclear(res);
14730
14731 /*
14732 * If needComma is still false it means we haven't added anything after
14733 * the AS keyword. To avoid printing broken SQL, append a dummy STORAGE
14734 * clause with the same datatype. This isn't sanctioned by the
14735 * documentation, but actually DefineOpClass will treat it as a no-op.
14736 */
14737 if (!needComma)
14738 appendPQExpBuffer(q, "STORAGE %s", opcintype);
14739
14740 appendPQExpBufferStr(q, ";\n");
14741
14743 appendPQExpBuffer(nameusing, " USING %s",
14744 fmtId(amname));
14745
14746 if (dopt->binary_upgrade)
14748 "OPERATOR CLASS", nameusing->data,
14749 opcinfo->dobj.namespace->dobj.name);
14750
14751 if (opcinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
14752 ArchiveEntry(fout, opcinfo->dobj.catId, opcinfo->dobj.dumpId,
14753 ARCHIVE_OPTS(.tag = opcinfo->dobj.name,
14754 .namespace = opcinfo->dobj.namespace->dobj.name,
14755 .owner = opcinfo->rolname,
14756 .description = "OPERATOR CLASS",
14757 .section = SECTION_PRE_DATA,
14758 .createStmt = q->data,
14759 .dropStmt = delq->data));
14760
14761 /* Dump Operator Class Comments */
14762 if (opcinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
14763 dumpComment(fout, "OPERATOR CLASS", nameusing->data,
14764 opcinfo->dobj.namespace->dobj.name, opcinfo->rolname,
14765 opcinfo->dobj.catId, 0, opcinfo->dobj.dumpId);
14766
14767 free(opcintype);
14768 free(opcfamily);
14769 free(amname);
14770 destroyPQExpBuffer(query);
14774}
14775
14776/*
14777 * dumpOpfamily
14778 * write out a single operator family definition
14779 *
14780 * Note: this also dumps any "loose" operator members that aren't bound to a
14781 * specific opclass within the opfamily.
14782 */
/*
 * NOTE(review): extraction dropped lines here (doxygen numbers jump, e.g.
 * 14843 -> 14845, 14859 -> 14861); the elided lines include the function
 * declarator, the res_ops/res_procs query executions, the delq/nameusing
 * buffers, and several PQgetvalue() assignments inside the loops.  Tokens
 * below are preserved exactly as found — reconcile against the canonical
 * pg_dump.c before building.
 */
14783static void
14785{
14786 DumpOptions *dopt = fout->dopt;
14787 PQExpBuffer query;
14788 PQExpBuffer q;
14791 PGresult *res;
14794 int ntups;
14795 int i_amname;
14796 int i_amopstrategy;
14797 int i_amopopr;
14798 int i_sortfamily;
14799 int i_sortfamilynsp;
14800 int i_amprocnum;
14801 int i_amproc;
14802 int i_amproclefttype;
14804 char *amname;
14805 char *amopstrategy;
14806 char *amopopr;
14807 char *sortfamily;
14808 char *sortfamilynsp;
14809 char *amprocnum;
14810 char *amproc;
14811 char *amproclefttype;
14812 char *amprocrighttype;
14813 bool needComma;
14814 int i;
14815
14816 /* Do nothing if not dumping schema */
14817 if (!dopt->dumpSchema)
14818 return;
14819
14820 query = createPQExpBuffer();
14821 q = createPQExpBuffer();
14824
14825 /*
14826 * Fetch only those opfamily members that are tied directly to the
14827 * opfamily by pg_depend entries.
14828 */
14829 appendPQExpBuffer(query, "SELECT amopstrategy, "
14830 "amopopr::pg_catalog.regoperator, "
14831 "opfname AS sortfamily, "
14832 "nspname AS sortfamilynsp "
14833 "FROM pg_catalog.pg_amop ao JOIN pg_catalog.pg_depend ON "
14834 "(classid = 'pg_catalog.pg_amop'::pg_catalog.regclass AND objid = ao.oid) "
14835 "LEFT JOIN pg_catalog.pg_opfamily f ON f.oid = amopsortfamily "
14836 "LEFT JOIN pg_catalog.pg_namespace n ON n.oid = opfnamespace "
14837 "WHERE refclassid = 'pg_catalog.pg_opfamily'::pg_catalog.regclass "
14838 "AND refobjid = '%u'::pg_catalog.oid "
14839 "AND amopfamily = '%u'::pg_catalog.oid "
14840 "ORDER BY amopstrategy",
14841 opfinfo->dobj.catId.oid,
14842 opfinfo->dobj.catId.oid);
14843
14845
14846 resetPQExpBuffer(query);
14847
14848 appendPQExpBuffer(query, "SELECT amprocnum, "
14849 "amproc::pg_catalog.regprocedure, "
14850 "amproclefttype::pg_catalog.regtype, "
14851 "amprocrighttype::pg_catalog.regtype "
14852 "FROM pg_catalog.pg_amproc ap, pg_catalog.pg_depend "
14853 "WHERE refclassid = 'pg_catalog.pg_opfamily'::pg_catalog.regclass "
14854 "AND refobjid = '%u'::pg_catalog.oid "
14855 "AND classid = 'pg_catalog.pg_amproc'::pg_catalog.regclass "
14856 "AND objid = ap.oid "
14857 "ORDER BY amprocnum",
14858 opfinfo->dobj.catId.oid);
14859
14861
14862 /* Get additional fields from the pg_opfamily row */
14863 resetPQExpBuffer(query);
14864
14865 appendPQExpBuffer(query, "SELECT "
14866 "(SELECT amname FROM pg_catalog.pg_am WHERE oid = opfmethod) AS amname "
14867 "FROM pg_catalog.pg_opfamily "
14868 "WHERE oid = '%u'::pg_catalog.oid",
14869 opfinfo->dobj.catId.oid);
14870
14871 res = ExecuteSqlQueryForSingleRow(fout, query->data);
14872
14873 i_amname = PQfnumber(res, "amname");
14874
14875 /* amname will still be needed after we PQclear res */
14876 amname = pg_strdup(PQgetvalue(res, 0, i_amname));
14877
14878 appendPQExpBuffer(delq, "DROP OPERATOR FAMILY %s",
14880 appendPQExpBuffer(delq, " USING %s;\n",
14881 fmtId(amname));
14882
14883 /* Build the fixed portion of the CREATE command */
14884 appendPQExpBuffer(q, "CREATE OPERATOR FAMILY %s",
14886 appendPQExpBuffer(q, " USING %s;\n",
14887 fmtId(amname));
14888
14889 PQclear(res);
14890
14891 /* Do we need an ALTER to add loose members? */
14892 if (PQntuples(res_ops) > 0 || PQntuples(res_procs) > 0)
14893 {
14894 appendPQExpBuffer(q, "ALTER OPERATOR FAMILY %s",
14896 appendPQExpBuffer(q, " USING %s ADD\n ",
14897 fmtId(amname));
14898
14899 needComma = false;
14900
14901 /*
14902 * Now fetch and print the OPERATOR entries (pg_amop rows).
14903 */
14904 ntups = PQntuples(res_ops);
14905
14906 i_amopstrategy = PQfnumber(res_ops, "amopstrategy");
14907 i_amopopr = PQfnumber(res_ops, "amopopr");
14908 i_sortfamily = PQfnumber(res_ops, "sortfamily");
14909 i_sortfamilynsp = PQfnumber(res_ops, "sortfamilynsp");
14910
14911 for (i = 0; i < ntups; i++)
14912 {
14915 sortfamily = PQgetvalue(res_ops, i, i_sortfamily);
14917
14918 if (needComma)
14919 appendPQExpBufferStr(q, " ,\n ");
14920
14921 appendPQExpBuffer(q, "OPERATOR %s %s",
14923
14924 if (strlen(sortfamily) > 0)
14925 {
14926 appendPQExpBufferStr(q, " FOR ORDER BY ");
14928 appendPQExpBufferStr(q, fmtId(sortfamily));
14929 }
14930
14931 needComma = true;
14932 }
14933
14934 /*
14935 * Now fetch and print the FUNCTION entries (pg_amproc rows).
14936 */
14937 ntups = PQntuples(res_procs);
14938
14939 i_amprocnum = PQfnumber(res_procs, "amprocnum");
14940 i_amproc = PQfnumber(res_procs, "amproc");
14941 i_amproclefttype = PQfnumber(res_procs, "amproclefttype");
14942 i_amprocrighttype = PQfnumber(res_procs, "amprocrighttype");
14943
14944 for (i = 0; i < ntups; i++)
14945 {
14950
14951 if (needComma)
14952 appendPQExpBufferStr(q, " ,\n ");
14953
14954 appendPQExpBuffer(q, "FUNCTION %s (%s, %s) %s",
14956 amproc);
14957
14958 needComma = true;
14959 }
14960
14961 appendPQExpBufferStr(q, ";\n");
14962 }
14963
14965 appendPQExpBuffer(nameusing, " USING %s",
14966 fmtId(amname));
14967
14968 if (dopt->binary_upgrade)
14970 "OPERATOR FAMILY", nameusing->data,
14971 opfinfo->dobj.namespace->dobj.name);
14972
14973 if (opfinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
14974 ArchiveEntry(fout, opfinfo->dobj.catId, opfinfo->dobj.dumpId,
14975 ARCHIVE_OPTS(.tag = opfinfo->dobj.name,
14976 .namespace = opfinfo->dobj.namespace->dobj.name,
14977 .owner = opfinfo->rolname,
14978 .description = "OPERATOR FAMILY",
14979 .section = SECTION_PRE_DATA,
14980 .createStmt = q->data,
14981 .dropStmt = delq->data));
14982
14983 /* Dump Operator Family Comments */
14984 if (opfinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
14985 dumpComment(fout, "OPERATOR FAMILY", nameusing->data,
14986 opfinfo->dobj.namespace->dobj.name, opfinfo->rolname,
14987 opfinfo->dobj.catId, 0, opfinfo->dobj.dumpId);
14988
14989 free(amname);
14992 destroyPQExpBuffer(query);
14996}
14997
14998/*
14999 * dumpCollation
15000 * write out a single collation definition
15001 */
/*
 * NOTE(review): extraction dropped lines here (doxygen numbers jump, e.g.
 * 15011 -> 15013, 15084 -> 15086); the elided lines include the function
 * declarator, the delq buffer, the collprovider/collcollate/colllocale
 * PQgetvalue() assignments, and the appendStringLiteralAH() calls that emit
 * locale values.  Tokens below are preserved exactly as found — reconcile
 * against the canonical pg_dump.c before building.
 */
15002static void
15004{
15005 DumpOptions *dopt = fout->dopt;
15006 PQExpBuffer query;
15007 PQExpBuffer q;
15009 char *qcollname;
15010 PGresult *res;
15011 int i_collprovider;
15013 int i_collcollate;
15014 int i_collctype;
15015 int i_colllocale;
15016 int i_collicurules;
15017 const char *collprovider;
15018 const char *collcollate;
15019 const char *collctype;
15020 const char *colllocale;
15021 const char *collicurules;
15022
15023 /* Do nothing if not dumping schema */
15024 if (!dopt->dumpSchema)
15025 return;
15026
15027 query = createPQExpBuffer();
15028 q = createPQExpBuffer();
15030
15031 qcollname = pg_strdup(fmtId(collinfo->dobj.name));
15032
15033 /* Get collation-specific details */
15034 appendPQExpBufferStr(query, "SELECT ");
15035
/* Build the SELECT list to match what the remote server version provides. */
15036 if (fout->remoteVersion >= 100000)
15038 "collprovider, "
15039 "collversion, ");
15040 else
15042 "'c' AS collprovider, "
15043 "NULL AS collversion, ");
15044
15045 if (fout->remoteVersion >= 120000)
15047 "collisdeterministic, ");
15048 else
15050 "true AS collisdeterministic, ");
15051
15052 if (fout->remoteVersion >= 170000)
15054 "colllocale, ");
15055 else if (fout->remoteVersion >= 150000)
15057 "colliculocale AS colllocale, ");
15058 else
15060 "NULL AS colllocale, ");
15061
15062 if (fout->remoteVersion >= 160000)
15064 "collicurules, ");
15065 else
15067 "NULL AS collicurules, ");
15068
15069 appendPQExpBuffer(query,
15070 "collcollate, "
15071 "collctype "
15072 "FROM pg_catalog.pg_collation c "
15073 "WHERE c.oid = '%u'::pg_catalog.oid",
15074 collinfo->dobj.catId.oid);
15075
15076 res = ExecuteSqlQueryForSingleRow(fout, query->data);
15077
15078 i_collprovider = PQfnumber(res, "collprovider");
15079 i_collisdeterministic = PQfnumber(res, "collisdeterministic");
15080 i_collcollate = PQfnumber(res, "collcollate");
15081 i_collctype = PQfnumber(res, "collctype");
15082 i_colllocale = PQfnumber(res, "colllocale");
15083 i_collicurules = PQfnumber(res, "collicurules");
15084
15086
15087 if (!PQgetisnull(res, 0, i_collcollate))
15089 else
15090 collcollate = NULL;
15091
15092 if (!PQgetisnull(res, 0, i_collctype))
15093 collctype = PQgetvalue(res, 0, i_collctype);
15094 else
15095 collctype = NULL;
15096
15097 /*
15098 * Before version 15, collcollate and collctype were of type NAME and
15099 * non-nullable. Treat empty strings as NULL for consistency.
15100 */
15101 if (fout->remoteVersion < 150000)
15102 {
15103 if (collcollate[0] == '\0')
15104 collcollate = NULL;
15105 if (collctype[0] == '\0')
15106 collctype = NULL;
15107 }
15108
15109 if (!PQgetisnull(res, 0, i_colllocale))
15111 else
15112 colllocale = NULL;
15113
15114 if (!PQgetisnull(res, 0, i_collicurules))
15116 else
15118
15119 appendPQExpBuffer(delq, "DROP COLLATION %s;\n",
15121
15122 appendPQExpBuffer(q, "CREATE COLLATION %s (",
15124
15125 appendPQExpBufferStr(q, "provider = ");
15126 if (collprovider[0] == 'b')
15127 appendPQExpBufferStr(q, "builtin");
15128 else if (collprovider[0] == 'c')
15129 appendPQExpBufferStr(q, "libc");
15130 else if (collprovider[0] == 'i')
15131 appendPQExpBufferStr(q, "icu");
15132 else if (collprovider[0] == 'd')
15133 /* to allow dumping pg_catalog; not accepted on input */
15134 appendPQExpBufferStr(q, "default");
15135 else
15136 pg_fatal("unrecognized collation provider: %s",
15137 collprovider);
15138
15139 if (strcmp(PQgetvalue(res, 0, i_collisdeterministic), "f") == 0)
15140 appendPQExpBufferStr(q, ", deterministic = false");
15141
/* Per-provider locale clauses; the warnings flag inconsistent catalog rows. */
15142 if (collprovider[0] == 'd')
15143 {
15145 pg_log_warning("invalid collation \"%s\"", qcollname);
15146
15147 /* no locale -- the default collation cannot be reloaded anyway */
15148 }
15149 else if (collprovider[0] == 'b')
15150 {
15152 pg_log_warning("invalid collation \"%s\"", qcollname);
15153
15154 appendPQExpBufferStr(q, ", locale = ");
15156 fout);
15157 }
15158 else if (collprovider[0] == 'i')
15159 {
15160 if (fout->remoteVersion >= 150000)
15161 {
15162 if (collcollate || collctype || !colllocale)
15163 pg_log_warning("invalid collation \"%s\"", qcollname);
15164
15165 appendPQExpBufferStr(q, ", locale = ");
15167 fout);
15168 }
15169 else
15170 {
15171 if (!collcollate || !collctype || colllocale ||
15173 pg_log_warning("invalid collation \"%s\"", qcollname);
15174
15175 appendPQExpBufferStr(q, ", locale = ");
15177 }
15178
15179 if (collicurules)
15180 {
15181 appendPQExpBufferStr(q, ", rules = ");
15183 }
15184 }
15185 else if (collprovider[0] == 'c')
15186 {
15188 pg_log_warning("invalid collation \"%s\"", qcollname);
15189
15191 {
15192 appendPQExpBufferStr(q, ", locale = ");
15194 }
15195 else
15196 {
15197 appendPQExpBufferStr(q, ", lc_collate = ");
15199 appendPQExpBufferStr(q, ", lc_ctype = ");
15201 }
15202 }
15203 else
15204 pg_fatal("unrecognized collation provider: %s", collprovider);
15205
15206 /*
15207 * For binary upgrade, carry over the collation version. For normal
15208 * dump/restore, omit the version, so that it is computed upon restore.
15209 */
15210 if (dopt->binary_upgrade)
15211 {
15212 int i_collversion;
15213
15214 i_collversion = PQfnumber(res, "collversion");
15215 if (!PQgetisnull(res, 0, i_collversion))
15216 {
15217 appendPQExpBufferStr(q, ", version = ");
15219 PQgetvalue(res, 0, i_collversion),
15220 fout);
15221 }
15222 }
15223
15224 appendPQExpBufferStr(q, ");\n");
15225
15226 if (dopt->binary_upgrade)
15228 "COLLATION", qcollname,
15229 collinfo->dobj.namespace->dobj.name);
15230
15231 if (collinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
15232 ArchiveEntry(fout, collinfo->dobj.catId, collinfo->dobj.dumpId,
15233 ARCHIVE_OPTS(.tag = collinfo->dobj.name,
15234 .namespace = collinfo->dobj.namespace->dobj.name,
15235 .owner = collinfo->rolname,
15236 .description = "COLLATION",
15237 .section = SECTION_PRE_DATA,
15238 .createStmt = q->data,
15239 .dropStmt = delq->data));
15240
15241 /* Dump Collation Comments */
15242 if (collinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
15243 dumpComment(fout, "COLLATION", qcollname,
15244 collinfo->dobj.namespace->dobj.name, collinfo->rolname,
15245 collinfo->dobj.catId, 0, collinfo->dobj.dumpId);
15246
15247 PQclear(res);
15248
15249 destroyPQExpBuffer(query);
15252 free(qcollname);
15253}
15254
15255/*
15256 * dumpConversion
15257 * write out a single conversion definition
15258 */
15259static void
15261{
15262 DumpOptions *dopt = fout->dopt;
15263 PQExpBuffer query;
15264 PQExpBuffer q;
15266 char *qconvname;
15267 PGresult *res;
15268 int i_conforencoding;
15269 int i_contoencoding;
15270 int i_conproc;
15271 int i_condefault;
15272 const char *conforencoding;
15273 const char *contoencoding;
15274 const char *conproc;
15275 bool condefault;
15276
15277 /* Do nothing if not dumping schema */
15278 if (!dopt->dumpSchema)
15279 return;
15280
15281 query = createPQExpBuffer();
15282 q = createPQExpBuffer();
15284
15285 qconvname = pg_strdup(fmtId(convinfo->dobj.name));
15286
15287 /* Get conversion-specific details */
15288 appendPQExpBuffer(query, "SELECT "
15289 "pg_catalog.pg_encoding_to_char(conforencoding) AS conforencoding, "
15290 "pg_catalog.pg_encoding_to_char(contoencoding) AS contoencoding, "
15291 "conproc, condefault "
15292 "FROM pg_catalog.pg_conversion c "
15293 "WHERE c.oid = '%u'::pg_catalog.oid",
15294 convinfo->dobj.catId.oid);
15295
15296 res = ExecuteSqlQueryForSingleRow(fout, query->data);
15297
15298 i_conforencoding = PQfnumber(res, "conforencoding");
15299 i_contoencoding = PQfnumber(res, "contoencoding");
15300 i_conproc = PQfnumber(res, "conproc");
15301 i_condefault = PQfnumber(res, "condefault");
15302
15305 conproc = PQgetvalue(res, 0, i_conproc);
15306 condefault = (PQgetvalue(res, 0, i_condefault)[0] == 't');
15307
15308 appendPQExpBuffer(delq, "DROP CONVERSION %s;\n",
15310
15311 appendPQExpBuffer(q, "CREATE %sCONVERSION %s FOR ",
15312 (condefault) ? "DEFAULT " : "",
15315 appendPQExpBufferStr(q, " TO ");
15317 /* regproc output is already sufficiently quoted */
15318 appendPQExpBuffer(q, " FROM %s;\n", conproc);
15319
15320 if (dopt->binary_upgrade)
15322 "CONVERSION", qconvname,
15323 convinfo->dobj.namespace->dobj.name);
15324
15325 if (convinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
15326 ArchiveEntry(fout, convinfo->dobj.catId, convinfo->dobj.dumpId,
15327 ARCHIVE_OPTS(.tag = convinfo->dobj.name,
15328 .namespace = convinfo->dobj.namespace->dobj.name,
15329 .owner = convinfo->rolname,
15330 .description = "CONVERSION",
15331 .section = SECTION_PRE_DATA,
15332 .createStmt = q->data,
15333 .dropStmt = delq->data));
15334
15335 /* Dump Conversion Comments */
15336 if (convinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
15337 dumpComment(fout, "CONVERSION", qconvname,
15338 convinfo->dobj.namespace->dobj.name, convinfo->rolname,
15339 convinfo->dobj.catId, 0, convinfo->dobj.dumpId);
15340
15341 PQclear(res);
15342
15343 destroyPQExpBuffer(query);
15346 free(qconvname);
15347}
15348
15349/*
15350 * format_aggregate_signature: generate aggregate name and argument list
15351 *
15352 * The argument type names are qualified if needed. The aggregate name
15353 * is never qualified.
15354 */
15355static char *
/*
 * NOTE(review): this text was extracted from a rendered source listing and
 * several lines were lost (non-contiguous embedded numbering: 15356 — the
 * signature/parameter line, 15358 — the PQExpBufferData declaration, 15361,
 * 15371, 15375, 15378 are absent).  Compare against upstream pg_dump.c
 * before relying on this block; only comments have been added here.
 */
15357{
15359	int			j;
15360
/* Quote the aggregate name only when the caller asked for it. */
15362	if (honor_quotes)
15363		appendPQExpBufferStr(&buf, fmtId(agginfo->aggfn.dobj.name));
15364	else
15365		appendPQExpBufferStr(&buf, agginfo->aggfn.dobj.name);
15366
/* Zero args is rendered as "(*)"; otherwise list the argument type names. */
15367	if (agginfo->aggfn.nargs == 0)
15368		appendPQExpBufferStr(&buf, "(*)");
15369	else
15370	{
15372		for (j = 0; j < agginfo->aggfn.nargs; j++)
15373			appendPQExpBuffer(&buf, "%s%s",
15374							  (j > 0) ? ", " : "",
/* assumes the dropped line 15375 supplied the type-name lookup call — TODO confirm */
15376							  agginfo->aggfn.argtypes[j],
15377							  zeroIsError));
15379	}
/* Caller takes ownership of the returned palloc'd/malloc'd string. */
15380	return buf.data;
15381}
15382
15383/*
15384 * dumpAgg
15385 * write out a single aggregate definition
15386 */
15387static void
/*
 * NOTE(review): extracted listing with dropped lines (signature line 15388,
 * several argument/continuation lines, the switch case labels at
 * 15584/15587/15590 and 15640/15643/15646, and some frees/destroys).  Only
 * comments were added below; verify the full text against upstream
 * pg_dump.c before editing.
 */
15389{
15390 DumpOptions *dopt = fout->dopt;
15391 PQExpBuffer query;
15392 PQExpBuffer q;
15394 PQExpBuffer details;
15395 char *aggsig; /* identity signature */
15396 char *aggfullsig = NULL; /* full signature */
15397 char *aggsig_tag;
15398 PGresult *res;
15399 int i_agginitval;
15400 int i_aggminitval;
15401 const char *aggtransfn;
15402 const char *aggfinalfn;
15403 const char *aggcombinefn;
15404 const char *aggserialfn;
15405 const char *aggdeserialfn;
15406 const char *aggmtransfn;
15407 const char *aggminvtransfn;
15408 const char *aggmfinalfn;
15409 bool aggfinalextra;
15410 bool aggmfinalextra;
15411 char aggfinalmodify;
15412 char aggmfinalmodify;
15413 const char *aggsortop;
15414 char *aggsortconvop;
15415 char aggkind;
15416 const char *aggtranstype;
15417 const char *aggtransspace;
15418 const char *aggmtranstype;
15419 const char *aggmtransspace;
15420 const char *agginitval;
15421 const char *aggminitval;
15422 const char *proparallel;
15423 char defaultfinalmodify;
15424
15425 /* Do nothing if not dumping schema */
15426 if (!dopt->dumpSchema)
15427 return;
15428
15429 query = createPQExpBuffer();
15430 q = createPQExpBuffer();
15432 details = createPQExpBuffer();
15433
/*
 * Prepare the per-aggregate detail query once per session; the version
 * branches below substitute literal defaults for catalog columns that do
 * not exist on older servers.
 */
15435 {
15436 /* Set up query for aggregate-specific details */
15438 "PREPARE dumpAgg(pg_catalog.oid) AS\n");
15439
15441 "SELECT "
15442 "aggtransfn,\n"
15443 "aggfinalfn,\n"
15444 "aggtranstype::pg_catalog.regtype,\n"
15445 "agginitval,\n"
15446 "aggsortop,\n"
15447 "pg_catalog.pg_get_function_arguments(p.oid) AS funcargs,\n"
15448 "pg_catalog.pg_get_function_identity_arguments(p.oid) AS funciargs,\n");
15449
/* Moving-aggregate support columns exist from PG 9.4 onward. */
15450 if (fout->remoteVersion >= 90400)
15452 "aggkind,\n"
15453 "aggmtransfn,\n"
15454 "aggminvtransfn,\n"
15455 "aggmfinalfn,\n"
15456 "aggmtranstype::pg_catalog.regtype,\n"
15457 "aggfinalextra,\n"
15458 "aggmfinalextra,\n"
15459 "aggtransspace,\n"
15460 "aggmtransspace,\n"
15461 "aggminitval,\n");
15462 else
15464 "'n' AS aggkind,\n"
15465 "'-' AS aggmtransfn,\n"
15466 "'-' AS aggminvtransfn,\n"
15467 "'-' AS aggmfinalfn,\n"
15468 "0 AS aggmtranstype,\n"
15469 "false AS aggfinalextra,\n"
15470 "false AS aggmfinalextra,\n"
15471 "0 AS aggtransspace,\n"
15472 "0 AS aggmtransspace,\n"
15473 "NULL AS aggminitval,\n");
15474
/* Parallel-aggregation columns exist from PG 9.6 onward. */
15475 if (fout->remoteVersion >= 90600)
15477 "aggcombinefn,\n"
15478 "aggserialfn,\n"
15479 "aggdeserialfn,\n"
15480 "proparallel,\n");
15481 else
15483 "'-' AS aggcombinefn,\n"
15484 "'-' AS aggserialfn,\n"
15485 "'-' AS aggdeserialfn,\n"
15486 "'u' AS proparallel,\n");
15487
/* Final-function modify flags exist from PG 11 onward; '0' means omitted. */
15488 if (fout->remoteVersion >= 110000)
15490 "aggfinalmodify,\n"
15491 "aggmfinalmodify\n");
15492 else
15494 "'0' AS aggfinalmodify,\n"
15495 "'0' AS aggmfinalmodify\n");
15496
15498 "FROM pg_catalog.pg_aggregate a, pg_catalog.pg_proc p "
15499 "WHERE a.aggfnoid = p.oid "
15500 "AND p.oid = $1");
15501
15502 ExecuteSqlStatement(fout, query->data);
15503
15505 }
15506
15507 printfPQExpBuffer(query,
15508 "EXECUTE dumpAgg('%u')",
15509 agginfo->aggfn.dobj.catId.oid);
15510
15511 res = ExecuteSqlQueryForSingleRow(fout, query->data);
15512
15513 i_agginitval = PQfnumber(res, "agginitval");
15514 i_aggminitval = PQfnumber(res, "aggminitval");
15515
/* Pull every column of the single result row into local variables. */
15516 aggtransfn = PQgetvalue(res, 0, PQfnumber(res, "aggtransfn"));
15517 aggfinalfn = PQgetvalue(res, 0, PQfnumber(res, "aggfinalfn"));
15518 aggcombinefn = PQgetvalue(res, 0, PQfnumber(res, "aggcombinefn"));
15519 aggserialfn = PQgetvalue(res, 0, PQfnumber(res, "aggserialfn"));
15520 aggdeserialfn = PQgetvalue(res, 0, PQfnumber(res, "aggdeserialfn"));
15521 aggmtransfn = PQgetvalue(res, 0, PQfnumber(res, "aggmtransfn"));
15522 aggminvtransfn = PQgetvalue(res, 0, PQfnumber(res, "aggminvtransfn"));
15523 aggmfinalfn = PQgetvalue(res, 0, PQfnumber(res, "aggmfinalfn"));
15524 aggfinalextra = (PQgetvalue(res, 0, PQfnumber(res, "aggfinalextra"))[0] == 't');
15525 aggmfinalextra = (PQgetvalue(res, 0, PQfnumber(res, "aggmfinalextra"))[0] == 't');
15526 aggfinalmodify = PQgetvalue(res, 0, PQfnumber(res, "aggfinalmodify"))[0];
15527 aggmfinalmodify = PQgetvalue(res, 0, PQfnumber(res, "aggmfinalmodify"))[0];
15528 aggsortop = PQgetvalue(res, 0, PQfnumber(res, "aggsortop"));
15529 aggkind = PQgetvalue(res, 0, PQfnumber(res, "aggkind"))[0];
15530 aggtranstype = PQgetvalue(res, 0, PQfnumber(res, "aggtranstype"));
15531 aggtransspace = PQgetvalue(res, 0, PQfnumber(res, "aggtransspace"));
15532 aggmtranstype = PQgetvalue(res, 0, PQfnumber(res, "aggmtranstype"));
15533 aggmtransspace = PQgetvalue(res, 0, PQfnumber(res, "aggmtransspace"));
15536 proparallel = PQgetvalue(res, 0, PQfnumber(res, "proparallel"));
15537
/* Build the full and identity signatures from the server-formatted arg lists. */
15538 {
15539 char *funcargs;
15540 char *funciargs;
15541
15542 funcargs = PQgetvalue(res, 0, PQfnumber(res, "funcargs"));
15543 funciargs = PQgetvalue(res, 0, PQfnumber(res, "funciargs"));
15546 }
15547
15549
15550 /* identify default modify flag for aggkind (must match DefineAggregate) */
15552 /* replace omitted flags for old versions */
15553 if (aggfinalmodify == '0')
15555 if (aggmfinalmodify == '0')
15557
15558 /* regproc and regtype output is already sufficiently quoted */
15559 appendPQExpBuffer(details, " SFUNC = %s,\n STYPE = %s",
15560 aggtransfn, aggtranstype);
15561
/* "0" is the catalog default, meaning no explicit SSPACE was given. */
15562 if (strcmp(aggtransspace, "0") != 0)
15563 {
15564 appendPQExpBuffer(details, ",\n SSPACE = %s",
15565 aggtransspace);
15566 }
15567
15568 if (!PQgetisnull(res, 0, i_agginitval))
15569 {
15570 appendPQExpBufferStr(details, ",\n INITCOND = ");
15572 }
15573
/* "-" is regproc's spelling of InvalidOid, i.e. no final function. */
15574 if (strcmp(aggfinalfn, "-") != 0)
15575 {
15576 appendPQExpBuffer(details, ",\n FINALFUNC = %s",
15577 aggfinalfn);
15578 if (aggfinalextra)
15579 appendPQExpBufferStr(details, ",\n FINALFUNC_EXTRA");
15581 {
/* NOTE(review): the case labels (AGGMODIFY_* constants) were lost in extraction. */
15582 switch (aggfinalmodify)
15583 {
15585 appendPQExpBufferStr(details, ",\n FINALFUNC_MODIFY = READ_ONLY");
15586 break;
15588 appendPQExpBufferStr(details, ",\n FINALFUNC_MODIFY = SHAREABLE");
15589 break;
15591 appendPQExpBufferStr(details, ",\n FINALFUNC_MODIFY = READ_WRITE");
15592 break;
15593 default:
15594 pg_fatal("unrecognized aggfinalmodify value for aggregate \"%s\"",
15595 agginfo->aggfn.dobj.name);
15596 break;
15597 }
15598 }
15599 }
15600
15601 if (strcmp(aggcombinefn, "-") != 0)
15602 appendPQExpBuffer(details, ",\n COMBINEFUNC = %s", aggcombinefn);
15603
15604 if (strcmp(aggserialfn, "-") != 0)
15605 appendPQExpBuffer(details, ",\n SERIALFUNC = %s", aggserialfn);
15606
15607 if (strcmp(aggdeserialfn, "-") != 0)
15608 appendPQExpBuffer(details, ",\n DESERIALFUNC = %s", aggdeserialfn);
15609
/* Moving-aggregate clauses: msfunc/minvfunc/mstype come as a set. */
15610 if (strcmp(aggmtransfn, "-") != 0)
15611 {
15612 appendPQExpBuffer(details, ",\n MSFUNC = %s,\n MINVFUNC = %s,\n MSTYPE = %s",
15616 }
15617
15618 if (strcmp(aggmtransspace, "0") != 0)
15619 {
15620 appendPQExpBuffer(details, ",\n MSSPACE = %s",
15622 }
15623
15624 if (!PQgetisnull(res, 0, i_aggminitval))
15625 {
15626 appendPQExpBufferStr(details, ",\n MINITCOND = ");
15628 }
15629
15630 if (strcmp(aggmfinalfn, "-") != 0)
15631 {
15632 appendPQExpBuffer(details, ",\n MFINALFUNC = %s",
15633 aggmfinalfn);
15634 if (aggmfinalextra)
15635 appendPQExpBufferStr(details, ",\n MFINALFUNC_EXTRA");
15637 {
/* NOTE(review): case labels lost in extraction here as well. */
15638 switch (aggmfinalmodify)
15639 {
15641 appendPQExpBufferStr(details, ",\n MFINALFUNC_MODIFY = READ_ONLY");
15642 break;
15644 appendPQExpBufferStr(details, ",\n MFINALFUNC_MODIFY = SHAREABLE");
15645 break;
15647 appendPQExpBufferStr(details, ",\n MFINALFUNC_MODIFY = READ_WRITE");
15648 break;
15649 default:
15650 pg_fatal("unrecognized aggmfinalmodify value for aggregate \"%s\"",
15651 agginfo->aggfn.dobj.name);
15652 break;
15653 }
15654 }
15655 }
15656
/* presumably aggsortconvop was produced from aggsortop above — line lost; verify */
15658 if (aggsortconvop)
15659 {
15660 appendPQExpBuffer(details, ",\n SORTOP = %s",
15663 }
15664
15666 appendPQExpBufferStr(details, ",\n HYPOTHETICAL");
15667
15669 {
15670 if (proparallel[0] == PROPARALLEL_SAFE)
15671 appendPQExpBufferStr(details, ",\n PARALLEL = safe");
15672 else if (proparallel[0] == PROPARALLEL_RESTRICTED)
15673 appendPQExpBufferStr(details, ",\n PARALLEL = restricted");
15674 else if (proparallel[0] != PROPARALLEL_UNSAFE)
15675 pg_fatal("unrecognized proparallel value for function \"%s\"",
15676 agginfo->aggfn.dobj.name);
15677 }
15678
15679 appendPQExpBuffer(delq, "DROP AGGREGATE %s.%s;\n",
15680 fmtId(agginfo->aggfn.dobj.namespace->dobj.name),
15681 aggsig);
15682
15683 appendPQExpBuffer(q, "CREATE AGGREGATE %s.%s (\n%s\n);\n",
15684 fmtId(agginfo->aggfn.dobj.namespace->dobj.name),
15685 aggfullsig ? aggfullsig : aggsig, details->data);
15686
15687 if (dopt->binary_upgrade)
15689 "AGGREGATE", aggsig,
15690 agginfo->aggfn.dobj.namespace->dobj.name);
15691
15692 if (agginfo->aggfn.dobj.dump & DUMP_COMPONENT_DEFINITION)
15693 ArchiveEntry(fout, agginfo->aggfn.dobj.catId,
15694 agginfo->aggfn.dobj.dumpId,
15695 ARCHIVE_OPTS(.tag = aggsig_tag,
15696 .namespace = agginfo->aggfn.dobj.namespace->dobj.name,
15697 .owner = agginfo->aggfn.rolname,
15698 .description = "AGGREGATE",
15699 .section = SECTION_PRE_DATA,
15700 .createStmt = q->data,
15701 .dropStmt = delq->data));
15702
15703 /* Dump Aggregate Comments */
15704 if (agginfo->aggfn.dobj.dump & DUMP_COMPONENT_COMMENT)
15705 dumpComment(fout, "AGGREGATE", aggsig,
15706 agginfo->aggfn.dobj.namespace->dobj.name,
15707 agginfo->aggfn.rolname,
15708 agginfo->aggfn.dobj.catId, 0, agginfo->aggfn.dobj.dumpId);
15709
15710 if (agginfo->aggfn.dobj.dump & DUMP_COMPONENT_SECLABEL)
15711 dumpSecLabel(fout, "AGGREGATE", aggsig,
15712 agginfo->aggfn.dobj.namespace->dobj.name,
15713 agginfo->aggfn.rolname,
15714 agginfo->aggfn.dobj.catId, 0, agginfo->aggfn.dobj.dumpId);
15715
15716 /*
15717 * Since there is no GRANT ON AGGREGATE syntax, we have to make the ACL
15718 * command look like a function's GRANT; in particular this affects the
15719 * syntax for zero-argument aggregates and ordered-set aggregates.
15720 */
15721 free(aggsig);
15722
15723 aggsig = format_function_signature(fout, &agginfo->aggfn, true);
15724
15725 if (agginfo->aggfn.dobj.dump & DUMP_COMPONENT_ACL)
15726 dumpACL(fout, agginfo->aggfn.dobj.dumpId, InvalidDumpId,
15727 "FUNCTION", aggsig, NULL,
15728 agginfo->aggfn.dobj.namespace->dobj.name,
15729 NULL, agginfo->aggfn.rolname, &agginfo->aggfn.dacl);
15730
15731 free(aggsig);
15734
15735 PQclear(res);
15736
15737 destroyPQExpBuffer(query);
15740 destroyPQExpBuffer(details);
15741}
15742
15743/*
15744 * dumpTSParser
15745 * write out a single text search parser
15746 */
15747static void
/*
 * NOTE(review): extracted listing; the signature line (15748), the delq
 * declaration/creation (15752, 15760/15761), the qualified-name argument
 * lines (15765, 15780), the binary_upgrade call head (15783) and the final
 * destroyPQExpBuffer calls (15802-15803) were lost.  Comments only added.
 */
15749{
15750 DumpOptions *dopt = fout->dopt;
15751 PQExpBuffer q;
15753 char *qprsname;
15754
15755 /* Do nothing if not dumping schema */
15756 if (!dopt->dumpSchema)
15757 return;
15758
15759 q = createPQExpBuffer();
15761
15762 qprsname = pg_strdup(fmtId(prsinfo->dobj.name));
15763
/* Build CREATE TEXT SEARCH PARSER listing the five support functions. */
15764 appendPQExpBuffer(q, "CREATE TEXT SEARCH PARSER %s (\n",
15766
15767 appendPQExpBuffer(q, " START = %s,\n",
15768 convertTSFunction(fout, prsinfo->prsstart));
15769 appendPQExpBuffer(q, " GETTOKEN = %s,\n",
15770 convertTSFunction(fout, prsinfo->prstoken));
15771 appendPQExpBuffer(q, " END = %s,\n",
15772 convertTSFunction(fout, prsinfo->prsend));
/* HEADLINE is optional; InvalidOid means the parser has none. */
15773 if (prsinfo->prsheadline != InvalidOid)
15774 appendPQExpBuffer(q, " HEADLINE = %s,\n",
15775 convertTSFunction(fout, prsinfo->prsheadline));
15776 appendPQExpBuffer(q, " LEXTYPES = %s );\n",
15777 convertTSFunction(fout, prsinfo->prslextype));
15778
15779 appendPQExpBuffer(delq, "DROP TEXT SEARCH PARSER %s;\n",
15781
15782 if (dopt->binary_upgrade)
15784 "TEXT SEARCH PARSER", qprsname,
15785 prsinfo->dobj.namespace->dobj.name);
15786
15787 if (prsinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
15788 ArchiveEntry(fout, prsinfo->dobj.catId, prsinfo->dobj.dumpId,
15789 ARCHIVE_OPTS(.tag = prsinfo->dobj.name,
15790 .namespace = prsinfo->dobj.namespace->dobj.name,
15791 .description = "TEXT SEARCH PARSER",
15792 .section = SECTION_PRE_DATA,
15793 .createStmt = q->data,
15794 .dropStmt = delq->data));
15795
15796 /* Dump Parser Comments */
15797 if (prsinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
15798 dumpComment(fout, "TEXT SEARCH PARSER", qprsname,
15799 prsinfo->dobj.namespace->dobj.name, "",
15800 prsinfo->dobj.catId, 0, prsinfo->dobj.dumpId);
15801
15804 free(qprsname);
15805}
15806
15807/*
15808 * dumpTSDictionary
15809 * write out a single text search dictionary
15810 */
15811static void
/*
 * NOTE(review): extracted listing with dropped lines (signature 15812, delq
 * declaration/creation, qualified-name arguments at 15843/15858, the
 * template-name append at 15847, binary_upgrade call head 15861, and the
 * destroys at 15881-15882).  Comments only added; verify against upstream.
 */
15813{
15814 DumpOptions *dopt = fout->dopt;
15815 PQExpBuffer q;
15817 PQExpBuffer query;
15818 char *qdictname;
15819 PGresult *res;
15820 char *nspname;
15821 char *tmplname;
15822
15823 /* Do nothing if not dumping schema */
15824 if (!dopt->dumpSchema)
15825 return;
15826
15827 q = createPQExpBuffer();
15829 query = createPQExpBuffer();
15830
15831 qdictname = pg_strdup(fmtId(dictinfo->dobj.name));
15832
15833 /* Fetch name and namespace of the dictionary's template */
15834 appendPQExpBuffer(query, "SELECT nspname, tmplname "
15835 "FROM pg_ts_template p, pg_namespace n "
15836 "WHERE p.oid = '%u' AND n.oid = tmplnamespace",
15837 dictinfo->dicttemplate);
15838 res = ExecuteSqlQueryForSingleRow(fout, query->data);
15839 nspname = PQgetvalue(res, 0, 0);
15840 tmplname = PQgetvalue(res, 0, 1);
15841
15842 appendPQExpBuffer(q, "CREATE TEXT SEARCH DICTIONARY %s (\n",
15844
/* TEMPLATE is always schema-qualified to be safe against search_path. */
15845 appendPQExpBufferStr(q, " TEMPLATE = ");
15846 appendPQExpBuffer(q, "%s.", fmtId(nspname));
15848
15849 PQclear(res);
15850
15851 /* the dictinitoption can be dumped straight into the command */
15852 if (dictinfo->dictinitoption)
15853 appendPQExpBuffer(q, ",\n %s", dictinfo->dictinitoption);
15854
15855 appendPQExpBufferStr(q, " );\n");
15856
15857 appendPQExpBuffer(delq, "DROP TEXT SEARCH DICTIONARY %s;\n",
15859
15860 if (dopt->binary_upgrade)
15862 "TEXT SEARCH DICTIONARY", qdictname,
15863 dictinfo->dobj.namespace->dobj.name);
15864
15865 if (dictinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
15866 ArchiveEntry(fout, dictinfo->dobj.catId, dictinfo->dobj.dumpId,
15867 ARCHIVE_OPTS(.tag = dictinfo->dobj.name,
15868 .namespace = dictinfo->dobj.namespace->dobj.name,
15869 .owner = dictinfo->rolname,
15870 .description = "TEXT SEARCH DICTIONARY",
15871 .section = SECTION_PRE_DATA,
15872 .createStmt = q->data,
15873 .dropStmt = delq->data));
15874
15875 /* Dump Dictionary Comments */
15876 if (dictinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
15877 dumpComment(fout, "TEXT SEARCH DICTIONARY", qdictname,
15878 dictinfo->dobj.namespace->dobj.name, dictinfo->rolname,
15879 dictinfo->dobj.catId, 0, dictinfo->dobj.dumpId);
15880
15883 destroyPQExpBuffer(query);
15884 free(qdictname);
15885}
15886
15887/*
15888 * dumpTSTemplate
15889 * write out a single text search template
15890 */
15891static void
/*
 * NOTE(review): extracted listing; signature line 15892, delq declaration/
 * creation (15896, 15904/15905), qualified-name arguments (15909, 15918),
 * binary_upgrade call head 15921, and the destroys at 15940-15941 were
 * lost.  Comments only added.
 */
15893{
15894 DumpOptions *dopt = fout->dopt;
15895 PQExpBuffer q;
15897 char *qtmplname;
15898
15899 /* Do nothing if not dumping schema */
15900 if (!dopt->dumpSchema)
15901 return;
15902
15903 q = createPQExpBuffer();
15905
15906 qtmplname = pg_strdup(fmtId(tmplinfo->dobj.name));
15907
15908 appendPQExpBuffer(q, "CREATE TEXT SEARCH TEMPLATE %s (\n",
15910
/* INIT is optional; InvalidOid means the template has no init function. */
15911 if (tmplinfo->tmplinit != InvalidOid)
15912 appendPQExpBuffer(q, " INIT = %s,\n",
15913 convertTSFunction(fout, tmplinfo->tmplinit));
15914 appendPQExpBuffer(q, " LEXIZE = %s );\n",
15915 convertTSFunction(fout, tmplinfo->tmpllexize));
15916
15917 appendPQExpBuffer(delq, "DROP TEXT SEARCH TEMPLATE %s;\n",
15919
15920 if (dopt->binary_upgrade)
15922 "TEXT SEARCH TEMPLATE", qtmplname,
15923 tmplinfo->dobj.namespace->dobj.name);
15924
15925 if (tmplinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
15926 ArchiveEntry(fout, tmplinfo->dobj.catId, tmplinfo->dobj.dumpId,
15927 ARCHIVE_OPTS(.tag = tmplinfo->dobj.name,
15928 .namespace = tmplinfo->dobj.namespace->dobj.name,
15929 .description = "TEXT SEARCH TEMPLATE",
15930 .section = SECTION_PRE_DATA,
15931 .createStmt = q->data,
15932 .dropStmt = delq->data));
15933
15934 /* Dump Template Comments */
15935 if (tmplinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
15936 dumpComment(fout, "TEXT SEARCH TEMPLATE", qtmplname,
15937 tmplinfo->dobj.namespace->dobj.name, "",
15938 tmplinfo->dobj.catId, 0, tmplinfo->dobj.dumpId);
15939
15942 free(qtmplname);
15943}
15944
15945/*
15946 * dumpTSConfig
15947 * write out a single text search configuration
15948 */
15949static void
/*
 * NOTE(review): extracted listing; signature line 15950, delq declaration/
 * creation, qualified-name argument lines (15985, 16021, 16036), the
 * binary_upgrade call head 16039, and the destroys at 16059-16060 were
 * lost.  Comments only added; verify against upstream pg_dump.c.
 */
15951{
15952 DumpOptions *dopt = fout->dopt;
15953 PQExpBuffer q;
15955 PQExpBuffer query;
15956 char *qcfgname;
15957 PGresult *res;
15958 char *nspname;
15959 char *prsname;
15960 int ntups,
15961 i;
15962 int i_tokenname;
15963 int i_dictname;
15964
15965 /* Do nothing if not dumping schema */
15966 if (!dopt->dumpSchema)
15967 return;
15968
15969 q = createPQExpBuffer();
15971 query = createPQExpBuffer();
15972
15973 qcfgname = pg_strdup(fmtId(cfginfo->dobj.name));
15974
15975 /* Fetch name and namespace of the config's parser */
15976 appendPQExpBuffer(query, "SELECT nspname, prsname "
15977 "FROM pg_ts_parser p, pg_namespace n "
15978 "WHERE p.oid = '%u' AND n.oid = prsnamespace",
15979 cfginfo->cfgparser);
15980 res = ExecuteSqlQueryForSingleRow(fout, query->data);
15981 nspname = PQgetvalue(res, 0, 0);
15982 prsname = PQgetvalue(res, 0, 1);
15983
15984 appendPQExpBuffer(q, "CREATE TEXT SEARCH CONFIGURATION %s (\n",
15986
/* Schema-qualify the parser name so the dump is search_path-safe. */
15987 appendPQExpBuffer(q, " PARSER = %s.", fmtId(nspname));
15988 appendPQExpBuffer(q, "%s );\n", fmtId(prsname));
15989
15990 PQclear(res);
15991
/*
 * Fetch the token-type -> dictionary mappings; mapseqno orders the
 * dictionaries within one token type, and the rows are grouped by token
 * type so the loop below can emit one ALTER ... ADD MAPPING per group.
 */
15992 resetPQExpBuffer(query);
15993 appendPQExpBuffer(query,
15994 "SELECT\n"
15995 " ( SELECT alias FROM pg_catalog.ts_token_type('%u'::pg_catalog.oid) AS t\n"
15996 " WHERE t.tokid = m.maptokentype ) AS tokenname,\n"
15997 " m.mapdict::pg_catalog.regdictionary AS dictname\n"
15998 "FROM pg_catalog.pg_ts_config_map AS m\n"
15999 "WHERE m.mapcfg = '%u'\n"
16000 "ORDER BY m.mapcfg, m.maptokentype, m.mapseqno",
16001 cfginfo->cfgparser, cfginfo->dobj.catId.oid);
16002
16003 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
16004 ntups = PQntuples(res);
16005
16006 i_tokenname = PQfnumber(res, "tokenname");
16007 i_dictname = PQfnumber(res, "dictname");
16008
16009 for (i = 0; i < ntups; i++)
16010 {
16011 char *tokenname = PQgetvalue(res, i, i_tokenname);
16012 char *dictname = PQgetvalue(res, i, i_dictname);
16013
16014 if (i == 0 ||
16015 strcmp(tokenname, PQgetvalue(res, i - 1, i_tokenname)) != 0)
16016 {
16017 /* starting a new token type, so start a new command */
16018 if (i > 0)
16019 appendPQExpBufferStr(q, ";\n");
16020 appendPQExpBuffer(q, "\nALTER TEXT SEARCH CONFIGURATION %s\n",
16022 /* tokenname needs quoting, dictname does NOT */
16023 appendPQExpBuffer(q, " ADD MAPPING FOR %s WITH %s",
16024 fmtId(tokenname), dictname);
16025 }
16026 else
16027 appendPQExpBuffer(q, ", %s", dictname);
16028 }
16029
/* Terminate the last ALTER command, if any mappings were emitted. */
16030 if (ntups > 0)
16031 appendPQExpBufferStr(q, ";\n");
16032
16033 PQclear(res);
16034
16035 appendPQExpBuffer(delq, "DROP TEXT SEARCH CONFIGURATION %s;\n",
16037
16038 if (dopt->binary_upgrade)
16040 "TEXT SEARCH CONFIGURATION", qcfgname,
16041 cfginfo->dobj.namespace->dobj.name);
16042
16043 if (cfginfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
16044 ArchiveEntry(fout, cfginfo->dobj.catId, cfginfo->dobj.dumpId,
16045 ARCHIVE_OPTS(.tag = cfginfo->dobj.name,
16046 .namespace = cfginfo->dobj.namespace->dobj.name,
16047 .owner = cfginfo->rolname,
16048 .description = "TEXT SEARCH CONFIGURATION",
16049 .section = SECTION_PRE_DATA,
16050 .createStmt = q->data,
16051 .dropStmt = delq->data));
16052
16053 /* Dump Configuration Comments */
16054 if (cfginfo->dobj.dump & DUMP_COMPONENT_COMMENT)
16055 dumpComment(fout, "TEXT SEARCH CONFIGURATION", qcfgname,
16056 cfginfo->dobj.namespace->dobj.name, cfginfo->rolname,
16057 cfginfo->dobj.catId, 0, cfginfo->dobj.dumpId);
16058
16061 destroyPQExpBuffer(query);
16062 free(qcfgname);
16063}
16064
16065/*
16066 * dumpForeignDataWrapper
16067 * write out a single foreign-data wrapper definition
16068 */
16069static void
/*
 * NOTE(review): extracted listing; signature line 16070, the delq
 * declaration/creation (16074, 16082), the binary_upgrade call head 16104,
 * and the closing destroys (16131-16132) were lost.  Comments only added.
 */
16071{
16072 DumpOptions *dopt = fout->dopt;
16073 PQExpBuffer q;
16075 char *qfdwname;
16076
16077 /* Do nothing if not dumping schema */
16078 if (!dopt->dumpSchema)
16079 return;
16080
16081 q = createPQExpBuffer();
16083
16084 qfdwname = pg_strdup(fmtId(fdwinfo->dobj.name));
16085
16086 appendPQExpBuffer(q, "CREATE FOREIGN DATA WRAPPER %s",
16087 qfdwname);
16088
/* "-" is regproc's spelling of InvalidOid: no handler/validator set. */
16089 if (strcmp(fdwinfo->fdwhandler, "-") != 0)
16090 appendPQExpBuffer(q, " HANDLER %s", fdwinfo->fdwhandler);
16091
16092 if (strcmp(fdwinfo->fdwvalidator, "-") != 0)
16093 appendPQExpBuffer(q, " VALIDATOR %s", fdwinfo->fdwvalidator);
16094
/* fdwoptions is assumed pre-formatted/escaped by the collection phase. */
16095 if (strlen(fdwinfo->fdwoptions) > 0)
16096 appendPQExpBuffer(q, " OPTIONS (\n %s\n)", fdwinfo->fdwoptions);
16097
16098 appendPQExpBufferStr(q, ";\n");
16099
16100 appendPQExpBuffer(delq, "DROP FOREIGN DATA WRAPPER %s;\n",
16101 qfdwname);
16102
16103 if (dopt->binary_upgrade)
16105 "FOREIGN DATA WRAPPER", qfdwname,
16106 NULL);
16107
16108 if (fdwinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
16109 ArchiveEntry(fout, fdwinfo->dobj.catId, fdwinfo->dobj.dumpId,
16110 ARCHIVE_OPTS(.tag = fdwinfo->dobj.name,
16111 .owner = fdwinfo->rolname,
16112 .description = "FOREIGN DATA WRAPPER",
16113 .section = SECTION_PRE_DATA,
16114 .createStmt = q->data,
16115 .dropStmt = delq->data));
16116
16117 /* Dump Foreign Data Wrapper Comments */
16118 if (fdwinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
16119 dumpComment(fout, "FOREIGN DATA WRAPPER", qfdwname,
16120 NULL, fdwinfo->rolname,
16121 fdwinfo->dobj.catId, 0, fdwinfo->dobj.dumpId);
16122
16123 /* Handle the ACL */
16124 if (fdwinfo->dobj.dump & DUMP_COMPONENT_ACL)
16125 dumpACL(fout, fdwinfo->dobj.dumpId, InvalidDumpId,
16126 "FOREIGN DATA WRAPPER", qfdwname, NULL, NULL,
16127 NULL, fdwinfo->rolname, &fdwinfo->dacl);
16128
16129 free(qfdwname);
16130
16133}
16134
16135/*
16136 * dumpForeignServer
16137 * write out a foreign server definition
16138 */
16139static void
/*
 * NOTE(review): extracted listing; signature line 16140, the delq
 * declaration/creation (16144, 16155), the binary_upgrade call head 16192,
 * the dumpUserMappings call head 16218, and destroys 16227-16228 were
 * lost.  Comments only added.
 */
16141{
16142 DumpOptions *dopt = fout->dopt;
16143 PQExpBuffer q;
16145 PQExpBuffer query;
16146 PGresult *res;
16147 char *qsrvname;
16148 char *fdwname;
16149
16150 /* Do nothing if not dumping schema */
16151 if (!dopt->dumpSchema)
16152 return;
16153
16154 q = createPQExpBuffer();
16156 query = createPQExpBuffer();
16157
16158 qsrvname = pg_strdup(fmtId(srvinfo->dobj.name));
16159
16160 /* look up the foreign-data wrapper */
16161 appendPQExpBuffer(query, "SELECT fdwname "
16162 "FROM pg_foreign_data_wrapper w "
16163 "WHERE w.oid = '%u'",
16164 srvinfo->srvfdw);
16165 res = ExecuteSqlQueryForSingleRow(fout, query->data);
16166 fdwname = PQgetvalue(res, 0, 0);
16167
16168 appendPQExpBuffer(q, "CREATE SERVER %s", qsrvname);
/* TYPE and VERSION are optional string properties; emit only if non-empty. */
16169 if (srvinfo->srvtype && strlen(srvinfo->srvtype) > 0)
16170 {
16171 appendPQExpBufferStr(q, " TYPE ");
16172 appendStringLiteralAH(q, srvinfo->srvtype, fout);
16173 }
16174 if (srvinfo->srvversion && strlen(srvinfo->srvversion) > 0)
16175 {
16176 appendPQExpBufferStr(q, " VERSION ");
16177 appendStringLiteralAH(q, srvinfo->srvversion, fout);
16178 }
16179
16180 appendPQExpBufferStr(q, " FOREIGN DATA WRAPPER ");
16181 appendPQExpBufferStr(q, fmtId(fdwname));
16182
16183 if (srvinfo->srvoptions && strlen(srvinfo->srvoptions) > 0)
16184 appendPQExpBuffer(q, " OPTIONS (\n %s\n)", srvinfo->srvoptions);
16185
16186 appendPQExpBufferStr(q, ";\n");
16187
16188 appendPQExpBuffer(delq, "DROP SERVER %s;\n",
16189 qsrvname);
16190
16191 if (dopt->binary_upgrade)
16193 "SERVER", qsrvname, NULL);
16194
16195 if (srvinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
16196 ArchiveEntry(fout, srvinfo->dobj.catId, srvinfo->dobj.dumpId,
16197 ARCHIVE_OPTS(.tag = srvinfo->dobj.name,
16198 .owner = srvinfo->rolname,
16199 .description = "SERVER",
16200 .section = SECTION_PRE_DATA,
16201 .createStmt = q->data,
16202 .dropStmt = delq->data));
16203
16204 /* Dump Foreign Server Comments */
16205 if (srvinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
16206 dumpComment(fout, "SERVER", qsrvname,
16207 NULL, srvinfo->rolname,
16208 srvinfo->dobj.catId, 0, srvinfo->dobj.dumpId);
16209
16210 /* Handle the ACL */
16211 if (srvinfo->dobj.dump & DUMP_COMPONENT_ACL)
16212 dumpACL(fout, srvinfo->dobj.dumpId, InvalidDumpId,
16213 "FOREIGN SERVER", qsrvname, NULL, NULL,
16214 NULL, srvinfo->rolname, &srvinfo->dacl);
16215
16216 /* Dump user mappings */
16217 if (srvinfo->dobj.dump & DUMP_COMPONENT_USERMAP)
16219 srvinfo->dobj.name, NULL,
16220 srvinfo->rolname,
16221 srvinfo->dobj.catId, srvinfo->dobj.dumpId);
16222
16223 PQclear(res);
16224
16225 free(qsrvname);
16226
16229 destroyPQExpBuffer(query);
16230}
16231
16232/*
16233 * dumpUserMappings
16234 *
16235 * This routine is used to dump any user mappings associated with the
16236 * server handed to this routine. Should be called after ArchiveEntry()
16237 * for the server.
16238 */
16239static void
/*
 * NOTE(review): extracted listing; the line carrying the function name
 * (16240), the delq declaration/creation (16246, 16257), the umoptions
 * fetch (16293-16295), the per-row buffer resets (16304), the ArchiveEntry
 * head (16312), and two destroys (16325, 16327) were lost.  Comments only
 * added; verify against upstream pg_dump.c.
 */
16241 const char *servername, const char *namespace,
16242 const char *owner,
16243 CatalogId catalogId, DumpId dumpId)
16244{
16245 PQExpBuffer q;
16247 PQExpBuffer query;
16248 PQExpBuffer tag;
16249 PGresult *res;
16250 int ntups;
16251 int i_usename;
16252 int i_umoptions;
16253 int i;
16254
16255 q = createPQExpBuffer();
16256 tag = createPQExpBuffer();
16258 query = createPQExpBuffer();
16259
16260 /*
16261 * We read from the publicly accessible view pg_user_mappings, so as not
16262 * to fail if run by a non-superuser. Note that the view will show
16263 * umoptions as null if the user hasn't got privileges for the associated
16264 * server; this means that pg_dump will dump such a mapping, but with no
16265 * OPTIONS clause. A possible alternative is to skip such mappings
16266 * altogether, but it's not clear that that's an improvement.
16267 */
16268 appendPQExpBuffer(query,
16269 "SELECT usename, "
16270 "array_to_string(ARRAY("
16271 "SELECT quote_ident(option_name) || ' ' || "
16272 "quote_literal(option_value) "
16273 "FROM pg_options_to_table(umoptions) "
16274 "ORDER BY option_name"
16275 "), E',\n ') AS umoptions "
16276 "FROM pg_user_mappings "
16277 "WHERE srvid = '%u' "
16278 "ORDER BY usename",
16279 catalogId.oid);
16280
16281 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
16282
16283 ntups = PQntuples(res);
16284 i_usename = PQfnumber(res, "usename");
16285 i_umoptions = PQfnumber(res, "umoptions");
16286
/* Emit one CREATE USER MAPPING (and matching DROP) per result row. */
16287 for (i = 0; i < ntups; i++)
16288 {
16289 char *usename;
16290 char *umoptions;
16291
16292 usename = PQgetvalue(res, i, i_usename);
16294
16296 appendPQExpBuffer(q, "CREATE USER MAPPING FOR %s", fmtId(usename));
16297 appendPQExpBuffer(q, " SERVER %s", fmtId(servername));
16298
16299 if (umoptions && strlen(umoptions) > 0)
16300 appendPQExpBuffer(q, " OPTIONS (\n %s\n)", umoptions);
16301
16302 appendPQExpBufferStr(q, ";\n");
16303
16305 appendPQExpBuffer(delq, "DROP USER MAPPING FOR %s", fmtId(usename));
16306 appendPQExpBuffer(delq, " SERVER %s;\n", fmtId(servername));
16307
16308 resetPQExpBuffer(tag);
16309 appendPQExpBuffer(tag, "USER MAPPING %s SERVER %s",
16310 usename, servername);
16311
16313 ARCHIVE_OPTS(.tag = tag->data,
16314 .namespace = namespace,
16315 .owner = owner,
16316 .description = "USER MAPPING",
16317 .section = SECTION_PRE_DATA,
16318 .createStmt = q->data,
16319 .dropStmt = delq->data));
16320 }
16321
16322 PQclear(res);
16323
16324 destroyPQExpBuffer(query);
16326 destroyPQExpBuffer(tag);
16328}
16329
16330/*
16331 * Write out default privileges information
16332 */
16333static void
/*
 * NOTE(review): extracted listing; the signature line 16334, the case
 * labels for SCHEMAS (16362) and LARGE OBJECTS (16365), the
 * buildDefaultACLCommands call head (16378), a middle argument line
 * (16384), and the final destroy (16400) were lost.  Comments only added.
 */
16335{
16336 DumpOptions *dopt = fout->dopt;
16337 PQExpBuffer q;
16338 PQExpBuffer tag;
16339 const char *type;
16340
16341 /* Do nothing if not dumping schema, or if we're skipping ACLs */
16342 if (!dopt->dumpSchema || dopt->aclsSkip)
16343 return;
16344
16345 q = createPQExpBuffer();
16346 tag = createPQExpBuffer();
16347
/* Map the catalog object-type code onto the keyword used in the command. */
16348 switch (daclinfo->defaclobjtype)
16349 {
16350 case DEFACLOBJ_RELATION:
16351 type = "TABLES";
16352 break;
16353 case DEFACLOBJ_SEQUENCE:
16354 type = "SEQUENCES";
16355 break;
16356 case DEFACLOBJ_FUNCTION:
16357 type = "FUNCTIONS";
16358 break;
16359 case DEFACLOBJ_TYPE:
16360 type = "TYPES";
16361 break;
16363 type = "SCHEMAS";
16364 break;
16366 type = "LARGE OBJECTS";
16367 break;
16368 default:
16369 /* shouldn't get here */
16370 pg_fatal("unrecognized object type in default privileges: %d",
16371 (int) daclinfo->defaclobjtype);
16372 type = ""; /* keep compiler quiet */
16373 }
16374
16375 appendPQExpBuffer(tag, "DEFAULT PRIVILEGES FOR %s", type);
16376
16377 /* build the actual command(s) for this tuple */
16379 daclinfo->dobj.namespace != NULL ?
16380 daclinfo->dobj.namespace->dobj.name : NULL,
16381 daclinfo->dacl.acl,
16382 daclinfo->dacl.acldefault,
16383 daclinfo->defaclrole,
16385 q))
16386 pg_fatal("could not parse default ACL list (%s)",
16387 daclinfo->dacl.acl)
16388
/* Default ACLs restore after data, since they affect later-created objects. */
16389 if (daclinfo->dobj.dump & DUMP_COMPONENT_ACL)
16390 ArchiveEntry(fout, daclinfo->dobj.catId, daclinfo->dobj.dumpId,
16391 ARCHIVE_OPTS(.tag = tag->data,
16392 .namespace = daclinfo->dobj.namespace ?
16393 daclinfo->dobj.namespace->dobj.name : NULL,
16394 .owner = daclinfo->defaclrole,
16395 .description = "DEFAULT ACL",
16396 .section = SECTION_POST_DATA,
16397 .createStmt = q->data));
16398
16399 destroyPQExpBuffer(tag);
16401}
16402
16403/*----------
16404 * Write out grant/revoke information
16405 *
16406 * 'objDumpId' is the dump ID of the underlying object.
16407 * 'altDumpId' can be a second dumpId that the ACL entry must also depend on,
16408 * or InvalidDumpId if there is no need for a second dependency.
16409 * 'type' must be one of
16410 * TABLE, SEQUENCE, FUNCTION, LANGUAGE, SCHEMA, DATABASE, TABLESPACE,
16411 * FOREIGN DATA WRAPPER, SERVER, or LARGE OBJECT.
16412 * 'name' is the formatted name of the object. Must be quoted etc. already.
16413 * 'subname' is the formatted name of the sub-object, if any. Must be quoted.
16414 * (Currently we assume that subname is only provided for table columns.)
16415 * 'nspname' is the namespace the object is in (NULL if none).
16416 * 'tag' is the tag to use for the ACL TOC entry; typically, this is NULL
16417 * to use the default for the object type.
16418 * 'owner' is the owner, NULL if there is no owner (for languages).
16419 * 'dacl' is the DumpableAcl struct for the object.
16420 *
16421 * Returns the dump ID assigned to the ACL TocEntry, or InvalidDumpId if
16422 * no ACL entry was created.
16423 *----------
16424 */
16425static DumpId
16427 const char *type, const char *name, const char *subname,
16428 const char *nspname, const char *tag, const char *owner,
16429 const DumpableAcl *dacl)
16430{
16432 DumpOptions *dopt = fout->dopt;
16433 const char *acls = dacl->acl;
16434 const char *acldefault = dacl->acldefault;
16435 char privtype = dacl->privtype;
16436 const char *initprivs = dacl->initprivs;
16437 const char *baseacls;
16438 PQExpBuffer sql;
16439
16440 /* Do nothing if ACL dump is not enabled */
16441 if (dopt->aclsSkip)
16442 return InvalidDumpId;
16443
16444 /* --data-only skips ACLs *except* large object ACLs */
16445 if (!dopt->dumpSchema && strcmp(type, "LARGE OBJECT") != 0)
16446 return InvalidDumpId;
16447
16448 sql = createPQExpBuffer();
16449
16450 /*
16451 * In binary upgrade mode, we don't run an extension's script but instead
16452 * dump out the objects independently and then recreate them. To preserve
16453 * any initial privileges which were set on extension objects, we need to
16454 * compute the set of GRANT and REVOKE commands necessary to get from the
16455 * default privileges of an object to its initial privileges as recorded
16456 * in pg_init_privs.
16457 *
16458 * At restore time, we apply these commands after having called
16459 * binary_upgrade_set_record_init_privs(true). That tells the backend to
16460 * copy the results into pg_init_privs. This is how we preserve the
16461 * contents of that catalog across binary upgrades.
16462 */
16463 if (dopt->binary_upgrade && privtype == 'e' &&
16464 initprivs && *initprivs != '\0')
16465 {
16466 appendPQExpBufferStr(sql, "SELECT pg_catalog.binary_upgrade_set_record_init_privs(true);\n");
16467 if (!buildACLCommands(name, subname, nspname, type,
16468 initprivs, acldefault, owner,
16469 "", fout->remoteVersion, sql))
16470 pg_fatal("could not parse initial ACL list (%s) or default (%s) for object \"%s\" (%s)",
16471 initprivs, acldefault, name, type);
16472 appendPQExpBufferStr(sql, "SELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\n");
16473 }
16474
16475 /*
16476 * Now figure the GRANT and REVOKE commands needed to get to the object's
16477 * actual current ACL, starting from the initprivs if given, else from the
16478 * object-type-specific default. Also, while buildACLCommands will assume
16479 * that a NULL/empty acls string means it needn't do anything, what that
16480 * actually represents is the object-type-specific default; so we need to
16481 * substitute the acldefault string to get the right results in that case.
16482 */
16483 if (initprivs && *initprivs != '\0')
16484 {
16485 baseacls = initprivs;
16486 if (acls == NULL || *acls == '\0')
16487 acls = acldefault;
16488 }
16489 else
16491
16492 if (!buildACLCommands(name, subname, nspname, type,
16493 acls, baseacls, owner,
16494 "", fout->remoteVersion, sql))
16495 pg_fatal("could not parse ACL list (%s) or default (%s) for object \"%s\" (%s)",
16496 acls, baseacls, name, type);
16497
16498 if (sql->len > 0)
16499 {
16501 DumpId aclDeps[2];
16502 int nDeps = 0;
16503
16504 if (tag)
16506 else if (subname)
16507 appendPQExpBuffer(tagbuf, "COLUMN %s.%s", name, subname);
16508 else
16509 appendPQExpBuffer(tagbuf, "%s %s", type, name);
16510
16511 aclDeps[nDeps++] = objDumpId;
16512 if (altDumpId != InvalidDumpId)
16513 aclDeps[nDeps++] = altDumpId;
16514
16516
16518 ARCHIVE_OPTS(.tag = tagbuf->data,
16519 .namespace = nspname,
16520 .owner = owner,
16521 .description = "ACL",
16522 .section = SECTION_NONE,
16523 .createStmt = sql->data,
16524 .deps = aclDeps,
16525 .nDeps = nDeps));
16526
16528 }
16529
16530 destroyPQExpBuffer(sql);
16531
16532 return aclDumpId;
16533}
16534
16535/*
16536 * dumpSecLabel
16537 *
16538 * This routine is used to dump any security labels associated with the
16539 * object handed to this routine. The routine takes the object type
16540 * and object name (ready to print, except for schema decoration), plus
16541 * the namespace and owner of the object (for labeling the ArchiveEntry),
16542 * plus catalog ID and subid which are the lookup key for pg_seclabel,
16543 * plus the dump ID for the object (for setting a dependency).
16544 * If a matching pg_seclabel entry is found, it is dumped.
16545 *
16546 * Note: although this routine takes a dumpId for dependency purposes,
16547 * that purpose is just to mark the dependency in the emitted dump file
16548 * for possible future use by pg_restore. We do NOT use it for determining
16549 * ordering of the label in the dump file, because this routine is called
16550 * after dependency sorting occurs. This routine should be called just after
16551 * calling ArchiveEntry() for the specified object.
16552 */
16553static void
16554dumpSecLabel(Archive *fout, const char *type, const char *name,
16555 const char *namespace, const char *owner,
16556 CatalogId catalogId, int subid, DumpId dumpId)
16557{
16558 DumpOptions *dopt = fout->dopt;
16560 int nlabels;
16561 int i;
16562 PQExpBuffer query;
16563
16564 /* do nothing, if --no-security-labels is supplied */
16565 if (dopt->no_security_labels)
16566 return;
16567
16568 /*
16569 * Security labels are schema not data ... except large object labels are
16570 * data
16571 */
16572 if (strcmp(type, "LARGE OBJECT") != 0)
16573 {
16574 if (!dopt->dumpSchema)
16575 return;
16576 }
16577 else
16578 {
16579 /* We do dump large object security labels in binary-upgrade mode */
16580 if (!dopt->dumpData && !dopt->binary_upgrade)
16581 return;
16582 }
16583
16584 /* Search for security labels associated with catalogId, using table */
16585 nlabels = findSecLabels(catalogId.tableoid, catalogId.oid, &labels);
16586
16587 query = createPQExpBuffer();
16588
16589 for (i = 0; i < nlabels; i++)
16590 {
16591 /*
16592 * Ignore label entries for which the subid doesn't match.
16593 */
16594 if (labels[i].objsubid != subid)
16595 continue;
16596
16597 appendPQExpBuffer(query,
16598 "SECURITY LABEL FOR %s ON %s ",
16600 if (namespace && *namespace)
16601 appendPQExpBuffer(query, "%s.", fmtId(namespace));
16602 appendPQExpBuffer(query, "%s IS ", name);
16604 appendPQExpBufferStr(query, ";\n");
16605 }
16606
16607 if (query->len > 0)
16608 {
16610
16611 appendPQExpBuffer(tag, "%s %s", type, name);
16613 ARCHIVE_OPTS(.tag = tag->data,
16614 .namespace = namespace,
16615 .owner = owner,
16616 .description = "SECURITY LABEL",
16617 .section = SECTION_NONE,
16618 .createStmt = query->data,
16619 .deps = &dumpId,
16620 .nDeps = 1));
16621 destroyPQExpBuffer(tag);
16622 }
16623
16624 destroyPQExpBuffer(query);
16625}
16626
16627/*
16628 * dumpTableSecLabel
16629 *
16630 * As above, but dump security label for both the specified table (or view)
16631 * and its columns.
16632 */
16633static void
16635{
16636 DumpOptions *dopt = fout->dopt;
16638 int nlabels;
16639 int i;
16640 PQExpBuffer query;
16641 PQExpBuffer target;
16642
16643 /* do nothing, if --no-security-labels is supplied */
16644 if (dopt->no_security_labels)
16645 return;
16646
16647 /* SecLabel are SCHEMA not data */
16648 if (!dopt->dumpSchema)
16649 return;
16650
16651 /* Search for comments associated with relation, using table */
16652 nlabels = findSecLabels(tbinfo->dobj.catId.tableoid,
16653 tbinfo->dobj.catId.oid,
16654 &labels);
16655
16656 /* If security labels exist, build SECURITY LABEL statements */
16657 if (nlabels <= 0)
16658 return;
16659
16660 query = createPQExpBuffer();
16661 target = createPQExpBuffer();
16662
16663 for (i = 0; i < nlabels; i++)
16664 {
16665 const char *colname;
16666 const char *provider = labels[i].provider;
16667 const char *label = labels[i].label;
16668 int objsubid = labels[i].objsubid;
16669
16670 resetPQExpBuffer(target);
16671 if (objsubid == 0)
16672 {
16673 appendPQExpBuffer(target, "%s %s", reltypename,
16675 }
16676 else
16677 {
16678 colname = getAttrName(objsubid, tbinfo);
16679 /* first fmtXXX result must be consumed before calling again */
16680 appendPQExpBuffer(target, "COLUMN %s",
16682 appendPQExpBuffer(target, ".%s", fmtId(colname));
16683 }
16684 appendPQExpBuffer(query, "SECURITY LABEL FOR %s ON %s IS ",
16685 fmtId(provider), target->data);
16687 appendPQExpBufferStr(query, ";\n");
16688 }
16689 if (query->len > 0)
16690 {
16691 resetPQExpBuffer(target);
16692 appendPQExpBuffer(target, "%s %s", reltypename,
16693 fmtId(tbinfo->dobj.name));
16695 ARCHIVE_OPTS(.tag = target->data,
16696 .namespace = tbinfo->dobj.namespace->dobj.name,
16697 .owner = tbinfo->rolname,
16698 .description = "SECURITY LABEL",
16699 .section = SECTION_NONE,
16700 .createStmt = query->data,
16701 .deps = &(tbinfo->dobj.dumpId),
16702 .nDeps = 1));
16703 }
16704 destroyPQExpBuffer(query);
16705 destroyPQExpBuffer(target);
16706}
16707
16708/*
16709 * findSecLabels
16710 *
16711 * Find the security label(s), if any, associated with the given object.
16712 * All the objsubid values associated with the given classoid/objoid are
16713 * found with one search.
16714 */
16715static int
16717{
16719 SecLabelItem *low;
16720 SecLabelItem *high;
16721 int nmatch;
16722
16723 if (nseclabels <= 0) /* no labels, so no match is possible */
16724 {
16725 *items = NULL;
16726 return 0;
16727 }
16728
16729 /*
16730 * Do binary search to find some item matching the object.
16731 */
16732 low = &seclabels[0];
16733 high = &seclabels[nseclabels - 1];
16734 while (low <= high)
16735 {
16736 middle = low + (high - low) / 2;
16737
16738 if (classoid < middle->classoid)
16739 high = middle - 1;
16740 else if (classoid > middle->classoid)
16741 low = middle + 1;
16742 else if (objoid < middle->objoid)
16743 high = middle - 1;
16744 else if (objoid > middle->objoid)
16745 low = middle + 1;
16746 else
16747 break; /* found a match */
16748 }
16749
16750 if (low > high) /* no matches */
16751 {
16752 *items = NULL;
16753 return 0;
16754 }
16755
16756 /*
16757 * Now determine how many items match the object. The search loop
16758 * invariant still holds: only items between low and high inclusive could
16759 * match.
16760 */
16761 nmatch = 1;
16762 while (middle > low)
16763 {
16764 if (classoid != middle[-1].classoid ||
16765 objoid != middle[-1].objoid)
16766 break;
16767 middle--;
16768 nmatch++;
16769 }
16770
16771 *items = middle;
16772
16773 middle += nmatch;
16774 while (middle <= high)
16775 {
16776 if (classoid != middle->classoid ||
16777 objoid != middle->objoid)
16778 break;
16779 middle++;
16780 nmatch++;
16781 }
16782
16783 return nmatch;
16784}
16785
16786/*
16787 * collectSecLabels
16788 *
16789 * Construct a table of all security labels available for database objects;
16790 * also set the has-seclabel component flag for each relevant object.
16791 *
16792 * The table is sorted by classoid/objid/objsubid for speed in lookup.
16793 */
16794static void
16796{
16797 PGresult *res;
16798 PQExpBuffer query;
16799 int i_label;
16800 int i_provider;
16801 int i_classoid;
16802 int i_objoid;
16803 int i_objsubid;
16804 int ntups;
16805 int i;
16806 DumpableObject *dobj;
16807
16808 query = createPQExpBuffer();
16809
16811 "SELECT label, provider, classoid, objoid, objsubid "
16812 "FROM pg_catalog.pg_seclabels "
16813 "ORDER BY classoid, objoid, objsubid");
16814
16815 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
16816
16817 /* Construct lookup table containing OIDs in numeric form */
16818 i_label = PQfnumber(res, "label");
16819 i_provider = PQfnumber(res, "provider");
16820 i_classoid = PQfnumber(res, "classoid");
16821 i_objoid = PQfnumber(res, "objoid");
16822 i_objsubid = PQfnumber(res, "objsubid");
16823
16824 ntups = PQntuples(res);
16825
16827 nseclabels = 0;
16828 dobj = NULL;
16829
16830 for (i = 0; i < ntups; i++)
16831 {
16832 CatalogId objId;
16833 int subid;
16834
16835 objId.tableoid = atooid(PQgetvalue(res, i, i_classoid));
16836 objId.oid = atooid(PQgetvalue(res, i, i_objoid));
16837 subid = atoi(PQgetvalue(res, i, i_objsubid));
16838
16839 /* We needn't remember labels that don't match any dumpable object */
16840 if (dobj == NULL ||
16841 dobj->catId.tableoid != objId.tableoid ||
16842 dobj->catId.oid != objId.oid)
16843 dobj = findObjectByCatalogId(objId);
16844 if (dobj == NULL)
16845 continue;
16846
16847 /*
16848 * Labels on columns of composite types are linked to the type's
16849 * pg_class entry, but we need to set the DUMP_COMPONENT_SECLABEL flag
16850 * in the type's own DumpableObject.
16851 */
16852 if (subid != 0 && dobj->objType == DO_TABLE &&
16853 ((TableInfo *) dobj)->relkind == RELKIND_COMPOSITE_TYPE)
16854 {
16856
16857 cTypeInfo = findTypeByOid(((TableInfo *) dobj)->reltype);
16858 if (cTypeInfo)
16859 cTypeInfo->dobj.components |= DUMP_COMPONENT_SECLABEL;
16860 }
16861 else
16862 dobj->components |= DUMP_COMPONENT_SECLABEL;
16863
16867 seclabels[nseclabels].objoid = objId.oid;
16868 seclabels[nseclabels].objsubid = subid;
16869 nseclabels++;
16870 }
16871
16872 PQclear(res);
16873 destroyPQExpBuffer(query);
16874}
16875
16876/*
16877 * dumpTable
16878 * write out to fout the declarations (not data) of a user-defined table
16879 */
16880static void
16882{
16883 DumpOptions *dopt = fout->dopt;
16885 char *namecopy;
16886
16887 /* Do nothing if not dumping schema */
16888 if (!dopt->dumpSchema)
16889 return;
16890
16891 if (tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
16892 {
16893 if (tbinfo->relkind == RELKIND_SEQUENCE)
16895 else
16897 }
16898
16899 /* Handle the ACL here */
16900 namecopy = pg_strdup(fmtId(tbinfo->dobj.name));
16901 if (tbinfo->dobj.dump & DUMP_COMPONENT_ACL)
16902 {
16903 const char *objtype =
16904 (tbinfo->relkind == RELKIND_SEQUENCE) ? "SEQUENCE" : "TABLE";
16905
16907 dumpACL(fout, tbinfo->dobj.dumpId, InvalidDumpId,
16908 objtype, namecopy, NULL,
16909 tbinfo->dobj.namespace->dobj.name,
16910 NULL, tbinfo->rolname, &tbinfo->dacl);
16911 }
16912
16913 /*
16914 * Handle column ACLs, if any. Note: we pull these with a separate query
16915 * rather than trying to fetch them during getTableAttrs, so that we won't
16916 * miss ACLs on system columns. Doing it this way also allows us to dump
16917 * ACLs for catalogs that we didn't mark "interesting" back in getTables.
16918 */
16919 if ((tbinfo->dobj.dump & DUMP_COMPONENT_ACL) && tbinfo->hascolumnACLs)
16920 {
16922 PGresult *res;
16923 int i;
16924
16926 {
16927 /* Set up query for column ACLs */
16929 "PREPARE getColumnACLs(pg_catalog.oid) AS\n");
16930
16931 if (fout->remoteVersion >= 90600)
16932 {
16933 /*
16934 * In principle we should call acldefault('c', relowner) to
16935 * get the default ACL for a column. However, we don't
16936 * currently store the numeric OID of the relowner in
16937 * TableInfo. We could convert the owner name using regrole,
16938 * but that creates a risk of failure due to concurrent role
16939 * renames. Given that the default ACL for columns is empty
16940 * and is likely to stay that way, it's not worth extra cycles
16941 * and risk to avoid hard-wiring that knowledge here.
16942 */
16944 "SELECT at.attname, "
16945 "at.attacl, "
16946 "'{}' AS acldefault, "
16947 "pip.privtype, pip.initprivs "
16948 "FROM pg_catalog.pg_attribute at "
16949 "LEFT JOIN pg_catalog.pg_init_privs pip ON "
16950 "(at.attrelid = pip.objoid "
16951 "AND pip.classoid = 'pg_catalog.pg_class'::pg_catalog.regclass "
16952 "AND at.attnum = pip.objsubid) "
16953 "WHERE at.attrelid = $1 AND "
16954 "NOT at.attisdropped "
16955 "AND (at.attacl IS NOT NULL OR pip.initprivs IS NOT NULL) "
16956 "ORDER BY at.attnum");
16957 }
16958 else
16959 {
16961 "SELECT attname, attacl, '{}' AS acldefault, "
16962 "NULL AS privtype, NULL AS initprivs "
16963 "FROM pg_catalog.pg_attribute "
16964 "WHERE attrelid = $1 AND NOT attisdropped "
16965 "AND attacl IS NOT NULL "
16966 "ORDER BY attnum");
16967 }
16968
16969 ExecuteSqlStatement(fout, query->data);
16970
16972 }
16973
16974 printfPQExpBuffer(query,
16975 "EXECUTE getColumnACLs('%u')",
16976 tbinfo->dobj.catId.oid);
16977
16978 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
16979
16980 for (i = 0; i < PQntuples(res); i++)
16981 {
16982 char *attname = PQgetvalue(res, i, 0);
16983 char *attacl = PQgetvalue(res, i, 1);
16984 char *acldefault = PQgetvalue(res, i, 2);
16985 char privtype = *(PQgetvalue(res, i, 3));
16986 char *initprivs = PQgetvalue(res, i, 4);
16988 char *attnamecopy;
16989
16990 coldacl.acl = attacl;
16991 coldacl.acldefault = acldefault;
16992 coldacl.privtype = privtype;
16993 coldacl.initprivs = initprivs;
16995
16996 /*
16997 * Column's GRANT type is always TABLE. Each column ACL depends
16998 * on the table-level ACL, since we can restore column ACLs in
16999 * parallel but the table-level ACL has to be done first.
17000 */
17001 dumpACL(fout, tbinfo->dobj.dumpId, tableAclDumpId,
17002 "TABLE", namecopy, attnamecopy,
17003 tbinfo->dobj.namespace->dobj.name,
17004 NULL, tbinfo->rolname, &coldacl);
17006 }
17007 PQclear(res);
17008 destroyPQExpBuffer(query);
17009 }
17010
17011 free(namecopy);
17012}
17013
17014/*
17015 * Create the AS clause for a view or materialized view. The semicolon is
17016 * stripped because a materialized view must add a WITH NO DATA clause.
17017 *
17018 * This returns a new buffer which must be freed by the caller.
17019 */
17020static PQExpBuffer
17022{
17024 PQExpBuffer result = createPQExpBuffer();
17025 PGresult *res;
17026 int len;
17027
17028 /* Fetch the view definition */
17029 appendPQExpBuffer(query,
17030 "SELECT pg_catalog.pg_get_viewdef('%u'::pg_catalog.oid) AS viewdef",
17031 tbinfo->dobj.catId.oid);
17032
17033 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
17034
17035 if (PQntuples(res) != 1)
17036 {
17037 if (PQntuples(res) < 1)
17038 pg_fatal("query to obtain definition of view \"%s\" returned no data",
17039 tbinfo->dobj.name);
17040 else
17041 pg_fatal("query to obtain definition of view \"%s\" returned more than one definition",
17042 tbinfo->dobj.name);
17043 }
17044
17045 len = PQgetlength(res, 0, 0);
17046
17047 if (len == 0)
17048 pg_fatal("definition of view \"%s\" appears to be empty (length zero)",
17049 tbinfo->dobj.name);
17050
17051 /* Strip off the trailing semicolon so that other things may follow. */
17052 Assert(PQgetvalue(res, 0, 0)[len - 1] == ';');
17053 appendBinaryPQExpBuffer(result, PQgetvalue(res, 0, 0), len - 1);
17054
17055 PQclear(res);
17056 destroyPQExpBuffer(query);
17057
17058 return result;
17059}
17060
17061/*
17062 * Create a dummy AS clause for a view. This is used when the real view
17063 * definition has to be postponed because of circular dependencies.
17064 * We must duplicate the view's external properties -- column names and types
17065 * (including collation) -- so that it works for subsequent references.
17066 *
17067 * This returns a new buffer which must be freed by the caller.
17068 */
17069static PQExpBuffer
17071{
17072 PQExpBuffer result = createPQExpBuffer();
17073 int j;
17074
17075 appendPQExpBufferStr(result, "SELECT");
17076
17077 for (j = 0; j < tbinfo->numatts; j++)
17078 {
17079 if (j > 0)
17080 appendPQExpBufferChar(result, ',');
17081 appendPQExpBufferStr(result, "\n ");
17082
17083 appendPQExpBuffer(result, "NULL::%s", tbinfo->atttypnames[j]);
17084
17085 /*
17086 * Must add collation if not default for the type, because CREATE OR
17087 * REPLACE VIEW won't change it
17088 */
17089 if (OidIsValid(tbinfo->attcollation[j]))
17090 {
17091 CollInfo *coll;
17092
17093 coll = findCollationByOid(tbinfo->attcollation[j]);
17094 if (coll)
17095 appendPQExpBuffer(result, " COLLATE %s",
17097 }
17098
17099 appendPQExpBuffer(result, " AS %s", fmtId(tbinfo->attnames[j]));
17100 }
17101
17102 return result;
17103}
17104
17105/*
17106 * dumpTableSchema
17107 * write the declaration (not data) of one user-defined table or view
17108 */
17109static void
17111{
17112 DumpOptions *dopt = fout->dopt;
17116 char *qrelname;
17117 char *qualrelname;
17118 int numParents;
17119 TableInfo **parents;
17120 int actual_atts; /* number of attrs in this CREATE statement */
17121 const char *reltypename;
17122 char *storage;
17123 int j,
17124 k;
17125
17126 /* We had better have loaded per-column details about this table */
17127 Assert(tbinfo->interesting);
17128
17129 qrelname = pg_strdup(fmtId(tbinfo->dobj.name));
17131
17132 if (tbinfo->hasoids)
17133 pg_log_warning("WITH OIDS is not supported anymore (table \"%s\")",
17134 qrelname);
17135
17136 if (dopt->binary_upgrade)
17138
17139 /* Is it a table or a view? */
17140 if (tbinfo->relkind == RELKIND_VIEW)
17141 {
17142 PQExpBuffer result;
17143
17144 /*
17145 * Note: keep this code in sync with the is_view case in dumpRule()
17146 */
17147
17148 reltypename = "VIEW";
17149
17150 appendPQExpBuffer(delq, "DROP VIEW %s;\n", qualrelname);
17151
17152 if (dopt->binary_upgrade)
17154 tbinfo->dobj.catId.oid);
17155
17156 appendPQExpBuffer(q, "CREATE VIEW %s", qualrelname);
17157
17158 if (tbinfo->dummy_view)
17160 else
17161 {
17162 if (nonemptyReloptions(tbinfo->reloptions))
17163 {
17164 appendPQExpBufferStr(q, " WITH (");
17165 appendReloptionsArrayAH(q, tbinfo->reloptions, "", fout);
17166 appendPQExpBufferChar(q, ')');
17167 }
17168 result = createViewAsClause(fout, tbinfo);
17169 }
17170 appendPQExpBuffer(q, " AS\n%s", result->data);
17171 destroyPQExpBuffer(result);
17172
17173 if (tbinfo->checkoption != NULL && !tbinfo->dummy_view)
17174 appendPQExpBuffer(q, "\n WITH %s CHECK OPTION", tbinfo->checkoption);
17175 appendPQExpBufferStr(q, ";\n");
17176 }
17177 else
17178 {
17179 char *partkeydef = NULL;
17180 char *ftoptions = NULL;
17181 char *srvname = NULL;
17182 const char *foreign = "";
17183
17184 /*
17185 * Set reltypename, and collect any relkind-specific data that we
17186 * didn't fetch during getTables().
17187 */
17188 switch (tbinfo->relkind)
17189 {
17191 {
17193 PGresult *res;
17194
17195 reltypename = "TABLE";
17196
17197 /* retrieve partition key definition */
17198 appendPQExpBuffer(query,
17199 "SELECT pg_get_partkeydef('%u')",
17200 tbinfo->dobj.catId.oid);
17201 res = ExecuteSqlQueryForSingleRow(fout, query->data);
17202 partkeydef = pg_strdup(PQgetvalue(res, 0, 0));
17203 PQclear(res);
17204 destroyPQExpBuffer(query);
17205 break;
17206 }
17208 {
17210 PGresult *res;
17211 int i_srvname;
17212 int i_ftoptions;
17213
17214 reltypename = "FOREIGN TABLE";
17215
17216 /* retrieve name of foreign server and generic options */
17217 appendPQExpBuffer(query,
17218 "SELECT fs.srvname, "
17219 "pg_catalog.array_to_string(ARRAY("
17220 "SELECT pg_catalog.quote_ident(option_name) || "
17221 "' ' || pg_catalog.quote_literal(option_value) "
17222 "FROM pg_catalog.pg_options_to_table(ftoptions) "
17223 "ORDER BY option_name"
17224 "), E',\n ') AS ftoptions "
17225 "FROM pg_catalog.pg_foreign_table ft "
17226 "JOIN pg_catalog.pg_foreign_server fs "
17227 "ON (fs.oid = ft.ftserver) "
17228 "WHERE ft.ftrelid = '%u'",
17229 tbinfo->dobj.catId.oid);
17230 res = ExecuteSqlQueryForSingleRow(fout, query->data);
17231 i_srvname = PQfnumber(res, "srvname");
17232 i_ftoptions = PQfnumber(res, "ftoptions");
17235 PQclear(res);
17236 destroyPQExpBuffer(query);
17237
17238 foreign = "FOREIGN ";
17239 break;
17240 }
17241 case RELKIND_MATVIEW:
17242 reltypename = "MATERIALIZED VIEW";
17243 break;
17244 default:
17245 reltypename = "TABLE";
17246 break;
17247 }
17248
17249 numParents = tbinfo->numParents;
17250 parents = tbinfo->parents;
17251
17252 appendPQExpBuffer(delq, "DROP %s %s;\n", reltypename, qualrelname);
17253
17254 if (dopt->binary_upgrade)
17256 tbinfo->dobj.catId.oid);
17257
17258 /*
17259 * PostgreSQL 18 has disabled UNLOGGED for partitioned tables, so
17260 * ignore it when dumping if it was set in this case.
17261 */
17262 appendPQExpBuffer(q, "CREATE %s%s %s",
17263 (tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED &&
17264 tbinfo->relkind != RELKIND_PARTITIONED_TABLE) ?
17265 "UNLOGGED " : "",
17267 qualrelname);
17268
17269 /*
17270 * Attach to type, if reloftype; except in case of a binary upgrade,
17271 * we dump the table normally and attach it to the type afterward.
17272 */
17273 if (OidIsValid(tbinfo->reloftype) && !dopt->binary_upgrade)
17274 appendPQExpBuffer(q, " OF %s",
17275 getFormattedTypeName(fout, tbinfo->reloftype,
17276 zeroIsError));
17277
17278 if (tbinfo->relkind != RELKIND_MATVIEW)
17279 {
17280 /* Dump the attributes */
17281 actual_atts = 0;
17282 for (j = 0; j < tbinfo->numatts; j++)
17283 {
17284 /*
17285 * Normally, dump if it's locally defined in this table, and
17286 * not dropped. But for binary upgrade, we'll dump all the
17287 * columns, and then fix up the dropped and nonlocal cases
17288 * below.
17289 */
17290 if (shouldPrintColumn(dopt, tbinfo, j))
17291 {
17292 bool print_default;
17293 bool print_notnull;
17294
17295 /*
17296 * Default value --- suppress if to be printed separately
17297 * or not at all.
17298 */
17299 print_default = (tbinfo->attrdefs[j] != NULL &&
17300 tbinfo->attrdefs[j]->dobj.dump &&
17301 !tbinfo->attrdefs[j]->separate);
17302
17303 /*
17304 * Not Null constraint --- print it if it is locally
17305 * defined, or if binary upgrade. (In the latter case, we
17306 * reset conislocal below.)
17307 */
17308 print_notnull = (tbinfo->notnull_constrs[j] != NULL &&
17309 (tbinfo->notnull_islocal[j] ||
17310 dopt->binary_upgrade ||
17311 tbinfo->ispartition));
17312
17313 /*
17314 * Skip column if fully defined by reloftype, except in
17315 * binary upgrade
17316 */
17317 if (OidIsValid(tbinfo->reloftype) &&
17319 !dopt->binary_upgrade)
17320 continue;
17321
17322 /* Format properly if not first attr */
17323 if (actual_atts == 0)
17324 appendPQExpBufferStr(q, " (");
17325 else
17326 appendPQExpBufferChar(q, ',');
17327 appendPQExpBufferStr(q, "\n ");
17328 actual_atts++;
17329
17330 /* Attribute name */
17331 appendPQExpBufferStr(q, fmtId(tbinfo->attnames[j]));
17332
17333 if (tbinfo->attisdropped[j])
17334 {
17335 /*
17336 * ALTER TABLE DROP COLUMN clears
17337 * pg_attribute.atttypid, so we will not have gotten a
17338 * valid type name; insert INTEGER as a stopgap. We'll
17339 * clean things up later.
17340 */
17341 appendPQExpBufferStr(q, " INTEGER /* dummy */");
17342 /* and skip to the next column */
17343 continue;
17344 }
17345
17346 /*
17347 * Attribute type; print it except when creating a typed
17348 * table ('OF type_name'), but in binary-upgrade mode,
17349 * print it in that case too.
17350 */
17351 if (dopt->binary_upgrade || !OidIsValid(tbinfo->reloftype))
17352 {
17353 appendPQExpBuffer(q, " %s",
17354 tbinfo->atttypnames[j]);
17355 }
17356
17357 if (print_default)
17358 {
17359 if (tbinfo->attgenerated[j] == ATTRIBUTE_GENERATED_STORED)
17360 appendPQExpBuffer(q, " GENERATED ALWAYS AS (%s) STORED",
17361 tbinfo->attrdefs[j]->adef_expr);
17362 else if (tbinfo->attgenerated[j] == ATTRIBUTE_GENERATED_VIRTUAL)
17363 appendPQExpBuffer(q, " GENERATED ALWAYS AS (%s)",
17364 tbinfo->attrdefs[j]->adef_expr);
17365 else
17366 appendPQExpBuffer(q, " DEFAULT %s",
17367 tbinfo->attrdefs[j]->adef_expr);
17368 }
17369
17370 if (print_notnull)
17371 {
17372 if (tbinfo->notnull_constrs[j][0] == '\0')
17373 appendPQExpBufferStr(q, " NOT NULL");
17374 else
17375 appendPQExpBuffer(q, " CONSTRAINT %s NOT NULL",
17376 fmtId(tbinfo->notnull_constrs[j]));
17377
17378 if (tbinfo->notnull_noinh[j])
17379 appendPQExpBufferStr(q, " NO INHERIT");
17380 }
17381
17382 /* Add collation if not default for the type */
17383 if (OidIsValid(tbinfo->attcollation[j]))
17384 {
17385 CollInfo *coll;
17386
17387 coll = findCollationByOid(tbinfo->attcollation[j]);
17388 if (coll)
17389 appendPQExpBuffer(q, " COLLATE %s",
17391 }
17392 }
17393
17394 /*
17395 * On the other hand, if we choose not to print a column
17396 * (likely because it is created by inheritance), but the
17397 * column has a locally-defined not-null constraint, we need
17398 * to dump the constraint as a standalone object.
17399 *
17400 * This syntax isn't SQL-conforming, but if you wanted
17401 * standard output you wouldn't be creating non-standard
17402 * objects to begin with.
17403 */
17404 if (!shouldPrintColumn(dopt, tbinfo, j) &&
17405 !tbinfo->attisdropped[j] &&
17406 tbinfo->notnull_constrs[j] != NULL &&
17407 tbinfo->notnull_islocal[j])
17408 {
17409 /* Format properly if not first attr */
17410 if (actual_atts == 0)
17411 appendPQExpBufferStr(q, " (");
17412 else
17413 appendPQExpBufferChar(q, ',');
17414 appendPQExpBufferStr(q, "\n ");
17415 actual_atts++;
17416
17417 if (tbinfo->notnull_constrs[j][0] == '\0')
17418 appendPQExpBuffer(q, "NOT NULL %s",
17419 fmtId(tbinfo->attnames[j]));
17420 else
17421 appendPQExpBuffer(q, "CONSTRAINT %s NOT NULL %s",
17422 tbinfo->notnull_constrs[j],
17423 fmtId(tbinfo->attnames[j]));
17424
17425 if (tbinfo->notnull_noinh[j])
17426 appendPQExpBufferStr(q, " NO INHERIT");
17427 }
17428 }
17429
17430 /*
17431 * Add non-inherited CHECK constraints, if any.
17432 *
17433 * For partitions, we need to include check constraints even if
17434 * they're not defined locally, because the ALTER TABLE ATTACH
17435 * PARTITION that we'll emit later expects the constraint to be
17436 * there. (No need to fix conislocal: ATTACH PARTITION does that)
17437 */
17438 for (j = 0; j < tbinfo->ncheck; j++)
17439 {
17440 ConstraintInfo *constr = &(tbinfo->checkexprs[j]);
17441
17442 if (constr->separate ||
17443 (!constr->conislocal && !tbinfo->ispartition))
17444 continue;
17445
17446 if (actual_atts == 0)
17447 appendPQExpBufferStr(q, " (\n ");
17448 else
17449 appendPQExpBufferStr(q, ",\n ");
17450
17451 appendPQExpBuffer(q, "CONSTRAINT %s ",
17452 fmtId(constr->dobj.name));
17453 appendPQExpBufferStr(q, constr->condef);
17454
17455 actual_atts++;
17456 }
17457
17458 if (actual_atts)
17459 appendPQExpBufferStr(q, "\n)");
17460 else if (!(OidIsValid(tbinfo->reloftype) && !dopt->binary_upgrade))
17461 {
17462 /*
17463 * No attributes? we must have a parenthesized attribute list,
17464 * even though empty, when not using the OF TYPE syntax.
17465 */
17466 appendPQExpBufferStr(q, " (\n)");
17467 }
17468
17469 /*
17470 * Emit the INHERITS clause (not for partitions), except in
17471 * binary-upgrade mode.
17472 */
17473 if (numParents > 0 && !tbinfo->ispartition &&
17474 !dopt->binary_upgrade)
17475 {
17476 appendPQExpBufferStr(q, "\nINHERITS (");
17477 for (k = 0; k < numParents; k++)
17478 {
17479 TableInfo *parentRel = parents[k];
17480
17481 if (k > 0)
17482 appendPQExpBufferStr(q, ", ");
17484 }
17485 appendPQExpBufferChar(q, ')');
17486 }
17487
17488 if (tbinfo->relkind == RELKIND_PARTITIONED_TABLE)
17489 appendPQExpBuffer(q, "\nPARTITION BY %s", partkeydef);
17490
17491 if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
17492 appendPQExpBuffer(q, "\nSERVER %s", fmtId(srvname));
17493 }
17494
17495 if (nonemptyReloptions(tbinfo->reloptions) ||
17496 nonemptyReloptions(tbinfo->toast_reloptions))
17497 {
17498 bool addcomma = false;
17499
17500 appendPQExpBufferStr(q, "\nWITH (");
17501 if (nonemptyReloptions(tbinfo->reloptions))
17502 {
17503 addcomma = true;
17504 appendReloptionsArrayAH(q, tbinfo->reloptions, "", fout);
17505 }
17506 if (nonemptyReloptions(tbinfo->toast_reloptions))
17507 {
17508 if (addcomma)
17509 appendPQExpBufferStr(q, ", ");
17510 appendReloptionsArrayAH(q, tbinfo->toast_reloptions, "toast.",
17511 fout);
17512 }
17513 appendPQExpBufferChar(q, ')');
17514 }
17515
17516 /* Dump generic options if any */
17517 if (ftoptions && ftoptions[0])
17518 appendPQExpBuffer(q, "\nOPTIONS (\n %s\n)", ftoptions);
17519
17520 /*
17521 * For materialized views, create the AS clause just like a view. At
17522 * this point, we always mark the view as not populated.
17523 */
17524 if (tbinfo->relkind == RELKIND_MATVIEW)
17525 {
17526 PQExpBuffer result;
17527
17528 result = createViewAsClause(fout, tbinfo);
17529 appendPQExpBuffer(q, " AS\n%s\n WITH NO DATA;\n",
17530 result->data);
17531 destroyPQExpBuffer(result);
17532 }
17533 else
17534 appendPQExpBufferStr(q, ";\n");
17535
17536 /* Materialized views can depend on extensions */
17537 if (tbinfo->relkind == RELKIND_MATVIEW)
17539 "pg_catalog.pg_class",
17540 "MATERIALIZED VIEW",
17541 qualrelname);
17542
17543 /*
17544 * in binary upgrade mode, update the catalog with any missing values
17545 * that might be present.
17546 */
17547 if (dopt->binary_upgrade)
17548 {
17549 for (j = 0; j < tbinfo->numatts; j++)
17550 {
17551 if (tbinfo->attmissingval[j][0] != '\0')
17552 {
17553 appendPQExpBufferStr(q, "\n-- set missing value.\n");
17555 "SELECT pg_catalog.binary_upgrade_set_missing_value(");
17557 appendPQExpBufferStr(q, "::pg_catalog.regclass,");
17558 appendStringLiteralAH(q, tbinfo->attnames[j], fout);
17559 appendPQExpBufferChar(q, ',');
17560 appendStringLiteralAH(q, tbinfo->attmissingval[j], fout);
17561 appendPQExpBufferStr(q, ");\n\n");
17562 }
17563 }
17564 }
17565
17566 /*
17567 * To create binary-compatible heap files, we have to ensure the same
17568 * physical column order, including dropped columns, as in the
17569 * original. Therefore, we create dropped columns above and drop them
17570 * here, also updating their attlen/attalign values so that the
17571 * dropped column can be skipped properly. (We do not bother with
17572 * restoring the original attbyval setting.) Also, inheritance
17573 * relationships are set up by doing ALTER TABLE INHERIT rather than
17574 * using an INHERITS clause --- the latter would possibly mess up the
17575 * column order. That also means we have to take care about setting
17576 * attislocal correctly, plus fix up any inherited CHECK constraints.
17577 * Analogously, we set up typed tables using ALTER TABLE / OF here.
17578 *
17579 * We process foreign and partitioned tables here, even though they
17580 * lack heap storage, because they can participate in inheritance
17581 * relationships and we want this stuff to be consistent across the
17582 * inheritance tree. We can exclude indexes, toast tables, sequences
17583 * and matviews, even though they have storage, because we don't
17584 * support altering or dropping columns in them, nor can they be part
17585 * of inheritance trees.
17586 */
17587 if (dopt->binary_upgrade &&
17588 (tbinfo->relkind == RELKIND_RELATION ||
17589 tbinfo->relkind == RELKIND_FOREIGN_TABLE ||
17590 tbinfo->relkind == RELKIND_PARTITIONED_TABLE))
17591 {
17592 bool firstitem;
17593 bool firstitem_extra;
17594
17595 /*
17596 * Drop any dropped columns. Merge the pg_attribute manipulations
17597 * into a single SQL command, so that we don't cause repeated
17598 * relcache flushes on the target table. Otherwise we risk O(N^2)
17599 * relcache bloat while dropping N columns.
17600 */
17601 resetPQExpBuffer(extra);
17602 firstitem = true;
17603 for (j = 0; j < tbinfo->numatts; j++)
17604 {
17605 if (tbinfo->attisdropped[j])
17606 {
17607 if (firstitem)
17608 {
17609 appendPQExpBufferStr(q, "\n-- For binary upgrade, recreate dropped columns.\n"
17610 "UPDATE pg_catalog.pg_attribute\n"
17611 "SET attlen = v.dlen, "
17612 "attalign = v.dalign, "
17613 "attbyval = false\n"
17614 "FROM (VALUES ");
17615 firstitem = false;
17616 }
17617 else
17618 appendPQExpBufferStr(q, ",\n ");
17619 appendPQExpBufferChar(q, '(');
17620 appendStringLiteralAH(q, tbinfo->attnames[j], fout);
17621 appendPQExpBuffer(q, ", %d, '%c')",
17622 tbinfo->attlen[j],
17623 tbinfo->attalign[j]);
17624 /* The ALTER ... DROP COLUMN commands must come after */
17625 appendPQExpBuffer(extra, "ALTER %sTABLE ONLY %s ",
17627 appendPQExpBuffer(extra, "DROP COLUMN %s;\n",
17628 fmtId(tbinfo->attnames[j]));
17629 }
17630 }
17631 if (!firstitem)
17632 {
17633 appendPQExpBufferStr(q, ") v(dname, dlen, dalign)\n"
17634 "WHERE attrelid = ");
17636 appendPQExpBufferStr(q, "::pg_catalog.regclass\n"
17637 " AND attname = v.dname;\n");
17638 /* Now we can issue the actual DROP COLUMN commands */
17639 appendBinaryPQExpBuffer(q, extra->data, extra->len);
17640 }
17641
17642 /*
17643 * Fix up inherited columns. As above, do the pg_attribute
17644 * manipulations in a single SQL command.
17645 */
17646 firstitem = true;
17647 for (j = 0; j < tbinfo->numatts; j++)
17648 {
17649 if (!tbinfo->attisdropped[j] &&
17650 !tbinfo->attislocal[j])
17651 {
17652 if (firstitem)
17653 {
17654 appendPQExpBufferStr(q, "\n-- For binary upgrade, recreate inherited columns.\n");
17655 appendPQExpBufferStr(q, "UPDATE pg_catalog.pg_attribute\n"
17656 "SET attislocal = false\n"
17657 "WHERE attrelid = ");
17659 appendPQExpBufferStr(q, "::pg_catalog.regclass\n"
17660 " AND attname IN (");
17661 firstitem = false;
17662 }
17663 else
17664 appendPQExpBufferStr(q, ", ");
17665 appendStringLiteralAH(q, tbinfo->attnames[j], fout);
17666 }
17667 }
17668 if (!firstitem)
17669 appendPQExpBufferStr(q, ");\n");
17670
17671 /*
17672 * Fix up not-null constraints that come from inheritance. As
17673 * above, do the pg_constraint manipulations in a single SQL
17674 * command. (Actually, two in special cases, if we're doing an
17675 * upgrade from < 18).
17676 */
17677 firstitem = true;
17678 firstitem_extra = true;
17679 resetPQExpBuffer(extra);
17680 for (j = 0; j < tbinfo->numatts; j++)
17681 {
17682 /*
17683 * If a not-null constraint comes from inheritance, reset
17684 * conislocal. The inhcount is fixed by ALTER TABLE INHERIT,
17685 * below. Special hack: in versions < 18, columns with no
17686 * local definition need their constraint to be matched by
17687 * column number in conkeys instead of by constraint name,
17688 * because the latter is not available. (We distinguish the
17689 * case because the constraint name is the empty string.)
17690 */
17691 if (tbinfo->notnull_constrs[j] != NULL &&
17692 !tbinfo->notnull_islocal[j])
17693 {
17694 if (tbinfo->notnull_constrs[j][0] != '\0')
17695 {
17696 if (firstitem)
17697 {
17698 appendPQExpBufferStr(q, "UPDATE pg_catalog.pg_constraint\n"
17699 "SET conislocal = false\n"
17700 "WHERE contype = 'n' AND conrelid = ");
17702 appendPQExpBufferStr(q, "::pg_catalog.regclass AND\n"
17703 "conname IN (");
17704 firstitem = false;
17705 }
17706 else
17707 appendPQExpBufferStr(q, ", ");
17708 appendStringLiteralAH(q, tbinfo->notnull_constrs[j], fout);
17709 }
17710 else
17711 {
17712 if (firstitem_extra)
17713 {
17714 appendPQExpBufferStr(extra, "UPDATE pg_catalog.pg_constraint\n"
17715 "SET conislocal = false\n"
17716 "WHERE contype = 'n' AND conrelid = ");
17718 appendPQExpBufferStr(extra, "::pg_catalog.regclass AND\n"
17719 "conkey IN (");
17720 firstitem_extra = false;
17721 }
17722 else
17723 appendPQExpBufferStr(extra, ", ");
17724 appendPQExpBuffer(extra, "'{%d}'", j + 1);
17725 }
17726 }
17727 }
17728 if (!firstitem)
17729 appendPQExpBufferStr(q, ");\n");
17730 if (!firstitem_extra)
17731 appendPQExpBufferStr(extra, ");\n");
17732
17733 if (extra->len > 0)
17734 appendBinaryPQExpBuffer(q, extra->data, extra->len);
17735
17736 /*
17737 * Add inherited CHECK constraints, if any.
17738 *
17739 * For partitions, they were already dumped, and conislocal
17740 * doesn't need fixing.
17741 *
17742 * As above, issue only one direct manipulation of pg_constraint.
17743 * Although it is tempting to merge the ALTER ADD CONSTRAINT
17744 * commands into one as well, refrain for now due to concern about
17745 * possible backend memory bloat if there are many such
17746 * constraints.
17747 */
17748 resetPQExpBuffer(extra);
17749 firstitem = true;
17750 for (k = 0; k < tbinfo->ncheck; k++)
17751 {
17752 ConstraintInfo *constr = &(tbinfo->checkexprs[k]);
17753
17754 if (constr->separate || constr->conislocal || tbinfo->ispartition)
17755 continue;
17756
17757 if (firstitem)
17758 appendPQExpBufferStr(q, "\n-- For binary upgrade, set up inherited constraints.\n");
17759 appendPQExpBuffer(q, "ALTER %sTABLE ONLY %s ADD CONSTRAINT %s %s;\n",
17761 fmtId(constr->dobj.name),
17762 constr->condef);
17763 /* Update pg_constraint after all the ALTER TABLEs */
17764 if (firstitem)
17765 {
17766 appendPQExpBufferStr(extra, "UPDATE pg_catalog.pg_constraint\n"
17767 "SET conislocal = false\n"
17768 "WHERE contype = 'c' AND conrelid = ");
17770 appendPQExpBufferStr(extra, "::pg_catalog.regclass\n");
17771 appendPQExpBufferStr(extra, " AND conname IN (");
17772 firstitem = false;
17773 }
17774 else
17775 appendPQExpBufferStr(extra, ", ");
17776 appendStringLiteralAH(extra, constr->dobj.name, fout);
17777 }
17778 if (!firstitem)
17779 {
17780 appendPQExpBufferStr(extra, ");\n");
17781 appendBinaryPQExpBuffer(q, extra->data, extra->len);
17782 }
17783
17784 if (numParents > 0 && !tbinfo->ispartition)
17785 {
17786 appendPQExpBufferStr(q, "\n-- For binary upgrade, set up inheritance this way.\n");
17787 for (k = 0; k < numParents; k++)
17788 {
17789 TableInfo *parentRel = parents[k];
17790
17791 appendPQExpBuffer(q, "ALTER %sTABLE ONLY %s INHERIT %s;\n", foreign,
17794 }
17795 }
17796
17797 if (OidIsValid(tbinfo->reloftype))
17798 {
17799 appendPQExpBufferStr(q, "\n-- For binary upgrade, set up typed tables this way.\n");
17800 appendPQExpBuffer(q, "ALTER TABLE ONLY %s OF %s;\n",
17802 getFormattedTypeName(fout, tbinfo->reloftype,
17803 zeroIsError));
17804 }
17805 }
17806
17807 /*
17808 * In binary_upgrade mode, arrange to restore the old relfrozenxid and
17809 * relminmxid of all vacuumable relations. (While vacuum.c processes
17810 * TOAST tables semi-independently, here we see them only as children
17811 * of other relations; so this "if" lacks RELKIND_TOASTVALUE, and the
17812 * child toast table is handled below.)
17813 */
17814 if (dopt->binary_upgrade &&
17815 (tbinfo->relkind == RELKIND_RELATION ||
17816 tbinfo->relkind == RELKIND_MATVIEW))
17817 {
17818 appendPQExpBufferStr(q, "\n-- For binary upgrade, set heap's relfrozenxid and relminmxid\n");
17819 appendPQExpBuffer(q, "UPDATE pg_catalog.pg_class\n"
17820 "SET relfrozenxid = '%u', relminmxid = '%u'\n"
17821 "WHERE oid = ",
17822 tbinfo->frozenxid, tbinfo->minmxid);
17824 appendPQExpBufferStr(q, "::pg_catalog.regclass;\n");
17825
17826 if (tbinfo->toast_oid)
17827 {
17828 /*
17829 * The toast table will have the same OID at restore, so we
17830 * can safely target it by OID.
17831 */
17832 appendPQExpBufferStr(q, "\n-- For binary upgrade, set toast's relfrozenxid and relminmxid\n");
17833 appendPQExpBuffer(q, "UPDATE pg_catalog.pg_class\n"
17834 "SET relfrozenxid = '%u', relminmxid = '%u'\n"
17835 "WHERE oid = '%u';\n",
17836 tbinfo->toast_frozenxid,
17837 tbinfo->toast_minmxid, tbinfo->toast_oid);
17838 }
17839 }
17840
17841 /*
17842 * In binary_upgrade mode, restore matviews' populated status by
17843 * poking pg_class directly. This is pretty ugly, but we can't use
17844 * REFRESH MATERIALIZED VIEW since it's possible that some underlying
17845 * matview is not populated even though this matview is; in any case,
17846 * we want to transfer the matview's heap storage, not run REFRESH.
17847 */
17848 if (dopt->binary_upgrade && tbinfo->relkind == RELKIND_MATVIEW &&
17849 tbinfo->relispopulated)
17850 {
17851 appendPQExpBufferStr(q, "\n-- For binary upgrade, mark materialized view as populated\n");
17852 appendPQExpBufferStr(q, "UPDATE pg_catalog.pg_class\n"
17853 "SET relispopulated = 't'\n"
17854 "WHERE oid = ");
17856 appendPQExpBufferStr(q, "::pg_catalog.regclass;\n");
17857 }
17858
17859 /*
17860 * Dump additional per-column properties that we can't handle in the
17861 * main CREATE TABLE command.
17862 */
17863 for (j = 0; j < tbinfo->numatts; j++)
17864 {
17865 /* None of this applies to dropped columns */
17866 if (tbinfo->attisdropped[j])
17867 continue;
17868
17869 /*
17870 * Dump per-column statistics information. We only issue an ALTER
17871 * TABLE statement if the attstattarget entry for this column is
17872 * not the default value.
17873 */
17874 if (tbinfo->attstattarget[j] >= 0)
17875 appendPQExpBuffer(q, "ALTER %sTABLE ONLY %s ALTER COLUMN %s SET STATISTICS %d;\n",
17877 fmtId(tbinfo->attnames[j]),
17878 tbinfo->attstattarget[j]);
17879
17880 /*
17881 * Dump per-column storage information. The statement is only
17882 * dumped if the storage has been changed from the type's default.
17883 */
17884 if (tbinfo->attstorage[j] != tbinfo->typstorage[j])
17885 {
17886 switch (tbinfo->attstorage[j])
17887 {
17888 case TYPSTORAGE_PLAIN:
17889 storage = "PLAIN";
17890 break;
17892 storage = "EXTERNAL";
17893 break;
17895 storage = "EXTENDED";
17896 break;
17897 case TYPSTORAGE_MAIN:
17898 storage = "MAIN";
17899 break;
17900 default:
17901 storage = NULL;
17902 }
17903
17904 /*
17905 * Only dump the statement if it's a storage type we recognize
17906 */
17907 if (storage != NULL)
17908 appendPQExpBuffer(q, "ALTER %sTABLE ONLY %s ALTER COLUMN %s SET STORAGE %s;\n",
17910 fmtId(tbinfo->attnames[j]),
17911 storage);
17912 }
17913
17914 /*
17915 * Dump per-column compression, if it's been set.
17916 */
17917 if (!dopt->no_toast_compression)
17918 {
17919 const char *cmname;
17920
17921 switch (tbinfo->attcompression[j])
17922 {
17923 case 'p':
17924 cmname = "pglz";
17925 break;
17926 case 'l':
17927 cmname = "lz4";
17928 break;
17929 default:
17930 cmname = NULL;
17931 break;
17932 }
17933
17934 if (cmname != NULL)
17935 appendPQExpBuffer(q, "ALTER %sTABLE ONLY %s ALTER COLUMN %s SET COMPRESSION %s;\n",
17937 fmtId(tbinfo->attnames[j]),
17938 cmname);
17939 }
17940
17941 /*
17942 * Dump per-column attributes.
17943 */
17944 if (tbinfo->attoptions[j][0] != '\0')
17945 appendPQExpBuffer(q, "ALTER %sTABLE ONLY %s ALTER COLUMN %s SET (%s);\n",
17947 fmtId(tbinfo->attnames[j]),
17948 tbinfo->attoptions[j]);
17949
17950 /*
17951 * Dump per-column fdw options.
17952 */
17953 if (tbinfo->relkind == RELKIND_FOREIGN_TABLE &&
17954 tbinfo->attfdwoptions[j][0] != '\0')
17956 "ALTER FOREIGN TABLE ONLY %s ALTER COLUMN %s OPTIONS (\n"
17957 " %s\n"
17958 ");\n",
17960 fmtId(tbinfo->attnames[j]),
17961 tbinfo->attfdwoptions[j]);
17962 } /* end loop over columns */
17963
17965 free(ftoptions);
17966 free(srvname);
17967 }
17968
17969 /*
17970 * dump properties we only have ALTER TABLE syntax for
17971 */
17972 if ((tbinfo->relkind == RELKIND_RELATION ||
17973 tbinfo->relkind == RELKIND_PARTITIONED_TABLE ||
17974 tbinfo->relkind == RELKIND_MATVIEW) &&
17975 tbinfo->relreplident != REPLICA_IDENTITY_DEFAULT)
17976 {
17977 if (tbinfo->relreplident == REPLICA_IDENTITY_INDEX)
17978 {
17979 /* nothing to do, will be set when the index is dumped */
17980 }
17981 else if (tbinfo->relreplident == REPLICA_IDENTITY_NOTHING)
17982 {
17983 appendPQExpBuffer(q, "\nALTER TABLE ONLY %s REPLICA IDENTITY NOTHING;\n",
17984 qualrelname);
17985 }
17986 else if (tbinfo->relreplident == REPLICA_IDENTITY_FULL)
17987 {
17988 appendPQExpBuffer(q, "\nALTER TABLE ONLY %s REPLICA IDENTITY FULL;\n",
17989 qualrelname);
17990 }
17991 }
17992
17993 if (tbinfo->forcerowsec)
17994 appendPQExpBuffer(q, "\nALTER TABLE ONLY %s FORCE ROW LEVEL SECURITY;\n",
17995 qualrelname);
17996
17997 if (dopt->binary_upgrade)
18000 tbinfo->dobj.namespace->dobj.name);
18001
18002 if (tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
18003 {
18004 char *tablespace = NULL;
18005 char *tableam = NULL;
18006
18007 /*
18008 * _selectTablespace() relies on tablespace-enabled objects in the
18009 * default tablespace to have a tablespace of "" (empty string) versus
18010 * non-tablespace-enabled objects to have a tablespace of NULL.
18011 * getTables() sets tbinfo->reltablespace to "" for the default
18012 * tablespace (not NULL).
18013 */
18014 if (RELKIND_HAS_TABLESPACE(tbinfo->relkind))
18015 tablespace = tbinfo->reltablespace;
18016
18017 if (RELKIND_HAS_TABLE_AM(tbinfo->relkind) ||
18019 tableam = tbinfo->amname;
18020
18021 ArchiveEntry(fout, tbinfo->dobj.catId, tbinfo->dobj.dumpId,
18022 ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
18023 .namespace = tbinfo->dobj.namespace->dobj.name,
18024 .tablespace = tablespace,
18025 .tableam = tableam,
18026 .relkind = tbinfo->relkind,
18027 .owner = tbinfo->rolname,
18028 .description = reltypename,
18029 .section = tbinfo->postponed_def ?
18031 .createStmt = q->data,
18032 .dropStmt = delq->data));
18033 }
18034
18035 /* Dump Table Comments */
18036 if (tbinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
18038
18039 /* Dump Table Security Labels */
18040 if (tbinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
18042
18043 /*
18044 * Dump comments for not-null constraints that aren't to be dumped
18045 * separately (those are processed by collectComments/dumpComment).
18046 */
18047 if (!fout->dopt->no_comments && dopt->dumpSchema &&
18048 fout->remoteVersion >= 180000)
18049 {
18051 PQExpBuffer tag = NULL;
18052
18053 for (j = 0; j < tbinfo->numatts; j++)
18054 {
18055 if (tbinfo->notnull_constrs[j] != NULL &&
18056 tbinfo->notnull_comment[j] != NULL)
18057 {
18058 if (comment == NULL)
18059 {
18061 tag = createPQExpBuffer();
18062 }
18063 else
18064 {
18066 resetPQExpBuffer(tag);
18067 }
18068
18069 appendPQExpBuffer(comment, "COMMENT ON CONSTRAINT %s ON %s IS ",
18070 fmtId(tbinfo->notnull_constrs[j]), qualrelname);
18071 appendStringLiteralAH(comment, tbinfo->notnull_comment[j], fout);
18073
18074 appendPQExpBuffer(tag, "CONSTRAINT %s ON %s",
18075 fmtId(tbinfo->notnull_constrs[j]), qrelname);
18076
18078 ARCHIVE_OPTS(.tag = tag->data,
18079 .namespace = tbinfo->dobj.namespace->dobj.name,
18080 .owner = tbinfo->rolname,
18081 .description = "COMMENT",
18082 .section = SECTION_NONE,
18083 .createStmt = comment->data,
18084 .deps = &(tbinfo->dobj.dumpId),
18085 .nDeps = 1));
18086 }
18087 }
18088
18090 destroyPQExpBuffer(tag);
18091 }
18092
18093 /* Dump comments on inlined table constraints */
18094 for (j = 0; j < tbinfo->ncheck; j++)
18095 {
18096 ConstraintInfo *constr = &(tbinfo->checkexprs[j]);
18097
18098 if (constr->separate || !constr->conislocal)
18099 continue;
18100
18101 if (constr->dobj.dump & DUMP_COMPONENT_COMMENT)
18103 }
18104
18107 destroyPQExpBuffer(extra);
18108 free(qrelname);
18110}
18111
18112/*
18113 * dumpTableAttach
18114 * write to fout the commands to attach a child partition
18115 *
18116 * Child partitions are always made by creating them separately
18117 * and then using ATTACH PARTITION, rather than using
18118 * CREATE TABLE ... PARTITION OF. This is important for preserving
18119 * any possible discrepancy in column layout, to allow assigning the
18120 * correct tablespace if different, and so that it's possible to restore
18121 * a partition without restoring its parent. (You'll get an error from
18122 * the ATTACH PARTITION command, but that can be ignored, or skipped
18123 * using "pg_restore -L" if you prefer.) The last point motivates
18124 * treating ATTACH PARTITION as a completely separate ArchiveEntry
18125 * rather than emitting it within the child partition's ArchiveEntry.
18126 */
18127static void
18129{
18130 DumpOptions *dopt = fout->dopt;
18131 PQExpBuffer q;
18132 PGresult *res;
18133 char *partbound;
18134
18135 /* Do nothing if not dumping schema */
18136 if (!dopt->dumpSchema)
18137 return;
18138
18139 q = createPQExpBuffer();
18140
18142 {
18143 /* Set up query for partbound details */
18145 "PREPARE dumpTableAttach(pg_catalog.oid) AS\n");
18146
18148 "SELECT pg_get_expr(c.relpartbound, c.oid) "
18149 "FROM pg_class c "
18150 "WHERE c.oid = $1");
18151
18153
18155 }
18156
18158 "EXECUTE dumpTableAttach('%u')",
18159 attachinfo->partitionTbl->dobj.catId.oid);
18160
18162 partbound = PQgetvalue(res, 0, 0);
18163
18164 /* Perform ALTER TABLE on the parent */
18166 "ALTER TABLE ONLY %s ",
18167 fmtQualifiedDumpable(attachinfo->parentTbl));
18169 "ATTACH PARTITION %s %s;\n",
18170 fmtQualifiedDumpable(attachinfo->partitionTbl),
18171 partbound);
18172
18173 /*
18174 * There is no point in creating a drop query as the drop is done by table
18175 * drop. (If you think to change this, see also _printTocEntry().)
18176 * Although this object doesn't really have ownership as such, set the
18177 * owner field anyway to ensure that the command is run by the correct
18178 * role at restore time.
18179 */
18180 ArchiveEntry(fout, attachinfo->dobj.catId, attachinfo->dobj.dumpId,
18181 ARCHIVE_OPTS(.tag = attachinfo->dobj.name,
18182 .namespace = attachinfo->dobj.namespace->dobj.name,
18183 .owner = attachinfo->partitionTbl->rolname,
18184 .description = "TABLE ATTACH",
18185 .section = SECTION_PRE_DATA,
18186 .createStmt = q->data));
18187
18188 PQclear(res);
18190}
18191
18192/*
18193 * dumpAttrDef --- dump an attribute's default-value declaration
18194 */
18195static void
18197{
18198 DumpOptions *dopt = fout->dopt;
18199 TableInfo *tbinfo = adinfo->adtable;
18200 int adnum = adinfo->adnum;
18201 PQExpBuffer q;
18203 char *qualrelname;
18204 char *tag;
18205 char *foreign;
18206
18207 /* Do nothing if not dumping schema */
18208 if (!dopt->dumpSchema)
18209 return;
18210
18211 /* Skip if not "separate"; it was dumped in the table's definition */
18212 if (!adinfo->separate)
18213 return;
18214
18215 q = createPQExpBuffer();
18217
18219
18220 foreign = tbinfo->relkind == RELKIND_FOREIGN_TABLE ? "FOREIGN " : "";
18221
18223 "ALTER %sTABLE ONLY %s ALTER COLUMN %s SET DEFAULT %s;\n",
18224 foreign, qualrelname, fmtId(tbinfo->attnames[adnum - 1]),
18225 adinfo->adef_expr);
18226
18227 appendPQExpBuffer(delq, "ALTER %sTABLE %s ALTER COLUMN %s DROP DEFAULT;\n",
18229 fmtId(tbinfo->attnames[adnum - 1]));
18230
18231 tag = psprintf("%s %s", tbinfo->dobj.name, tbinfo->attnames[adnum - 1]);
18232
18233 if (adinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
18234 ArchiveEntry(fout, adinfo->dobj.catId, adinfo->dobj.dumpId,
18235 ARCHIVE_OPTS(.tag = tag,
18236 .namespace = tbinfo->dobj.namespace->dobj.name,
18237 .owner = tbinfo->rolname,
18238 .description = "DEFAULT",
18239 .section = SECTION_PRE_DATA,
18240 .createStmt = q->data,
18241 .dropStmt = delq->data));
18242
18243 free(tag);
18247}
18248
18249/*
18250 * getAttrName: extract the correct name for an attribute
18251 *
18252 * The array tblInfo->attnames[] only provides names of user attributes;
18253 * if a system attribute number is supplied, we have to fake it.
18254 * We also do a little bit of bounds checking for safety's sake.
18255 */
18256static const char *
18257getAttrName(int attrnum, const TableInfo *tblInfo)
18258{
18259 if (attrnum > 0 && attrnum <= tblInfo->numatts)
18260 return tblInfo->attnames[attrnum - 1];
18261 switch (attrnum)
18262 {
18264 return "ctid";
18266 return "xmin";
18268 return "cmin";
18270 return "xmax";
18272 return "cmax";
18274 return "tableoid";
18275 }
18276 pg_fatal("invalid column number %d for table \"%s\"",
18277 attrnum, tblInfo->dobj.name);
18278 return NULL; /* keep compiler quiet */
18279}
18280
18281/*
18282 * dumpIndex
18283 * write out to fout a user-defined index
18284 */
18285static void
18287{
18288 DumpOptions *dopt = fout->dopt;
18289 TableInfo *tbinfo = indxinfo->indextable;
18290 bool is_constraint = (indxinfo->indexconstraint != 0);
18291 PQExpBuffer q;
18293 char *qindxname;
18294 char *qqindxname;
18295
18296 /* Do nothing if not dumping schema */
18297 if (!dopt->dumpSchema)
18298 return;
18299
18300 q = createPQExpBuffer();
18302
18303 qindxname = pg_strdup(fmtId(indxinfo->dobj.name));
18305
18306 /*
18307 * If there's an associated constraint, don't dump the index per se, but
18308 * do dump any comment for it. (This is safe because dependency ordering
18309 * will have ensured the constraint is emitted first.) Note that the
18310 * emitted comment has to be shown as depending on the constraint, not the
18311 * index, in such cases.
18312 */
18313 if (!is_constraint)
18314 {
18315 char *indstatcols = indxinfo->indstatcols;
18316 char *indstatvals = indxinfo->indstatvals;
18317 char **indstatcolsarray = NULL;
18318 char **indstatvalsarray = NULL;
18319 int nstatcols = 0;
18320 int nstatvals = 0;
18321
18322 if (dopt->binary_upgrade)
18324 indxinfo->dobj.catId.oid);
18325
18326 /* Plain secondary index */
18327 appendPQExpBuffer(q, "%s;\n", indxinfo->indexdef);
18328
18329 /*
18330 * Append ALTER TABLE commands as needed to set properties that we
18331 * only have ALTER TABLE syntax for. Keep this in sync with the
18332 * similar code in dumpConstraint!
18333 */
18334
18335 /* If the index is clustered, we need to record that. */
18336 if (indxinfo->indisclustered)
18337 {
18338 appendPQExpBuffer(q, "\nALTER TABLE %s CLUSTER",
18340 /* index name is not qualified in this syntax */
18341 appendPQExpBuffer(q, " ON %s;\n",
18342 qindxname);
18343 }
18344
18345 /*
18346 * If the index has any statistics on some of its columns, generate
18347 * the associated ALTER INDEX queries.
18348 */
18349 if (strlen(indstatcols) != 0 || strlen(indstatvals) != 0)
18350 {
18351 int j;
18352
18353 if (!parsePGArray(indstatcols, &indstatcolsarray, &nstatcols))
18354 pg_fatal("could not parse index statistic columns");
18355 if (!parsePGArray(indstatvals, &indstatvalsarray, &nstatvals))
18356 pg_fatal("could not parse index statistic values");
18357 if (nstatcols != nstatvals)
18358 pg_fatal("mismatched number of columns and values for index statistics");
18359
18360 for (j = 0; j < nstatcols; j++)
18361 {
18362 appendPQExpBuffer(q, "ALTER INDEX %s ", qqindxname);
18363
18364 /*
18365 * Note that this is a column number, so no quotes should be
18366 * used.
18367 */
18368 appendPQExpBuffer(q, "ALTER COLUMN %s ",
18370 appendPQExpBuffer(q, "SET STATISTICS %s;\n",
18372 }
18373 }
18374
18375 /* Indexes can depend on extensions */
18377 "pg_catalog.pg_class",
18378 "INDEX", qqindxname);
18379
18380 /* If the index defines identity, we need to record that. */
18381 if (indxinfo->indisreplident)
18382 {
18383 appendPQExpBuffer(q, "\nALTER TABLE ONLY %s REPLICA IDENTITY USING",
18385 /* index name is not qualified in this syntax */
18386 appendPQExpBuffer(q, " INDEX %s;\n",
18387 qindxname);
18388 }
18389
18390 /*
18391 * If this index is a member of a partitioned index, the backend will
18392 * not allow us to drop it separately, so don't try. It will go away
18393 * automatically when we drop either the index's table or the
18394 * partitioned index. (If, in a selective restore with --clean, we
18395 * drop neither of those, then this index will not be dropped either.
18396 * But that's fine, and even if you think it's not, the backend won't
18397 * let us do differently.)
18398 */
18399 if (indxinfo->parentidx == 0)
18400 appendPQExpBuffer(delq, "DROP INDEX %s;\n", qqindxname);
18401
18402 if (indxinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
18403 ArchiveEntry(fout, indxinfo->dobj.catId, indxinfo->dobj.dumpId,
18404 ARCHIVE_OPTS(.tag = indxinfo->dobj.name,
18405 .namespace = tbinfo->dobj.namespace->dobj.name,
18406 .tablespace = indxinfo->tablespace,
18407 .owner = tbinfo->rolname,
18408 .description = "INDEX",
18409 .section = SECTION_POST_DATA,
18410 .createStmt = q->data,
18411 .dropStmt = delq->data));
18412
18415 }
18416
18417 /* Dump Index Comments */
18418 if (indxinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
18419 dumpComment(fout, "INDEX", qindxname,
18420 tbinfo->dobj.namespace->dobj.name,
18421 tbinfo->rolname,
18422 indxinfo->dobj.catId, 0,
18423 is_constraint ? indxinfo->indexconstraint :
18424 indxinfo->dobj.dumpId);
18425
18428 free(qindxname);
18430}
18431
18432/*
18433 * dumpIndexAttach
18434 * write out to fout a partitioned-index attachment clause
18435 */
18436static void
18438{
18439 /* Do nothing if not dumping schema */
18440 if (!fout->dopt->dumpSchema)
18441 return;
18442
18443 if (attachinfo->partitionIdx->dobj.dump & DUMP_COMPONENT_DEFINITION)
18444 {
18446
18447 appendPQExpBuffer(q, "ALTER INDEX %s ",
18448 fmtQualifiedDumpable(attachinfo->parentIdx));
18449 appendPQExpBuffer(q, "ATTACH PARTITION %s;\n",
18450 fmtQualifiedDumpable(attachinfo->partitionIdx));
18451
18452 /*
18453 * There is no need for a dropStmt since the drop is done implicitly
18454 * when we drop either the index's table or the partitioned index.
18455 * Moreover, since there's no ALTER INDEX DETACH PARTITION command,
18456 * there's no way to do it anyway. (If you think to change this,
18457 * consider also what to do with --if-exists.)
18458 *
18459 * Although this object doesn't really have ownership as such, set the
18460 * owner field anyway to ensure that the command is run by the correct
18461 * role at restore time.
18462 */
18463 ArchiveEntry(fout, attachinfo->dobj.catId, attachinfo->dobj.dumpId,
18464 ARCHIVE_OPTS(.tag = attachinfo->dobj.name,
18465 .namespace = attachinfo->dobj.namespace->dobj.name,
18466 .owner = attachinfo->parentIdx->indextable->rolname,
18467 .description = "INDEX ATTACH",
18468 .section = SECTION_POST_DATA,
18469 .createStmt = q->data));
18470
18472 }
18473}
18474
18475/*
18476 * dumpStatisticsExt
18477 * write out to fout an extended statistics object
18478 */
18479static void
18481{
18482 DumpOptions *dopt = fout->dopt;
18483 PQExpBuffer q;
18485 PQExpBuffer query;
18486 char *qstatsextname;
18487 PGresult *res;
18488 char *stxdef;
18489
18490 /* Do nothing if not dumping schema */
18491 if (!dopt->dumpSchema)
18492 return;
18493
18494 q = createPQExpBuffer();
18496 query = createPQExpBuffer();
18497
18499
18500 appendPQExpBuffer(query, "SELECT "
18501 "pg_catalog.pg_get_statisticsobjdef('%u'::pg_catalog.oid)",
18502 statsextinfo->dobj.catId.oid);
18503
18504 res = ExecuteSqlQueryForSingleRow(fout, query->data);
18505
18506 stxdef = PQgetvalue(res, 0, 0);
18507
18508 /* Result of pg_get_statisticsobjdef is complete except for semicolon */
18509 appendPQExpBuffer(q, "%s;\n", stxdef);
18510
18511 /*
18512 * We only issue an ALTER STATISTICS statement if the stxstattarget entry
18513 * for this statistics object is not the default value.
18514 */
18515 if (statsextinfo->stattarget >= 0)
18516 {
18517 appendPQExpBuffer(q, "ALTER STATISTICS %s ",
18519 appendPQExpBuffer(q, "SET STATISTICS %d;\n",
18520 statsextinfo->stattarget);
18521 }
18522
18523 appendPQExpBuffer(delq, "DROP STATISTICS %s;\n",
18525
18526 if (statsextinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
18527 ArchiveEntry(fout, statsextinfo->dobj.catId,
18528 statsextinfo->dobj.dumpId,
18529 ARCHIVE_OPTS(.tag = statsextinfo->dobj.name,
18530 .namespace = statsextinfo->dobj.namespace->dobj.name,
18531 .owner = statsextinfo->rolname,
18532 .description = "STATISTICS",
18533 .section = SECTION_POST_DATA,
18534 .createStmt = q->data,
18535 .dropStmt = delq->data));
18536
18537 /* Dump Statistics Comments */
18538 if (statsextinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
18539 dumpComment(fout, "STATISTICS", qstatsextname,
18540 statsextinfo->dobj.namespace->dobj.name,
18541 statsextinfo->rolname,
18542 statsextinfo->dobj.catId, 0,
18543 statsextinfo->dobj.dumpId);
18544
18545 PQclear(res);
18548 destroyPQExpBuffer(query);
18550}
18551
18552/*
18553 * dumpStatisticsExtStats
18554 * write out to fout the stats for an extended statistics object
18555 */
18556static void
18558{
18559 DumpOptions *dopt = fout->dopt;
18560 PQExpBuffer query;
18561 PGresult *res;
18562 int nstats;
18563
18564 /* Do nothing if not dumping statistics */
18565 if (!dopt->dumpStatistics)
18566 return;
18567
18569 {
18571
18572 /*---------
18573 * Set up query for details about extended statistics objects.
18574 *
18575 * The query depends on the backend version:
18576 * - In v19 and newer versions, query directly the pg_stats_ext*
18577 * catalogs.
18578 * - In v18 and older versions, ndistinct and dependencies have a
18579 * different format that needs translation.
18580 * - In v14 and older versions, inherited does not exist.
18581 * - In v11 and older versions, there is no pg_stats_ext, hence
18582 * the logic joins pg_statistic_ext and pg_namespace.
18583 *---------
18584 */
18585
18587 "PREPARE getExtStatsStats(pg_catalog.name, pg_catalog.name) AS\n"
18588 "SELECT ");
18589
18590 /*
18591 * Versions 15 and newer have inherited stats.
18592 *
18593 * Create this column in all versions because we need to order by it
18594 * later.
18595 */
18596 if (fout->remoteVersion >= 150000)
18597 appendPQExpBufferStr(pq, "e.inherited, ");
18598 else
18599 appendPQExpBufferStr(pq, "false AS inherited, ");
18600
18601 /*--------
18602 * The ndistinct and dependencies formats changed in v19, so
18603 * everything before that needs to be translated.
18604 *
18605 * The ndistinct translation converts this kind of data:
18606 * {"3, 4": 11, "3, 6": 11, "4, 6": 11, "3, 4, 6": 11}
18607 *
18608 * to this:
18609 * [ {"attributes": [3,4], "ndistinct": 11},
18610 * {"attributes": [3,6], "ndistinct": 11},
18611 * {"attributes": [4,6], "ndistinct": 11},
18612 * {"attributes": [3,4,6], "ndistinct": 11} ]
18613 *
18614 * The dependencies translation converts this kind of data:
18615 * {"3 => 4": 1.000000, "3 => 6": 1.000000,
18616 * "4 => 6": 1.000000, "3, 4 => 6": 1.000000,
18617 * "3, 6 => 4": 1.000000}
18618 *
18619 * to this:
18620 * [ {"attributes": [3], "dependency": 4, "degree": 1.000000},
18621 * {"attributes": [3], "dependency": 6, "degree": 1.000000},
18622 * {"attributes": [4], "dependency": 6, "degree": 1.000000},
18623 * {"attributes": [3,4], "dependency": 6, "degree": 1.000000},
18624 * {"attributes": [3,6], "dependency": 4, "degree": 1.000000} ]
18625 *--------
18626 */
18627 if (fout->remoteVersion >= 190000)
18628 appendPQExpBufferStr(pq, "e.n_distinct, e.dependencies, ");
18629 else
18631 "( "
18632 "SELECT json_agg( "
18633 " json_build_object( "
18635 " string_to_array(kv.key, ', ')::integer[], "
18637 " kv.value::bigint )) "
18638 "FROM json_each_text(e.n_distinct::text::json) AS kv"
18639 ") AS n_distinct, "
18640 "( "
18641 "SELECT json_agg( "
18642 " json_build_object( "
18644 " string_to_array( "
18645 " split_part(kv.key, ' => ', 1), "
18646 " ', ')::integer[], "
18648 " split_part(kv.key, ' => ', 2)::integer, "
18650 " kv.value::double precision )) "
18651 "FROM json_each_text(e.dependencies::text::json) AS kv "
18652 ") AS dependencies, ");
18653
18654 /* MCV was introduced v13 */
18655 if (fout->remoteVersion >= 130000)
18657 "e.most_common_vals, e.most_common_freqs, "
18658 "e.most_common_base_freqs ");
18659 else
18661 "NULL AS most_common_vals, NULL AS most_common_freqs, "
18662 "NULL AS most_common_base_freqs ");
18663
18664 /* pg_stats_ext introduced in v12 */
18665 if (fout->remoteVersion >= 120000)
18667 "FROM pg_catalog.pg_stats_ext AS e "
18668 "WHERE e.statistics_schemaname = $1 "
18669 "AND e.statistics_name = $2 ");
18670 else
18672 "FROM ( "
18673 "SELECT s.stxndistinct AS n_distinct, "
18674 " s.stxdependencies AS dependencies "
18675 "FROM pg_catalog.pg_statistic_ext AS s "
18676 "JOIN pg_catalog.pg_namespace AS n "
18677 "ON n.oid = s.stxnamespace "
18678 "WHERE n.nspname = $1 "
18679 "AND s.stxname = $2 "
18680 ") AS e ");
18681
18682 /* we always have an inherited column, but it may be a constant */
18683 appendPQExpBufferStr(pq, "ORDER BY inherited");
18684
18685 ExecuteSqlStatement(fout, pq->data);
18686
18688
18690 }
18691
18692 query = createPQExpBuffer();
18693
18694 appendPQExpBufferStr(query, "EXECUTE getExtStatsStats(");
18695 appendStringLiteralAH(query, statsextinfo->dobj.namespace->dobj.name, fout);
18696 appendPQExpBufferStr(query, "::pg_catalog.name, ");
18697 appendStringLiteralAH(query, statsextinfo->dobj.name, fout);
18698 appendPQExpBufferStr(query, "::pg_catalog.name)");
18699
18700 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
18701
18702 destroyPQExpBuffer(query);
18703
18704 nstats = PQntuples(res);
18705
18706 if (nstats > 0)
18707 {
18709
18710 int i_inherited = PQfnumber(res, "inherited");
18711 int i_ndistinct = PQfnumber(res, "n_distinct");
18712 int i_dependencies = PQfnumber(res, "dependencies");
18713 int i_mcv = PQfnumber(res, "most_common_vals");
18714 int i_mcf = PQfnumber(res, "most_common_freqs");
18715 int i_mcbf = PQfnumber(res, "most_common_base_freqs");
18716
18717 for (int i = 0; i < nstats; i++)
18718 {
18719 TableInfo *tbinfo = statsextinfo->stattable;
18720
18721 if (PQgetisnull(res, i, i_inherited))
18722 pg_fatal("inherited cannot be NULL");
18723
18725 "SELECT * FROM pg_catalog.pg_restore_extended_stats(\n");
18726 appendPQExpBuffer(out, "\t'version', '%d'::integer,\n",
18728
18729 /* Relation information */
18730 appendPQExpBufferStr(out, "\t'schemaname', ");
18731 appendStringLiteralAH(out, tbinfo->dobj.namespace->dobj.name, fout);
18732 appendPQExpBufferStr(out, ",\n\t'relname', ");
18733 appendStringLiteralAH(out, tbinfo->dobj.name, fout);
18734
18735 /* Extended statistics information */
18736 appendPQExpBufferStr(out, ",\n\t'statistics_schemaname', ");
18737 appendStringLiteralAH(out, statsextinfo->dobj.namespace->dobj.name, fout);
18738 appendPQExpBufferStr(out, ",\n\t'statistics_name', ");
18739 appendStringLiteralAH(out, statsextinfo->dobj.name, fout);
18740 appendNamedArgument(out, fout, "inherited", "boolean",
18741 PQgetvalue(res, i, i_inherited));
18742
18743 if (!PQgetisnull(res, i, i_ndistinct))
18744 appendNamedArgument(out, fout, "n_distinct", "pg_ndistinct",
18745 PQgetvalue(res, i, i_ndistinct));
18746
18747 if (!PQgetisnull(res, i, i_dependencies))
18748 appendNamedArgument(out, fout, "dependencies", "pg_dependencies",
18749 PQgetvalue(res, i, i_dependencies));
18750
18751 if (!PQgetisnull(res, i, i_mcv))
18752 appendNamedArgument(out, fout, "most_common_vals", "text[]",
18753 PQgetvalue(res, i, i_mcv));
18754
18755 if (!PQgetisnull(res, i, i_mcf))
18756 appendNamedArgument(out, fout, "most_common_freqs", "double precision[]",
18757 PQgetvalue(res, i, i_mcf));
18758
18759 if (!PQgetisnull(res, i, i_mcbf))
18760 appendNamedArgument(out, fout, "most_common_base_freqs", "double precision[]",
18761 PQgetvalue(res, i, i_mcbf));
18762
18763 appendPQExpBufferStr(out, "\n);\n");
18764 }
18765
18767 ARCHIVE_OPTS(.tag = statsextinfo->dobj.name,
18768 .namespace = statsextinfo->dobj.namespace->dobj.name,
18769 .owner = statsextinfo->rolname,
18770 .description = "EXTENDED STATISTICS DATA",
18771 .section = SECTION_POST_DATA,
18772 .createStmt = out->data,
18773 .deps = &statsextinfo->dobj.dumpId,
18774 .nDeps = 1));
18775 destroyPQExpBuffer(out);
18776 }
18777 PQclear(res);
18778}
18779
18780/*
18781 * dumpConstraint
18782 * write out to fout a user-defined constraint
18783 */
18784static void
18786{
18787 DumpOptions *dopt = fout->dopt;
18788 TableInfo *tbinfo = coninfo->contable;
18789 PQExpBuffer q;
18791 char *tag = NULL;
18792 char *foreign;
18793
18794 /* Do nothing if not dumping schema */
18795 if (!dopt->dumpSchema)
18796 return;
18797
18798 q = createPQExpBuffer();
18800
18801 foreign = tbinfo &&
18802 tbinfo->relkind == RELKIND_FOREIGN_TABLE ? "FOREIGN " : "";
18803
18804 if (coninfo->contype == 'p' ||
18805 coninfo->contype == 'u' ||
18806 coninfo->contype == 'x')
18807 {
18808 /* Index-related constraint */
18810 int k;
18811
18813
18814 if (indxinfo == NULL)
18815 pg_fatal("missing index for constraint \"%s\"",
18816 coninfo->dobj.name);
18817
18818 if (dopt->binary_upgrade)
18820 indxinfo->dobj.catId.oid);
18821
18822 appendPQExpBuffer(q, "ALTER %sTABLE ONLY %s\n", foreign,
18824 appendPQExpBuffer(q, " ADD CONSTRAINT %s ",
18825 fmtId(coninfo->dobj.name));
18826
18827 if (coninfo->condef)
18828 {
18829 /* pg_get_constraintdef should have provided everything */
18830 appendPQExpBuffer(q, "%s;\n", coninfo->condef);
18831 }
18832 else
18833 {
18835 coninfo->contype == 'p' ? "PRIMARY KEY" : "UNIQUE");
18836
18837 /*
18838 * PRIMARY KEY constraints should not be using NULLS NOT DISTINCT
18839 * indexes. Being able to create this was fixed, but we need to
18840 * make the index distinct in order to be able to restore the
18841 * dump.
18842 */
18843 if (indxinfo->indnullsnotdistinct && coninfo->contype != 'p')
18844 appendPQExpBufferStr(q, " NULLS NOT DISTINCT");
18845 appendPQExpBufferStr(q, " (");
18846 for (k = 0; k < indxinfo->indnkeyattrs; k++)
18847 {
18848 int indkey = (int) indxinfo->indkeys[k];
18849 const char *attname;
18850
18852 break;
18854
18855 appendPQExpBuffer(q, "%s%s",
18856 (k == 0) ? "" : ", ",
18857 fmtId(attname));
18858 }
18859 if (coninfo->conperiod)
18860 appendPQExpBufferStr(q, " WITHOUT OVERLAPS");
18861
18862 if (indxinfo->indnkeyattrs < indxinfo->indnattrs)
18863 appendPQExpBufferStr(q, ") INCLUDE (");
18864
18865 for (k = indxinfo->indnkeyattrs; k < indxinfo->indnattrs; k++)
18866 {
18867 int indkey = (int) indxinfo->indkeys[k];
18868 const char *attname;
18869
18871 break;
18873
18874 appendPQExpBuffer(q, "%s%s",
18875 (k == indxinfo->indnkeyattrs) ? "" : ", ",
18876 fmtId(attname));
18877 }
18878
18879 appendPQExpBufferChar(q, ')');
18880
18881 if (nonemptyReloptions(indxinfo->indreloptions))
18882 {
18883 appendPQExpBufferStr(q, " WITH (");
18884 appendReloptionsArrayAH(q, indxinfo->indreloptions, "", fout);
18885 appendPQExpBufferChar(q, ')');
18886 }
18887
18888 if (coninfo->condeferrable)
18889 {
18890 appendPQExpBufferStr(q, " DEFERRABLE");
18891 if (coninfo->condeferred)
18892 appendPQExpBufferStr(q, " INITIALLY DEFERRED");
18893 }
18894
18895 appendPQExpBufferStr(q, ";\n");
18896 }
18897
18898 /*
18899 * Append ALTER TABLE commands as needed to set properties that we
18900 * only have ALTER TABLE syntax for. Keep this in sync with the
18901 * similar code in dumpIndex!
18902 */
18903
18904 /* If the index is clustered, we need to record that. */
18905 if (indxinfo->indisclustered)
18906 {
18907 appendPQExpBuffer(q, "\nALTER TABLE %s CLUSTER",
18909 /* index name is not qualified in this syntax */
18910 appendPQExpBuffer(q, " ON %s;\n",
18911 fmtId(indxinfo->dobj.name));
18912 }
18913
18914 /* If the index defines identity, we need to record that. */
18915 if (indxinfo->indisreplident)
18916 {
18917 appendPQExpBuffer(q, "\nALTER TABLE ONLY %s REPLICA IDENTITY USING",
18919 /* index name is not qualified in this syntax */
18920 appendPQExpBuffer(q, " INDEX %s;\n",
18921 fmtId(indxinfo->dobj.name));
18922 }
18923
18924 /* Indexes can depend on extensions */
18926 "pg_catalog.pg_class", "INDEX",
18928
18929 appendPQExpBuffer(delq, "ALTER %sTABLE ONLY %s ", foreign,
18931 appendPQExpBuffer(delq, "DROP CONSTRAINT %s;\n",
18932 fmtId(coninfo->dobj.name));
18933
18934 tag = psprintf("%s %s", tbinfo->dobj.name, coninfo->dobj.name);
18935
18936 if (coninfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
18937 ArchiveEntry(fout, coninfo->dobj.catId, coninfo->dobj.dumpId,
18938 ARCHIVE_OPTS(.tag = tag,
18939 .namespace = tbinfo->dobj.namespace->dobj.name,
18940 .tablespace = indxinfo->tablespace,
18941 .owner = tbinfo->rolname,
18942 .description = "CONSTRAINT",
18943 .section = SECTION_POST_DATA,
18944 .createStmt = q->data,
18945 .dropStmt = delq->data));
18946 }
18947 else if (coninfo->contype == 'f')
18948 {
18949 char *only;
18950
18951 /*
18952 * Foreign keys on partitioned tables are always declared as
18953 * inheriting to partitions; for all other cases, emit them as
18954 * applying ONLY directly to the named table, because that's how they
18955 * work for regular inherited tables.
18956 */
18957 only = tbinfo->relkind == RELKIND_PARTITIONED_TABLE ? "" : "ONLY ";
18958
18959 /*
18960 * XXX Potentially wrap in a 'SET CONSTRAINTS OFF' block so that the
18961 * current table data is not processed
18962 */
18963 appendPQExpBuffer(q, "ALTER %sTABLE %s%s\n", foreign,
18965 appendPQExpBuffer(q, " ADD CONSTRAINT %s %s;\n",
18966 fmtId(coninfo->dobj.name),
18967 coninfo->condef);
18968
18969 appendPQExpBuffer(delq, "ALTER %sTABLE %s%s ", foreign,
18971 appendPQExpBuffer(delq, "DROP CONSTRAINT %s;\n",
18972 fmtId(coninfo->dobj.name));
18973
18974 tag = psprintf("%s %s", tbinfo->dobj.name, coninfo->dobj.name);
18975
18976 if (coninfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
18977 ArchiveEntry(fout, coninfo->dobj.catId, coninfo->dobj.dumpId,
18978 ARCHIVE_OPTS(.tag = tag,
18979 .namespace = tbinfo->dobj.namespace->dobj.name,
18980 .owner = tbinfo->rolname,
18981 .description = "FK CONSTRAINT",
18982 .section = SECTION_POST_DATA,
18983 .createStmt = q->data,
18984 .dropStmt = delq->data));
18985 }
18986 else if ((coninfo->contype == 'c' || coninfo->contype == 'n') && tbinfo)
18987 {
18988 /* CHECK or invalid not-null constraint on a table */
18989
18990 /* Ignore if not to be dumped separately, or if it was inherited */
18991 if (coninfo->separate && coninfo->conislocal)
18992 {
18993 const char *keyword;
18994
18995 if (coninfo->contype == 'c')
18996 keyword = "CHECK CONSTRAINT";
18997 else
18998 keyword = "CONSTRAINT";
18999
19000 /* not ONLY since we want it to propagate to children */
19001 appendPQExpBuffer(q, "ALTER %sTABLE %s\n", foreign,
19003 appendPQExpBuffer(q, " ADD CONSTRAINT %s %s;\n",
19004 fmtId(coninfo->dobj.name),
19005 coninfo->condef);
19006
19007 appendPQExpBuffer(delq, "ALTER %sTABLE %s ", foreign,
19009 appendPQExpBuffer(delq, "DROP CONSTRAINT %s;\n",
19010 fmtId(coninfo->dobj.name));
19011
19012 tag = psprintf("%s %s", tbinfo->dobj.name, coninfo->dobj.name);
19013
19014 if (coninfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
19015 ArchiveEntry(fout, coninfo->dobj.catId, coninfo->dobj.dumpId,
19016 ARCHIVE_OPTS(.tag = tag,
19017 .namespace = tbinfo->dobj.namespace->dobj.name,
19018 .owner = tbinfo->rolname,
19019 .description = keyword,
19020 .section = SECTION_POST_DATA,
19021 .createStmt = q->data,
19022 .dropStmt = delq->data));
19023 }
19024 }
19025 else if (tbinfo == NULL)
19026 {
19027 /* CHECK, NOT NULL constraint on a domain */
19028 TypeInfo *tyinfo = coninfo->condomain;
19029
19030 Assert(coninfo->contype == 'c' || coninfo->contype == 'n');
19031
19032 /* Ignore if not to be dumped separately */
19033 if (coninfo->separate)
19034 {
19035 const char *keyword;
19036
19037 if (coninfo->contype == 'c')
19038 keyword = "CHECK CONSTRAINT";
19039 else
19040 keyword = "CONSTRAINT";
19041
19042 appendPQExpBuffer(q, "ALTER DOMAIN %s\n",
19044 appendPQExpBuffer(q, " ADD CONSTRAINT %s %s;\n",
19045 fmtId(coninfo->dobj.name),
19046 coninfo->condef);
19047
19048 appendPQExpBuffer(delq, "ALTER DOMAIN %s ",
19050 appendPQExpBuffer(delq, "DROP CONSTRAINT %s;\n",
19051 fmtId(coninfo->dobj.name));
19052
19053 tag = psprintf("%s %s", tyinfo->dobj.name, coninfo->dobj.name);
19054
19055 if (coninfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
19056 ArchiveEntry(fout, coninfo->dobj.catId, coninfo->dobj.dumpId,
19057 ARCHIVE_OPTS(.tag = tag,
19058 .namespace = tyinfo->dobj.namespace->dobj.name,
19059 .owner = tyinfo->rolname,
19060 .description = keyword,
19061 .section = SECTION_POST_DATA,
19062 .createStmt = q->data,
19063 .dropStmt = delq->data));
19064
19065 if (coninfo->dobj.dump & DUMP_COMPONENT_COMMENT)
19066 {
19068 char *qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
19069
19070 appendPQExpBuffer(conprefix, "CONSTRAINT %s ON DOMAIN",
19071 fmtId(coninfo->dobj.name));
19072
19074 tyinfo->dobj.namespace->dobj.name,
19075 tyinfo->rolname,
19076 coninfo->dobj.catId, 0, coninfo->dobj.dumpId);
19078 free(qtypname);
19079 }
19080 }
19081 }
19082 else
19083 {
19084 pg_fatal("unrecognized constraint type: %c",
19085 coninfo->contype);
19086 }
19087
19088 /* Dump Constraint Comments --- only works for table constraints */
19089 if (tbinfo && coninfo->separate &&
19090 coninfo->dobj.dump & DUMP_COMPONENT_COMMENT)
19092
19093 free(tag);
19096}
19097
19098/*
19099 * dumpTableConstraintComment --- dump a constraint's comment if any
19100 *
19101 * This is split out because we need the function in two different places
19102 * depending on whether the constraint is dumped as part of CREATE TABLE
19103 * or as a separate ALTER command.
19104 */
19105static void
19107{
19108 TableInfo *tbinfo = coninfo->contable;
19110 char *qtabname;
19111
19112 qtabname = pg_strdup(fmtId(tbinfo->dobj.name));
19113
19114 appendPQExpBuffer(conprefix, "CONSTRAINT %s ON",
19115 fmtId(coninfo->dobj.name));
19116
19117 if (coninfo->dobj.dump & DUMP_COMPONENT_COMMENT)
19119 tbinfo->dobj.namespace->dobj.name,
19120 tbinfo->rolname,
19121 coninfo->dobj.catId, 0,
19122 coninfo->separate ? coninfo->dobj.dumpId : tbinfo->dobj.dumpId);
19123
19125 free(qtabname);
19126}
19127
19128static inline SeqType
19130{
19131 for (int i = 0; i < lengthof(SeqTypeNames); i++)
19132 {
19133 if (strcmp(SeqTypeNames[i], name) == 0)
19134 return (SeqType) i;
19135 }
19136
19137 pg_fatal("unrecognized sequence type: %s", name);
19138 return (SeqType) 0; /* keep compiler quiet */
19139}
19140
19141/*
19142 * bsearch() comparator for SequenceItem
19143 */
19144static int
19145SequenceItemCmp(const void *p1, const void *p2)
19146{
19147 SequenceItem v1 = *((const SequenceItem *) p1);
19148 SequenceItem v2 = *((const SequenceItem *) p2);
19149
19150 return pg_cmp_u32(v1.oid, v2.oid);
19151}
19152
19153/*
19154 * collectSequences
19155 *
19156 * Construct a table of sequence information. This table is sorted by OID for
19157 * speed in lookup.
19158 */
19159static void
19161{
19162 PGresult *res;
19163 const char *query;
19164
19165 /*
19166 * Before Postgres 10, sequence metadata is in the sequence itself. With
19167 * some extra effort, we might be able to use the sorted table for those
19168 * versions, but for now it seems unlikely to be worth it.
19169 *
19170 * Since version 18, we can gather the sequence data in this query with
19171 * pg_get_sequence_data(), but we only do so for non-schema-only dumps.
19172 */
19173 if (fout->remoteVersion < 100000)
19174 return;
19175 else if (fout->remoteVersion < 180000 ||
19177 query = "SELECT seqrelid, format_type(seqtypid, NULL), "
19178 "seqstart, seqincrement, "
19179 "seqmax, seqmin, "
19180 "seqcache, seqcycle, "
19181 "NULL, 'f' "
19182 "FROM pg_catalog.pg_sequence "
19183 "ORDER BY seqrelid";
19184 else
19185 query = "SELECT seqrelid, format_type(seqtypid, NULL), "
19186 "seqstart, seqincrement, "
19187 "seqmax, seqmin, "
19188 "seqcache, seqcycle, "
19189 "last_value, is_called "
19190 "FROM pg_catalog.pg_sequence, "
19191 "pg_get_sequence_data(seqrelid) "
19192 "ORDER BY seqrelid;";
19193
19194 res = ExecuteSqlQuery(fout, query, PGRES_TUPLES_OK);
19195
19196 nsequences = PQntuples(res);
19198
19199 for (int i = 0; i < nsequences; i++)
19200 {
19201 sequences[i].oid = atooid(PQgetvalue(res, i, 0));
19203 sequences[i].startv = strtoi64(PQgetvalue(res, i, 2), NULL, 10);
19204 sequences[i].incby = strtoi64(PQgetvalue(res, i, 3), NULL, 10);
19205 sequences[i].maxv = strtoi64(PQgetvalue(res, i, 4), NULL, 10);
19206 sequences[i].minv = strtoi64(PQgetvalue(res, i, 5), NULL, 10);
19207 sequences[i].cache = strtoi64(PQgetvalue(res, i, 6), NULL, 10);
19208 sequences[i].cycled = (strcmp(PQgetvalue(res, i, 7), "t") == 0);
19209 sequences[i].last_value = strtoi64(PQgetvalue(res, i, 8), NULL, 10);
19210 sequences[i].is_called = (strcmp(PQgetvalue(res, i, 9), "t") == 0);
19211 sequences[i].null_seqtuple = (PQgetisnull(res, i, 8) || PQgetisnull(res, i, 9));
19212 }
19213
19214 PQclear(res);
19215}
19216
19217/*
19218 * dumpSequence
19219 * write the declaration (not data) of one user-defined sequence
19220 */
19221static void
19223{
19224 DumpOptions *dopt = fout->dopt;
19226 bool is_ascending;
19231 char *qseqname;
19232 TableInfo *owning_tab = NULL;
19233
19234 qseqname = pg_strdup(fmtId(tbinfo->dobj.name));
19235
19236 /*
19237 * For versions >= 10, the sequence information is gathered in a sorted
19238 * table before any calls to dumpSequence(). See collectSequences() for
19239 * more information.
19240 */
19241 if (fout->remoteVersion >= 100000)
19242 {
19243 SequenceItem key = {0};
19244
19246
19247 key.oid = tbinfo->dobj.catId.oid;
19249 sizeof(SequenceItem), SequenceItemCmp);
19250 }
19251 else
19252 {
19253 PGresult *res;
19254
19255 /*
19256 * Before PostgreSQL 10, sequence metadata is in the sequence itself.
19257 *
19258 * Note: it might seem that 'bigint' potentially needs to be
19259 * schema-qualified, but actually that's a keyword.
19260 */
19261 appendPQExpBuffer(query,
19262 "SELECT 'bigint' AS sequence_type, "
19263 "start_value, increment_by, max_value, min_value, "
19264 "cache_value, is_cycled FROM %s",
19266
19267 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
19268
19269 if (PQntuples(res) != 1)
19270 pg_fatal(ngettext("query to get data of sequence \"%s\" returned %d row (expected 1)",
19271 "query to get data of sequence \"%s\" returned %d rows (expected 1)",
19272 PQntuples(res)),
19273 tbinfo->dobj.name, PQntuples(res));
19274
19276 seq->seqtype = parse_sequence_type(PQgetvalue(res, 0, 0));
19277 seq->startv = strtoi64(PQgetvalue(res, 0, 1), NULL, 10);
19278 seq->incby = strtoi64(PQgetvalue(res, 0, 2), NULL, 10);
19279 seq->maxv = strtoi64(PQgetvalue(res, 0, 3), NULL, 10);
19280 seq->minv = strtoi64(PQgetvalue(res, 0, 4), NULL, 10);
19281 seq->cache = strtoi64(PQgetvalue(res, 0, 5), NULL, 10);
19282 seq->cycled = (strcmp(PQgetvalue(res, 0, 6), "t") == 0);
19283
19284 PQclear(res);
19285 }
19286
19287 /* Calculate default limits for a sequence of this type */
19288 is_ascending = (seq->incby >= 0);
19289 if (seq->seqtype == SEQTYPE_SMALLINT)
19290 {
19293 }
19294 else if (seq->seqtype == SEQTYPE_INTEGER)
19295 {
19298 }
19299 else if (seq->seqtype == SEQTYPE_BIGINT)
19300 {
19303 }
19304 else
19305 {
19306 pg_fatal("unrecognized sequence type: %d", seq->seqtype);
19307 default_minv = default_maxv = 0; /* keep compiler quiet */
19308 }
19309
19310 /*
19311 * Identity sequences are not to be dropped separately.
19312 */
19313 if (!tbinfo->is_identity_sequence)
19314 {
19315 appendPQExpBuffer(delqry, "DROP SEQUENCE %s;\n",
19317 }
19318
19319 resetPQExpBuffer(query);
19320
19321 if (dopt->binary_upgrade)
19322 {
19324 tbinfo->dobj.catId.oid);
19325
19326 /*
19327 * In older PG versions a sequence will have a pg_type entry, but v14
19328 * and up don't use that, so don't attempt to preserve the type OID.
19329 */
19330 }
19331
19332 if (tbinfo->is_identity_sequence)
19333 {
19334 owning_tab = findTableByOid(tbinfo->owning_tab);
19335
19336 appendPQExpBuffer(query,
19337 "ALTER TABLE %s ",
19338 fmtQualifiedDumpable(owning_tab));
19339 appendPQExpBuffer(query,
19340 "ALTER COLUMN %s ADD GENERATED ",
19341 fmtId(owning_tab->attnames[tbinfo->owning_col - 1]));
19342 if (owning_tab->attidentity[tbinfo->owning_col - 1] == ATTRIBUTE_IDENTITY_ALWAYS)
19343 appendPQExpBufferStr(query, "ALWAYS");
19344 else if (owning_tab->attidentity[tbinfo->owning_col - 1] == ATTRIBUTE_IDENTITY_BY_DEFAULT)
19345 appendPQExpBufferStr(query, "BY DEFAULT");
19346 appendPQExpBuffer(query, " AS IDENTITY (\n SEQUENCE NAME %s\n",
19348
19349 /*
19350 * Emit persistence option only if it's different from the owning
19351 * table's. This avoids using this new syntax unnecessarily.
19352 */
19353 if (tbinfo->relpersistence != owning_tab->relpersistence)
19354 appendPQExpBuffer(query, " %s\n",
19355 tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED ?
19356 "UNLOGGED" : "LOGGED");
19357 }
19358 else
19359 {
19360 appendPQExpBuffer(query,
19361 "CREATE %sSEQUENCE %s\n",
19362 tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED ?
19363 "UNLOGGED " : "",
19365
19366 if (seq->seqtype != SEQTYPE_BIGINT)
19367 appendPQExpBuffer(query, " AS %s\n", SeqTypeNames[seq->seqtype]);
19368 }
19369
19370 appendPQExpBuffer(query, " START WITH " INT64_FORMAT "\n", seq->startv);
19371
19372 appendPQExpBuffer(query, " INCREMENT BY " INT64_FORMAT "\n", seq->incby);
19373
19374 if (seq->minv != default_minv)
19375 appendPQExpBuffer(query, " MINVALUE " INT64_FORMAT "\n", seq->minv);
19376 else
19377 appendPQExpBufferStr(query, " NO MINVALUE\n");
19378
19379 if (seq->maxv != default_maxv)
19380 appendPQExpBuffer(query, " MAXVALUE " INT64_FORMAT "\n", seq->maxv);
19381 else
19382 appendPQExpBufferStr(query, " NO MAXVALUE\n");
19383
19384 appendPQExpBuffer(query,
19385 " CACHE " INT64_FORMAT "%s",
19386 seq->cache, (seq->cycled ? "\n CYCLE" : ""));
19387
19388 if (tbinfo->is_identity_sequence)
19389 appendPQExpBufferStr(query, "\n);\n");
19390 else
19391 appendPQExpBufferStr(query, ";\n");
19392
19393 /* binary_upgrade: no need to clear TOAST table oid */
19394
19395 if (dopt->binary_upgrade)
19397 "SEQUENCE", qseqname,
19398 tbinfo->dobj.namespace->dobj.name);
19399
19400 if (tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
19401 ArchiveEntry(fout, tbinfo->dobj.catId, tbinfo->dobj.dumpId,
19402 ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
19403 .namespace = tbinfo->dobj.namespace->dobj.name,
19404 .owner = tbinfo->rolname,
19405 .description = "SEQUENCE",
19406 .section = SECTION_PRE_DATA,
19407 .createStmt = query->data,
19408 .dropStmt = delqry->data));
19409
19410 /*
19411 * If the sequence is owned by a table column, emit the ALTER for it as a
19412 * separate TOC entry immediately following the sequence's own entry. It's
19413 * OK to do this rather than using full sorting logic, because the
19414 * dependency that tells us it's owned will have forced the table to be
19415 * created first. We can't just include the ALTER in the TOC entry
19416 * because it will fail if we haven't reassigned the sequence owner to
19417 * match the table's owner.
19418 *
19419 * We need not schema-qualify the table reference because both sequence
19420 * and table must be in the same schema.
19421 */
19422 if (OidIsValid(tbinfo->owning_tab) && !tbinfo->is_identity_sequence)
19423 {
19424 owning_tab = findTableByOid(tbinfo->owning_tab);
19425
19426 if (owning_tab == NULL)
19427 pg_fatal("failed sanity check, parent table with OID %u of sequence with OID %u not found",
19428 tbinfo->owning_tab, tbinfo->dobj.catId.oid);
19429
19430 if (owning_tab->dobj.dump & DUMP_COMPONENT_DEFINITION)
19431 {
19432 resetPQExpBuffer(query);
19433 appendPQExpBuffer(query, "ALTER SEQUENCE %s",
19435 appendPQExpBuffer(query, " OWNED BY %s",
19436 fmtQualifiedDumpable(owning_tab));
19437 appendPQExpBuffer(query, ".%s;\n",
19438 fmtId(owning_tab->attnames[tbinfo->owning_col - 1]));
19439
19440 if (tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
19442 ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
19443 .namespace = tbinfo->dobj.namespace->dobj.name,
19444 .owner = tbinfo->rolname,
19445 .description = "SEQUENCE OWNED BY",
19446 .section = SECTION_PRE_DATA,
19447 .createStmt = query->data,
19448 .deps = &(tbinfo->dobj.dumpId),
19449 .nDeps = 1));
19450 }
19451 }
19452
19453 /* Dump Sequence Comments and Security Labels */
19454 if (tbinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
19455 dumpComment(fout, "SEQUENCE", qseqname,
19456 tbinfo->dobj.namespace->dobj.name, tbinfo->rolname,
19457 tbinfo->dobj.catId, 0, tbinfo->dobj.dumpId);
19458
19459 if (tbinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
19460 dumpSecLabel(fout, "SEQUENCE", qseqname,
19461 tbinfo->dobj.namespace->dobj.name, tbinfo->rolname,
19462 tbinfo->dobj.catId, 0, tbinfo->dobj.dumpId);
19463
19464 if (fout->remoteVersion < 100000)
19465 pg_free(seq);
19466 destroyPQExpBuffer(query);
19468 free(qseqname);
19469}
19470
19471/*
19472 * dumpSequenceData
19473 * write the data of one user-defined sequence
19474 */
19475static void
19477{
19478 TableInfo *tbinfo = tdinfo->tdtable;
19479 int64 last;
19480 bool called;
19481 PQExpBuffer query;
19482
19483 /* needn't bother if not dumping sequence data */
19484 if (!fout->dopt->dumpData && !fout->dopt->sequence_data)
19485 return;
19486
19487 query = createPQExpBuffer();
19488
19489 /*
19490 * For versions >= 18, the sequence information is gathered in the sorted
19491 * array before any calls to dumpSequenceData(). See collectSequences()
19492 * for more information.
19493 *
19494 * For older versions, we have to query the sequence relations
19495 * individually.
19496 */
19497 if (fout->remoteVersion < 180000)
19498 {
19499 PGresult *res;
19500
19501 appendPQExpBuffer(query,
19502 "SELECT last_value, is_called FROM %s",
19504
19505 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
19506
19507 if (PQntuples(res) != 1)
19508 pg_fatal(ngettext("query to get data of sequence \"%s\" returned %d row (expected 1)",
19509 "query to get data of sequence \"%s\" returned %d rows (expected 1)",
19510 PQntuples(res)),
19511 tbinfo->dobj.name, PQntuples(res));
19512
19513 last = strtoi64(PQgetvalue(res, 0, 0), NULL, 10);
19514 called = (strcmp(PQgetvalue(res, 0, 1), "t") == 0);
19515
19516 PQclear(res);
19517 }
19518 else
19519 {
19520 SequenceItem key = {0};
19521 SequenceItem *entry;
19522
19524 Assert(tbinfo->dobj.catId.oid);
19525
19526 key.oid = tbinfo->dobj.catId.oid;
19527 entry = bsearch(&key, sequences, nsequences,
19528 sizeof(SequenceItem), SequenceItemCmp);
19529
19530 if (entry->null_seqtuple)
19531 pg_fatal("failed to get data for sequence \"%s\"; user may lack "
19532 "SELECT privilege on the sequence or the sequence may "
19533 "have been concurrently dropped",
19534 tbinfo->dobj.name);
19535
19536 last = entry->last_value;
19537 called = entry->is_called;
19538 }
19539
19540 resetPQExpBuffer(query);
19541 appendPQExpBufferStr(query, "SELECT pg_catalog.setval(");
19543 appendPQExpBuffer(query, ", " INT64_FORMAT ", %s);\n",
19544 last, (called ? "true" : "false"));
19545
19546 if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
19548 ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
19549 .namespace = tbinfo->dobj.namespace->dobj.name,
19550 .owner = tbinfo->rolname,
19551 .description = "SEQUENCE SET",
19552 .section = SECTION_DATA,
19553 .createStmt = query->data,
19554 .deps = &(tbinfo->dobj.dumpId),
19555 .nDeps = 1));
19556
19557 destroyPQExpBuffer(query);
19558}
19559
19560/*
19561 * dumpTrigger
19562 * write the declaration of one user-defined table trigger
19563 */
19564static void
19566{
19567 DumpOptions *dopt = fout->dopt;
19568 TableInfo *tbinfo = tginfo->tgtable;
19569 PQExpBuffer query;
19573 char *qtabname;
19574 char *tag;
19575
19576 /* Do nothing if not dumping schema */
19577 if (!dopt->dumpSchema)
19578 return;
19579
19580 query = createPQExpBuffer();
19584
19585 qtabname = pg_strdup(fmtId(tbinfo->dobj.name));
19586
19587 appendPQExpBuffer(trigidentity, "%s ", fmtId(tginfo->dobj.name));
19589
19590 appendPQExpBuffer(query, "%s;\n", tginfo->tgdef);
19591 appendPQExpBuffer(delqry, "DROP TRIGGER %s;\n", trigidentity->data);
19592
19593 /* Triggers can depend on extensions */
19595 "pg_catalog.pg_trigger", "TRIGGER",
19597
19598 if (tginfo->tgispartition)
19599 {
19600 Assert(tbinfo->ispartition);
19601
19602 /*
19603 * Partition triggers only appear here because their 'tgenabled' flag
19604 * differs from its parent's. The trigger is created already, so
19605 * remove the CREATE and replace it with an ALTER. (Clear out the
19606 * DROP query too, so that pg_dump --create does not cause errors.)
19607 */
19608 resetPQExpBuffer(query);
19610 appendPQExpBuffer(query, "\nALTER %sTABLE %s ",
19611 tbinfo->relkind == RELKIND_FOREIGN_TABLE ? "FOREIGN " : "",
19613 switch (tginfo->tgenabled)
19614 {
19615 case 'f':
19616 case 'D':
19617 appendPQExpBufferStr(query, "DISABLE");
19618 break;
19619 case 't':
19620 case 'O':
19621 appendPQExpBufferStr(query, "ENABLE");
19622 break;
19623 case 'R':
19624 appendPQExpBufferStr(query, "ENABLE REPLICA");
19625 break;
19626 case 'A':
19627 appendPQExpBufferStr(query, "ENABLE ALWAYS");
19628 break;
19629 }
19630 appendPQExpBuffer(query, " TRIGGER %s;\n",
19631 fmtId(tginfo->dobj.name));
19632 }
19633 else if (tginfo->tgenabled != 't' && tginfo->tgenabled != 'O')
19634 {
19635 appendPQExpBuffer(query, "\nALTER %sTABLE %s ",
19636 tbinfo->relkind == RELKIND_FOREIGN_TABLE ? "FOREIGN " : "",
19638 switch (tginfo->tgenabled)
19639 {
19640 case 'D':
19641 case 'f':
19642 appendPQExpBufferStr(query, "DISABLE");
19643 break;
19644 case 'A':
19645 appendPQExpBufferStr(query, "ENABLE ALWAYS");
19646 break;
19647 case 'R':
19648 appendPQExpBufferStr(query, "ENABLE REPLICA");
19649 break;
19650 default:
19651 appendPQExpBufferStr(query, "ENABLE");
19652 break;
19653 }
19654 appendPQExpBuffer(query, " TRIGGER %s;\n",
19655 fmtId(tginfo->dobj.name));
19656 }
19657
19658 appendPQExpBuffer(trigprefix, "TRIGGER %s ON",
19659 fmtId(tginfo->dobj.name));
19660
19661 tag = psprintf("%s %s", tbinfo->dobj.name, tginfo->dobj.name);
19662
19663 if (tginfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
19664 ArchiveEntry(fout, tginfo->dobj.catId, tginfo->dobj.dumpId,
19665 ARCHIVE_OPTS(.tag = tag,
19666 .namespace = tbinfo->dobj.namespace->dobj.name,
19667 .owner = tbinfo->rolname,
19668 .description = "TRIGGER",
19669 .section = SECTION_POST_DATA,
19670 .createStmt = query->data,
19671 .dropStmt = delqry->data));
19672
19673 if (tginfo->dobj.dump & DUMP_COMPONENT_COMMENT)
19675 tbinfo->dobj.namespace->dobj.name, tbinfo->rolname,
19676 tginfo->dobj.catId, 0, tginfo->dobj.dumpId);
19677
19678 free(tag);
19679 destroyPQExpBuffer(query);
19683 free(qtabname);
19684}
19685
19686/*
19687 * dumpEventTrigger
19688 * write the declaration of one user-defined event trigger
19689 */
19690static void
19692{
19693 DumpOptions *dopt = fout->dopt;
19694 PQExpBuffer query;
19696 char *qevtname;
19697
19698 /* Do nothing if not dumping schema */
19699 if (!dopt->dumpSchema)
19700 return;
19701
19702 query = createPQExpBuffer();
19704
19705 qevtname = pg_strdup(fmtId(evtinfo->dobj.name));
19706
19707 appendPQExpBufferStr(query, "CREATE EVENT TRIGGER ");
19709 appendPQExpBufferStr(query, " ON ");
19710 appendPQExpBufferStr(query, fmtId(evtinfo->evtevent));
19711
19712 if (strcmp("", evtinfo->evttags) != 0)
19713 {
19714 appendPQExpBufferStr(query, "\n WHEN TAG IN (");
19715 appendPQExpBufferStr(query, evtinfo->evttags);
19716 appendPQExpBufferChar(query, ')');
19717 }
19718
19719 appendPQExpBufferStr(query, "\n EXECUTE FUNCTION ");
19720 appendPQExpBufferStr(query, evtinfo->evtfname);
19721 appendPQExpBufferStr(query, "();\n");
19722
19723 if (evtinfo->evtenabled != 'O')
19724 {
19725 appendPQExpBuffer(query, "\nALTER EVENT TRIGGER %s ",
19726 qevtname);
19727 switch (evtinfo->evtenabled)
19728 {
19729 case 'D':
19730 appendPQExpBufferStr(query, "DISABLE");
19731 break;
19732 case 'A':
19733 appendPQExpBufferStr(query, "ENABLE ALWAYS");
19734 break;
19735 case 'R':
19736 appendPQExpBufferStr(query, "ENABLE REPLICA");
19737 break;
19738 default:
19739 appendPQExpBufferStr(query, "ENABLE");
19740 break;
19741 }
19742 appendPQExpBufferStr(query, ";\n");
19743 }
19744
19745 appendPQExpBuffer(delqry, "DROP EVENT TRIGGER %s;\n",
19746 qevtname);
19747
19748 if (dopt->binary_upgrade)
19750 "EVENT TRIGGER", qevtname, NULL);
19751
19752 if (evtinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
19753 ArchiveEntry(fout, evtinfo->dobj.catId, evtinfo->dobj.dumpId,
19754 ARCHIVE_OPTS(.tag = evtinfo->dobj.name,
19755 .owner = evtinfo->evtowner,
19756 .description = "EVENT TRIGGER",
19757 .section = SECTION_POST_DATA,
19758 .createStmt = query->data,
19759 .dropStmt = delqry->data));
19760
19761 if (evtinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
19762 dumpComment(fout, "EVENT TRIGGER", qevtname,
19763 NULL, evtinfo->evtowner,
19764 evtinfo->dobj.catId, 0, evtinfo->dobj.dumpId);
19765
19766 if (evtinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
19767 dumpSecLabel(fout, "EVENT TRIGGER", qevtname,
19768 NULL, evtinfo->evtowner,
19769 evtinfo->dobj.catId, 0, evtinfo->dobj.dumpId);
19770
19771 destroyPQExpBuffer(query);
19773 free(qevtname);
19774}
19775
19776/*
19777 * dumpRule
19778 * Dump a rule
19779 */
19780static void
19782{
19783 DumpOptions *dopt = fout->dopt;
19784 TableInfo *tbinfo = rinfo->ruletable;
19785 bool is_view;
19786 PQExpBuffer query;
19787 PQExpBuffer cmd;
19790 char *qtabname;
19791 PGresult *res;
19792 char *tag;
19793
19794 /* Do nothing if not dumping schema */
19795 if (!dopt->dumpSchema)
19796 return;
19797
19798 /*
19799 * If it is an ON SELECT rule that is created implicitly by CREATE VIEW,
19800 * we do not want to dump it as a separate object.
19801 */
19802 if (!rinfo->separate)
19803 return;
19804
19805 /*
19806 * If it's an ON SELECT rule, we want to print it as a view definition,
19807 * instead of a rule.
19808 */
19809 is_view = (rinfo->ev_type == '1' && rinfo->is_instead);
19810
19811 query = createPQExpBuffer();
19812 cmd = createPQExpBuffer();
19815
19816 qtabname = pg_strdup(fmtId(tbinfo->dobj.name));
19817
19818 if (is_view)
19819 {
19820 PQExpBuffer result;
19821
19822 /*
19823 * We need OR REPLACE here because we'll be replacing a dummy view.
19824 * Otherwise this should look largely like the regular view dump code.
19825 */
19826 appendPQExpBuffer(cmd, "CREATE OR REPLACE VIEW %s",
19828 if (nonemptyReloptions(tbinfo->reloptions))
19829 {
19830 appendPQExpBufferStr(cmd, " WITH (");
19831 appendReloptionsArrayAH(cmd, tbinfo->reloptions, "", fout);
19832 appendPQExpBufferChar(cmd, ')');
19833 }
19834 result = createViewAsClause(fout, tbinfo);
19835 appendPQExpBuffer(cmd, " AS\n%s", result->data);
19836 destroyPQExpBuffer(result);
19837 if (tbinfo->checkoption != NULL)
19838 appendPQExpBuffer(cmd, "\n WITH %s CHECK OPTION",
19839 tbinfo->checkoption);
19840 appendPQExpBufferStr(cmd, ";\n");
19841 }
19842 else
19843 {
19844 /* In the rule case, just print pg_get_ruledef's result verbatim */
19845 appendPQExpBuffer(query,
19846 "SELECT pg_catalog.pg_get_ruledef('%u'::pg_catalog.oid)",
19847 rinfo->dobj.catId.oid);
19848
19849 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
19850
19851 if (PQntuples(res) != 1)
19852 pg_fatal("query to get rule \"%s\" for table \"%s\" failed: wrong number of rows returned",
19853 rinfo->dobj.name, tbinfo->dobj.name);
19854
19855 printfPQExpBuffer(cmd, "%s\n", PQgetvalue(res, 0, 0));
19856
19857 PQclear(res);
19858 }
19859
19860 /*
19861 * Add the command to alter the rules replication firing semantics if it
19862 * differs from the default.
19863 */
19864 if (rinfo->ev_enabled != 'O')
19865 {
19866 appendPQExpBuffer(cmd, "ALTER TABLE %s ", fmtQualifiedDumpable(tbinfo));
19867 switch (rinfo->ev_enabled)
19868 {
19869 case 'A':
19870 appendPQExpBuffer(cmd, "ENABLE ALWAYS RULE %s;\n",
19871 fmtId(rinfo->dobj.name));
19872 break;
19873 case 'R':
19874 appendPQExpBuffer(cmd, "ENABLE REPLICA RULE %s;\n",
19875 fmtId(rinfo->dobj.name));
19876 break;
19877 case 'D':
19878 appendPQExpBuffer(cmd, "DISABLE RULE %s;\n",
19879 fmtId(rinfo->dobj.name));
19880 break;
19881 }
19882 }
19883
19884 if (is_view)
19885 {
19886 /*
19887 * We can't DROP a view's ON SELECT rule. Instead, use CREATE OR
19888 * REPLACE VIEW to replace the rule with something with minimal
19889 * dependencies.
19890 */
19891 PQExpBuffer result;
19892
19893 appendPQExpBuffer(delcmd, "CREATE OR REPLACE VIEW %s",
19896 appendPQExpBuffer(delcmd, " AS\n%s;\n", result->data);
19897 destroyPQExpBuffer(result);
19898 }
19899 else
19900 {
19901 appendPQExpBuffer(delcmd, "DROP RULE %s ",
19902 fmtId(rinfo->dobj.name));
19903 appendPQExpBuffer(delcmd, "ON %s;\n",
19905 }
19906
19907 appendPQExpBuffer(ruleprefix, "RULE %s ON",
19908 fmtId(rinfo->dobj.name));
19909
19910 tag = psprintf("%s %s", tbinfo->dobj.name, rinfo->dobj.name);
19911
19912 if (rinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
19913 ArchiveEntry(fout, rinfo->dobj.catId, rinfo->dobj.dumpId,
19914 ARCHIVE_OPTS(.tag = tag,
19915 .namespace = tbinfo->dobj.namespace->dobj.name,
19916 .owner = tbinfo->rolname,
19917 .description = "RULE",
19918 .section = SECTION_POST_DATA,
19919 .createStmt = cmd->data,
19920 .dropStmt = delcmd->data));
19921
19922 /* Dump rule comments */
19923 if (rinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
19925 tbinfo->dobj.namespace->dobj.name,
19926 tbinfo->rolname,
19927 rinfo->dobj.catId, 0, rinfo->dobj.dumpId);
19928
19929 free(tag);
19930 destroyPQExpBuffer(query);
19931 destroyPQExpBuffer(cmd);
19934 free(qtabname);
19935}
19936
19937/*
19938 * getExtensionMembership --- obtain extension membership data
19939 *
19940 * We need to identify objects that are extension members as soon as they're
19941 * loaded, so that we can correctly determine whether they need to be dumped.
19942 * Generally speaking, extension member objects will get marked as *not* to
19943 * be dumped, as they will be recreated by the single CREATE EXTENSION
19944 * command. However, in binary upgrade mode we still need to dump the members
19945 * individually.
19946 */
19947void
19949 int numExtensions)
19950{
19951 PQExpBuffer query;
19952 PGresult *res;
19953 int ntups,
19954 i;
19955 int i_classid,
19956 i_objid,
19957 i_refobjid;
19958 ExtensionInfo *ext;
19959
19960 /* Nothing to do if no extensions */
19961 if (numExtensions == 0)
19962 return;
19963
19964 query = createPQExpBuffer();
19965
19966 /* refclassid constraint is redundant but may speed the search */
19967 appendPQExpBufferStr(query, "SELECT "
19968 "classid, objid, refobjid "
19969 "FROM pg_depend "
19970 "WHERE refclassid = 'pg_extension'::regclass "
19971 "AND deptype = 'e' "
19972 "ORDER BY 3");
19973
19974 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
19975
19976 ntups = PQntuples(res);
19977
19978 i_classid = PQfnumber(res, "classid");
19979 i_objid = PQfnumber(res, "objid");
19980 i_refobjid = PQfnumber(res, "refobjid");
19981
19982 /*
19983 * Since we ordered the SELECT by referenced ID, we can expect that
19984 * multiple entries for the same extension will appear together; this
19985 * saves on searches.
19986 */
19987 ext = NULL;
19988
19989 for (i = 0; i < ntups; i++)
19990 {
19991 CatalogId objId;
19992 Oid extId;
19993
19994 objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
19995 objId.oid = atooid(PQgetvalue(res, i, i_objid));
19997
19998 if (ext == NULL ||
19999 ext->dobj.catId.oid != extId)
20001
20002 if (ext == NULL)
20003 {
20004 /* shouldn't happen */
20005 pg_log_warning("could not find referenced extension %u", extId);
20006 continue;
20007 }
20008
20009 recordExtensionMembership(objId, ext);
20010 }
20011
20012 PQclear(res);
20013
20014 destroyPQExpBuffer(query);
20015}
20016
20017/*
20018 * processExtensionTables --- deal with extension configuration tables
20019 *
20020 * There are two parts to this process:
20021 *
20022 * 1. Identify and create dump records for extension configuration tables.
20023 *
20024 * Extensions can mark tables as "configuration", which means that the user
20025 * is able and expected to modify those tables after the extension has been
20026 * loaded. For these tables, we dump out only the data- the structure is
20027 * expected to be handled at CREATE EXTENSION time, including any indexes or
20028 * foreign keys, which brings us to-
20029 *
20030 * 2. Record FK dependencies between configuration tables.
20031 *
20032 * Due to the FKs being created at CREATE EXTENSION time and therefore before
20033 * the data is loaded, we have to work out what the best order for reloading
20034 * the data is, to avoid FK violations when the tables are restored. This is
20035 * not perfect- we can't handle circular dependencies and if any exist they
20036 * will cause an invalid dump to be produced (though at least all of the data
20037 * is included for a user to manually restore). This is currently documented
20038 * but perhaps we can provide a better solution in the future.
20039 */
20040void
20042 int numExtensions)
20043{
20044 DumpOptions *dopt = fout->dopt;
20045 PQExpBuffer query;
20046 PGresult *res;
20047 int ntups,
20048 i;
20049 int i_conrelid,
20051
20052 /* Nothing to do if no extensions */
20053 if (numExtensions == 0)
20054 return;
20055
20056 /*
20057 * Identify extension configuration tables and create TableDataInfo
20058 * objects for them, ensuring their data will be dumped even though the
20059 * tables themselves won't be.
20060 *
20061 * Note that we create TableDataInfo objects even in schema-only mode, ie,
20062 * user data in a configuration table is treated like schema data. This
20063 * seems appropriate since system data in a config table would get
20064 * reloaded by CREATE EXTENSION. If the extension is not listed in the
20065 * list of extensions to be included, none of its data is dumped.
20066 */
20067 for (i = 0; i < numExtensions; i++)
20068 {
20070 char *extconfig = curext->extconfig;
20071 char *extcondition = curext->extcondition;
20072 char **extconfigarray = NULL;
20073 char **extconditionarray = NULL;
20074 int nconfigitems = 0;
20075 int nconditionitems = 0;
20076
20077 /*
20078 * Check if this extension is listed as to include in the dump. If
20079 * not, any table data associated with it is discarded.
20080 */
20083 curext->dobj.catId.oid))
20084 continue;
20085
20086 /*
20087 * Check if this extension is listed as to exclude in the dump. If
20088 * yes, any table data associated with it is discarded.
20089 */
20092 curext->dobj.catId.oid))
20093 continue;
20094
20095 if (strlen(extconfig) != 0 || strlen(extcondition) != 0)
20096 {
20097 int j;
20098
20099 if (!parsePGArray(extconfig, &extconfigarray, &nconfigitems))
20100 pg_fatal("could not parse %s array", "extconfig");
20101 if (!parsePGArray(extcondition, &extconditionarray, &nconditionitems))
20102 pg_fatal("could not parse %s array", "extcondition");
20104 pg_fatal("mismatched number of configurations and conditions for extension");
20105
20106 for (j = 0; j < nconfigitems; j++)
20107 {
20110 bool dumpobj =
20111 curext->dobj.dump & DUMP_COMPONENT_DEFINITION;
20112
20114 if (configtbl == NULL)
20115 continue;
20116
20117 /*
20118 * Tables of not-to-be-dumped extensions shouldn't be dumped
20119 * unless the table or its schema is explicitly included
20120 */
20121 if (!(curext->dobj.dump & DUMP_COMPONENT_DEFINITION))
20122 {
20123 /* check table explicitly requested */
20124 if (table_include_oids.head != NULL &&
20126 configtbloid))
20127 dumpobj = true;
20128
20129 /* check table's schema explicitly requested */
20130 if (configtbl->dobj.namespace->dobj.dump &
20132 dumpobj = true;
20133 }
20134
20135 /* check table excluded by an exclusion switch */
20136 if (table_exclude_oids.head != NULL &&
20138 configtbloid))
20139 dumpobj = false;
20140
20141 /* check schema excluded by an exclusion switch */
20143 configtbl->dobj.namespace->dobj.catId.oid))
20144 dumpobj = false;
20145
20146 if (dumpobj)
20147 {
20149 if (configtbl->dataObj != NULL)
20150 {
20151 if (strlen(extconditionarray[j]) > 0)
20152 configtbl->dataObj->filtercond = pg_strdup(extconditionarray[j]);
20153 }
20154 }
20155 }
20156 }
20157 if (extconfigarray)
20161 }
20162
20163 /*
20164 * Now that all the TableDataInfo objects have been created for all the
20165 * extensions, check their FK dependencies and register them to try and
20166 * dump the data out in an order that they can be restored in.
20167 *
20168 * Note that this is not a problem for user tables as their FKs are
20169 * recreated after the data has been loaded.
20170 */
20171
20172 query = createPQExpBuffer();
20173
20174 printfPQExpBuffer(query,
20175 "SELECT conrelid, confrelid "
20176 "FROM pg_constraint "
20177 "JOIN pg_depend ON (objid = confrelid) "
20178 "WHERE contype = 'f' "
20179 "AND refclassid = 'pg_extension'::regclass "
20180 "AND classid = 'pg_class'::regclass;");
20181
20182 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
20183 ntups = PQntuples(res);
20184
20185 i_conrelid = PQfnumber(res, "conrelid");
20186 i_confrelid = PQfnumber(res, "confrelid");
20187
20188 /* Now get the dependencies and register them */
20189 for (i = 0; i < ntups; i++)
20190 {
20191 Oid conrelid,
20192 confrelid;
20194 *contable;
20195
20196 conrelid = atooid(PQgetvalue(res, i, i_conrelid));
20197 confrelid = atooid(PQgetvalue(res, i, i_confrelid));
20198 contable = findTableByOid(conrelid);
20199 reftable = findTableByOid(confrelid);
20200
20201 if (reftable == NULL ||
20202 reftable->dataObj == NULL ||
20203 contable == NULL ||
20204 contable->dataObj == NULL)
20205 continue;
20206
20207 /*
20208 * Make referencing TABLE_DATA object depend on the referenced table's
20209 * TABLE_DATA object.
20210 */
20211 addObjectDependency(&contable->dataObj->dobj,
20212 reftable->dataObj->dobj.dumpId);
20213 }
20214 PQclear(res);
20215 destroyPQExpBuffer(query);
20216}
20217
20218/*
20219 * getDependencies --- obtain available dependency data
20220 */
20221static void
20223{
20224 PQExpBuffer query;
20225 PGresult *res;
20226 int ntups,
20227 i;
20228 int i_classid,
20229 i_objid,
20231 i_refobjid,
20232 i_deptype;
20233 DumpableObject *dobj,
20234 *refdobj;
20235
20236 pg_log_info("reading dependency data");
20237
20238 query = createPQExpBuffer();
20239
20240 /*
20241 * Messy query to collect the dependency data we need. Note that we
20242 * ignore the sub-object column, so that dependencies of or on a column
20243 * look the same as dependencies of or on a whole table.
20244 *
20245 * PIN dependencies aren't interesting, and EXTENSION dependencies were
20246 * already processed by getExtensionMembership.
20247 */
20248 appendPQExpBufferStr(query, "SELECT "
20249 "classid, objid, refclassid, refobjid, deptype "
20250 "FROM pg_depend "
20251 "WHERE deptype != 'p' AND deptype != 'e'\n");
20252
20253 /*
20254 * Since we don't treat pg_amop entries as separate DumpableObjects, we
20255 * have to translate their dependencies into dependencies of their parent
20256 * opfamily. Ignore internal dependencies though, as those will point to
20257 * their parent opclass, which we needn't consider here (and if we did,
20258 * it'd just result in circular dependencies). Also, "loose" opfamily
20259 * entries will have dependencies on their parent opfamily, which we
20260 * should drop since they'd likewise become useless self-dependencies.
20261 * (But be sure to keep deps on *other* opfamilies; see amopsortfamily.)
20262 */
20263 appendPQExpBufferStr(query, "UNION ALL\n"
20264 "SELECT 'pg_opfamily'::regclass AS classid, amopfamily AS objid, refclassid, refobjid, deptype "
20265 "FROM pg_depend d, pg_amop o "
20266 "WHERE deptype NOT IN ('p', 'e', 'i') AND "
20267 "classid = 'pg_amop'::regclass AND objid = o.oid "
20268 "AND NOT (refclassid = 'pg_opfamily'::regclass AND amopfamily = refobjid)\n");
20269
20270 /* Likewise for pg_amproc entries */
20271 appendPQExpBufferStr(query, "UNION ALL\n"
20272 "SELECT 'pg_opfamily'::regclass AS classid, amprocfamily AS objid, refclassid, refobjid, deptype "
20273 "FROM pg_depend d, pg_amproc p "
20274 "WHERE deptype NOT IN ('p', 'e', 'i') AND "
20275 "classid = 'pg_amproc'::regclass AND objid = p.oid "
20276 "AND NOT (refclassid = 'pg_opfamily'::regclass AND amprocfamily = refobjid)\n");
20277
20278 /* Sort the output for efficiency below */
20279 appendPQExpBufferStr(query, "ORDER BY 1,2");
20280
20281 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
20282
20283 ntups = PQntuples(res);
20284
20285 i_classid = PQfnumber(res, "classid");
20286 i_objid = PQfnumber(res, "objid");
20287 i_refclassid = PQfnumber(res, "refclassid");
20288 i_refobjid = PQfnumber(res, "refobjid");
20289 i_deptype = PQfnumber(res, "deptype");
20290
20291 /*
20292 * Since we ordered the SELECT by referencing ID, we can expect that
20293 * multiple entries for the same object will appear together; this saves
20294 * on searches.
20295 */
20296 dobj = NULL;
20297
20298 for (i = 0; i < ntups; i++)
20299 {
20300 CatalogId objId;
20302 char deptype;
20303
20304 objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
20305 objId.oid = atooid(PQgetvalue(res, i, i_objid));
20306 refobjId.tableoid = atooid(PQgetvalue(res, i, i_refclassid));
20307 refobjId.oid = atooid(PQgetvalue(res, i, i_refobjid));
20308 deptype = *(PQgetvalue(res, i, i_deptype));
20309
20310 if (dobj == NULL ||
20311 dobj->catId.tableoid != objId.tableoid ||
20312 dobj->catId.oid != objId.oid)
20313 dobj = findObjectByCatalogId(objId);
20314
20315 /*
20316 * Failure to find objects mentioned in pg_depend is not unexpected,
20317 * since for example we don't collect info about TOAST tables.
20318 */
20319 if (dobj == NULL)
20320 {
20321#ifdef NOT_USED
20322 pg_log_warning("no referencing object %u %u",
20323 objId.tableoid, objId.oid);
20324#endif
20325 continue;
20326 }
20327
20329
20330 if (refdobj == NULL)
20331 {
20332#ifdef NOT_USED
20333 pg_log_warning("no referenced object %u %u",
20334 refobjId.tableoid, refobjId.oid);
20335#endif
20336 continue;
20337 }
20338
20339 /*
20340 * For 'x' dependencies, mark the object for later; we still add the
20341 * normal dependency, for possible ordering purposes. Currently
20342 * pg_dump_sort.c knows to put extensions ahead of all object types
20343 * that could possibly depend on them, but this is safer.
20344 */
20345 if (deptype == 'x')
20346 dobj->depends_on_ext = true;
20347
20348 /*
20349 * Ordinarily, table rowtypes have implicit dependencies on their
20350 * tables. However, for a composite type the implicit dependency goes
20351 * the other way in pg_depend; which is the right thing for DROP but
20352 * it doesn't produce the dependency ordering we need. So in that one
20353 * case, we reverse the direction of the dependency.
20354 */
20355 if (deptype == 'i' &&
20356 dobj->objType == DO_TABLE &&
20357 refdobj->objType == DO_TYPE)
20359 else
20360 /* normal case */
20362 }
20363
20364 PQclear(res);
20365
20366 destroyPQExpBuffer(query);
20367}
20368
20369
20370/*
20371 * createBoundaryObjects - create dummy DumpableObjects to represent
20372 * dump section boundaries.
20373 */
20374static DumpableObject *
20376{
20378
20380
20381 dobjs[0].objType = DO_PRE_DATA_BOUNDARY;
20382 dobjs[0].catId = nilCatalogId;
20383 AssignDumpId(dobjs + 0);
20384 dobjs[0].name = pg_strdup("PRE-DATA BOUNDARY");
20385
20386 dobjs[1].objType = DO_POST_DATA_BOUNDARY;
20387 dobjs[1].catId = nilCatalogId;
20388 AssignDumpId(dobjs + 1);
20389 dobjs[1].name = pg_strdup("POST-DATA BOUNDARY");
20390
20391 return dobjs;
20392}
20393
20394/*
20395 * addBoundaryDependencies - add dependencies as needed to enforce the dump
20396 * section boundaries.
20397 */
20398static void
20401{
20404 int i;
20405
20406 for (i = 0; i < numObjs; i++)
20407 {
20408 DumpableObject *dobj = dobjs[i];
20409
20410 /*
20411 * The classification of object types here must match the SECTION_xxx
20412 * values assigned during subsequent ArchiveEntry calls!
20413 */
20414 switch (dobj->objType)
20415 {
20416 case DO_NAMESPACE:
20417 case DO_EXTENSION:
20418 case DO_TYPE:
20419 case DO_SHELL_TYPE:
20420 case DO_FUNC:
20421 case DO_AGG:
20422 case DO_OPERATOR:
20423 case DO_ACCESS_METHOD:
20424 case DO_OPCLASS:
20425 case DO_OPFAMILY:
20426 case DO_COLLATION:
20427 case DO_CONVERSION:
20428 case DO_TABLE:
20429 case DO_TABLE_ATTACH:
20430 case DO_ATTRDEF:
20431 case DO_PROCLANG:
20432 case DO_CAST:
20433 case DO_DUMMY_TYPE:
20434 case DO_TSPARSER:
20435 case DO_TSDICT:
20436 case DO_TSTEMPLATE:
20437 case DO_TSCONFIG:
20438 case DO_FDW:
20439 case DO_FOREIGN_SERVER:
20440 case DO_TRANSFORM:
20441 /* Pre-data objects: must come before the pre-data boundary */
20443 break;
20444 case DO_TABLE_DATA:
20445 case DO_SEQUENCE_SET:
20446 case DO_LARGE_OBJECT:
20448 /* Data objects: must come between the boundaries */
20451 break;
20452 case DO_INDEX:
20453 case DO_INDEX_ATTACH:
20454 case DO_STATSEXT:
20455 case DO_REFRESH_MATVIEW:
20456 case DO_TRIGGER:
20457 case DO_EVENT_TRIGGER:
20458 case DO_DEFAULT_ACL:
20459 case DO_POLICY:
20460 case DO_PUBLICATION:
20461 case DO_PUBLICATION_REL:
20463 case DO_SUBSCRIPTION:
20465 /* Post-data objects: must come after the post-data boundary */
20467 break;
20468 case DO_RULE:
20469 /* Rules are post-data, but only if dumped separately */
20470 if (((RuleInfo *) dobj)->separate)
20471 addObjectDependency(dobj, postDataBound->dumpId);
20472 break;
20473 case DO_CONSTRAINT:
20474 case DO_FK_CONSTRAINT:
20475 /* Constraints are post-data, but only if dumped separately */
20476 if (((ConstraintInfo *) dobj)->separate)
20477 addObjectDependency(dobj, postDataBound->dumpId);
20478 break;
20480 /* nothing to do */
20481 break;
20483 /* must come after the pre-data boundary */
20484 addObjectDependency(dobj, preDataBound->dumpId);
20485 break;
20486 case DO_REL_STATS:
20487 /* stats section varies by parent object type, DATA or POST */
20488 if (((RelStatsInfo *) dobj)->section == SECTION_DATA)
20489 {
20490 addObjectDependency(dobj, preDataBound->dumpId);
20491 addObjectDependency(postDataBound, dobj->dumpId);
20492 }
20493 else
20494 addObjectDependency(dobj, postDataBound->dumpId);
20495 break;
20496 }
20497 }
20498}
20499
20500
20501/*
20502 * BuildArchiveDependencies - create dependency data for archive TOC entries
20503 *
20504 * The raw dependency data obtained by getDependencies() is not terribly
20505 * useful in an archive dump, because in many cases there are dependency
20506 * chains linking through objects that don't appear explicitly in the dump.
20507 * For example, a view will depend on its _RETURN rule while the _RETURN rule
20508 * will depend on other objects --- but the rule will not appear as a separate
20509 * object in the dump. We need to adjust the view's dependencies to include
20510 * whatever the rule depends on that is included in the dump.
20511 *
20512 * Just to make things more complicated, there are also "special" dependencies
20513 * such as the dependency of a TABLE DATA item on its TABLE, which we must
20514 * not rearrange because pg_restore knows that TABLE DATA only depends on
20515 * its table. In these cases we must leave the dependencies strictly as-is
20516 * even if they refer to not-to-be-dumped objects.
20517 *
20518 * To handle this, the convention is that "special" dependencies are created
20519 * during ArchiveEntry calls, and an archive TOC item that has any such
20520 * entries will not be touched here. Otherwise, we recursively search the
20521 * DumpableObject data structures to build the correct dependencies for each
20522 * archive TOC item.
20523 */
20524static void
20526{
20528 TocEntry *te;
20529
20530 /* Scan all TOC entries in the archive */
20531 for (te = AH->toc->next; te != AH->toc; te = te->next)
20532 {
20533 DumpableObject *dobj;
20534 DumpId *dependencies;
20535 int nDeps;
20536 int allocDeps;
20537
20538 /* No need to process entries that will not be dumped */
20539 if (te->reqs == 0)
20540 continue;
20541 /* Ignore entries that already have "special" dependencies */
20542 if (te->nDeps > 0)
20543 continue;
20544 /* Otherwise, look up the item's original DumpableObject, if any */
20545 dobj = findObjectByDumpId(te->dumpId);
20546 if (dobj == NULL)
20547 continue;
20548 /* No work if it has no dependencies */
20549 if (dobj->nDeps <= 0)
20550 continue;
20551 /* Set up work array */
20552 allocDeps = 64;
20553 dependencies = pg_malloc_array(DumpId, allocDeps);
20554 nDeps = 0;
20555 /* Recursively find all dumpable dependencies */
20556 findDumpableDependencies(AH, dobj,
20557 &dependencies, &nDeps, &allocDeps);
20558 /* And save 'em ... */
20559 if (nDeps > 0)
20560 {
20561 dependencies = pg_realloc_array(dependencies, DumpId, nDeps);
20562 te->dependencies = dependencies;
20563 te->nDeps = nDeps;
20564 }
20565 else
20566 free(dependencies);
20567 }
20568}
20569
20570/* Recursive search subroutine for BuildArchiveDependencies */
20571static void
20573 DumpId **dependencies, int *nDeps, int *allocDeps)
20574{
20575 int i;
20576
20577 /*
20578 * Ignore section boundary objects: if we search through them, we'll
20579 * report lots of bogus dependencies.
20580 */
20581 if (dobj->objType == DO_PRE_DATA_BOUNDARY ||
20583 return;
20584
20585 for (i = 0; i < dobj->nDeps; i++)
20586 {
20587 DumpId depid = dobj->dependencies[i];
20588
20589 if (TocIDRequired(AH, depid) != 0)
20590 {
20591 /* Object will be dumped, so just reference it as a dependency */
20592 if (*nDeps >= *allocDeps)
20593 {
20594 *allocDeps *= 2;
20595 *dependencies = pg_realloc_array(*dependencies, DumpId, *allocDeps);
20596 }
20597 (*dependencies)[*nDeps] = depid;
20598 (*nDeps)++;
20599 }
20600 else
20601 {
20602 /*
20603 * Object will not be dumped, so recursively consider its deps. We
20604 * rely on the assumption that sortDumpableObjects already broke
20605 * any dependency loops, else we might recurse infinitely.
20606 */
20608
20609 if (otherdobj)
20611 dependencies, nDeps, allocDeps);
20612 }
20613 }
20614}
20615
20616
20617/*
20618 * getFormattedTypeName - retrieve a nicely-formatted type name for the
20619 * given type OID.
20620 *
20621 * This does not guarantee to schema-qualify the output, so it should not
20622 * be used to create the target object name for CREATE or ALTER commands.
20623 *
20624 * Note that the result is cached and must not be freed by the caller.
20625 */
20626static const char *
20628{
20630 char *result;
20631 PQExpBuffer query;
20632 PGresult *res;
20633
20634 if (oid == 0)
20635 {
20636 if ((opts & zeroAsStar) != 0)
20637 return "*";
20638 else if ((opts & zeroAsNone) != 0)
20639 return "NONE";
20640 }
20641
20642 /* see if we have the result cached in the type's TypeInfo record */
20643 typeInfo = findTypeByOid(oid);
20644 if (typeInfo && typeInfo->ftypname)
20645 return typeInfo->ftypname;
20646
20647 query = createPQExpBuffer();
20648 appendPQExpBuffer(query, "SELECT pg_catalog.format_type('%u'::pg_catalog.oid, NULL)",
20649 oid);
20650
20651 res = ExecuteSqlQueryForSingleRow(fout, query->data);
20652
20653 /* result of format_type is already quoted */
20654 result = pg_strdup(PQgetvalue(res, 0, 0));
20655
20656 PQclear(res);
20657 destroyPQExpBuffer(query);
20658
20659 /*
20660 * Cache the result for re-use in later requests, if possible. If we
20661 * don't have a TypeInfo for the type, the string will be leaked once the
20662 * caller is done with it ... but that case really should not happen, so
20663 * leaking if it does seems acceptable.
20664 */
20665 if (typeInfo)
20666 typeInfo->ftypname = result;
20667
20668 return result;
20669}
20670
20671/*
20672 * Return a column list clause for the given relation.
20673 *
20674 * Special case: if there are no undropped columns in the relation, return
20675 * "", not an invalid "()" column list.
20676 */
20677static const char *
20679{
20680 int numatts = ti->numatts;
20681 char **attnames = ti->attnames;
20682 bool *attisdropped = ti->attisdropped;
20683 char *attgenerated = ti->attgenerated;
20684 bool needComma;
20685 int i;
20686
20687 appendPQExpBufferChar(buffer, '(');
20688 needComma = false;
20689 for (i = 0; i < numatts; i++)
20690 {
20691 if (attisdropped[i])
20692 continue;
20693 if (attgenerated[i])
20694 continue;
20695 if (needComma)
20696 appendPQExpBufferStr(buffer, ", ");
20697 appendPQExpBufferStr(buffer, fmtId(attnames[i]));
20698 needComma = true;
20699 }
20700
20701 if (!needComma)
20702 return ""; /* no undropped columns */
20703
20704 appendPQExpBufferChar(buffer, ')');
20705 return buffer->data;
20706}
20707
/*
 * Check if a reloptions array is nonempty.
 */
static bool
nonemptyReloptions(const char *reloptions)
{
	/* An empty array prints as "{}" (two characters), so require more */
	if (reloptions == NULL)
		return false;
	return strlen(reloptions) > 2;
}
20717
20718/*
20719 * Format a reloptions array and append it to the given buffer.
20720 *
20721 * "prefix" is prepended to the option names; typically it's "" or "toast.".
20722 */
20723static void
20724appendReloptionsArrayAH(PQExpBuffer buffer, const char *reloptions,
20725 const char *prefix, Archive *fout)
20726{
20727 bool res;
20728
20729 res = appendReloptionsArray(buffer, reloptions, prefix, fout->encoding,
20730 fout->std_strings);
20731 if (!res)
20732 pg_log_warning("could not parse %s array", "reloptions");
20733}
20734
20735/*
20736 * read_dump_filters - retrieve object identifier patterns from file
20737 *
20738 * Parse the specified filter file for include and exclude patterns, and add
20739 * them to the relevant lists. If the filename is "-" then filters will be
20740 * read from STDIN rather than a file.
20741 */
20742static void
20744{
20746 char *objname;
20748 FilterObjectType objtype;
20749
20751
20752 while (filter_read_item(&fstate, &objname, &comtype, &objtype))
20753 {
20755 {
20756 switch (objtype)
20757 {
20759 break;
20766 pg_log_filter_error(&fstate, _("%s filter for \"%s\" is not allowed"),
20767 "include",
20768 filter_object_type_name(objtype));
20769 exit_nicely(1);
20770 break; /* unreachable */
20771
20774 break;
20777 break;
20780 dopt->include_everything = false;
20781 break;
20784 dopt->include_everything = false;
20785 break;
20788 objname);
20789 dopt->include_everything = false;
20790 break;
20791 }
20792 }
20794 {
20795 switch (objtype)
20796 {
20798 break;
20804 pg_log_filter_error(&fstate, _("%s filter for \"%s\" is not allowed"),
20805 "exclude",
20806 filter_object_type_name(objtype));
20807 exit_nicely(1);
20808 break;
20809
20812 break;
20815 objname);
20816 break;
20819 objname);
20820 break;
20823 break;
20826 break;
20829 objname);
20830 break;
20831 }
20832 }
20833 else
20834 {
20836 Assert(objtype == FILTER_OBJECT_TYPE_NONE);
20837 }
20838
20839 if (objname)
20840 free(objname);
20841 }
20842
20844}
Acl * acldefault(ObjectType objtype, Oid ownerId)
Definition acl.c:804
#define InvalidAttrNumber
Definition attnum.h:23
int lo_read(int fd, char *buf, int len)
Definition be-fsstubs.c:154
static void help(void)
Definition pg_config.c:71
void recordAdditionalCatalogID(CatalogId catId, DumpableObject *dobj)
Definition common.c:719
void recordExtensionMembership(CatalogId catId, ExtensionInfo *ext)
Definition common.c:1063
FuncInfo * findFuncByOid(Oid oid)
Definition common.c:918
TableInfo * findTableByOid(Oid oid)
Definition common.c:863
ExtensionInfo * findExtensionByOid(Oid oid)
Definition common.c:1008
CollInfo * findCollationByOid(Oid oid)
Definition common.c:972
SubscriptionInfo * findSubscriptionByOid(Oid oid)
Definition common.c:1044
OprInfo * findOprByOid(Oid oid)
Definition common.c:936
NamespaceInfo * findNamespaceByOid(Oid oid)
Definition common.c:990
DumpId createDumpId(void)
Definition common.c:745
void addObjectDependency(DumpableObject *dobj, DumpId refId)
Definition common.c:818
DumpableObject * findObjectByDumpId(DumpId dumpId)
Definition common.c:765
void parseOidArray(const char *str, Oid *array, int arraysize)
Definition common.c:1111
ExtensionInfo * findOwningExtension(CatalogId catalogId)
Definition common.c:1087
TableInfo * getSchemaData(Archive *fout, int *numTablesPtr)
Definition common.c:98
TypeInfo * findTypeByOid(Oid oid)
Definition common.c:899
DumpableObject * findObjectByCatalogId(CatalogId catalogId)
Definition common.c:778
void AssignDumpId(DumpableObject *dobj)
Definition common.c:657
void getDumpableObjects(DumpableObject ***objs, int *numObjs)
Definition common.c:797
PublicationInfo * findPublicationByOid(Oid oid)
Definition common.c:1026
void on_exit_close_archive(Archive *AHX)
Definition parallel.c:330
void init_parallel_dump_utils(void)
Definition parallel.c:238
#define PG_MAX_JOBS
Definition parallel.h:48
bool is_superuser(void)
Definition common.c:2480
uint32 BlockNumber
Definition block.h:31
static void cleanup(void)
Definition bootstrap.c:838
static const gbtree_vinfo tinfo
Definition btree_bit.c:109
#define PG_INT32_MAX
Definition c.h:615
#define ngettext(s, p, n)
Definition c.h:1198
#define INT64_FORMAT
Definition c.h:576
#define Assert(condition)
Definition c.h:885
#define PG_TEXTDOMAIN(domain)
Definition c.h:1231
int64_t int64
Definition c.h:555
#define PG_INT16_MIN
Definition c.h:611
#define CppAsString2(x)
Definition c.h:440
int32_t int32
Definition c.h:554
#define PG_INT64_MAX
Definition c.h:618
#define PG_INT64_MIN
Definition c.h:617
uint32_t uint32
Definition c.h:558
#define lengthof(array)
Definition c.h:815
#define PG_INT32_MIN
Definition c.h:614
#define StaticAssertDecl(condition, errmessage)
Definition c.h:950
#define PG_INT16_MAX
Definition c.h:612
#define OidIsValid(objectId)
Definition c.h:800
int nspid
void set_pglocale_pgservice(const char *argv0, const char *app)
Definition exec.c:430
int main(void)
char * supports_compression(const pg_compress_specification compression_spec)
Definition compress_io.c:87
char * validate_compress_specification(pg_compress_specification *spec)
bool parse_compress_algorithm(char *name, pg_compress_algorithm *algorithm)
Definition compression.c:49
void parse_compress_specification(pg_compress_algorithm algorithm, char *specification, pg_compress_specification *result)
#define PG_COMPRESSION_OPTION_WORKERS
Definition compression.h:29
pg_compress_algorithm
Definition compression.h:22
@ PG_COMPRESSION_NONE
Definition compression.h:23
void parse_compress_options(const char *option, char **algorithm, char **detail)
#define ALWAYS_SECURE_SEARCH_PATH_SQL
Definition connect.h:25
char * generate_restrict_key(void)
Definition dumputils.c:973
bool buildACLCommands(const char *name, const char *subname, const char *nspname, const char *type, const char *acls, const char *baseacls, const char *owner, const char *prefix, int remoteVersion, PQExpBuffer sql)
Definition dumputils.c:104
bool valid_restrict_key(const char *restrict_key)
Definition dumputils.c:997
void buildShSecLabelQuery(const char *catalog_name, Oid objectId, PQExpBuffer sql)
Definition dumputils.c:678
void makeAlterConfigCommand(PGconn *conn, const char *configitem, const char *type, const char *name, const char *type2, const char *name2, PQExpBuffer buf)
Definition dumputils.c:865
bool buildDefaultACLCommands(const char *type, const char *nspname, const char *acls, const char *acldefault, const char *owner, int remoteVersion, PQExpBuffer sql)
Definition dumputils.c:366
char * sanitize_line(const char *str, bool want_hyphen)
Definition dumputils.c:52
bool variable_is_guc_list_quote(const char *name)
Definition dumputils.c:730
void quoteAclUserName(PQExpBuffer output, const char *input)
Definition dumputils.c:585
void emitShSecLabels(PGconn *conn, PGresult *res, PQExpBuffer buffer, const char *objtype, const char *objname)
Definition dumputils.c:696
Datum arg
Definition elog.c:1322
#define _(x)
Definition elog.c:95
char * PQdb(const PGconn *conn)
const char * PQparameterStatus(const PGconn *conn, const char *paramName)
int PQclientEncoding(const PGconn *conn)
char * PQerrorMessage(const PGconn *conn)
int PQsetClientEncoding(PGconn *conn, const char *encoding)
void PQfreemem(void *ptr)
Definition fe-exec.c:4049
Oid PQftype(const PGresult *res, int field_num)
Definition fe-exec.c:3736
int PQfnumber(const PGresult *res, const char *field_name)
Definition fe-exec.c:3606
int PQgetCopyData(PGconn *conn, char **buffer, int async)
Definition fe-exec.c:2833
int lo_close(PGconn *conn, int fd)
Definition fe-lobj.c:96
int lo_open(PGconn *conn, Oid lobjId, int mode)
Definition fe-lobj.c:57
void * pg_malloc(size_t size)
Definition fe_memutils.c:47
char * pg_strdup(const char *in)
Definition fe_memutils.c:85
void pg_free(void *ptr)
#define pg_realloc_array(pointer, type, count)
Definition fe_memutils.h:63
#define pg_malloc_array(type, count)
Definition fe_memutils.h:56
#define pg_malloc0_object(type)
Definition fe_memutils.h:51
#define pg_malloc_object(type)
Definition fe_memutils.h:50
#define pg_malloc0_array(type, count)
Definition fe_memutils.h:57
DataDirSyncMethod
Definition file_utils.h:28
@ DATA_DIR_SYNC_METHOD_FSYNC
Definition file_utils.h:29
void filter_init(FilterStateData *fstate, const char *filename, exit_function f_exit)
Definition filter.c:36
void filter_free(FilterStateData *fstate)
Definition filter.c:60
const char * filter_object_type_name(FilterObjectType fot)
Definition filter.c:82
bool filter_read_item(FilterStateData *fstate, char **objname, FilterCommandType *comtype, FilterObjectType *objtype)
Definition filter.c:392
void pg_log_filter_error(FilterStateData *fstate, const char *fmt,...)
Definition filter.c:154
FilterObjectType
Definition filter.h:48
@ FILTER_OBJECT_TYPE_TABLE_DATA_AND_CHILDREN
Definition filter.h:51
@ FILTER_OBJECT_TYPE_SCHEMA
Definition filter.h:57
@ FILTER_OBJECT_TYPE_INDEX
Definition filter.h:56
@ FILTER_OBJECT_TYPE_TRIGGER
Definition filter.h:60
@ FILTER_OBJECT_TYPE_FOREIGN_DATA
Definition filter.h:54
@ FILTER_OBJECT_TYPE_DATABASE
Definition filter.h:52
@ FILTER_OBJECT_TYPE_FUNCTION
Definition filter.h:55
@ FILTER_OBJECT_TYPE_TABLE_DATA
Definition filter.h:50
@ FILTER_OBJECT_TYPE_NONE
Definition filter.h:49
@ FILTER_OBJECT_TYPE_TABLE_AND_CHILDREN
Definition filter.h:59
@ FILTER_OBJECT_TYPE_EXTENSION
Definition filter.h:53
@ FILTER_OBJECT_TYPE_TABLE
Definition filter.h:58
FilterCommandType
Definition filter.h:38
@ FILTER_COMMAND_TYPE_NONE
Definition filter.h:39
@ FILTER_COMMAND_TYPE_EXCLUDE
Definition filter.h:41
@ FILTER_COMMAND_TYPE_INCLUDE
Definition filter.h:40
int getopt_long(int argc, char *const argv[], const char *optstring, const struct option *longopts, int *longindex)
Definition getopt_long.c:60
#define no_argument
Definition getopt_long.h:25
#define required_argument
Definition getopt_long.h:26
#define comment
#define storage
long val
Definition informix.c:689
static struct @174 value
static char * encoding
Definition initdb.c:139
static DataDirSyncMethod sync_method
Definition initdb.c:170
static int pg_cmp_u32(uint32 a, uint32 b)
Definition int.h:719
int j
Definition isn.c:78
int i
Definition isn.c:77
#define PQgetvalue
#define PQgetResult
#define PQgetlength
#define PQclear
#define PQnfields
#define PQresultStatus
#define PQgetisnull
#define PQfname
#define PQntuples
@ PGRES_COMMAND_OK
Definition libpq-fe.h:125
@ PGRES_COPY_OUT
Definition libpq-fe.h:131
@ PGRES_TUPLES_OK
Definition libpq-fe.h:128
#define INV_READ
Definition libpq-fs.h:22
void pg_logging_increase_verbosity(void)
Definition logging.c:185
void pg_logging_init(const char *argv0)
Definition logging.c:83
void pg_logging_set_level(enum pg_log_level new_level)
Definition logging.c:176
#define pg_log_error(...)
Definition logging.h:106
#define pg_log_error_hint(...)
Definition logging.h:112
#define pg_log_info(...)
Definition logging.h:124
@ PG_LOG_WARNING
Definition logging.h:38
#define pg_log_error_detail(...)
Definition logging.h:109
const char * progname
Definition main.c:44
char * pstrdup(const char *in)
Definition mcxt.c:1781
bool option_parse_int(const char *optarg, const char *optname, int min_range, int max_range, int *result)
bool parse_sync_method(const char *optarg, DataDirSyncMethod *sync_method)
Oid oprid(Operator op)
Definition parse_oper.c:240
static AmcheckOptions opts
Definition pg_amcheck.c:112
NameData attname
char attalign
int16 attlen
NameData rolname
Definition pg_authid.h:36
@ SECTION_NONE
Definition pg_backup.h:57
@ SECTION_POST_DATA
Definition pg_backup.h:60
@ SECTION_PRE_DATA
Definition pg_backup.h:58
@ SECTION_DATA
Definition pg_backup.h:59
int DumpId
Definition pg_backup.h:285
int EndLO(Archive *AHX, Oid oid)
void ProcessArchiveRestoreOptions(Archive *AHX)
RestoreOptions * NewRestoreOptions(void)
#define InvalidDumpId
Definition pg_backup.h:287
#define appendStringLiteralAH(buf, str, AH)
Definition pg_backup.h:344
int StartLO(Archive *AHX, Oid oid)
enum _archiveFormat ArchiveFormat
void RestoreArchive(Archive *AHX, bool append_data)
void ConnectDatabaseAhx(Archive *AHX, const ConnParams *cparams, bool isReconnect)
void CloseArchive(Archive *AHX)
Archive * CreateArchive(const char *FileSpec, const ArchiveFormat fmt, const pg_compress_specification compression_spec, bool dosync, ArchiveMode mode, SetupWorkerPtrType setupDumpWorker, DataDirSyncMethod sync_method)
@ archModeWrite
Definition pg_backup.h:51
@ archModeAppend
Definition pg_backup.h:50
@ PREPQUERY_DUMPFUNC
Definition pg_backup.h:72
@ PREPQUERY_DUMPTABLEATTACH
Definition pg_backup.h:75
@ PREPQUERY_DUMPBASETYPE
Definition pg_backup.h:67
@ PREPQUERY_DUMPRANGETYPE
Definition pg_backup.h:74
@ PREPQUERY_DUMPOPR
Definition pg_backup.h:73
@ PREPQUERY_DUMPEXTSTATSOBJSTATS
Definition pg_backup.h:71
@ PREPQUERY_GETATTRIBUTESTATS
Definition pg_backup.h:76
@ PREPQUERY_DUMPDOMAIN
Definition pg_backup.h:69
@ PREPQUERY_DUMPCOMPOSITETYPE
Definition pg_backup.h:68
@ PREPQUERY_DUMPAGG
Definition pg_backup.h:66
@ PREPQUERY_GETCOLUMNACLS
Definition pg_backup.h:77
@ PREPQUERY_GETDOMAINCONSTRAINTS
Definition pg_backup.h:78
@ PREPQUERY_DUMPENUMTYPE
Definition pg_backup.h:70
int archprintf(Archive *AH, const char *fmt,...) pg_attribute_printf(2
void SetArchiveOptions(Archive *AH, DumpOptions *dopt, RestoreOptions *ropt)
#define NUM_PREP_QUERIES
Definition pg_backup.h:81
void archputs(const char *s, Archive *AH)
@ archUnknown
Definition pg_backup.h:41
@ archTar
Definition pg_backup.h:43
@ archCustom
Definition pg_backup.h:42
@ archDirectory
Definition pg_backup.h:45
@ archNull
Definition pg_backup.h:44
void InitDumpOptions(DumpOptions *opts)
void WriteData(Archive *AHX, const void *data, size_t dLen)
int TocIDRequired(ArchiveHandle *AH, DumpId id)
TocEntry * ArchiveEntry(Archive *AHX, CatalogId catalogId, DumpId dumpId, ArchiveOpts *opts)
#define ARCHIVE_OPTS(...)
#define LOBBUFSIZE
#define REQ_STATS
int(* DataDumperPtr)(Archive *AH, const void *userArg)
void ExecuteSqlStatement(Archive *AHX, const char *query)
PGresult * ExecuteSqlQuery(Archive *AHX, const char *query, ExecStatusType status)
PGresult * ExecuteSqlQueryForSingleRow(Archive *fout, const char *query)
void exit_nicely(int code)
void set_dump_section(const char *arg, int *dumpSections)
#define pg_fatal(...)
static char format
static char * label
static PgChecksumMode mode
#define FUNC_MAX_ARGS
const void size_t len
char datlocprovider
Definition pg_database.h:46
NameData datname
Definition pg_database.h:37
bool datistemplate
Definition pg_database.h:49
int32 datconnlimit
Definition pg_database.h:61
static void expand_schema_name_patterns(Archive *fout, SimpleStringList *patterns, SimpleOidList *oids, bool strict_names)
Definition pg_dump.c:1660
static const CatalogId nilCatalogId
Definition pg_dump.c:191
static void dumpEncoding(Archive *AH)
Definition pg_dump.c:3835
void getConstraints(Archive *fout, TableInfo tblinfo[], int numTables)
Definition pg_dump.c:8297
static DumpId dumpACL(Archive *fout, DumpId objDumpId, DumpId altDumpId, const char *type, const char *name, const char *subname, const char *nspname, const char *tag, const char *owner, const DumpableAcl *dacl)
Definition pg_dump.c:16426
static SimpleStringList schema_include_patterns
Definition pg_dump.c:167
static void dumpAttrDef(Archive *fout, const AttrDefInfo *adinfo)
Definition pg_dump.c:18196
ExtensionInfo * getExtensions(Archive *fout, int *numExtensions)
Definition pg_dump.c:6126
static void selectDumpableProcLang(ProcLangInfo *plang, Archive *fout)
Definition pg_dump.c:2212
static void collectBinaryUpgradeClassOids(Archive *fout)
Definition pg_dump.c:5826
static PQExpBuffer createDummyViewAsClause(Archive *fout, const TableInfo *tbinfo)
Definition pg_dump.c:17070
static void dumpUserMappings(Archive *fout, const char *servername, const char *namespace, const char *owner, CatalogId catalogId, DumpId dumpId)
Definition pg_dump.c:16240
static void dumpPublicationNamespace(Archive *fout, const PublicationSchemaInfo *pubsinfo)
Definition pg_dump.c:4952
static void addBoundaryDependencies(DumpableObject **dobjs, int numObjs, DumpableObject *boundaryObjs)
Definition pg_dump.c:20399
void getPublicationNamespaces(Archive *fout)
Definition pg_dump.c:4745
static void dumpSearchPath(Archive *AH)
Definition pg_dump.c:3884
static int ncomments
Definition pg_dump.c:203
static void selectDumpableTable(TableInfo *tbinfo, Archive *fout)
Definition pg_dump.c:2081
static DumpableObject * createBoundaryObjects(void)
Definition pg_dump.c:20375
static char * convertTSFunction(Archive *fout, Oid funcOid)
Definition pg_dump.c:14413
static void dumpDatabase(Archive *fout)
Definition pg_dump.c:3282
static SimpleStringList table_include_patterns
Definition pg_dump.c:172
static void append_depends_on_extension(Archive *fout, PQExpBuffer create, const DumpableObject *dobj, const char *catalog, const char *keyword, const char *objname)
Definition pg_dump.c:5639
static Oid get_next_possible_free_pg_type_oid(Archive *fout, PQExpBuffer upgrade_query)
Definition pg_dump.c:5684
static void dumpNamespace(Archive *fout, const NamespaceInfo *nspinfo)
Definition pg_dump.c:11875
static bool forcePartitionRootLoad(const TableInfo *tbinfo)
Definition pg_dump.c:2842
static void dumpCast(Archive *fout, const CastInfo *cast)
Definition pg_dump.c:13889
static SimpleOidList schema_exclude_oids
Definition pg_dump.c:170
static bool have_extra_float_digits
Definition pg_dump.c:194
static void dumpIndex(Archive *fout, const IndxInfo *indxinfo)
Definition pg_dump.c:18286
void getPartitioningInfo(Archive *fout)
Definition pg_dump.c:7786
static int nbinaryUpgradeClassOids
Definition pg_dump.c:211
static void dumpBaseType(Archive *fout, const TypeInfo *tyinfo)
Definition pg_dump.c:12468
OidOptions
Definition pg_dump.c:145
@ zeroIsError
Definition pg_dump.c:146
@ zeroAsStar
Definition pg_dump.c:147
@ zeroAsNone
Definition pg_dump.c:148
static char * dumpRelationStats_dumper(Archive *fout, const void *userArg, const TocEntry *te)
Definition pg_dump.c:11119
static SimpleOidList extension_include_oids
Definition pg_dump.c:186
static void dumpTSDictionary(Archive *fout, const TSDictInfo *dictinfo)
Definition pg_dump.c:15812
static void dumpAgg(Archive *fout, const AggInfo *agginfo)
Definition pg_dump.c:15388
static int extra_float_digits
Definition pg_dump.c:195
static int SequenceItemCmp(const void *p1, const void *p2)
Definition pg_dump.c:19145
static void dumpRelationStats(Archive *fout, const RelStatsInfo *rsinfo)
Definition pg_dump.c:11392
static void dumpTableComment(Archive *fout, const TableInfo *tbinfo, const char *reltypename)
Definition pg_dump.c:11418
static SimpleStringList extension_include_patterns
Definition pg_dump.c:185
static void selectDumpableNamespace(NamespaceInfo *nsinfo, Archive *fout)
Definition pg_dump.c:1995
InhInfo * getInherits(Archive *fout, int *numInherits)
Definition pg_dump.c:7730
void getForeignDataWrappers(Archive *fout)
Definition pg_dump.c:10428
static void dumpTrigger(Archive *fout, const TriggerInfo *tginfo)
Definition pg_dump.c:19565
static void binary_upgrade_set_type_oids_by_rel(Archive *fout, PQExpBuffer upgrade_buffer, const TableInfo *tbinfo)
Definition pg_dump.c:5795
static void dumpTable(Archive *fout, const TableInfo *tbinfo)
Definition pg_dump.c:16881
static SimpleOidList extension_exclude_oids
Definition pg_dump.c:189
static SimpleStringList table_exclude_patterns
Definition pg_dump.c:175
static PQExpBuffer createViewAsClause(Archive *fout, const TableInfo *tbinfo)
Definition pg_dump.c:17021
static void dumpStatisticsExt(Archive *fout, const StatsExtInfo *statsextinfo)
Definition pg_dump.c:18480
void getPolicies(Archive *fout, TableInfo tblinfo[], int numTables)
Definition pg_dump.c:4236
static void dumpRangeType(Archive *fout, const TypeInfo *tyinfo)
Definition pg_dump.c:12246
void getExtensionMembership(Archive *fout, ExtensionInfo extinfo[], int numExtensions)
Definition pg_dump.c:19948
static void dumpComment(Archive *fout, const char *type, const char *name, const char *namespace, const char *owner, CatalogId catalogId, int subid, DumpId dumpId)
Definition pg_dump.c:10997
static char * getFormattedOperatorName(const char *oproid)
Definition pg_dump.c:14383
static char * format_function_signature(Archive *fout, const FuncInfo *finfo, bool honor_quotes)
Definition pg_dump.c:13438
static int nseclabels
Definition pg_dump.c:207
static pg_compress_algorithm compression_algorithm
Definition pg_dump.c:159
static void dumpStdStrings(Archive *AH)
Definition pg_dump.c:3860
static void dumpConstraint(Archive *fout, const ConstraintInfo *coninfo)
Definition pg_dump.c:18785
static void dumpType(Archive *fout, const TypeInfo *tyinfo)
Definition pg_dump.c:12075
static void dumpTableAttach(Archive *fout, const TableAttachInfo *attachinfo)
Definition pg_dump.c:18128
void getTypes(Archive *fout)
Definition pg_dump.c:6201
static void dumpAccessMethod(Archive *fout, const AccessMethodInfo *aminfo)
Definition pg_dump.c:14435
static void dumpOpr(Archive *fout, const OprInfo *oprinfo)
Definition pg_dump.c:14123
static void selectDumpableStatisticsObject(StatsExtInfo *sobj, Archive *fout)
Definition pg_dump.c:2337
static void selectDumpablePublicationObject(DumpableObject *dobj, Archive *fout)
Definition pg_dump.c:2319
static void dumpSequenceData(Archive *fout, const TableDataInfo *tdinfo)
Definition pg_dump.c:19476
static void dumpFunc(Archive *fout, const FuncInfo *finfo)
Definition pg_dump.c:13467
static void selectDumpableDefaultACL(DefaultACLInfo *dinfo, DumpOptions *dopt)
Definition pg_dump.c:2165
static void BuildArchiveDependencies(Archive *fout)
Definition pg_dump.c:20525
static RelStatsInfo * getRelationStatistics(Archive *fout, DumpableObject *rel, int32 relpages, char *reltuples, int32 relallvisible, int32 relallfrozen, char relkind, char **indAttNames, int nindAttNames)
Definition pg_dump.c:7111
static const char *const SeqTypeNames[]
Definition pg_dump.c:119
void getOwnedSeqs(Archive *fout, TableInfo tblinfo[], int numTables)
Definition pg_dump.c:7665
static void makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo)
Definition pg_dump.c:3039
static int nsequences
Definition pg_dump.c:215
static const char * getAttrName(int attrnum, const TableInfo *tblInfo)
Definition pg_dump.c:18257
static void dumpForeignServer(Archive *fout, const ForeignServerInfo *srvinfo)
Definition pg_dump.c:16140
static RoleNameItem * rolenames
Definition pg_dump.c:198
static void collectRoleNames(Archive *fout)
Definition pg_dump.c:10733
static PGresult * fetchAttributeStats(Archive *fout)
Definition pg_dump.c:11031
static void appendReloptionsArrayAH(PQExpBuffer buffer, const char *reloptions, const char *prefix, Archive *fout)
Definition pg_dump.c:20724
void getOpclasses(Archive *fout)
Definition pg_dump.c:6647
void getForeignServers(Archive *fout)
Definition pg_dump.c:10512
void getFuncs(Archive *fout)
Definition pg_dump.c:6916
static void dumpTableData(Archive *fout, const TableDataInfo *tdinfo)
Definition pg_dump.c:2870
static void prohibit_crossdb_refs(PGconn *conn, const char *dbname, const char *pattern)
Definition pg_dump.c:1920
static bool dosync
Definition pg_dump.c:152
static int dumpTableData_copy(Archive *fout, const void *dcontext)
Definition pg_dump.c:2377
#define MAX_BLOBS_PER_ARCHIVE_ENTRY
Definition pg_dump.c:231
static const char * getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts)
Definition pg_dump.c:20627
static void getDependencies(Archive *fout)
Definition pg_dump.c:20222
static void buildMatViewRefreshDependencies(Archive *fout)
Definition pg_dump.c:3126
void getTSDictionaries(Archive *fout)
Definition pg_dump.c:10244
static void binary_upgrade_set_type_oids_by_type_oid(Archive *fout, PQExpBuffer upgrade_buffer, Oid pg_type_oid, bool force_array_type, bool include_multirange_type)
Definition pg_dump.c:5715
#define DUMP_DEFAULT_ROWS_PER_INSERT
Definition pg_dump.c:224
void getPublicationTables(Archive *fout, TableInfo tblinfo[], int numTables)
Definition pg_dump.c:4825
static SeqType parse_sequence_type(const char *name)
Definition pg_dump.c:19129
static const char * getRoleName(const char *roleoid_str)
Definition pg_dump.c:10697
static void dumpShellType(Archive *fout, const ShellTypeInfo *stinfo)
Definition pg_dump.c:13237
static SequenceItem * sequences
Definition pg_dump.c:214
static void refreshMatViewData(Archive *fout, const TableDataInfo *tdinfo)
Definition pg_dump.c:2985
static int findComments(Oid classoid, Oid objoid, CommentItem **items)
Definition pg_dump.c:11516
static SimpleStringList foreign_servers_include_patterns
Definition pg_dump.c:182
static void selectDumpableCast(CastInfo *cast, Archive *fout)
Definition pg_dump.c:2187
void getCasts(Archive *fout)
Definition pg_dump.c:9046
static void dumpPublication(Archive *fout, const PublicationInfo *pubinfo)
Definition pg_dump.c:4642
static void dumpPolicy(Archive *fout, const PolicyInfo *polinfo)
Definition pg_dump.c:4408
void getIndexes(Archive *fout, TableInfo tblinfo[], int numTables)
Definition pg_dump.c:7846
static void setupDumpWorker(Archive *AH)
Definition pg_dump.c:1593
static void addConstrChildIdxDeps(DumpableObject *dobj, const IndxInfo *refidx)
Definition pg_dump.c:8456
void getTSConfigurations(Archive *fout)
Definition pg_dump.c:10369
static int nrolenames
Definition pg_dump.c:199
static int findSecLabels(Oid classoid, Oid objoid, SecLabelItem **items)
Definition pg_dump.c:16716
static SimpleStringList table_include_patterns_and_children
Definition pg_dump.c:173
static char * convertRegProcReference(const char *proc)
Definition pg_dump.c:14342
static void getAdditionalACLs(Archive *fout)
Definition pg_dump.c:10768
static void getTableDataFKConstraints(void)
Definition pg_dump.c:3241
static void getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, char relkind)
Definition pg_dump.c:3020
static SimpleOidList table_exclude_oids
Definition pg_dump.c:177
SeqType
Definition pg_dump.c:113
@ SEQTYPE_BIGINT
Definition pg_dump.c:116
@ SEQTYPE_INTEGER
Definition pg_dump.c:115
@ SEQTYPE_SMALLINT
Definition pg_dump.c:114
void getAccessMethods(Archive *fout)
Definition pg_dump.c:6573
void getConversions(Archive *fout)
Definition pg_dump.c:6511
void getRules(Archive *fout)
Definition pg_dump.c:8591
static void dumpDomain(Archive *fout, const TypeInfo *tyinfo)
Definition pg_dump.c:12717
void getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
Definition pg_dump.c:9240
static void collectComments(Archive *fout)
Definition pg_dump.c:11593
static void getDomainConstraints(Archive *fout, TypeInfo *tyinfo)
Definition pg_dump.c:8479
static void dumpOpfamily(Archive *fout, const OpfamilyInfo *opfinfo)
Definition pg_dump.c:14784
static void dumpDefaultACL(Archive *fout, const DefaultACLInfo *daclinfo)
Definition pg_dump.c:16334
static void selectDumpableObject(DumpableObject *dobj, Archive *fout)
Definition pg_dump.c:2355
static void dumpTSTemplate(Archive *fout, const TSTemplateInfo *tmplinfo)
Definition pg_dump.c:15892
static void dumpIndexAttach(Archive *fout, const IndexAttachInfo *attachinfo)
Definition pg_dump.c:18437
static void dumpSubscriptionTable(Archive *fout, const SubRelInfo *subrinfo)
Definition pg_dump.c:5413
void getCollations(Archive *fout)
Definition pg_dump.c:6445
static char * format_function_arguments(const FuncInfo *finfo, const char *funcargs, bool is_agg)
Definition pg_dump.c:13415
static int strict_names
Definition pg_dump.c:157
static void dumpTransform(Archive *fout, const TransformInfo *transform)
Definition pg_dump.c:13994
void getAggregates(Archive *fout)
Definition pg_dump.c:6775
static void dumpLO(Archive *fout, const LoInfo *loinfo)
Definition pg_dump.c:4100
void getNamespaces(Archive *fout)
Definition pg_dump.c:5994
static void dumpPublicationTable(Archive *fout, const PublicationRelInfo *pubrinfo)
Definition pg_dump.c:4995
void getPublications(Archive *fout)
Definition pg_dump.c:4526
static void binary_upgrade_extension_member(PQExpBuffer upgrade_buffer, const DumpableObject *dobj, const char *objtype, const char *objname, const char *objnamespace)
Definition pg_dump.c:5950
static void dumpDumpableObject(Archive *fout, DumpableObject *dobj)
Definition pg_dump.c:11678
static void getLOs(Archive *fout)
Definition pg_dump.c:3946
static void dumpDatabaseConfig(Archive *AH, PQExpBuffer outbuf, const char *dbname, Oid dboid)
Definition pg_dump.c:3791
void getTSParsers(Archive *fout)
Definition pg_dump.c:10170
static void setup_connection(Archive *AH, const char *dumpencoding, const char *dumpsnapshot, char *use_role)
Definition pg_dump.c:1412
static void dumpTableConstraintComment(Archive *fout, const ConstraintInfo *coninfo)
Definition pg_dump.c:19106
static void dumpUndefinedType(Archive *fout, const TypeInfo *tyinfo)
Definition pg_dump.c:12404
static void selectDumpableAccessMethod(AccessMethodInfo *method, Archive *fout)
Definition pg_dump.c:2245
static const char * fmtCopyColumnList(const TableInfo *ti, PQExpBuffer buffer)
Definition pg_dump.c:20678
static void expand_table_name_patterns(Archive *fout, SimpleStringList *patterns, SimpleOidList *oids, bool strict_names, bool with_child_tables)
Definition pg_dump.c:1824
static void findDumpableDependencies(ArchiveHandle *AH, const DumpableObject *dobj, DumpId **dependencies, int *nDeps, int *allocDeps)
Definition pg_dump.c:20572
static void determineNotNullFlags(Archive *fout, PGresult *res, int r, TableInfo *tbinfo, int j, int i_notnull_name, int i_notnull_comment, int i_notnull_invalidoid, int i_notnull_noinherit, int i_notnull_islocal, PQExpBuffer *invalidnotnulloids)
Definition pg_dump.c:10028
static void dumpTableSchema(Archive *fout, const TableInfo *tbinfo)
Definition pg_dump.c:17110
static void dumpTSParser(Archive *fout, const TSParserInfo *prsinfo)
Definition pg_dump.c:15748
static void expand_foreign_server_name_patterns(Archive *fout, SimpleStringList *patterns, SimpleOidList *oids)
Definition pg_dump.c:1772
TableInfo * getTables(Archive *fout, int *numTables)
Definition pg_dump.c:7188
static void dumpRule(Archive *fout, const RuleInfo *rinfo)
Definition pg_dump.c:19781
static void dumpCompositeType(Archive *fout, const TypeInfo *tyinfo)
Definition pg_dump.c:12942
static void dumpEnumType(Archive *fout, const TypeInfo *tyinfo)
Definition pg_dump.c:12106
static void dumpExtension(Archive *fout, const ExtensionInfo *extinfo)
Definition pg_dump.c:11952
#define fmtQualifiedDumpable(obj)
Definition pg_dump.c:236
static bool nonemptyReloptions(const char *reloptions)
Definition pg_dump.c:20712
static SimpleStringList extension_exclude_patterns
Definition pg_dump.c:188
static BinaryUpgradeClassOidItem * binaryUpgradeClassOids
Definition pg_dump.c:210
static SimpleOidList table_include_oids
Definition pg_dump.c:174
static void dumpStatisticsExtStats(Archive *fout, const StatsExtInfo *statsextinfo)
Definition pg_dump.c:18557
void getExtendedStatistics(Archive *fout)
Definition pg_dump.c:8215
static NamespaceInfo * findNamespace(Oid nsoid)
Definition pg_dump.c:6108
static char * get_synchronized_snapshot(Archive *fout)
Definition pg_dump.c:1608
static int dumpLOs(Archive *fout, const void *arg)
Definition pg_dump.c:4190
static void dumpSubscription(Archive *fout, const SubscriptionInfo *subinfo)
Definition pg_dump.c:5482
static void appendNamedArgument(PQExpBuffer out, Archive *fout, const char *argname, const char *argtype, const char *argval)
Definition pg_dump.c:11013
void processExtensionTables(Archive *fout, ExtensionInfo extinfo[], int numExtensions)
Definition pg_dump.c:20041
static void dumpEventTrigger(Archive *fout, const EventTriggerInfo *evtinfo)
Definition pg_dump.c:19691
static int BinaryUpgradeClassOidItemCmp(const void *p1, const void *p2)
Definition pg_dump.c:5810
static void dumpCommentExtended(Archive *fout, const char *type, const char *name, const char *namespace, const char *owner, CatalogId catalogId, int subid, DumpId dumpId, const char *initdb_comment)
Definition pg_dump.c:10897
void getDefaultACLs(Archive *fout)
Definition pg_dump.c:10600
static SimpleStringList tabledata_exclude_patterns
Definition pg_dump.c:178
static void dumpConversion(Archive *fout, const ConvInfo *convinfo)
Definition pg_dump.c:15260
static void dumpForeignDataWrapper(Archive *fout, const FdwInfo *fdwinfo)
Definition pg_dump.c:16070
static void dumpProcLang(Archive *fout, const ProcLangInfo *plang)
Definition pg_dump.c:13283
static void dumpSecLabel(Archive *fout, const char *type, const char *name, const char *namespace, const char *owner, CatalogId catalogId, int subid, DumpId dumpId)
Definition pg_dump.c:16554
void getSubscriptions(Archive *fout)
Definition pg_dump.c:5096
static void collectSecLabels(Archive *fout)
Definition pg_dump.c:16795
static void selectDumpableExtension(ExtensionInfo *extinfo, DumpOptions *dopt)
Definition pg_dump.c:2280
static void collectSequences(Archive *fout)
Definition pg_dump.c:19160
static Oid g_last_builtin_oid
Definition pg_dump.c:154
#define MAX_ATTR_STATS_RELS
Definition pg_dump.c:218
void getTriggers(Archive *fout, TableInfo tblinfo[], int numTables)
Definition pg_dump.c:8688
void getTransforms(Archive *fout)
Definition pg_dump.c:9156
void getEventTriggers(Archive *fout)
Definition pg_dump.c:8884
static ArchiveFormat parseArchiveFormat(const char *format, ArchiveMode *mode)
Definition pg_dump.c:1622
static void read_dump_filters(const char *filename, DumpOptions *dopt)
Definition pg_dump.c:20743
static void dumpTSConfig(Archive *fout, const TSConfigInfo *cfginfo)
Definition pg_dump.c:15950
static SecLabelItem * seclabels
Definition pg_dump.c:206
static SimpleStringList tabledata_exclude_patterns_and_children
Definition pg_dump.c:179
static char * get_language_name(Archive *fout, Oid langid)
Definition pg_dump.c:9135
static bool checkExtensionMembership(DumpableObject *dobj, Archive *fout)
Definition pg_dump.c:1945
static CommentItem * comments
Definition pg_dump.c:202
static int dumpTableData_insert(Archive *fout, const void *dcontext)
Definition pg_dump.c:2548
static SimpleOidList tabledata_exclude_oids
Definition pg_dump.c:180
static SimpleStringList table_exclude_patterns_and_children
Definition pg_dump.c:176
static void binary_upgrade_set_pg_class_oids(Archive *fout, PQExpBuffer upgrade_buffer, Oid pg_class_oid)
Definition pg_dump.c:5860
void getTSTemplates(Archive *fout)
Definition pg_dump.c:10310
static void set_restrict_relation_kind(Archive *AH, const char *value)
Definition pg_dump.c:5075
static char * format_aggregate_signature(const AggInfo *agginfo, Archive *fout, bool honor_quotes)
Definition pg_dump.c:15356
void getProcLangs(Archive *fout)
Definition pg_dump.c:8962
static void dumpSequence(Archive *fout, const TableInfo *tbinfo)
Definition pg_dump.c:19222
bool shouldPrintColumn(const DumpOptions *dopt, const TableInfo *tbinfo, int colno)
Definition pg_dump.c:10155
static TableInfo * getRootTableInfo(const TableInfo *tbinfo)
Definition pg_dump.c:2817
void getSubscriptionRelations(Archive *fout)
Definition pg_dump.c:5327
void getOperators(Archive *fout)
Definition pg_dump.c:6369
static SimpleOidList foreign_servers_include_oids
Definition pg_dump.c:183
static void dumpCollation(Archive *fout, const CollInfo *collinfo)
Definition pg_dump.c:15003
static void dumpTableSecLabel(Archive *fout, const TableInfo *tbinfo, const char *reltypename)
Definition pg_dump.c:16634
static void dumpCompositeTypeColComments(Archive *fout, const TypeInfo *tyinfo, PGresult *res)
Definition pg_dump.c:13148
static void expand_extension_name_patterns(Archive *fout, SimpleStringList *patterns, SimpleOidList *oids, bool strict_names)
Definition pg_dump.c:1719
void getOpfamilies(Archive *fout)
Definition pg_dump.c:6710
static void selectDumpableType(TypeInfo *tyinfo, Archive *fout)
Definition pg_dump.c:2120
static SimpleOidList schema_include_oids
Definition pg_dump.c:168
static void dumpOpclass(Archive *fout, const OpclassInfo *opcinfo)
Definition pg_dump.c:14503
static SimpleStringList schema_exclude_patterns
Definition pg_dump.c:169
#define DUMP_COMPONENT_COMMENT
Definition pg_dump.h:111
#define DUMP_COMPONENT_DATA
Definition pg_dump.h:110
#define DUMP_COMPONENT_USERMAP
Definition pg_dump.h:115
#define DUMP_COMPONENT_POLICY
Definition pg_dump.h:114
#define DUMP_COMPONENT_SECLABEL
Definition pg_dump.h:112
#define DUMP_COMPONENT_ALL
Definition pg_dump.h:117
#define DUMP_COMPONENT_ACL
Definition pg_dump.h:113
#define DUMP_COMPONENT_NONE
Definition pg_dump.h:108
#define DUMP_COMPONENTS_REQUIRING_LOCK
Definition pg_dump.h:141
void sortDumpableObjects(DumpableObject **objs, int numObjs, DumpId preBoundaryId, DumpId postBoundaryId)
#define DUMP_COMPONENT_DEFINITION
Definition pg_dump.h:109
@ DO_EVENT_TRIGGER
Definition pg_dump.h:80
@ DO_REFRESH_MATVIEW
Definition pg_dump.h:81
@ DO_POLICY
Definition pg_dump.h:82
@ DO_CAST
Definition pg_dump.h:64
@ DO_FOREIGN_SERVER
Definition pg_dump.h:73
@ DO_PRE_DATA_BOUNDARY
Definition pg_dump.h:78
@ DO_PROCLANG
Definition pg_dump.h:63
@ DO_TYPE
Definition pg_dump.h:43
@ DO_INDEX
Definition pg_dump.h:56
@ DO_COLLATION
Definition pg_dump.h:51
@ DO_LARGE_OBJECT
Definition pg_dump.h:76
@ DO_TSCONFIG
Definition pg_dump.h:71
@ DO_OPERATOR
Definition pg_dump.h:47
@ DO_FK_CONSTRAINT
Definition pg_dump.h:62
@ DO_CONSTRAINT
Definition pg_dump.h:61
@ DO_SUBSCRIPTION
Definition pg_dump.h:87
@ DO_DEFAULT_ACL
Definition pg_dump.h:74
@ DO_FDW
Definition pg_dump.h:72
@ DO_SUBSCRIPTION_REL
Definition pg_dump.h:88
@ DO_REL_STATS
Definition pg_dump.h:86
@ DO_SEQUENCE_SET
Definition pg_dump.h:66
@ DO_ATTRDEF
Definition pg_dump.h:55
@ DO_PUBLICATION_REL
Definition pg_dump.h:84
@ DO_TABLE_ATTACH
Definition pg_dump.h:54
@ DO_OPCLASS
Definition pg_dump.h:49
@ DO_INDEX_ATTACH
Definition pg_dump.h:57
@ DO_TSTEMPLATE
Definition pg_dump.h:70
@ DO_STATSEXT
Definition pg_dump.h:58
@ DO_FUNC
Definition pg_dump.h:45
@ DO_POST_DATA_BOUNDARY
Definition pg_dump.h:79
@ DO_LARGE_OBJECT_DATA
Definition pg_dump.h:77
@ DO_OPFAMILY
Definition pg_dump.h:50
@ DO_TRANSFORM
Definition pg_dump.h:75
@ DO_ACCESS_METHOD
Definition pg_dump.h:48
@ DO_PUBLICATION_TABLE_IN_SCHEMA
Definition pg_dump.h:85
@ DO_CONVERSION
Definition pg_dump.h:52
@ DO_TRIGGER
Definition pg_dump.h:60
@ DO_RULE
Definition pg_dump.h:59
@ DO_DUMMY_TYPE
Definition pg_dump.h:67
@ DO_TSDICT
Definition pg_dump.h:69
@ DO_TSPARSER
Definition pg_dump.h:68
@ DO_EXTENSION
Definition pg_dump.h:42
@ DO_TABLE_DATA
Definition pg_dump.h:65
@ DO_PUBLICATION
Definition pg_dump.h:83
@ DO_TABLE
Definition pg_dump.h:53
@ DO_NAMESPACE
Definition pg_dump.h:41
@ DO_AGG
Definition pg_dump.h:46
@ DO_SHELL_TYPE
Definition pg_dump.h:44
void sortDumpableObjectsByTypeName(DumpableObject **objs, int numObjs)
#define DUMP_COMPONENT_STATISTICS
Definition pg_dump.h:116
static int statistics_only
Definition pg_dumpall.c:124
static Archive * fout
Definition pg_dumpall.c:138
static int no_statistics
Definition pg_dumpall.c:115
static int no_data
Definition pg_dumpall.c:113
static int no_schema
Definition pg_dumpall.c:114
static char * filename
Definition pg_dumpall.c:132
static int with_statistics
Definition pg_dumpall.c:120
PGDLLIMPORT int optind
Definition getopt.c:51
PGDLLIMPORT char * optarg
Definition getopt.c:53
NameData subname
static char buf[DEFAULT_XLOG_SEG_SIZE]
char typalign
Definition pg_type.h:178
#define pg_encoding_to_char
Definition pg_wchar.h:630
static char * tablespace
Definition pgbench.c:217
#define pg_log_warning(...)
Definition pgfnames.c:24
int pg_strcasecmp(const char *s1, const char *s2)
#define snprintf
Definition port.h:260
const char * get_progname(const char *argv0)
Definition path.c:652
#define printf(...)
Definition port.h:266
off_t pgoff_t
Definition port.h:421
#define InvalidOid
unsigned int Oid
#define atooid(x)
void printfPQExpBuffer(PQExpBuffer str, const char *fmt,...)
PQExpBuffer createPQExpBuffer(void)
Definition pqexpbuffer.c:72
void initPQExpBuffer(PQExpBuffer str)
Definition pqexpbuffer.c:90
void resetPQExpBuffer(PQExpBuffer str)
void appendPQExpBuffer(PQExpBuffer str, const char *fmt,...)
void appendBinaryPQExpBuffer(PQExpBuffer str, const char *data, size_t datalen)
void destroyPQExpBuffer(PQExpBuffer str)
void appendPQExpBufferChar(PQExpBuffer str, char ch)
void appendPQExpBufferStr(PQExpBuffer str, const char *data)
void termPQExpBuffer(PQExpBuffer str)
char * c
static int fb(int x)
char * psprintf(const char *fmt,...)
Definition psprintf.c:43
Oid RelFileNumber
Definition relpath.h:25
#define RelFileNumberIsValid(relnumber)
Definition relpath.h:27
bool quote_all_identifiers
Definition ruleutils.c:339
void simple_string_list_append(SimpleStringList *list, const char *val)
Definition simple_list.c:63
bool simple_oid_list_member(SimpleOidList *list, Oid val)
Definition simple_list.c:45
void simple_oid_list_append(SimpleOidList *list, Oid val)
Definition simple_list.c:26
#define free(a)
#define PG_DEPENDENCIES_KEY_ATTRIBUTES
#define PG_DEPENDENCIES_KEY_DEGREE
#define PG_DEPENDENCIES_KEY_DEPENDENCY
#define PG_NDISTINCT_KEY_ATTRIBUTES
#define PG_NDISTINCT_KEY_NDISTINCT
PGconn * GetConnection(void)
Definition streamutil.c:60
char * dbname
Definition streamutil.c:49
PGconn * conn
Definition streamutil.c:52
const char * fmtId(const char *rawid)
void setFmtEncoding(int encoding)
void appendStringLiteralConn(PQExpBuffer buf, const char *str, PGconn *conn)
void appendPGArray(PQExpBuffer buffer, const char *value)
bool processSQLNamePattern(PGconn *conn, PQExpBuffer buf, const char *pattern, bool have_where, bool force_escape, const char *schemavar, const char *namevar, const char *altnamevar, const char *visibilityrule, PQExpBuffer dbnamebuf, int *dotcnt)
bool parsePGArray(const char *atext, char ***itemarray, int *nitems)
bool appendReloptionsArray(PQExpBuffer buffer, const char *reloptions, const char *prefix, int encoding, bool std_strings)
void appendStringLiteralDQ(PQExpBuffer buf, const char *str, const char *dqprefix)
int minRemoteVersion
Definition pg_backup.h:237
int remoteVersion
Definition pg_backup.h:234
DumpOptions * dopt
Definition pg_backup.h:229
bool * is_prepared
Definition pg_backup.h:256
char * searchpath
Definition pg_backup.h:248
bool isStandby
Definition pg_backup.h:235
int maxRemoteVersion
Definition pg_backup.h:238
bool std_strings
Definition pg_backup.h:245
int numWorkers
Definition pg_backup.h:240
int encoding
Definition pg_backup.h:244
char * use_role
Definition pg_backup.h:249
char * sync_snapshot_id
Definition pg_backup.h:241
int verbose
Definition pg_backup.h:232
RelFileNumber toast_index_relfilenumber
Definition pg_dump.c:108
RelFileNumber toast_relfilenumber
Definition pg_dump.c:106
RelFileNumber relfilenumber
Definition pg_dump.c:104
Oid tableoid
Definition pg_backup.h:281
Oid classoid
Definition pg_dump.c:86
Oid objoid
Definition pg_dump.c:87
int objsubid
Definition pg_dump.c:88
const char * descr
Definition pg_dump.c:85
const char * rolename
Definition pg_dump.c:80
Oid roleoid
Definition pg_dump.c:79
const char * provider
Definition pg_dump.c:93
Oid classoid
Definition pg_dump.c:95
int objsubid
Definition pg_dump.c:97
const char * label
Definition pg_dump.c:94
int64 minv
Definition pg_dump.c:134
int64 cache
Definition pg_dump.c:138
int64 startv
Definition pg_dump.c:136
int64 maxv
Definition pg_dump.c:135
bool is_called
Definition pg_dump.c:140
int64 incby
Definition pg_dump.c:137
int64 last_value
Definition pg_dump.c:139
SeqType seqtype
Definition pg_dump.c:132
bool cycled
Definition pg_dump.c:133
bool null_seqtuple
Definition pg_dump.c:141
SimpleOidListCell * head
Definition simple_list.h:28
struct SimplePtrListCell * next
Definition simple_list.h:48
char val[FLEXIBLE_ARRAY_MEMBER]
Definition simple_list.h:37
struct SimpleStringListCell * next
Definition simple_list.h:34
SimpleStringListCell * head
Definition simple_list.h:42
char * suboriginremotelsn
Definition pg_dump.h:728
bool subpasswordrequired
Definition pg_dump.h:717
const char * rolname
Definition pg_dump.h:711
char * subsynccommit
Definition pg_dump.h:724
char * subpublications
Definition pg_dump.h:726
char * subwalrcvtimeout
Definition pg_dump.h:725
char * subslotname
Definition pg_dump.h:723
char subtwophasestate
Definition pg_dump.h:715
bool subretaindeadtuples
Definition pg_dump.h:720
char * subconninfo
Definition pg_dump.h:722
DumpableObject dobj
Definition pg_dump.h:710
DumpableObject dobj
Definition pg_dump.h:270
ArchiveFormat format
struct _tocEntry * toc
DumpableObject dobj
Definition pg_dump.h:404
char * adef_expr
Definition pg_dump.h:407
TableInfo * adtable
Definition pg_dump.h:405
bool separate
Definition pg_dump.h:408
char * pgport
Definition pg_backup.h:88
char * pghost
Definition pg_backup.h:89
trivalue promptPassword
Definition pg_backup.h:91
char * username
Definition pg_backup.h:90
char * dbname
Definition pg_backup.h:87
TypeInfo * condomain
Definition pg_dump.h:519
TableInfo * contable
Definition pg_dump.h:518
DumpableObject dobj
Definition pg_dump.h:517
DumpId conindex
Definition pg_dump.h:523
bool condeferrable
Definition pg_dump.h:524
char * condef
Definition pg_dump.h:521
int no_toast_compression
Definition pg_backup.h:192
char * restrict_key
Definition pg_backup.h:220
int column_inserts
Definition pg_backup.h:185
bool dontOutputLOs
Definition pg_backup.h:208
int use_setsessauth
Definition pg_backup.h:198
int outputCreateDB
Definition pg_backup.h:206
bool include_everything
Definition pg_backup.h:203
int sequence_data
Definition pg_backup.h:212
int disable_dollar_quoting
Definition pg_backup.h:184
bool dumpSchema
Definition pg_backup.h:216
int serializable_deferrable
Definition pg_backup.h:194
int outputNoTableAm
Definition pg_backup.h:196
int enable_row_security
Definition pg_backup.h:199
char * outputSuperuser
Definition pg_backup.h:210
int no_security_labels
Definition pg_backup.h:190
int no_unlogged_table_data
Definition pg_backup.h:193
bool dumpStatistics
Definition pg_backup.h:218
int no_publications
Definition pg_backup.h:189
ConnParams cparams
Definition pg_backup.h:173
const char * lockWaitTimeout
Definition pg_backup.h:180
int no_subscriptions
Definition pg_backup.h:191
int load_via_partition_root
Definition pg_backup.h:200
int outputNoTablespaces
Definition pg_backup.h:197
int disable_triggers
Definition pg_backup.h:195
int outputNoOwner
Definition pg_backup.h:209
int binary_upgrade
Definition pg_backup.h:175
char privtype
Definition pg_dump.h:173
char * acldefault
Definition pg_dump.h:171
char * acl
Definition pg_dump.h:170
char * initprivs
Definition pg_dump.h:174
DumpComponents dump
Definition pg_dump.h:153
DumpId * dependencies
Definition pg_dump.h:159
DumpId dumpId
Definition pg_dump.h:151
DumpComponents components
Definition pg_dump.h:156
DumpableObjectType objType
Definition pg_dump.h:149
CatalogId catId
Definition pg_dump.h:150
DumpComponents dump_contains
Definition pg_dump.h:155
bool depends_on_ext
Definition pg_dump.h:158
DumpableObject dobj
Definition pg_dump.h:195
char * extconfig
Definition pg_dump.h:199
bool postponed_def
Definition pg_dump.h:248
Oid lang
Definition pg_dump.h:244
const char * rolname
Definition pg_dump.h:243
Oid * argtypes
Definition pg_dump.h:246
Oid prorettype
Definition pg_dump.h:247
DumpableObject dobj
Definition pg_dump.h:241
int nargs
Definition pg_dump.h:245
DumpableAcl dacl
Definition pg_dump.h:242
const char * rolname
Definition pg_dump.h:190
int32 nindAttNames
Definition pg_dump.h:462
char ** indAttNames
Definition pg_dump.h:461
int32 relpages
Definition pg_dump.h:451
int32 relallfrozen
Definition pg_dump.h:454
char * reltuples
Definition pg_dump.h:452
teSection section
Definition pg_dump.h:463
int32 relallvisible
Definition pg_dump.h:453
DumpableObject dobj
Definition pg_dump.h:450
int suppressDumpWarnings
Definition pg_backup.h:152
ConnParams cparams
Definition pg_backup.h:146
pg_compress_specification compression_spec
Definition pg_backup.h:150
int disable_dollar_quoting
Definition pg_backup.h:110
char * restrict_key
Definition pg_backup.h:168
const char * filename
Definition pg_backup.h:121
const char * lockWaitTimeout
Definition pg_backup.h:125
int enable_row_security
Definition pg_backup.h:159
DumpableObject dobj
Definition pg_dump.h:476
bool separate
Definition pg_dump.h:481
char ev_enabled
Definition pg_dump.h:480
bool is_instead
Definition pg_dump.h:479
TableInfo * ruletable
Definition pg_dump.h:477
char ev_type
Definition pg_dump.h:478
DumpableObject dobj
Definition pg_dump.h:413
char * attidentity
Definition pg_dump.h:361
char * reltablespace
Definition pg_dump.h:314
struct _relStatsInfo * stats
Definition pg_dump.h:381
int ncheck
Definition pg_dump.h:330
bool ispartition
Definition pg_dump.h:344
DumpableObject dobj
Definition pg_dump.h:307
bool is_identity_sequence
Definition pg_dump.h:337
Oid reloftype
Definition pg_dump.h:332
bool interesting
Definition pg_dump.h:341
char * toast_reloptions
Definition pg_dump.h:317
struct _tableInfo ** parents
Definition pg_dump.h:348
DumpableAcl dacl
Definition pg_dump.h:308
bool relispopulated
Definition pg_dump.h:312
Oid reltype
Definition pg_dump.h:331
bool hasoids
Definition pg_dump.h:324
Oid toast_oid
Definition pg_dump.h:327
Oid foreign_server
Definition pg_dump.h:333
bool hasrules
Definition pg_dump.h:319
uint32 frozenxid
Definition pg_dump.h:325
int owning_col
Definition pg_dump.h:336
char * checkoption
Definition pg_dump.h:316
bool hastriggers
Definition pg_dump.h:320
const char * rolname
Definition pg_dump.h:309
char relreplident
Definition pg_dump.h:313
uint32 minmxid
Definition pg_dump.h:326
int toastpages
Definition pg_dump.h:339
Oid owning_tab
Definition pg_dump.h:335
struct _tableDataInfo * dataObj
Definition pg_dump.h:390
char * amname
Definition pg_dump.h:383
bool dummy_view
Definition pg_dump.h:342
int32 relpages
Definition pg_dump.h:338
bool forcerowsec
Definition pg_dump.h:323
bool hascolumnACLs
Definition pg_dump.h:321
char relpersistence
Definition pg_dump.h:311
char ** attnames
Definition pg_dump.h:355
char relkind
Definition pg_dump.h:310
bool hasindex
Definition pg_dump.h:318
char * reloptions
Definition pg_dump.h:315
uint32 toast_frozenxid
Definition pg_dump.h:328
uint32 toast_minmxid
Definition pg_dump.h:329
bool postponed_def
Definition pg_dump.h:343
bool rowsec
Definition pg_dump.h:322
struct _tocEntry * next
DumpId * dependencies
DumpableObject dobj
Definition pg_dump.h:554
DumpableObject dobj
Definition pg_dump.h:205
char data[NAMEDATALEN]
Definition c.h:773
#define MinTransactionIdAttributeNumber
Definition sysattr.h:22
#define MaxCommandIdAttributeNumber
Definition sysattr.h:25
#define MaxTransactionIdAttributeNumber
Definition sysattr.h:24
#define TableOidAttributeNumber
Definition sysattr.h:26
#define SelfItemPointerAttributeNumber
Definition sysattr.h:21
#define MinCommandIdAttributeNumber
Definition sysattr.h:23
static StringInfo copybuf
Definition tablesync.c:128
static ItemArray items
static void * fn(void *arg)
#define FirstNormalObjectId
Definition transam.h:197
@ TRI_YES
Definition vacuumlo.c:38
@ TRI_NO
Definition vacuumlo.c:37
bool SplitGUCList(char *rawstring, char separator, List **namelist)
Definition varlena.c:3023
const char * description
const char * type
const char * name
ArchiveMode
Definition xlog.h:65