PostgreSQL Source Code git master
Loading...
Searching...
No Matches
pg_dump.c
Go to the documentation of this file.
1/*-------------------------------------------------------------------------
2 *
3 * pg_dump.c
4 * pg_dump is a utility for dumping out a postgres database
5 * into a script file.
6 *
7 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
8 * Portions Copyright (c) 1994, Regents of the University of California
9 *
10 * pg_dump will read the system catalogs in a database and dump out a
11 * script that reproduces the schema in terms of SQL that is understood
12 * by PostgreSQL
13 *
14 * Note that pg_dump runs in a transaction-snapshot mode transaction,
15 * so it sees a consistent snapshot of the database including system
16 * catalogs. However, it relies in part on various specialized backend
17 * functions like pg_get_indexdef(), and those things tend to look at
18 * the currently committed state. So it is possible to get 'cache
19 * lookup failed' error if someone performs DDL changes while a dump is
20 * happening. The window for this sort of thing is from the acquisition
21 * of the transaction snapshot to getSchemaData() (when pg_dump acquires
22 * AccessShareLock on every table it intends to dump). It isn't very large,
23 * but it can happen.
24 *
25 * http://archives.postgresql.org/pgsql-bugs/2010-02/msg00187.php
26 *
27 * IDENTIFICATION
28 * src/bin/pg_dump/pg_dump.c
29 *
30 *-------------------------------------------------------------------------
31 */
32#include "postgres_fe.h"
33
34#include <unistd.h>
35#include <ctype.h>
36#include <limits.h>
37#ifdef HAVE_TERMIOS_H
38#include <termios.h>
39#endif
40
41#include "access/attnum.h"
42#include "access/sysattr.h"
43#include "access/transam.h"
44#include "catalog/pg_aggregate_d.h"
45#include "catalog/pg_am_d.h"
46#include "catalog/pg_attribute_d.h"
47#include "catalog/pg_authid_d.h"
48#include "catalog/pg_cast_d.h"
49#include "catalog/pg_class_d.h"
50#include "catalog/pg_constraint_d.h"
51#include "catalog/pg_default_acl_d.h"
52#include "catalog/pg_largeobject_d.h"
53#include "catalog/pg_largeobject_metadata_d.h"
54#include "catalog/pg_proc_d.h"
55#include "catalog/pg_publication_d.h"
56#include "catalog/pg_shdepend_d.h"
57#include "catalog/pg_subscription_d.h"
58#include "catalog/pg_type_d.h"
59#include "common/connect.h"
60#include "common/int.h"
61#include "common/relpath.h"
62#include "common/shortest_dec.h"
63#include "compress_io.h"
64#include "dumputils.h"
67#include "filter.h"
68#include "getopt_long.h"
69#include "libpq/libpq-fs.h"
70#include "parallel.h"
71#include "pg_backup_db.h"
72#include "pg_backup_utils.h"
73#include "pg_dump.h"
75#include "storage/block.h"
76
77typedef struct
78{
79 Oid roleoid; /* role's OID */
80 const char *rolename; /* role's name */
82
83typedef struct
84{
85 const char *descr; /* comment for an object */
86 Oid classoid; /* object class (catalog OID) */
87 Oid objoid; /* object OID */
88 int objsubid; /* subobject (table column #) */
90
91typedef struct
92{
93 const char *provider; /* label provider of this security label */
94 const char *label; /* security label for an object */
95 Oid classoid; /* object class (catalog OID) */
96 Oid objoid; /* object OID */
97 int objsubid; /* subobject (table column #) */
99
100typedef struct
101{
102 Oid oid; /* object OID */
103 char relkind; /* object kind */
104 RelFileNumber relfilenumber; /* object filenode */
105 Oid toast_oid; /* toast table OID */
106 RelFileNumber toast_relfilenumber; /* toast table filenode */
107 Oid toast_index_oid; /* toast table index OID */
108 RelFileNumber toast_index_relfilenumber; /* toast table index filenode */
110
111/* sequence types */
118
/*
 * SQL type names keyed by the SeqType enum (the enum declaration is not
 * visible in this chunk).  The designated initializers tie each entry to
 * its enum value regardless of order, and a nearby static assertion
 * ("array length mismatch") checks the array length against the enum.
 * NOTE(review): presumably consulted when emitting sequence definitions
 * (e.g. CREATE SEQUENCE ... AS <type>) -- confirm at the use sites.
 */
119static const char *const SeqTypeNames[] =
120{
121 [SEQTYPE_SMALLINT] = "smallint",
122 [SEQTYPE_INTEGER] = "integer",
123 [SEQTYPE_BIGINT] = "bigint",
124};
125
127 "array length mismatch");
128
129typedef struct
130{
131 Oid oid; /* sequence OID */
132 SeqType seqtype; /* data type of sequence */
133 bool cycled; /* whether sequence cycles */
134 int64 minv; /* minimum value */
135 int64 maxv; /* maximum value */
136 int64 startv; /* start value */
137 int64 incby; /* increment value */
138 int64 cache; /* cache size */
139 int64 last_value; /* last value of sequence */
140 bool is_called; /* whether nextval advances before returning */
141 bool null_seqtuple; /* did pg_get_sequence_data return nulls? */
143
150
151/* global decls */
152static bool dosync = true; /* Issue fsync() to make dump durable on disk. */
153
154static Oid g_last_builtin_oid; /* value of the last builtin oid */
155
156/* The specified names/patterns should match at least one entity */
157static int strict_names = 0;
158
160
161/*
162 * Object inclusion/exclusion lists
163 *
164 * The string lists record the patterns given by command-line switches,
165 * which we then convert to lists of OIDs of matching objects.
166 */
171
181
184
187
190
191static const CatalogId nilCatalogId = {0, 0};
192
193/* override for standard extra_float_digits setting */
194static bool have_extra_float_digits = false;
196
197/* sorted table of role names */
199static int nrolenames = 0;
200
201/* sorted table of comments */
203static int ncomments = 0;
204
205/* sorted table of security labels */
207static int nseclabels = 0;
208
209/* sorted table of pg_class information for binary upgrade */
212
213/* sorted table of sequences */
215static int nsequences = 0;
216
217/* Maximum number of relations to fetch in a fetchAttributeStats() call. */
218#define MAX_ATTR_STATS_RELS 64
219
220/*
221 * The default number of rows per INSERT when
222 * --inserts is specified without --rows-per-insert
223 */
224#define DUMP_DEFAULT_ROWS_PER_INSERT 1
225
226/*
227 * Maximum number of large objects to group into a single ArchiveEntry.
228 * At some point we might want to make this user-controllable, but for now
229 * a hard-wired setting will suffice.
230 */
231#define MAX_BLOBS_PER_ARCHIVE_ENTRY 1000
232
233/*
234 * Macro for producing quoted, schema-qualified name of a dumpable object.
 *
 * Expands to a fmtQualifiedId() call on the object's namespace name and the
 * object's own name.  Note that "obj" is evaluated more than once, so pass
 * only a side-effect-free expression.
235 */
236#define fmtQualifiedDumpable(obj) \
237 fmtQualifiedId((obj)->dobj.namespace->dobj.name, \
238 (obj)->dobj.name)
239
240static void help(const char *progname);
241static void setup_connection(Archive *AH,
242 const char *dumpencoding, const char *dumpsnapshot,
243 char *use_role);
247 SimpleOidList *oids,
248 bool strict_names);
251 SimpleOidList *oids,
252 bool strict_names);
255 SimpleOidList *oids);
258 SimpleOidList *oids,
259 bool strict_names,
260 bool with_child_tables);
261static void prohibit_crossdb_refs(PGconn *conn, const char *dbname,
262 const char *pattern);
263
265static void dumpTableData(Archive *fout, const TableDataInfo *tdinfo);
267static const char *getRoleName(const char *roleoid_str);
268static void collectRoleNames(Archive *fout);
269static void getAdditionalACLs(Archive *fout);
270static void dumpCommentExtended(Archive *fout, const char *type,
271 const char *name, const char *namespace,
272 const char *owner, CatalogId catalogId,
273 int subid, DumpId dumpId,
274 const char *initdb_comment);
275static inline void dumpComment(Archive *fout, const char *type,
276 const char *name, const char *namespace,
277 const char *owner, CatalogId catalogId,
278 int subid, DumpId dumpId);
279static int findComments(Oid classoid, Oid objoid, CommentItem **items);
280static void collectComments(Archive *fout);
281static void dumpSecLabel(Archive *fout, const char *type, const char *name,
282 const char *namespace, const char *owner,
283 CatalogId catalogId, int subid, DumpId dumpId);
284static int findSecLabels(Oid classoid, Oid objoid, SecLabelItem **items);
285static void collectSecLabels(Archive *fout);
286static void dumpDumpableObject(Archive *fout, DumpableObject *dobj);
287static void dumpNamespace(Archive *fout, const NamespaceInfo *nspinfo);
288static void dumpExtension(Archive *fout, const ExtensionInfo *extinfo);
289static void dumpType(Archive *fout, const TypeInfo *tyinfo);
290static void dumpBaseType(Archive *fout, const TypeInfo *tyinfo);
291static void dumpEnumType(Archive *fout, const TypeInfo *tyinfo);
292static void dumpRangeType(Archive *fout, const TypeInfo *tyinfo);
293static void dumpUndefinedType(Archive *fout, const TypeInfo *tyinfo);
294static void dumpDomain(Archive *fout, const TypeInfo *tyinfo);
295static void dumpCompositeType(Archive *fout, const TypeInfo *tyinfo);
297 PGresult *res);
298static void dumpShellType(Archive *fout, const ShellTypeInfo *stinfo);
299static void dumpProcLang(Archive *fout, const ProcLangInfo *plang);
300static void dumpFunc(Archive *fout, const FuncInfo *finfo);
301static void dumpCast(Archive *fout, const CastInfo *cast);
302static void dumpTransform(Archive *fout, const TransformInfo *transform);
303static void dumpOpr(Archive *fout, const OprInfo *oprinfo);
305static void dumpOpclass(Archive *fout, const OpclassInfo *opcinfo);
306static void dumpOpfamily(Archive *fout, const OpfamilyInfo *opfinfo);
307static void dumpCollation(Archive *fout, const CollInfo *collinfo);
308static void dumpConversion(Archive *fout, const ConvInfo *convinfo);
309static void dumpRule(Archive *fout, const RuleInfo *rinfo);
310static void dumpAgg(Archive *fout, const AggInfo *agginfo);
311static void dumpTrigger(Archive *fout, const TriggerInfo *tginfo);
313static void dumpTable(Archive *fout, const TableInfo *tbinfo);
314static void dumpTableSchema(Archive *fout, const TableInfo *tbinfo);
316static void dumpAttrDef(Archive *fout, const AttrDefInfo *adinfo);
317static void collectSequences(Archive *fout);
318static void dumpSequence(Archive *fout, const TableInfo *tbinfo);
319static void dumpSequenceData(Archive *fout, const TableDataInfo *tdinfo);
320static void dumpIndex(Archive *fout, const IndxInfo *indxinfo);
324static void dumpConstraint(Archive *fout, const ConstraintInfo *coninfo);
326static void dumpTSParser(Archive *fout, const TSParserInfo *prsinfo);
327static void dumpTSDictionary(Archive *fout, const TSDictInfo *dictinfo);
329static void dumpTSConfig(Archive *fout, const TSConfigInfo *cfginfo);
332static void dumpUserMappings(Archive *fout,
333 const char *servername, const char *namespace,
334 const char *owner, CatalogId catalogId, DumpId dumpId);
336
338 const char *type, const char *name, const char *subname,
339 const char *nspname, const char *tag, const char *owner,
340 const DumpableAcl *dacl);
341
342static void getDependencies(Archive *fout);
344static void findDumpableDependencies(ArchiveHandle *AH, const DumpableObject *dobj,
345 DumpId **dependencies, int *nDeps, int *allocDeps);
346
350
351static void addConstrChildIdxDeps(DumpableObject *dobj, const IndxInfo *refidx);
353static void getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, char relkind);
354static void makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo);
356static void getTableDataFKConstraints(void);
357static void determineNotNullFlags(Archive *fout, PGresult *res, int r,
358 TableInfo *tbinfo, int j,
359 int i_notnull_name,
365static char *format_function_arguments(const FuncInfo *finfo, const char *funcargs,
366 bool is_agg);
368 const FuncInfo *finfo, bool honor_quotes);
369static char *convertRegProcReference(const char *proc);
370static char *getFormattedOperatorName(const char *oproid);
371static char *convertTSFunction(Archive *fout, Oid funcOid);
372static const char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts);
373static void getLOs(Archive *fout);
374static void dumpLO(Archive *fout, const LoInfo *loinfo);
375static int dumpLOs(Archive *fout, const void *arg);
376static void dumpPolicy(Archive *fout, const PolicyInfo *polinfo);
379static void dumpSubscription(Archive *fout, const SubscriptionInfo *subinfo);
381static void dumpDatabase(Archive *fout);
382static void dumpDatabaseConfig(Archive *AH, PQExpBuffer outbuf,
383 const char *dbname, Oid dboid);
384static void dumpEncoding(Archive *AH);
385static void dumpStdStrings(Archive *AH);
386static void dumpSearchPath(Archive *AH);
390 bool force_array_type,
394 const TableInfo *tbinfo);
400 const DumpableObject *dobj,
401 const char *objtype,
402 const char *objname,
403 const char *objnamespace);
404static const char *getAttrName(int attrnum, const TableInfo *tblInfo);
405static const char *fmtCopyColumnList(const TableInfo *ti, PQExpBuffer buffer);
406static bool nonemptyReloptions(const char *reloptions);
407static void appendReloptionsArrayAH(PQExpBuffer buffer, const char *reloptions,
408 const char *prefix, Archive *fout);
410static void set_restrict_relation_kind(Archive *AH, const char *value);
411static void setupDumpWorker(Archive *AH);
413static bool forcePartitionRootLoad(const TableInfo *tbinfo);
414static void read_dump_filters(const char *filename, DumpOptions *dopt);
415
416
417int
418main(int argc, char **argv)
419{
420 int c;
421 const char *filename = NULL;
422 const char *format = "p";
423 TableInfo *tblinfo;
424 int numTables;
426 int numObjs;
428 int i;
429 int optindex;
430 RestoreOptions *ropt;
431 Archive *fout; /* the script file */
432 bool g_verbose = false;
433 const char *dumpencoding = NULL;
434 const char *dumpsnapshot = NULL;
435 char *use_role = NULL;
436 int numWorkers = 1;
437 int plainText = 0;
440 pg_compress_specification compression_spec = {0};
441 char *compression_detail = NULL;
442 char *compression_algorithm_str = "none";
443 char *error_detail = NULL;
444 bool user_compression_defined = false;
446 bool data_only = false;
447 bool schema_only = false;
448 bool statistics_only = false;
449 bool with_statistics = false;
450 bool no_data = false;
451 bool no_schema = false;
452 bool no_statistics = false;
453
454 static DumpOptions dopt;
455
456 static struct option long_options[] = {
457 {"data-only", no_argument, NULL, 'a'},
458 {"blobs", no_argument, NULL, 'b'},
459 {"large-objects", no_argument, NULL, 'b'},
460 {"no-blobs", no_argument, NULL, 'B'},
461 {"no-large-objects", no_argument, NULL, 'B'},
462 {"clean", no_argument, NULL, 'c'},
463 {"create", no_argument, NULL, 'C'},
464 {"dbname", required_argument, NULL, 'd'},
465 {"extension", required_argument, NULL, 'e'},
466 {"file", required_argument, NULL, 'f'},
467 {"format", required_argument, NULL, 'F'},
468 {"host", required_argument, NULL, 'h'},
469 {"jobs", 1, NULL, 'j'},
470 {"no-reconnect", no_argument, NULL, 'R'},
471 {"no-owner", no_argument, NULL, 'O'},
472 {"port", required_argument, NULL, 'p'},
473 {"schema", required_argument, NULL, 'n'},
474 {"exclude-schema", required_argument, NULL, 'N'},
475 {"schema-only", no_argument, NULL, 's'},
476 {"superuser", required_argument, NULL, 'S'},
477 {"table", required_argument, NULL, 't'},
478 {"exclude-table", required_argument, NULL, 'T'},
479 {"no-password", no_argument, NULL, 'w'},
480 {"password", no_argument, NULL, 'W'},
481 {"username", required_argument, NULL, 'U'},
482 {"verbose", no_argument, NULL, 'v'},
483 {"no-privileges", no_argument, NULL, 'x'},
484 {"no-acl", no_argument, NULL, 'x'},
485 {"compress", required_argument, NULL, 'Z'},
486 {"encoding", required_argument, NULL, 'E'},
487 {"help", no_argument, NULL, '?'},
488 {"version", no_argument, NULL, 'V'},
489
490 /*
491 * the following options don't have an equivalent short option letter
492 */
493 {"attribute-inserts", no_argument, &dopt.column_inserts, 1},
494 {"binary-upgrade", no_argument, &dopt.binary_upgrade, 1},
495 {"column-inserts", no_argument, &dopt.column_inserts, 1},
496 {"disable-dollar-quoting", no_argument, &dopt.disable_dollar_quoting, 1},
497 {"disable-triggers", no_argument, &dopt.disable_triggers, 1},
498 {"enable-row-security", no_argument, &dopt.enable_row_security, 1},
499 {"exclude-table-data", required_argument, NULL, 4},
500 {"extra-float-digits", required_argument, NULL, 8},
501 {"if-exists", no_argument, &dopt.if_exists, 1},
502 {"inserts", no_argument, NULL, 9},
503 {"lock-wait-timeout", required_argument, NULL, 2},
504 {"no-table-access-method", no_argument, &dopt.outputNoTableAm, 1},
505 {"no-tablespaces", no_argument, &dopt.outputNoTablespaces, 1},
506 {"quote-all-identifiers", no_argument, &quote_all_identifiers, 1},
507 {"load-via-partition-root", no_argument, &dopt.load_via_partition_root, 1},
508 {"role", required_argument, NULL, 3},
509 {"section", required_argument, NULL, 5},
510 {"serializable-deferrable", no_argument, &dopt.serializable_deferrable, 1},
511 {"snapshot", required_argument, NULL, 6},
512 {"statistics", no_argument, NULL, 22},
513 {"statistics-only", no_argument, NULL, 18},
514 {"strict-names", no_argument, &strict_names, 1},
515 {"use-set-session-authorization", no_argument, &dopt.use_setsessauth, 1},
516 {"no-comments", no_argument, &dopt.no_comments, 1},
517 {"no-data", no_argument, NULL, 19},
518 {"no-policies", no_argument, &dopt.no_policies, 1},
519 {"no-publications", no_argument, &dopt.no_publications, 1},
520 {"no-schema", no_argument, NULL, 20},
521 {"no-security-labels", no_argument, &dopt.no_security_labels, 1},
522 {"no-statistics", no_argument, NULL, 21},
523 {"no-subscriptions", no_argument, &dopt.no_subscriptions, 1},
524 {"no-toast-compression", no_argument, &dopt.no_toast_compression, 1},
525 {"no-unlogged-table-data", no_argument, &dopt.no_unlogged_table_data, 1},
526 {"no-sync", no_argument, NULL, 7},
527 {"on-conflict-do-nothing", no_argument, &dopt.do_nothing, 1},
528 {"rows-per-insert", required_argument, NULL, 10},
529 {"include-foreign-data", required_argument, NULL, 11},
530 {"table-and-children", required_argument, NULL, 12},
531 {"exclude-table-and-children", required_argument, NULL, 13},
532 {"exclude-table-data-and-children", required_argument, NULL, 14},
533 {"sync-method", required_argument, NULL, 15},
534 {"filter", required_argument, NULL, 16},
535 {"exclude-extension", required_argument, NULL, 17},
536 {"sequence-data", no_argument, &dopt.sequence_data, 1},
537 {"restrict-key", required_argument, NULL, 25},
538
539 {NULL, 0, NULL, 0}
540 };
541
542 pg_logging_init(argv[0]);
544 set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_dump"));
545
546 /*
547 * Initialize what we need for parallel execution, especially for thread
548 * support on Windows.
549 */
551
552 progname = get_progname(argv[0]);
553
554 if (argc > 1)
555 {
556 if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
557 {
558 help(progname);
559 exit_nicely(0);
560 }
561 if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
562 {
563 puts("pg_dump (PostgreSQL) " PG_VERSION);
564 exit_nicely(0);
565 }
566 }
567
568 InitDumpOptions(&dopt);
569
570 while ((c = getopt_long(argc, argv, "abBcCd:e:E:f:F:h:j:n:N:Op:RsS:t:T:U:vwWxXZ:",
571 long_options, &optindex)) != -1)
572 {
573 switch (c)
574 {
575 case 'a': /* Dump data only */
576 data_only = true;
577 break;
578
579 case 'b': /* Dump LOs */
580 dopt.outputLOs = true;
581 break;
582
583 case 'B': /* Don't dump LOs */
584 dopt.dontOutputLOs = true;
585 break;
586
587 case 'c': /* clean (i.e., drop) schema prior to create */
588 dopt.outputClean = 1;
589 break;
590
591 case 'C': /* Create DB */
592 dopt.outputCreateDB = 1;
593 break;
594
595 case 'd': /* database name */
597 break;
598
599 case 'e': /* include extension(s) */
601 dopt.include_everything = false;
602 break;
603
604 case 'E': /* Dump encoding */
606 break;
607
608 case 'f':
610 break;
611
612 case 'F':
614 break;
615
616 case 'h': /* server host */
618 break;
619
620 case 'j': /* number of dump jobs */
621 if (!option_parse_int(optarg, "-j/--jobs", 1,
623 &numWorkers))
624 exit_nicely(1);
625 break;
626
627 case 'n': /* include schema(s) */
629 dopt.include_everything = false;
630 break;
631
632 case 'N': /* exclude schema(s) */
634 break;
635
636 case 'O': /* Don't reconnect to match owner */
637 dopt.outputNoOwner = 1;
638 break;
639
640 case 'p': /* server port */
642 break;
643
644 case 'R':
645 /* no-op, still accepted for backwards compatibility */
646 break;
647
648 case 's': /* dump schema only */
649 schema_only = true;
650 break;
651
652 case 'S': /* Username for superuser in plain text output */
654 break;
655
656 case 't': /* include table(s) */
658 dopt.include_everything = false;
659 break;
660
661 case 'T': /* exclude table(s) */
663 break;
664
665 case 'U':
667 break;
668
669 case 'v': /* verbose */
670 g_verbose = true;
672 break;
673
674 case 'w':
676 break;
677
678 case 'W':
680 break;
681
682 case 'x': /* skip ACL dump */
683 dopt.aclsSkip = true;
684 break;
685
686 case 'Z': /* Compression */
690 break;
691
692 case 0:
693 /* This covers the long options. */
694 break;
695
696 case 2: /* lock-wait-timeout */
698 break;
699
700 case 3: /* SET ROLE */
701 use_role = pg_strdup(optarg);
702 break;
703
704 case 4: /* exclude table(s) data */
706 break;
707
708 case 5: /* section */
710 break;
711
712 case 6: /* snapshot */
714 break;
715
716 case 7: /* no-sync */
717 dosync = false;
718 break;
719
720 case 8:
722 if (!option_parse_int(optarg, "--extra-float-digits", -15, 3,
724 exit_nicely(1);
725 break;
726
727 case 9: /* inserts */
728
729 /*
730 * dump_inserts also stores --rows-per-insert, careful not to
731 * overwrite that.
732 */
733 if (dopt.dump_inserts == 0)
735 break;
736
737 case 10: /* rows per insert */
738 if (!option_parse_int(optarg, "--rows-per-insert", 1, INT_MAX,
739 &dopt.dump_inserts))
740 exit_nicely(1);
741 break;
742
743 case 11: /* include foreign data */
745 optarg);
746 break;
747
748 case 12: /* include table(s) and their children */
750 optarg);
751 dopt.include_everything = false;
752 break;
753
754 case 13: /* exclude table(s) and their children */
756 optarg);
757 break;
758
759 case 14: /* exclude data of table(s) and children */
761 optarg);
762 break;
763
764 case 15:
766 exit_nicely(1);
767 break;
768
769 case 16: /* read object filters from file */
771 break;
772
773 case 17: /* exclude extension(s) */
775 optarg);
776 break;
777
778 case 18:
779 statistics_only = true;
780 break;
781
782 case 19:
783 no_data = true;
784 break;
785
786 case 20:
787 no_schema = true;
788 break;
789
790 case 21:
791 no_statistics = true;
792 break;
793
794 case 22:
795 with_statistics = true;
796 break;
797
798 case 25:
800 break;
801
802 default:
803 /* getopt_long already emitted a complaint */
804 pg_log_error_hint("Try \"%s --help\" for more information.", progname);
805 exit_nicely(1);
806 }
807 }
808
809 /*
810 * Non-option argument specifies database name as long as it wasn't
811 * already specified with -d / --dbname
812 */
813 if (optind < argc && dopt.cparams.dbname == NULL)
814 dopt.cparams.dbname = argv[optind++];
815
816 /* Complain if any arguments remain */
817 if (optind < argc)
818 {
819 pg_log_error("too many command-line arguments (first is \"%s\")",
820 argv[optind]);
821 pg_log_error_hint("Try \"%s --help\" for more information.", progname);
822 exit_nicely(1);
823 }
824
825 /* --column-inserts implies --inserts */
826 if (dopt.column_inserts && dopt.dump_inserts == 0)
828
829 /* *-only options are incompatible with each other */
830 check_mut_excl_opts(data_only, "-a/--data-only",
831 schema_only, "-s/--schema-only",
832 statistics_only, "--statistics-only");
833
834 /* --no-* and *-only for same thing are incompatible */
835 check_mut_excl_opts(data_only, "-a/--data-only",
836 no_data, "--no-data");
837 check_mut_excl_opts(schema_only, "-s/--schema-only",
838 no_schema, "--no-schema");
839 check_mut_excl_opts(statistics_only, "--statistics-only",
840 no_statistics, "--no-statistics");
841
842 /* --statistics and --no-statistics are incompatible */
843 check_mut_excl_opts(with_statistics, "--statistics",
844 no_statistics, "--no-statistics");
845
846 /* --statistics is incompatible with *-only (except --statistics-only) */
847 check_mut_excl_opts(with_statistics, "--statistics",
848 data_only, "-a/--data-only",
849 schema_only, "-s/--schema-only");
850
851 /* --include-foreign-data is incompatible with --schema-only */
853 schema_only, "-s/--schema-only");
854
855 if (numWorkers > 1 && foreign_servers_include_patterns.head != NULL)
856 pg_fatal("option %s is not supported with parallel backup",
857 "--include-foreign-data");
858
859 /* --clean is incompatible with --data-only */
860 check_mut_excl_opts(dopt.outputClean, "-c/--clean",
861 data_only, "-a/--data-only");
862
863 if (dopt.if_exists && !dopt.outputClean)
864 pg_fatal("option %s requires option %s",
865 "--if-exists", "-c/--clean");
866
867 /*
868 * Set derivative flags. Ambiguous or nonsensical combinations, e.g.
869 * "--schema-only --no-schema", will have already caused an error in one
870 * of the checks above.
871 */
872 dopt.dumpData = ((dopt.dumpData && !schema_only && !statistics_only) ||
873 data_only) && !no_data;
874 dopt.dumpSchema = ((dopt.dumpSchema && !data_only && !statistics_only) ||
876 dopt.dumpStatistics = ((dopt.dumpStatistics && !schema_only && !data_only) ||
878
879
880 /*
881 * --inserts are already implied above if --column-inserts or
882 * --rows-per-insert were specified.
883 */
884 if (dopt.do_nothing && dopt.dump_inserts == 0)
885 pg_fatal("option %s requires option %s, %s, or %s",
886 "--on-conflict-do-nothing",
887 "--inserts", "--rows-per-insert", "--column-inserts");
888
889 /* Identify archive format to emit */
891
892 /* archiveFormat specific setup */
893 if (archiveFormat == archNull)
894 {
895 plainText = 1;
896
897 /*
898 * If you don't provide a restrict key, one will be appointed for you.
899 */
900 if (!dopt.restrict_key)
902 if (!dopt.restrict_key)
903 pg_fatal("could not generate restrict key");
905 pg_fatal("invalid restrict key");
906 }
907 else if (dopt.restrict_key)
908 pg_fatal("option %s can only be used with %s",
909 "--restrict-key", "--format=plain");
910
911 /*
912 * Custom and directory formats are compressed by default with gzip when
913 * available, not the others. If gzip is not available, no compression is
914 * done by default.
915 */
918 {
919#ifdef HAVE_LIBZ
921#else
923#endif
924 }
925
926 /*
927 * Compression options
928 */
931 pg_fatal("unrecognized compression algorithm: \"%s\"",
933
935 &compression_spec);
937 if (error_detail != NULL)
938 pg_fatal("invalid compression specification: %s",
940
941 error_detail = supports_compression(compression_spec);
942 if (error_detail != NULL)
943 pg_fatal("%s", error_detail);
944
945 /*
946 * Disable support for zstd workers for now - these are based on
947 * threading, and it's unclear how it interacts with parallel dumps on
948 * platforms where that relies on threads too (e.g. Windows).
949 */
950 if (compression_spec.options & PG_COMPRESSION_OPTION_WORKERS)
951 pg_log_warning("compression option \"%s\" is not currently supported by pg_dump",
952 "workers");
953
954 /*
955 * If emitting an archive format, we always want to emit a DATABASE item,
956 * in case --create is specified at pg_restore time.
957 */
958 if (!plainText)
959 dopt.outputCreateDB = 1;
960
961 /* Parallel backup only in the directory archive format so far */
962 if (archiveFormat != archDirectory && numWorkers > 1)
963 pg_fatal("parallel backup only supported by the directory format");
964
965 /* Open the output file */
966 fout = CreateArchive(filename, archiveFormat, compression_spec,
968
969 /* Make dump options accessible right away */
970 SetArchiveOptions(fout, &dopt, NULL);
971
972 /* Register the cleanup hook */
974
975 /* Let the archiver know how noisy to be */
977
978
979 /*
980 * We allow the server to be back to 9.2, and up to any minor release of
981 * our own major version. (See also version check in pg_dumpall.c.)
982 */
983 fout->minRemoteVersion = 90200;
984 fout->maxRemoteVersion = (PG_VERSION_NUM / 100) * 100 + 99;
985
986 fout->numWorkers = numWorkers;
987
988 /*
989 * Open the database using the Archiver, so it knows about it. Errors mean
990 * death.
991 */
992 ConnectDatabaseAhx(fout, &dopt.cparams, false);
994
995 /*
996 * On hot standbys, never try to dump unlogged table data, since it will
997 * just throw an error.
998 */
999 if (fout->isStandby)
1000 dopt.no_unlogged_table_data = true;
1001
1002 /*
1003 * Find the last built-in OID, if needed (prior to 8.1)
1004 *
1005 * With 8.1 and above, we can just use FirstNormalObjectId - 1.
1006 */
1008
1009 pg_log_info("last built-in OID is %u", g_last_builtin_oid);
1010
1011 /* Expand schema selection patterns into OID lists */
1013 {
1016 strict_names);
1018 pg_fatal("no matching schemas were found");
1019 }
1022 false);
1023 /* non-matching exclusion patterns aren't an error */
1024
1025 /* Expand table selection patterns into OID lists */
1028 strict_names, false);
1031 strict_names, true);
1035 pg_fatal("no matching tables were found");
1036
1039 false, false);
1042 false, true);
1043
1046 false, false);
1049 false, true);
1050
1053
1054 /* non-matching exclusion patterns aren't an error */
1055
1056 /* Expand extension selection patterns into OID lists */
1058 {
1061 strict_names);
1063 pg_fatal("no matching extensions were found");
1064 }
1067 false);
1068 /* non-matching exclusion patterns aren't an error */
1069
1070 /*
1071 * Dumping LOs is the default for dumps where an inclusion switch is not
1072 * used (an "include everything" dump). -B can be used to exclude LOs
1073 * from those dumps. -b can be used to include LOs even when an inclusion
1074 * switch is used.
1075 *
1076 * -s means "schema only" and LOs are data, not schema, so we never
1077 * include LOs when -s is used.
1078 */
1079 if (dopt.include_everything && dopt.dumpData && !dopt.dontOutputLOs)
1080 dopt.outputLOs = true;
1081
1082 /*
1083 * Collect role names so we can map object owner OIDs to names.
1084 */
1086
1087 /*
1088 * Now scan the database and create DumpableObject structs for all the
1089 * objects we intend to dump.
1090 */
1091 tblinfo = getSchemaData(fout, &numTables);
1092
1093 if (dopt.dumpData)
1094 {
1095 getTableData(&dopt, tblinfo, numTables, 0);
1097 if (!dopt.dumpSchema)
1099 }
1100
1101 if (!dopt.dumpData && dopt.sequence_data)
1102 getTableData(&dopt, tblinfo, numTables, RELKIND_SEQUENCE);
1103
1104 /*
1105 * For binary upgrade mode, dump the pg_shdepend rows for large objects
1106 * and maybe even pg_largeobject_metadata (see comment below for details).
1107 * This is faster to restore than the equivalent set of large object
1108 * commands.
1109 */
1110 if (dopt.binary_upgrade)
1111 {
1113
1116
1117 /*
1118 * Only dump large object shdepend rows for this database.
1119 */
1120 shdepend->dataObj->filtercond = "WHERE classid = 'pg_largeobject'::regclass "
1121 "AND dbid = (SELECT oid FROM pg_database "
1122 " WHERE datname = current_database())";
1123
1124 /*
1125 * For binary upgrades from v16 and newer versions, we can copy
1126 * pg_largeobject_metadata's files from the old cluster, so we don't
1127 * need to dump its contents. pg_upgrade can't copy/link the files
1128 * from older versions because aclitem (needed by
1129 * pg_largeobject_metadata.lomacl) changed its storage format in v16.
1130 */
1131 if (fout->remoteVersion < 160000)
1132 {
1134
1137 }
1138 }
1139
1140 /*
1141 * In binary-upgrade mode, we do not have to worry about the actual LO
1142 * data or the associated metadata that resides in the pg_largeobject and
1143 * pg_largeobject_metadata tables, respectively.
1144 *
1145 * However, we do need to collect LO information as there may be comments
1146 * or other information on LOs that we do need to dump out.
1147 */
1148 if (dopt.outputLOs || dopt.binary_upgrade)
1149 getLOs(fout);
1150
1151 /*
1152 * Collect dependency data to assist in ordering the objects.
1153 */
1155
1156 /*
1157 * Collect ACLs, comments, and security labels, if wanted.
1158 */
1159 if (!dopt.aclsSkip)
1161 if (!dopt.no_comments)
1163 if (!dopt.no_security_labels)
1165
1166 /* For binary upgrade mode, collect required pg_class information. */
1167 if (dopt.binary_upgrade)
1169
1170 /* Collect sequence information. */
1172
1173 /* Lastly, create dummy objects to represent the section boundaries */
1175
1176 /* Get pointers to all the known DumpableObjects */
1178
1179 /*
1180 * Add dummy dependencies to enforce the dump section ordering.
1181 */
1183
1184 /*
1185 * Sort the objects into a safe dump order (no forward references).
1186 *
1187 * We rely on dependency information to help us determine a safe order, so
1188 * the initial sort is mostly for cosmetic purposes: we sort by name to
1189 * ensure that logically identical schemas will dump identically.
1190 */
1192
1194 boundaryObjs[0].dumpId, boundaryObjs[1].dumpId);
1195
1196 /*
1197 * Create archive TOC entries for all the objects to be dumped, in a safe
1198 * order.
1199 */
1200
1201 /*
1202 * First the special entries for ENCODING, STDSTRINGS, and SEARCHPATH.
1203 */
1207
1208 /* The database items are always next, unless we don't want them at all */
1209 if (dopt.outputCreateDB)
1211
1212 /* Now the rearrangeable objects. */
1213 for (i = 0; i < numObjs; i++)
1215
1216 /*
1217 * Set up options info to ensure we dump what we want.
1218 */
1219 ropt = NewRestoreOptions();
1220 ropt->filename = filename;
1221
1222 /* if you change this list, see dumpOptionsFromRestoreOptions */
1223 ropt->cparams.dbname = dopt.cparams.dbname ? pg_strdup(dopt.cparams.dbname) : NULL;
1224 ropt->cparams.pgport = dopt.cparams.pgport ? pg_strdup(dopt.cparams.pgport) : NULL;
1225 ropt->cparams.pghost = dopt.cparams.pghost ? pg_strdup(dopt.cparams.pghost) : NULL;
1228 ropt->dropSchema = dopt.outputClean;
1229 ropt->dumpData = dopt.dumpData;
1230 ropt->dumpSchema = dopt.dumpSchema;
1231 ropt->dumpStatistics = dopt.dumpStatistics;
1232 ropt->if_exists = dopt.if_exists;
1233 ropt->column_inserts = dopt.column_inserts;
1234 ropt->dumpSections = dopt.dumpSections;
1235 ropt->aclsSkip = dopt.aclsSkip;
1236 ropt->superuser = dopt.outputSuperuser;
1237 ropt->createDB = dopt.outputCreateDB;
1238 ropt->noOwner = dopt.outputNoOwner;
1239 ropt->noTableAm = dopt.outputNoTableAm;
1240 ropt->noTablespace = dopt.outputNoTablespaces;
1242 ropt->use_setsessauth = dopt.use_setsessauth;
1244 ropt->dump_inserts = dopt.dump_inserts;
1245 ropt->no_comments = dopt.no_comments;
1246 ropt->no_policies = dopt.no_policies;
1247 ropt->no_publications = dopt.no_publications;
1250 ropt->lockWaitTimeout = dopt.lockWaitTimeout;
1253 ropt->sequence_data = dopt.sequence_data;
1254 ropt->binary_upgrade = dopt.binary_upgrade;
1255 ropt->restrict_key = dopt.restrict_key ? pg_strdup(dopt.restrict_key) : NULL;
1256
1257 ropt->compression_spec = compression_spec;
1258
1259 ropt->suppressDumpWarnings = true; /* We've already shown them */
1260
1261 SetArchiveOptions(fout, &dopt, ropt);
1262
1263 /* Mark which entries should be output */
1265
1266 /*
1267 * The archive's TOC entries are now marked as to which ones will actually
1268 * be output, so we can set up their dependency lists properly. This isn't
1269 * necessary for plain-text output, though.
1270 */
1271 if (!plainText)
1273
1274 /*
1275 * And finally we can do the actual output.
1276 *
1277 * Note: for non-plain-text output formats, the output file is written
1278 * inside CloseArchive(). This is, um, bizarre; but not worth changing
1279 * right now.
1280 */
1281 if (plainText)
1282 RestoreArchive(fout, false);
1283
1285
1286 exit_nicely(0);
1287}
1288
1289
/*
 * help
 *		Print pg_dump's command-line help text to stdout.
 *
 * 'progname' is the invoked program name, substituted into the usage line
 * and the banner.  Every user-visible string is wrapped in _() so it can be
 * translated; option lines are grouped to match the order used in the
 * documentation (general, output-content, connection options).
 */
static void
help(const char *progname)
{
	printf(_("%s exports a PostgreSQL database as an SQL script or to other formats.\n\n"), progname);
	printf(_("Usage:\n"));
	printf(_(" %s [OPTION]... [DBNAME]\n"), progname);

	printf(_("\nGeneral options:\n"));
	printf(_(" -f, --file=FILENAME output file or directory name\n"));
	printf(_(" -F, --format=c|d|t|p output file format (custom, directory, tar,\n"
			 " plain text (default))\n"));
	printf(_(" -j, --jobs=NUM use this many parallel jobs to dump\n"));
	printf(_(" -v, --verbose verbose mode\n"));
	printf(_(" -V, --version output version information, then exit\n"));
	printf(_(" -Z, --compress=METHOD[:DETAIL]\n"
			 " compress as specified\n"));
	printf(_(" --lock-wait-timeout=TIMEOUT fail after waiting TIMEOUT for a table lock\n"));
	printf(_(" --no-sync do not wait for changes to be written safely to disk\n"));
	printf(_(" --sync-method=METHOD set method for syncing files to disk\n"));
	printf(_(" -?, --help show this help, then exit\n"));

	printf(_("\nOptions controlling the output content:\n"));
	printf(_(" -a, --data-only dump only the data, not the schema or statistics\n"));
	printf(_(" -b, --large-objects include large objects in dump\n"));
	printf(_(" --blobs (same as --large-objects, deprecated)\n"));
	printf(_(" -B, --no-large-objects exclude large objects in dump\n"));
	printf(_(" --no-blobs (same as --no-large-objects, deprecated)\n"));
	printf(_(" -c, --clean clean (drop) database objects before recreating\n"));
	printf(_(" -C, --create include commands to create database in dump\n"));
	printf(_(" -e, --extension=PATTERN dump the specified extension(s) only\n"));
	printf(_(" -E, --encoding=ENCODING dump the data in encoding ENCODING\n"));
	printf(_(" -n, --schema=PATTERN dump the specified schema(s) only\n"));
	printf(_(" -N, --exclude-schema=PATTERN do NOT dump the specified schema(s)\n"));
	printf(_(" -O, --no-owner skip restoration of object ownership in\n"
			 " plain-text format\n"));
	printf(_(" -s, --schema-only dump only the schema, no data or statistics\n"));
	printf(_(" -S, --superuser=NAME superuser user name to use in plain-text format\n"));
	printf(_(" -t, --table=PATTERN dump only the specified table(s)\n"));
	printf(_(" -T, --exclude-table=PATTERN do NOT dump the specified table(s)\n"));
	printf(_(" -x, --no-privileges do not dump privileges (grant/revoke)\n"));
	printf(_(" --binary-upgrade for use by upgrade utilities only\n"));
	printf(_(" --column-inserts dump data as INSERT commands with column names\n"));
	printf(_(" --disable-dollar-quoting disable dollar quoting, use SQL standard quoting\n"));
	printf(_(" --disable-triggers disable triggers during data-only restore\n"));
	printf(_(" --enable-row-security enable row security (dump only content user has\n"
			 " access to)\n"));
	printf(_(" --exclude-extension=PATTERN do NOT dump the specified extension(s)\n"));
	printf(_(" --exclude-table-and-children=PATTERN\n"
			 " do NOT dump the specified table(s), including\n"
			 " child and partition tables\n"));
	printf(_(" --exclude-table-data=PATTERN do NOT dump data for the specified table(s)\n"));
	printf(_(" --exclude-table-data-and-children=PATTERN\n"
			 " do NOT dump data for the specified table(s),\n"
			 " including child and partition tables\n"));
	printf(_(" --extra-float-digits=NUM override default setting for extra_float_digits\n"));
	printf(_(" --filter=FILENAME include or exclude objects and data from dump\n"
			 " based on expressions in FILENAME\n"));
	printf(_(" --if-exists use IF EXISTS when dropping objects\n"));
	printf(_(" --include-foreign-data=PATTERN\n"
			 " include data of foreign tables on foreign\n"
			 " servers matching PATTERN\n"));
	printf(_(" --inserts dump data as INSERT commands, rather than COPY\n"));
	printf(_(" --load-via-partition-root load partitions via the root table\n"));
	printf(_(" --no-comments do not dump comment commands\n"));
	printf(_(" --no-data do not dump data\n"));
	printf(_(" --no-policies do not dump row security policies\n"));
	printf(_(" --no-publications do not dump publications\n"));
	printf(_(" --no-schema do not dump schema\n"));
	printf(_(" --no-security-labels do not dump security label assignments\n"));
	printf(_(" --no-statistics do not dump statistics\n"));
	printf(_(" --no-subscriptions do not dump subscriptions\n"));
	printf(_(" --no-table-access-method do not dump table access methods\n"));
	printf(_(" --no-tablespaces do not dump tablespace assignments\n"));
	printf(_(" --no-toast-compression do not dump TOAST compression methods\n"));
	printf(_(" --no-unlogged-table-data do not dump unlogged table data\n"));
	printf(_(" --on-conflict-do-nothing add ON CONFLICT DO NOTHING to INSERT commands\n"));
	printf(_(" --quote-all-identifiers quote all identifiers, even if not key words\n"));
	printf(_(" --restrict-key=RESTRICT_KEY use provided string as psql \\restrict key\n"));
	printf(_(" --rows-per-insert=NROWS number of rows per INSERT; implies --inserts\n"));
	printf(_(" --section=SECTION dump named section (pre-data, data, or post-data)\n"));
	printf(_(" --sequence-data include sequence data in dump\n"));
	printf(_(" --serializable-deferrable wait until the dump can run without anomalies\n"));
	printf(_(" --snapshot=SNAPSHOT use given snapshot for the dump\n"));
	printf(_(" --statistics dump the statistics\n"));
	printf(_(" --statistics-only dump only the statistics, not schema or data\n"));
	printf(_(" --strict-names require table and/or schema include patterns to\n"
			 " match at least one entity each\n"));
	printf(_(" --table-and-children=PATTERN dump only the specified table(s), including\n"
			 " child and partition tables\n"));
	printf(_(" --use-set-session-authorization\n"
			 " use SET SESSION AUTHORIZATION commands instead of\n"
			 " ALTER OWNER commands to set ownership\n"));

	printf(_("\nConnection options:\n"));
	printf(_(" -d, --dbname=DBNAME database to dump\n"));
	printf(_(" -h, --host=HOSTNAME database server host or socket directory\n"));
	printf(_(" -p, --port=PORT database server port number\n"));
	printf(_(" -U, --username=NAME connect as specified database user\n"));
	printf(_(" -w, --no-password never prompt for password\n"));
	printf(_(" -W, --password force password prompt (should happen automatically)\n"));
	printf(_(" --role=ROLENAME do SET ROLE before dump\n"));

	printf(_("\nIf no database name is supplied, then the PGDATABASE environment\n"
			 "variable value is used.\n\n"));
	printf(_("Report bugs to <%s>.\n"), PACKAGE_BUGREPORT);
	printf(_("%s home page: <%s>\n"), PACKAGE_NAME, PACKAGE_URL);
}
1397
1398static void
1400 const char *dumpsnapshot, char *use_role)
1401{
1402 DumpOptions *dopt = AH->dopt;
1403 PGconn *conn = GetConnection(AH);
1404
1406
1407 /*
1408 * Set the client encoding if requested.
1409 */
1410 if (dumpencoding)
1411 {
1413 pg_fatal("invalid client encoding \"%s\" specified",
1414 dumpencoding);
1415 }
1416
1417 /*
1418 * Force standard_conforming_strings on, just in case we are dumping from
1419 * an old server that has it disabled. Without this, literals in views,
1420 * expressions, etc, would be incorrect for modern servers.
1421 */
1422 ExecuteSqlStatement(AH, "SET standard_conforming_strings = on");
1423
1424 /*
1425 * And reflect that to AH->std_strings. You might think that we should
1426 * just delete that variable and the code that checks it, but that would
1427 * be problematic for pg_restore, which at least for now should still cope
1428 * with archives containing the other setting (cf. processStdStringsEntry
1429 * in pg_backup_archiver.c).
1430 */
1431 AH->std_strings = true;
1432
1433 /*
1434 * Get the active encoding, so we know how to escape strings.
1435 */
1438
1439 /*
1440 * Set the role if requested. In a parallel dump worker, we'll be passed
1441 * use_role == NULL, but AH->use_role is already set (if user specified it
1442 * originally) and we should use that.
1443 */
1444 if (!use_role && AH->use_role)
1445 use_role = AH->use_role;
1446
1447 /* Set the role if requested */
1448 if (use_role)
1449 {
1451
1452 appendPQExpBuffer(query, "SET ROLE %s", fmtId(use_role));
1453 ExecuteSqlStatement(AH, query->data);
1454 destroyPQExpBuffer(query);
1455
1456 /* save it for possible later use by parallel workers */
1457 if (!AH->use_role)
1458 AH->use_role = pg_strdup(use_role);
1459 }
1460
1461 /* Set the datestyle to ISO to ensure the dump's portability */
1462 ExecuteSqlStatement(AH, "SET DATESTYLE = ISO");
1463
1464 /* Likewise, avoid using sql_standard intervalstyle */
1465 ExecuteSqlStatement(AH, "SET INTERVALSTYLE = POSTGRES");
1466
1467 /*
1468 * Use an explicitly specified extra_float_digits if it has been provided.
1469 * Otherwise, set extra_float_digits so that we can dump float data
1470 * exactly (given correctly implemented float I/O code, anyway).
1471 */
1473 {
1475
1476 appendPQExpBuffer(q, "SET extra_float_digits TO %d",
1478 ExecuteSqlStatement(AH, q->data);
1480 }
1481 else
1482 ExecuteSqlStatement(AH, "SET extra_float_digits TO 3");
1483
1484 /*
1485 * Disable synchronized scanning, to prevent unpredictable changes in row
1486 * ordering across a dump and reload.
1487 */
1488 ExecuteSqlStatement(AH, "SET synchronize_seqscans TO off");
1489
1490 /*
1491 * Disable timeouts if supported.
1492 */
1493 ExecuteSqlStatement(AH, "SET statement_timeout = 0");
1494 if (AH->remoteVersion >= 90300)
1495 ExecuteSqlStatement(AH, "SET lock_timeout = 0");
1496 if (AH->remoteVersion >= 90600)
1497 ExecuteSqlStatement(AH, "SET idle_in_transaction_session_timeout = 0");
1498 if (AH->remoteVersion >= 170000)
1499 ExecuteSqlStatement(AH, "SET transaction_timeout = 0");
1500
1501 /*
1502 * Quote all identifiers, if requested.
1503 */
1505 ExecuteSqlStatement(AH, "SET quote_all_identifiers = true");
1506
1507 /*
1508 * Adjust row-security mode, if supported.
1509 */
1510 if (AH->remoteVersion >= 90500)
1511 {
1512 if (dopt->enable_row_security)
1513 ExecuteSqlStatement(AH, "SET row_security = on");
1514 else
1515 ExecuteSqlStatement(AH, "SET row_security = off");
1516 }
1517
1518 /*
1519 * For security reasons, we restrict the expansion of non-system views and
1520 * access to foreign tables during the pg_dump process. This restriction
1521 * is adjusted when dumping foreign table data.
1522 */
1523 set_restrict_relation_kind(AH, "view, foreign-table");
1524
1525 /*
1526 * Initialize prepared-query state to "nothing prepared". We do this here
1527 * so that a parallel dump worker will have its own state.
1528 */
1530
1531 /*
1532 * Start transaction-snapshot mode transaction to dump consistent data.
1533 */
1534 ExecuteSqlStatement(AH, "BEGIN");
1535
1536 /*
1537 * To support the combination of serializable_deferrable with the jobs
1538 * option we use REPEATABLE READ for the worker connections that are
1539 * passed a snapshot. As long as the snapshot is acquired in a
1540 * SERIALIZABLE, READ ONLY, DEFERRABLE transaction, its use within a
1541 * REPEATABLE READ transaction provides the appropriate integrity
1542 * guarantees. This is a kluge, but safe for back-patching.
1543 */
1544 if (dopt->serializable_deferrable && AH->sync_snapshot_id == NULL)
1546 "SET TRANSACTION ISOLATION LEVEL "
1547 "SERIALIZABLE, READ ONLY, DEFERRABLE");
1548 else
1550 "SET TRANSACTION ISOLATION LEVEL "
1551 "REPEATABLE READ, READ ONLY");
1552
1553 /*
1554 * If user specified a snapshot to use, select that. In a parallel dump
1555 * worker, we'll be passed dumpsnapshot == NULL, but AH->sync_snapshot_id
1556 * is already set (if the server can handle it) and we should use that.
1557 */
1558 if (dumpsnapshot)
1560
1561 if (AH->sync_snapshot_id)
1562 {
1564
1565 appendPQExpBufferStr(query, "SET TRANSACTION SNAPSHOT ");
1567 ExecuteSqlStatement(AH, query->data);
1568 destroyPQExpBuffer(query);
1569 }
1570 else if (AH->numWorkers > 1)
1571 {
1572 if (AH->isStandby && AH->remoteVersion < 100000)
1573 pg_fatal("parallel dumps from standby servers are not supported by this server version");
1575 }
1576}
1577
1578/* Set up connection for a parallel worker process */
1579static void
1581{
1582 /*
1583 * We want to re-select all the same values the leader connection is
1584 * using. We'll have inherited directly-usable values in
1585 * AH->sync_snapshot_id and AH->use_role, but we need to translate the
1586 * inherited encoding value back to a string to pass to setup_connection.
1587 */
1590 NULL,
1591 NULL);
1592}
1593
1594static char *
1596{
1597 char *query = "SELECT pg_catalog.pg_export_snapshot()";
1598 char *result;
1599 PGresult *res;
1600
1601 res = ExecuteSqlQueryForSingleRow(fout, query);
1602 result = pg_strdup(PQgetvalue(res, 0, 0));
1603 PQclear(res);
1604
1605 return result;
1606}
1607
1608static ArchiveFormat
1610{
1612
1614
1615 if (pg_strcasecmp(format, "a") == 0 || pg_strcasecmp(format, "append") == 0)
1616 {
1617 /* This is used by pg_dumpall, and is not documented */
1620 }
1621 else if (pg_strcasecmp(format, "c") == 0)
1623 else if (pg_strcasecmp(format, "custom") == 0)
1625 else if (pg_strcasecmp(format, "d") == 0)
1627 else if (pg_strcasecmp(format, "directory") == 0)
1629 else if (pg_strcasecmp(format, "p") == 0)
1631 else if (pg_strcasecmp(format, "plain") == 0)
1633 else if (pg_strcasecmp(format, "t") == 0)
1635 else if (pg_strcasecmp(format, "tar") == 0)
1637 else
1638 pg_fatal("invalid output format \"%s\" specified", format);
1639 return archiveFormat;
1640}
1641
1642/*
1643 * Find the OIDs of all schemas matching the given list of patterns,
1644 * and append them to the given OID list.
1645 */
1646static void
1649 SimpleOidList *oids,
1650 bool strict_names)
1651{
1652 PQExpBuffer query;
1653 PGresult *res;
1655 int i;
1656
1657 if (patterns->head == NULL)
1658 return; /* nothing to do */
1659
1660 query = createPQExpBuffer();
1661
	/*
	 * The loop below runs multiple SELECTs, which might sometimes result in
	 * duplicate entries in the OID list, but we don't care.
	 */
1666
1667 for (cell = patterns->head; cell; cell = cell->next)
1668 {
1670 int dotcnt;
1671
1673 "SELECT oid FROM pg_catalog.pg_namespace n\n");
1675 processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1676 false, NULL, "n.nspname", NULL, NULL, &dbbuf,
1677 &dotcnt);
1678 if (dotcnt > 1)
1679 pg_fatal("improper qualified name (too many dotted names): %s",
1680 cell->val);
1681 else if (dotcnt == 1)
1684
1685 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1686 if (strict_names && PQntuples(res) == 0)
1687 pg_fatal("no matching schemas were found for pattern \"%s\"", cell->val);
1688
1689 for (i = 0; i < PQntuples(res); i++)
1690 {
1691 simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1692 }
1693
1694 PQclear(res);
1695 resetPQExpBuffer(query);
1696 }
1697
1698 destroyPQExpBuffer(query);
1699}
1700
1701/*
1702 * Find the OIDs of all extensions matching the given list of patterns,
1703 * and append them to the given OID list.
1704 */
1705static void
1708 SimpleOidList *oids,
1709 bool strict_names)
1710{
1711 PQExpBuffer query;
1712 PGresult *res;
1714 int i;
1715
1716 if (patterns->head == NULL)
1717 return; /* nothing to do */
1718
1719 query = createPQExpBuffer();
1720
	/*
	 * The loop below runs multiple SELECTs, which might sometimes result in
	 * duplicate entries in the OID list, but we don't care.
	 */
1725 for (cell = patterns->head; cell; cell = cell->next)
1726 {
1727 int dotcnt;
1728
1730 "SELECT oid FROM pg_catalog.pg_extension e\n");
1731 processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1732 false, NULL, "e.extname", NULL, NULL, NULL,
1733 &dotcnt);
1734 if (dotcnt > 0)
1735 pg_fatal("improper qualified name (too many dotted names): %s",
1736 cell->val);
1737
1738 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1739 if (strict_names && PQntuples(res) == 0)
1740 pg_fatal("no matching extensions were found for pattern \"%s\"", cell->val);
1741
1742 for (i = 0; i < PQntuples(res); i++)
1743 {
1744 simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1745 }
1746
1747 PQclear(res);
1748 resetPQExpBuffer(query);
1749 }
1750
1751 destroyPQExpBuffer(query);
1752}
1753
1754/*
1755 * Find the OIDs of all foreign servers matching the given list of patterns,
1756 * and append them to the given OID list.
1757 */
1758static void
1761 SimpleOidList *oids)
1762{
1763 PQExpBuffer query;
1764 PGresult *res;
1766 int i;
1767
1768 if (patterns->head == NULL)
1769 return; /* nothing to do */
1770
1771 query = createPQExpBuffer();
1772
	/*
	 * The loop below runs multiple SELECTs, which might sometimes result in
	 * duplicate entries in the OID list, but we don't care.
	 */
1777
1778 for (cell = patterns->head; cell; cell = cell->next)
1779 {
1780 int dotcnt;
1781
1783 "SELECT oid FROM pg_catalog.pg_foreign_server s\n");
1784 processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1785 false, NULL, "s.srvname", NULL, NULL, NULL,
1786 &dotcnt);
1787 if (dotcnt > 0)
1788 pg_fatal("improper qualified name (too many dotted names): %s",
1789 cell->val);
1790
1791 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1792 if (PQntuples(res) == 0)
1793 pg_fatal("no matching foreign servers were found for pattern \"%s\"", cell->val);
1794
1795 for (i = 0; i < PQntuples(res); i++)
1796 simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1797
1798 PQclear(res);
1799 resetPQExpBuffer(query);
1800 }
1801
1802 destroyPQExpBuffer(query);
1803}
1804
1805/*
1806 * Find the OIDs of all tables matching the given list of patterns,
1807 * and append them to the given OID list. See also expand_dbname_patterns()
1808 * in pg_dumpall.c
1809 */
1810static void
1814{
1815 PQExpBuffer query;
1816 PGresult *res;
1818 int i;
1819
1820 if (patterns->head == NULL)
1821 return; /* nothing to do */
1822
1823 query = createPQExpBuffer();
1824
1825 /*
1826 * this might sometimes result in duplicate entries in the OID list, but
1827 * we don't care.
1828 */
1829
1830 for (cell = patterns->head; cell; cell = cell->next)
1831 {
1833 int dotcnt;
1834
1835 /*
1836 * Query must remain ABSOLUTELY devoid of unqualified names. This
1837 * would be unnecessary given a pg_table_is_visible() variant taking a
1838 * search_path argument.
1839 *
1840 * For with_child_tables, we start with the basic query's results and
1841 * recursively search the inheritance tree to add child tables.
1842 */
1844 {
1845 appendPQExpBufferStr(query, "WITH RECURSIVE partition_tree (relid) AS (\n");
1846 }
1847
1848 appendPQExpBuffer(query,
1849 "SELECT c.oid"
1850 "\nFROM pg_catalog.pg_class c"
1851 "\n LEFT JOIN pg_catalog.pg_namespace n"
1852 "\n ON n.oid OPERATOR(pg_catalog.=) c.relnamespace"
1853 "\nWHERE c.relkind OPERATOR(pg_catalog.=) ANY"
1854 "\n (array['%c', '%c', '%c', '%c', '%c', '%c'])\n",
1859 processSQLNamePattern(GetConnection(fout), query, cell->val, true,
1860 false, "n.nspname", "c.relname", NULL,
1861 "pg_catalog.pg_table_is_visible(c.oid)", &dbbuf,
1862 &dotcnt);
1863 if (dotcnt > 2)
1864 pg_fatal("improper relation name (too many dotted names): %s",
1865 cell->val);
1866 else if (dotcnt == 2)
1869
1871 {
1872 appendPQExpBufferStr(query, "UNION"
1873 "\nSELECT i.inhrelid"
1874 "\nFROM partition_tree p"
1875 "\n JOIN pg_catalog.pg_inherits i"
1876 "\n ON p.relid OPERATOR(pg_catalog.=) i.inhparent"
1877 "\n)"
1878 "\nSELECT relid FROM partition_tree");
1879 }
1880
1881 ExecuteSqlStatement(fout, "RESET search_path");
1882 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1885 if (strict_names && PQntuples(res) == 0)
1886 pg_fatal("no matching tables were found for pattern \"%s\"", cell->val);
1887
1888 for (i = 0; i < PQntuples(res); i++)
1889 {
1890 simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1891 }
1892
1893 PQclear(res);
1894 resetPQExpBuffer(query);
1895 }
1896
1897 destroyPQExpBuffer(query);
1898}
1899
1900/*
1901 * Verifies that the connected database name matches the given database name,
1902 * and if not, dies with an error about the given pattern.
1903 *
1904 * The 'dbname' argument should be a literal name parsed from 'pattern'.
1905 */
1906static void
1907prohibit_crossdb_refs(PGconn *conn, const char *dbname, const char *pattern)
1908{
1909 const char *db;
1910
1911 db = PQdb(conn);
1912 if (db == NULL)
1913 pg_fatal("You are currently not connected to a database.");
1914
1915 if (strcmp(db, dbname) != 0)
1916 pg_fatal("cross-database references are not implemented: %s",
1917 pattern);
1918}
1919
1920/*
1921 * checkExtensionMembership
1922 * Determine whether object is an extension member, and if so,
1923 * record an appropriate dependency and set the object's dump flag.
1924 *
1925 * It's important to call this for each object that could be an extension
1926 * member. Generally, we integrate this with determining the object's
1927 * to-be-dumped-ness, since extension membership overrides other rules for that.
1928 *
1929 * Returns true if object is an extension member, else false.
1930 */
1931static bool
1933{
1935
1936 if (ext == NULL)
1937 return false;
1938
1939 dobj->ext_member = true;
1940
1941 /* Record dependency so that getDependencies needn't deal with that */
1942 addObjectDependency(dobj, ext->dobj.dumpId);
1943
1944 /*
1945 * In 9.6 and above, mark the member object to have any non-initial ACLs
1946 * dumped. (Any initial ACLs will be removed later, using data from
1947 * pg_init_privs, so that we'll dump only the delta from the extension's
1948 * initial setup.)
1949 *
1950 * Prior to 9.6, we do not include any extension member components.
1951 *
1952 * In binary upgrades, we still dump all components of the members
1953 * individually, since the idea is to exactly reproduce the database
1954 * contents rather than replace the extension contents with something
1955 * different.
1956 *
1957 * Note: it might be interesting someday to implement storage and delta
1958 * dumping of extension members' RLS policies and/or security labels.
1959 * However there is a pitfall for RLS policies: trying to dump them
1960 * requires getting a lock on their tables, and the calling user might not
1961 * have privileges for that. We need no lock to examine a table's ACLs,
1962 * so the current feature doesn't have a problem of that sort.
1963 */
1964 if (fout->dopt->binary_upgrade)
1965 dobj->dump = ext->dobj.dump;
1966 else
1967 {
1968 if (fout->remoteVersion < 90600)
1969 dobj->dump = DUMP_COMPONENT_NONE;
1970 else
1971 dobj->dump = ext->dobj.dump_contains & (DUMP_COMPONENT_ACL);
1972 }
1973
1974 return true;
1975}
1976
1977/*
1978 * selectDumpableNamespace: policy-setting subroutine
1979 * Mark a namespace as to be dumped or not
1980 */
1981static void
1983{
1984 /*
1985 * DUMP_COMPONENT_DEFINITION typically implies a CREATE SCHEMA statement
1986 * and (for --clean) a DROP SCHEMA statement. (In the absence of
1987 * DUMP_COMPONENT_DEFINITION, this value is irrelevant.)
1988 */
1989 nsinfo->create = true;
1990
1991 /*
1992 * If specific tables are being dumped, do not dump any complete
1993 * namespaces. If specific namespaces are being dumped, dump just those
1994 * namespaces. Otherwise, dump all non-system namespaces.
1995 */
1997 nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1998 else if (schema_include_oids.head != NULL)
1999 nsinfo->dobj.dump_contains = nsinfo->dobj.dump =
2001 nsinfo->dobj.catId.oid) ?
2003 else if (fout->remoteVersion >= 90600 &&
2004 strcmp(nsinfo->dobj.name, "pg_catalog") == 0)
2005 {
2006 /*
2007 * In 9.6 and above, we dump out any ACLs defined in pg_catalog, if
2008 * they are interesting (and not the original ACLs which were set at
2009 * initdb time, see pg_init_privs).
2010 */
2011 nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
2012 }
2013 else if (strncmp(nsinfo->dobj.name, "pg_", 3) == 0 ||
2014 strcmp(nsinfo->dobj.name, "information_schema") == 0)
2015 {
2016 /* Other system schemas don't get dumped */
2017 nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
2018 }
2019 else if (strcmp(nsinfo->dobj.name, "public") == 0)
2020 {
2021 /*
2022 * The public schema is a strange beast that sits in a sort of
2023 * no-mans-land between being a system object and a user object.
2024 * CREATE SCHEMA would fail, so its DUMP_COMPONENT_DEFINITION is just
2025 * a comment and an indication of ownership. If the owner is the
2026 * default, omit that superfluous DUMP_COMPONENT_DEFINITION. Before
2027 * v15, the default owner was BOOTSTRAP_SUPERUSERID.
2028 */
2029 nsinfo->create = false;
2030 nsinfo->dobj.dump = DUMP_COMPONENT_ALL;
2031 if (nsinfo->nspowner == ROLE_PG_DATABASE_OWNER)
2032 nsinfo->dobj.dump &= ~DUMP_COMPONENT_DEFINITION;
2033 nsinfo->dobj.dump_contains = DUMP_COMPONENT_ALL;
2034
2035 /*
2036 * Also, make like it has a comment even if it doesn't; this is so
2037 * that we'll emit a command to drop the comment, if appropriate.
2038 * (Without this, we'd not call dumpCommentExtended for it.)
2039 */
2040 nsinfo->dobj.components |= DUMP_COMPONENT_COMMENT;
2041 }
2042 else
2043 nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ALL;
2044
2045 /*
2046 * In any case, a namespace can be excluded by an exclusion switch
2047 */
2048 if (nsinfo->dobj.dump_contains &&
2050 nsinfo->dobj.catId.oid))
2051 nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
2052
2053 /*
2054 * If the schema belongs to an extension, allow extension membership to
2055 * override the dump decision for the schema itself. However, this does
2056 * not change dump_contains, so this won't change what we do with objects
2057 * within the schema. (If they belong to the extension, they'll get
2058 * suppressed by it, otherwise not.)
2059 */
2061}
2062
2063/*
2064 * selectDumpableTable: policy-setting subroutine
2065 * Mark a table as to be dumped or not
2066 */
2067static void
2069{
2071 return; /* extension membership overrides all else */
2072
2073 /*
2074 * If specific tables are being dumped, dump just those tables; else, dump
2075 * according to the parent namespace's dump flag.
2076 */
2079 tbinfo->dobj.catId.oid) ?
2081 else
2082 tbinfo->dobj.dump = tbinfo->dobj.namespace->dobj.dump_contains;
2083
2084 /*
2085 * In any case, a table can be excluded by an exclusion switch
2086 */
2087 if (tbinfo->dobj.dump &&
2089 tbinfo->dobj.catId.oid))
2090 tbinfo->dobj.dump = DUMP_COMPONENT_NONE;
2091}
2092
2093/*
2094 * selectDumpableType: policy-setting subroutine
2095 * Mark a type as to be dumped or not
2096 *
2097 * If it's a table's rowtype or an autogenerated array type, we also apply a
2098 * special type code to facilitate sorting into the desired order. (We don't
2099 * want to consider those to be ordinary types because that would bring tables
2100 * up into the datatype part of the dump order.) We still set the object's
2101 * dump flag; that's not going to cause the dummy type to be dumped, but we
2102 * need it so that casts involving such types will be dumped correctly -- see
2103 * dumpCast. This means the flag should be set the same as for the underlying
2104 * object (the table or base type).
2105 */
2106static void
2108{
2109 /* skip complex types, except for standalone composite types */
2110 if (OidIsValid(tyinfo->typrelid) &&
2111 tyinfo->typrelkind != RELKIND_COMPOSITE_TYPE)
2112 {
2113 TableInfo *tytable = findTableByOid(tyinfo->typrelid);
2114
2115 tyinfo->dobj.objType = DO_DUMMY_TYPE;
2116 if (tytable != NULL)
2117 tyinfo->dobj.dump = tytable->dobj.dump;
2118 else
2119 tyinfo->dobj.dump = DUMP_COMPONENT_NONE;
2120 return;
2121 }
2122
2123 /* skip auto-generated array and multirange types */
2124 if (tyinfo->isArray || tyinfo->isMultirange)
2125 {
2126 tyinfo->dobj.objType = DO_DUMMY_TYPE;
2127
2128 /*
2129 * Fall through to set the dump flag; we assume that the subsequent
2130 * rules will do the same thing as they would for the array's base
2131 * type or multirange's range type. (We cannot reliably look up the
2132 * base type here, since getTypes may not have processed it yet.)
2133 */
2134 }
2135
2137 return; /* extension membership overrides all else */
2138
2139 /* Dump based on if the contents of the namespace are being dumped */
2140 tyinfo->dobj.dump = tyinfo->dobj.namespace->dobj.dump_contains;
2141}
2142
2143/*
2144 * selectDumpableDefaultACL: policy-setting subroutine
2145 * Mark a default ACL as to be dumped or not
2146 *
2147 * For per-schema default ACLs, dump if the schema is to be dumped.
2148 * Otherwise dump if we are dumping "everything". Note that dumpSchema
2149 * and aclsSkip are checked separately.
2150 */
/*
 * NOTE(review): missing from this extraction: orig. line 2152 (the
 * signature; the body uses "dinfo" and "dopt", presumably
 * "selectDumpableDefaultACL(DumpOptions *dopt, DefaultACLInfo *dinfo)")
 * and orig. line 2161 (the two arms of the trailing ternary -- by
 * analogy with siblings, likely DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE).
 * Confirm against upstream.
 */
2151static void
2153{
2154 /* Default ACLs can't be extension members */
2155
2156 if (dinfo->dobj.namespace)
2157 /* default ACLs are considered part of the namespace */
2158 dinfo->dobj.dump = dinfo->dobj.namespace->dobj.dump_contains;
2159 else
2160 dinfo->dobj.dump = dopt->include_everything ?
2162}
2163
2164/*
2165 * selectDumpableCast: policy-setting subroutine
2166 * Mark a cast as to be dumped or not
2167 *
2168 * Casts do not belong to any particular namespace (since they haven't got
2169 * names), nor do they have identifiable owners. To distinguish user-defined
2170 * casts from built-in ones, we must resort to checking whether the cast's
2171 * OID is in the range reserved for initdb.
2172 */
/*
 * NOTE(review): missing from this extraction: orig. line 2174 (the
 * signature; presumably "selectDumpableCast(Archive *fout, CastInfo
 * *cast)") and orig. line 2187 (the arms of the include_everything
 * ternary). Confirm against upstream.
 */
2173static void
2175{
2176 if (checkExtensionMembership(&cast->dobj, fout))
2177 return; /* extension membership overrides all else */
2178
2179 /*
2180 * This would be DUMP_COMPONENT_ACL for from-initdb casts, but they do not
2181 * support ACLs currently.
2182 */
2183 if (cast->dobj.catId.oid <= g_last_builtin_oid)
2184 cast->dobj.dump = DUMP_COMPONENT_NONE;
2185 else
2186 cast->dobj.dump = fout->dopt->include_everything ?
2188}
2189
2190/*
2191 * selectDumpableProcLang: policy-setting subroutine
2192 * Mark a procedural language as to be dumped or not
2193 *
2194 * Procedural languages do not belong to any particular namespace. To
2195 * identify built-in languages, we must resort to checking whether the
2196 * language's OID is in the range reserved for initdb.
2197 */
/*
 * NOTE(review): missing from this extraction: orig. line 2199 (the
 * signature; presumably "selectDumpableProcLang(Archive *fout,
 * ProcLangInfo *plang)"), orig. line 2211 (the "if (...)" that the
 * "else" at 2213 pairs with -- per the comment above it, likely a
 * !include_everything test), and orig. line 2217 (the arms of the
 * remoteVersion ternary). Confirm against upstream.
 */
2198static void
2200{
2201 if (checkExtensionMembership(&plang->dobj, fout))
2202 return; /* extension membership overrides all else */
2203
2204 /*
2205 * Only include procedural languages when we are dumping everything.
2206 *
2207 * For from-initdb procedural languages, only include ACLs, as we do for
2208 * the pg_catalog namespace. We need this because procedural languages do
2209 * not live in any namespace.
2210 */
2212 plang->dobj.dump = DUMP_COMPONENT_NONE;
2213 else
2214 {
2215 if (plang->dobj.catId.oid <= g_last_builtin_oid)
2216 plang->dobj.dump = fout->remoteVersion < 90600 ?
2218 else
2219 plang->dobj.dump = DUMP_COMPONENT_ALL;
2220 }
2221}
2222
2223/*
2224 * selectDumpableAccessMethod: policy-setting subroutine
2225 * Mark an access method as to be dumped or not
2226 *
2227 * Access methods do not belong to any particular namespace. To identify
2228 * built-in access methods, we must resort to checking whether the
2229 * method's OID is in the range reserved for initdb.
2230 */
/*
 * NOTE(review): missing from this extraction: orig. line 2232 (the
 * signature; presumably "selectDumpableAccessMethod(Archive *fout,
 * AccessMethodInfo *method)") and orig. line 2252 (the arms of the
 * include_everything ternary). Confirm against upstream.
 */
2231static void
2233{
2234 /* see getAccessMethods() comment about v9.6. */
2235 if (fout->remoteVersion < 90600)
2236 {
2237 method->dobj.dump = DUMP_COMPONENT_NONE;
2238 return;
2239 }
2240
2241 if (checkExtensionMembership(&method->dobj, fout))
2242 return; /* extension membership overrides all else */
2243
2244 /*
2245 * This would be DUMP_COMPONENT_ACL for from-initdb access methods, but
2246 * they do not support ACLs currently.
2247 */
2248 if (method->dobj.catId.oid <= g_last_builtin_oid)
2249 method->dobj.dump = DUMP_COMPONENT_NONE;
2250 else
2251 method->dobj.dump = fout->dopt->include_everything ?
2253}
2254
2255/*
2256 * selectDumpableExtension: policy-setting subroutine
2257 * Mark an extension as to be dumped or not
2258 *
2259 * Built-in extensions should be skipped except for checking ACLs, since we
2260 * assume those will already be installed in the target database. We identify
2261 * such extensions by their having OIDs in the range reserved for initdb.
2262 * We dump all user-added extensions by default. No extensions are dumped
2263 * if include_everything is false (i.e., a --schema or --table switch was
2264 * given), except if --extension specifies a list of extensions to dump.
2265 */
/*
 * NOTE(review): missing from this extraction: orig. line 2267 (the
 * signature; the body uses "extinfo" and "dopt"), orig. line 2279 (the
 * "if" that the "else" at 2284 pairs with -- per the comment at 2278,
 * likely a test whether a --extension include list was given), and
 * orig. lines 2281/2283/2287/2291 (pieces of the list-membership calls
 * and ternary arms). Confirm against upstream.
 */
2266static void
2268{
2269 /*
2270 * Use DUMP_COMPONENT_ACL for built-in extensions, to allow users to
2271 * change permissions on their member objects, if they wish to, and have
2272 * those changes preserved.
2273 */
2274 if (extinfo->dobj.catId.oid <= g_last_builtin_oid)
2275 extinfo->dobj.dump = extinfo->dobj.dump_contains = DUMP_COMPONENT_ACL;
2276 else
2277 {
2278 /* check if there is a list of extensions to dump */
2280 extinfo->dobj.dump = extinfo->dobj.dump_contains =
2282 extinfo->dobj.catId.oid) ?
2284 else
2285 extinfo->dobj.dump = extinfo->dobj.dump_contains =
2286 dopt->include_everything ?
2288
2289 /* check that the extension is not explicitly excluded */
2290 if (extinfo->dobj.dump &&
2292 extinfo->dobj.catId.oid))
2293 extinfo->dobj.dump = extinfo->dobj.dump_contains = DUMP_COMPONENT_NONE;
2294 }
2295}
2296
2297/*
2298 * selectDumpablePublicationObject: policy-setting subroutine
2299 * Mark a publication object as to be dumped or not
2300 *
2301 * A publication can have schemas and tables which have schemas, but those are
2302 * ignored in decision making, because publications are only dumped when we are
2303 * dumping everything.
2304 */
/*
 * NOTE(review): missing from this extraction: orig. line 2306 (the
 * signature; presumably "selectDumpablePublicationObject(DumpableObject
 * *dobj, Archive *fout)") and orig. line 2312 (the arms of the
 * include_everything ternary). Confirm against upstream.
 */
2305static void
2307{
2308 if (checkExtensionMembership(dobj, fout))
2309 return; /* extension membership overrides all else */
2310
2311 dobj->dump = fout->dopt->include_everything ?
2313}
2314
2315/*
2316 * selectDumpableStatisticsObject: policy-setting subroutine
2317 * Mark an extended statistics object as to be dumped or not
2318 *
2319 * We dump an extended statistics object if the schema it's in and the table
2320 * it's for are being dumped. (This'll need more thought if statistics
2321 * objects ever support cross-table stats.)
2322 */
/*
 * NOTE(review): orig. line 2324 (the signature; the body uses "sobj",
 * presumably "selectDumpableStatisticsObject(StatsExtInfo *sobj,
 * Archive *fout)") is missing from this extraction.
 */
2323static void
2325{
2326 if (checkExtensionMembership(&sobj->dobj, fout))
2327 return; /* extension membership overrides all else */
2328
/* dump only if both the containing schema and the underlying table's
 * definition are being dumped */
2329 sobj->dobj.dump = sobj->dobj.namespace->dobj.dump_contains;
2330 if (sobj->stattable == NULL ||
2331 !(sobj->stattable->dobj.dump & DUMP_COMPONENT_DEFINITION))
2332 sobj->dobj.dump = DUMP_COMPONENT_NONE;
2333}
2334
2335/*
2336 * selectDumpableObject: policy-setting subroutine
2337 * Mark a generic dumpable object as to be dumped or not
2338 *
2339 * Use this only for object types without a special-case routine above.
2340 */
/*
 * NOTE(review): missing from this extraction: orig. line 2342 (the
 * signature; presumably "selectDumpableObject(DumpableObject *dobj,
 * Archive *fout)") and orig. line 2355 (the arms of the
 * include_everything ternary). Confirm against upstream.
 */
2341static void
2343{
2344 if (checkExtensionMembership(dobj, fout))
2345 return; /* extension membership overrides all else */
2346
2347 /*
2348 * Default policy is to dump if parent namespace is dumpable, or for
2349 * non-namespace-associated items, dump if we're dumping "everything".
2350 */
2351 if (dobj->namespace)
2352 dobj->dump = dobj->namespace->dobj.dump_contains;
2353 else
2354 dobj->dump = fout->dopt->include_everything ?
2356}
2357
2358/*
2359 * Dump a table's contents for loading using the COPY command
2360 * - this routine is called by the Archiver when it wants the table
2361 * to be dumped.
2362 */
/*
 * NOTE(review): this extraction dropped several original lines: 2364
 * (signature; presumably "dumpTableData_copy(Archive *fout, const void
 * *dcontext)"), 2369 (a declaration -- "conn" is used below but never
 * declared here), 2375-2376 (creation of the query buffer "q"), 2391
 * (the assignment of "column_list"), 2405/2411/2418/2424 (continuations
 * of calls split across lines), 2427/2429 (issuing the COPY command and
 * entering COPY OUT mode), 2441 (freeing "copybuf"), and 2517
 * (destroying "q"). The code below is otherwise verbatim; restore the
 * missing lines from upstream before compiling.
 */
2363static int
2365{
2366 const TableDataInfo *tdinfo = dcontext;
2367 const TableInfo *tbinfo = tdinfo->tdtable;
2368 const char *classname = tbinfo->dobj.name;
2370
2371 /*
2372 * Note: can't use getThreadLocalPQExpBuffer() here, we're calling fmtId
2373 * which uses it already.
2374 */
2377 PGresult *res;
2378 int ret;
2379 char *copybuf;
2380 const char *column_list;
2381
2382 pg_log_info("dumping contents of table \"%s.%s\"",
2383 tbinfo->dobj.namespace->dobj.name, classname);
2384
2385 /*
2386 * Specify the column list explicitly so that we have no possibility of
2387 * retrieving data in the wrong column order. (The default column
2388 * ordering of COPY will not be what we want in certain corner cases
2389 * involving ADD COLUMN and inheritance.)
2390 */
2392
2393 /*
2394 * Use COPY (SELECT ...) TO when dumping a foreign table's data, when a
2395 * filter condition was specified, and when in binary upgrade mode and
2396 * dumping an old pg_largeobject_metadata defined WITH OIDS. For other
2397 * cases a simple COPY suffices.
2398 */
2399 if (tdinfo->filtercond || tbinfo->relkind == RELKIND_FOREIGN_TABLE ||
2400 (fout->dopt->binary_upgrade && fout->remoteVersion < 120000 &&
2401 tbinfo->dobj.catId.oid == LargeObjectMetadataRelationId))
2402 {
2403 /* Temporarily allow access to foreign tables, to dump their data */
2404 if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2406
2407 appendPQExpBufferStr(q, "COPY (SELECT ");
2408 /* klugery to get rid of parens in column list */
2409 if (strlen(column_list) > 2)
2410 {
2412 q->data[q->len - 1] = ' ';
2413 }
2414 else
2415 appendPQExpBufferStr(q, "* ");
2416
2417 appendPQExpBuffer(q, "FROM %s %s) TO stdout;",
2419 tdinfo->filtercond ? tdinfo->filtercond : "");
2420 }
2421 else
2422 {
2423 appendPQExpBuffer(q, "COPY %s %s TO stdout;",
2425 column_list);
2426 }
2428 PQclear(res);
2430
/* pump COPY OUT data from libpq and forward each chunk to the archive */
2431 for (;;)
2432 {
2433 ret = PQgetCopyData(conn, &copybuf, 0);
2434
2435 if (ret < 0)
2436 break; /* done or error */
2437
2438 if (copybuf)
2439 {
2440 WriteData(fout, copybuf, ret);
2442 }
2443
2444 /* ----------
2445 * THROTTLE:
2446 *
2447 * There was considerable discussion in late July, 2000 regarding
2448 * slowing down pg_dump when backing up large tables. Users with both
2449 * slow & fast (multi-processor) machines experienced performance
2450 * degradation when doing a backup.
2451 *
2452 * Initial attempts based on sleeping for a number of ms for each ms
2453 * of work were deemed too complex, then a simple 'sleep in each loop'
2454 * implementation was suggested. The latter failed because the loop
2455 * was too tight. Finally, the following was implemented:
2456 *
2457 * If throttle is non-zero, then
2458 * See how long since the last sleep.
2459 * Work out how long to sleep (based on ratio).
2460 * If sleep is more than 100ms, then
2461 * sleep
2462 * reset timer
2463 * EndIf
2464 * EndIf
2465 *
2466 * where the throttle value was the number of ms to sleep per ms of
2467 * work. The calculation was done in each loop.
2468 *
2469 * Most of the hard work is done in the backend, and this solution
2470 * still did not work particularly well: on slow machines, the ratio
2471 * was 50:1, and on medium paced machines, 1:1, and on fast
2472 * multi-processor machines, it had little or no effect, for reasons
2473 * that were unclear.
2474 *
2475 * Further discussion ensued, and the proposal was dropped.
2476 *
2477 * For those people who want this feature, it can be implemented using
2478 * gettimeofday in each loop, calculating the time since last sleep,
2479 * multiplying that by the sleep ratio, then if the result is more
2480 * than a preset 'minimum sleep time' (say 100ms), call the 'select'
2481 * function to sleep for a subsecond period ie.
2482 *
2483 * select(0, NULL, NULL, NULL, &tvi);
2484 *
2485 * This will return after the interval specified in the structure tvi.
2486 * Finally, call gettimeofday again to save the 'last sleep time'.
2487 * ----------
2488 */
2489 }
2490 archprintf(fout, "\\.\n\n\n");
2491
2492 if (ret == -2)
2493 {
2494 /* copy data transfer failed */
2495 pg_log_error("Dumping the contents of table \"%s\" failed: PQgetCopyData() failed.", classname);
2496 pg_log_error_detail("Error message from server: %s", PQerrorMessage(conn));
2497 pg_log_error_detail("Command was: %s", q->data);
2498 exit_nicely(1);
2499 }
2500
2501 /* Check command status and return to normal libpq state */
2502 res = PQgetResult(conn);
2503 if (PQresultStatus(res) != PGRES_COMMAND_OK)
2504 {
2505 pg_log_error("Dumping the contents of table \"%s\" failed: PQgetResult() failed.", classname);
2506 pg_log_error_detail("Error message from server: %s", PQerrorMessage(conn));
2507 pg_log_error_detail("Command was: %s", q->data);
2508 exit_nicely(1);
2509 }
2510 PQclear(res);
2511
2512 /* Do this to ensure we've pumped libpq back to idle state */
2513 if (PQgetResult(conn) != NULL)
2514 pg_log_warning("unexpected extra results during COPY of table \"%s\"",
2515 classname);
2516
2518
2519 /* Revert back the setting */
2520 if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2521 set_restrict_relation_kind(fout, "view, foreign-table");
2522
2523 return 1;
2524}
2525
2526/*
2527 * Dump table data using INSERT commands.
2528 *
2529 * Caution: when we restore from an archive file direct to database, the
2530 * INSERT commands emitted by this function have to be parsed by
2531 * pg_backup_db.c's ExecuteSimpleCommands(), which will not handle comments,
2532 * E'' strings, or dollar-quoted strings. So don't emit anything like that.
2533 */
/*
 * NOTE(review): this extraction dropped several original lines,
 * including: 2535 (signature; presumably "dumpTableData_insert(Archive
 * *fout, const void *dcontext)"), 2540-2541 (declarations of the query
 * buffer "q" and of "insertStmt", both used below but never declared
 * here), 2551 (the call guarded by the RELKIND_FOREIGN_TABLE test),
 * 2582/2586/2591 (continuations of the cursor setup and FETCH), 2610,
 * 2619-2621, 2625, 2637, 2641-2642, 2645 and 2651 (pieces of the
 * INSERT-statement construction), 2742-2743 (the string-literal
 * escaping call for the default case), 2755 and 2762 (the row-count
 * test and reset around statement termination), and 2787/2789 (buffer
 * destruction). The code below is otherwise verbatim; restore the
 * missing lines from upstream before compiling.
 */
2534static int
2536{
2537 const TableDataInfo *tdinfo = dcontext;
2538 const TableInfo *tbinfo = tdinfo->tdtable;
2539 DumpOptions *dopt = fout->dopt;
2542 char *attgenerated;
2543 PGresult *res;
2544 int nfields,
2545 i;
2546 int rows_per_statement = dopt->dump_inserts;
2547 int rows_this_statement = 0;
2548
2549 /* Temporarily allow access to foreign tables, to dump their data */
2550 if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2552
2553 /*
2554 * If we're going to emit INSERTs with column names, the most efficient
2555 * way to deal with generated columns is to exclude them entirely. For
2556 * INSERTs without column names, we have to emit DEFAULT rather than the
2557 * actual column value --- but we can save a few cycles by fetching nulls
2558 * rather than the uninteresting-to-us value.
2559 */
2560 attgenerated = pg_malloc_array(char, tbinfo->numatts)
2561 appendPQExpBufferStr(q, "DECLARE _pg_dump_cursor CURSOR FOR SELECT ");
2562 nfields = 0;
2563 for (i = 0; i < tbinfo->numatts; i++)
2564 {
2565 if (tbinfo->attisdropped[i])
2566 continue;
2567 if (tbinfo->attgenerated[i] && dopt->column_inserts)
2568 continue;
2569 if (nfields > 0)
2570 appendPQExpBufferStr(q, ", ");
2571 if (tbinfo->attgenerated[i])
2572 appendPQExpBufferStr(q, "NULL");
2573 else
2574 appendPQExpBufferStr(q, fmtId(tbinfo->attnames[i]));
2575 attgenerated[nfields] = tbinfo->attgenerated[i];
2576 nfields++;
2577 }
2578 /* Servers before 9.4 will complain about zero-column SELECT */
2579 if (nfields == 0)
2580 appendPQExpBufferStr(q, "NULL");
2581 appendPQExpBuffer(q, " FROM ONLY %s",
2583 if (tdinfo->filtercond)
2584 appendPQExpBuffer(q, " %s", tdinfo->filtercond);
2585
2587
2588 while (1)
2589 {
2590 res = ExecuteSqlQuery(fout, "FETCH 100 FROM _pg_dump_cursor",
2592
2593 /* cross-check field count, allowing for dummy NULL if any */
2594 if (nfields != PQnfields(res) &&
2595 !(nfields == 0 && PQnfields(res) == 1))
2596 pg_fatal("wrong number of fields retrieved from table \"%s\"",
2597 tbinfo->dobj.name);
2598
2599 /*
2600 * First time through, we build as much of the INSERT statement as
2601 * possible in "insertStmt", which we can then just print for each
2602 * statement. If the table happens to have zero dumpable columns then
2603 * this will be a complete statement, otherwise it will end in
2604 * "VALUES" and be ready to have the row's column values printed.
2605 */
2606 if (insertStmt == NULL)
2607 {
2608 const TableInfo *targettab;
2609
2611
2612 /*
2613 * When load-via-partition-root is set or forced, get the root
2614 * table name for the partition table, so that we can reload data
2615 * through the root table.
2616 */
2617 if (tbinfo->ispartition &&
2618 (dopt->load_via_partition_root ||
2621 else
2622 targettab = tbinfo;
2623
2624 appendPQExpBuffer(insertStmt, "INSERT INTO %s ",
2626
2627 /* corner case for zero-column table */
2628 if (nfields == 0)
2629 {
2630 appendPQExpBufferStr(insertStmt, "DEFAULT VALUES;\n");
2631 }
2632 else
2633 {
2634 /* append the list of column names if required */
2635 if (dopt->column_inserts)
2636 {
2638 for (int field = 0; field < nfields; field++)
2639 {
2640 if (field > 0)
2643 fmtId(PQfname(res, field)));
2644 }
2646 }
2647
2648 if (tbinfo->needs_override)
2649 appendPQExpBufferStr(insertStmt, "OVERRIDING SYSTEM VALUE ");
2650
2652 }
2653 }
2654
2655 for (int tuple = 0; tuple < PQntuples(res); tuple++)
2656 {
2657 /* Write the INSERT if not in the middle of a multi-row INSERT. */
2658 if (rows_this_statement == 0)
2659 archputs(insertStmt->data, fout);
2660
2661 /*
2662 * If it is zero-column table then we've already written the
2663 * complete statement, which will mean we've disobeyed
2664 * --rows-per-insert when it's set greater than 1. We do support
2665 * a way to make this multi-row with: SELECT UNION ALL SELECT
2666 * UNION ALL ... but that's non-standard so we should avoid it
2667 * given that using INSERTs is mostly only ever needed for
2668 * cross-database exports.
2669 */
2670 if (nfields == 0)
2671 continue;
2672
2673 /* Emit a row heading */
2674 if (rows_per_statement == 1)
2675 archputs(" (", fout);
2676 else if (rows_this_statement > 0)
2677 archputs(",\n\t(", fout);
2678 else
2679 archputs("\n\t(", fout);
2680
2681 for (int field = 0; field < nfields; field++)
2682 {
2683 if (field > 0)
2684 archputs(", ", fout);
2685 if (attgenerated[field])
2686 {
2687 archputs("DEFAULT", fout);
2688 continue;
2689 }
2690 if (PQgetisnull(res, tuple, field))
2691 {
2692 archputs("NULL", fout);
2693 continue;
2694 }
2695
2696 /* XXX This code is partially duplicated in ruleutils.c */
2697 switch (PQftype(res, field))
2698 {
2699 case INT2OID:
2700 case INT4OID:
2701 case INT8OID:
2702 case OIDOID:
2703 case FLOAT4OID:
2704 case FLOAT8OID:
2705 case NUMERICOID:
2706 {
2707 /*
2708 * These types are printed without quotes unless
2709 * they contain values that aren't accepted by the
2710 * scanner unquoted (e.g., 'NaN'). Note that
2711 * strtod() and friends might accept NaN, so we
2712 * can't use that to test.
2713 *
2714 * In reality we only need to defend against
2715 * infinity and NaN, so we need not get too crazy
2716 * about pattern matching here.
2717 */
2718 const char *s = PQgetvalue(res, tuple, field);
2719
2720 if (strspn(s, "0123456789 +-eE.") == strlen(s))
2721 archputs(s, fout);
2722 else
2723 archprintf(fout, "'%s'", s);
2724 }
2725 break;
2726
2727 case BITOID:
2728 case VARBITOID:
2729 archprintf(fout, "B'%s'",
2730 PQgetvalue(res, tuple, field));
2731 break;
2732
2733 case BOOLOID:
2734 if (strcmp(PQgetvalue(res, tuple, field), "t") == 0)
2735 archputs("true", fout);
2736 else
2737 archputs("false", fout);
2738 break;
2739
2740 default:
2741 /* All other types are printed as string literals. */
2744 PQgetvalue(res, tuple, field),
2745 fout);
2746 archputs(q->data, fout);
2747 break;
2748 }
2749 }
2750
2751 /* Terminate the row ... */
2752 archputs(")", fout);
2753
2754 /* ... and the statement, if the target no. of rows is reached */
2756 {
2757 if (dopt->do_nothing)
2758 archputs(" ON CONFLICT DO NOTHING;\n", fout);
2759 else
2760 archputs(";\n", fout);
2761 /* Reset the row counter */
2763 }
2764 }
2765
2766 if (PQntuples(res) <= 0)
2767 {
2768 PQclear(res);
2769 break;
2770 }
2771 PQclear(res);
2772 }
2773
2774 /* Terminate any statements that didn't make the row count. */
2775 if (rows_this_statement > 0)
2776 {
2777 if (dopt->do_nothing)
2778 archputs(" ON CONFLICT DO NOTHING;\n", fout);
2779 else
2780 archputs(";\n", fout);
2781 }
2782
2783 archputs("\n\n", fout);
2784
2785 ExecuteSqlStatement(fout, "CLOSE _pg_dump_cursor");
2786
2788 if (insertStmt != NULL)
2790 free(attgenerated);
2791
2792 /* Revert back the setting */
2793 if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2794 set_restrict_relation_kind(fout, "view, foreign-table");
2795
2796 return 1;
2797}
2798
2799/*
2800 * getRootTableInfo:
2801 * get the root TableInfo for the given partition table.
2802 */
/*
 * NOTE(review): missing from this extraction: orig. line 2804 (the
 * signature; presumably "getRootTableInfo(const TableInfo *tbinfo)")
 * and orig. line 2806 (the declaration of "parentTbinfo"). Confirm
 * against upstream.
 */
2803static TableInfo *
2805{
2807
2808 Assert(tbinfo->ispartition);
2809 Assert(tbinfo->numParents == 1);
2810
/* climb the single-parent ancestry until a non-partition is reached */
2811 parentTbinfo = tbinfo->parents[0];
2812 while (parentTbinfo->ispartition)
2813 {
2814 Assert(parentTbinfo->numParents == 1);
2815 parentTbinfo = parentTbinfo->parents[0];
2816 }
2817
2818 return parentTbinfo;
2819}
2820
2821/*
2822 * forcePartitionRootLoad
2823 * Check if we must force load_via_partition_root for this partition.
2824 *
2825 * This is required if any level of ancestral partitioned table has an
2826 * unsafe partitioning scheme.
2827 */
/*
 * NOTE(review): missing from this extraction: orig. line 2829 (the
 * signature; presumably "forcePartitionRootLoad(const TableInfo
 * *tbinfo)") and orig. line 2831 (the declaration of "parentTbinfo").
 * Confirm against upstream.
 */
2828static bool
2830{
2832
2833 Assert(tbinfo->ispartition);
2834 Assert(tbinfo->numParents == 1);
2835
/* walk up the ancestry; any unsafe ancestor forces root loading */
2836 parentTbinfo = tbinfo->parents[0];
2837 if (parentTbinfo->unsafe_partitions)
2838 return true;
2839 while (parentTbinfo->ispartition)
2840 {
2841 Assert(parentTbinfo->numParents == 1);
2842 parentTbinfo = parentTbinfo->parents[0];
2843 if (parentTbinfo->unsafe_partitions)
2844 return true;
2845 }
2846
2847 return false;
2848}
2849
2850/*
2851 * dumpTableData -
2852 * dump the contents of a single table
2853 *
2854 * Actually, this just makes an ArchiveEntry for the table contents.
2855 */
/*
 * NOTE(review): this extraction dropped several original lines: 2857
 * (signature; presumably "dumpTableData(Archive *fout, const
 * TableDataInfo *tdinfo)"), 2861-2862 (declarations -- "copyBuf" is
 * used below but never declared here), 2879 (the closing condition of
 * the load-via-partition-root test), 2884-2886 (assignments of
 * "copyFrom" and "sanitized" in the partition-root branch),
 * 2893 (the else-branch assignment of "copyFrom"), 2903 (the column
 * list argument of the COPY ... FROM stdin statement), and 2960-2961
 * (buffer destruction). The code below is otherwise verbatim; restore
 * the missing lines from upstream before compiling.
 */
2856static void
2858{
2859 DumpOptions *dopt = fout->dopt;
2860 const TableInfo *tbinfo = tdinfo->tdtable;
2863 DataDumperPtr dumpFn;
2864 char *tdDefn = NULL;
2865 char *copyStmt;
2866 const char *copyFrom;
2867
2868 /* We had better have loaded per-column details about this table */
2869 Assert(tbinfo->interesting);
2870
2871 /*
2872 * When load-via-partition-root is set or forced, get the root table name
2873 * for the partition table, so that we can reload data through the root
2874 * table. Then construct a comment to be inserted into the TOC entry's
2875 * defn field, so that such cases can be identified reliably.
2876 */
2877 if (tbinfo->ispartition &&
2878 (dopt->load_via_partition_root ||
2880 {
2881 const TableInfo *parentTbinfo;
2882 char *sanitized;
2883
2887 printfPQExpBuffer(copyBuf, "-- load via partition root %s",
2888 sanitized);
2889 free(sanitized);
2890 tdDefn = pg_strdup(copyBuf->data);
2891 }
2892 else
2894
2895 if (dopt->dump_inserts == 0)
2896 {
2897 /* Dump/restore using COPY */
2898 dumpFn = dumpTableData_copy;
2899 /* must use 2 steps here 'cause fmtId is nonreentrant */
2900 printfPQExpBuffer(copyBuf, "COPY %s ",
2901 copyFrom);
2902 appendPQExpBuffer(copyBuf, "%s FROM stdin;\n",
2904 copyStmt = copyBuf->data;
2905 }
2906 else
2907 {
2908 /* Restore using INSERT */
2909 dumpFn = dumpTableData_insert;
2910 copyStmt = NULL;
2911 }
2912
2913 /*
2914 * Note: although the TableDataInfo is a full DumpableObject, we treat its
2915 * dependency on its table as "special" and pass it to ArchiveEntry now.
2916 * See comments for BuildArchiveDependencies.
2917 */
2918 if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2919 {
2920 TocEntry *te;
2921
2922 te = ArchiveEntry(fout, tdinfo->dobj.catId, tdinfo->dobj.dumpId,
2923 ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
2924 .namespace = tbinfo->dobj.namespace->dobj.name,
2925 .owner = tbinfo->rolname,
2926 .description = "TABLE DATA",
2927 .section = SECTION_DATA,
2928 .createStmt = tdDefn,
2929 .copyStmt = copyStmt,
2930 .deps = &(tbinfo->dobj.dumpId),
2931 .nDeps = 1,
2932 .dumpFn = dumpFn,
2933 .dumpArg = tdinfo));
2934
2935 /*
2936 * Set the TocEntry's dataLength in case we are doing a parallel dump
2937 * and want to order dump jobs by table size. We choose to measure
2938 * dataLength in table pages (including TOAST pages) during dump, so
2939 * no scaling is needed.
2940 *
2941 * However, relpages is declared as "integer" in pg_class, and hence
2942 * also in TableInfo, but it's really BlockNumber a/k/a unsigned int.
2943 * Cast so that we get the right interpretation of table sizes
2944 * exceeding INT_MAX pages.
2945 */
2946 te->dataLength = (BlockNumber) tbinfo->relpages;
2947 te->dataLength += (BlockNumber) tbinfo->toastpages;
2948
2949 /*
2950 * If pgoff_t is only 32 bits wide, the above refinement is useless,
2951 * and instead we'd better worry about integer overflow. Clamp to
2952 * INT_MAX if the correct result exceeds that.
2953 */
2954 if (sizeof(te->dataLength) == 4 &&
2955 (tbinfo->relpages < 0 || tbinfo->toastpages < 0 ||
2956 te->dataLength < 0))
2957 te->dataLength = INT_MAX;
2958 }
2959
2962}
2963
2964/*
2965 * refreshMatViewData -
2966 * load or refresh the contents of a single materialized view
2967 *
2968 * Actually, this just makes an ArchiveEntry for the REFRESH MATERIALIZED VIEW
2969 * statement.
2970 */
/*
 * NOTE(review): missing from this extraction: orig. line 2972 (the
 * signature; presumably "refreshMatViewData(Archive *fout,
 * TableDataInfo *tdinfo)"), orig. line 2984 (the argument supplying the
 * view's qualified name to the REFRESH statement), orig. line 2987 (the
 * start of the ArchiveEntry(...) call whose arguments follow), and
 * orig. line 2999 (destruction of "q"). Confirm against upstream.
 */
2971static void
2973{
2974 TableInfo *tbinfo = tdinfo->tdtable;
2975 PQExpBuffer q;
2976
2977 /* If the materialized view is not flagged as populated, skip this. */
2978 if (!tbinfo->relispopulated)
2979 return;
2980
2981 q = createPQExpBuffer();
2982
2983 appendPQExpBuffer(q, "REFRESH MATERIALIZED VIEW %s;\n",
2985
2986 if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2988 tdinfo->dobj.catId, /* catalog ID */
2989 tdinfo->dobj.dumpId, /* dump ID */
2990 ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
2991 .namespace = tbinfo->dobj.namespace->dobj.name,
2992 .owner = tbinfo->rolname,
2993 .description = "MATERIALIZED VIEW DATA",
2994 .section = SECTION_POST_DATA,
2995 .createStmt = q->data,
2996 .deps = tdinfo->dobj.dependencies,
2997 .nDeps = tdinfo->dobj.nDeps));
2998
3000}
3001
3002/*
3003 * getTableData -
3004 * set up dumpable objects representing the contents of tables
3005 */
3006static void
3007getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, char relkind)
3008{
3009 int i;
3010
3011 for (i = 0; i < numTables; i++)
3012 {
3013 if (tblinfo[i].dobj.dump & DUMP_COMPONENT_DATA &&
3014 (!relkind || tblinfo[i].relkind == relkind))
3015 makeTableDataInfo(dopt, &(tblinfo[i]));
3016 }
3017}
3018
3019/*
3020 * Make a dumpable object for the data of this specific table
3021 *
3022 * Note: we make a TableDataInfo if and only if we are going to dump the
3023 * table data; the "dump" field in such objects isn't very interesting.
3024 */
/*
 * NOTE(review): missing from this extraction: orig. line 3026 (the
 * signature; presumably "makeTableDataInfo(DumpOptions *dopt, TableInfo
 * *tbinfo)"), orig. line 3028 (the declaration of "tdinfo"), orig.
 * lines 3042-3043 (the rest of the foreign-table condition begun at
 * 3041 -- its final fragment "tbinfo->foreign_server)))" survives),
 * orig. line 3052 (the second half of the unlogged-table condition),
 * orig. line 3056 (the first half of the exclusion-list test whose tail
 * "tbinfo->dobj.catId.oid))" survives), and orig. line 3061 (the
 * allocation of "tdinfo"). Confirm against upstream.
 */
3025static void
3027{
3029
3030 /*
3031 * Nothing to do if we already decided to dump the table. This will
3032 * happen for "config" tables.
3033 */
3034 if (tbinfo->dataObj != NULL)
3035 return;
3036
3037 /* Skip VIEWs (no data to dump) */
3038 if (tbinfo->relkind == RELKIND_VIEW)
3039 return;
3040 /* Skip FOREIGN TABLEs (no data to dump) unless requested explicitly */
3041 if (tbinfo->relkind == RELKIND_FOREIGN_TABLE &&
3044 tbinfo->foreign_server)))
3045 return;
3046 /* Skip partitioned tables (data in partitions) */
3047 if (tbinfo->relkind == RELKIND_PARTITIONED_TABLE)
3048 return;
3049
3050 /* Don't dump data in unlogged tables, if so requested */
3051 if (tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED &&
3053 return;
3054
3055 /* Check that the data is not explicitly excluded */
3057 tbinfo->dobj.catId.oid))
3058 return;
3059
3060 /* OK, let's dump it */
3062
3063 if (tbinfo->relkind == RELKIND_MATVIEW)
3064 tdinfo->dobj.objType = DO_REFRESH_MATVIEW;
3065 else if (tbinfo->relkind == RELKIND_SEQUENCE)
3066 tdinfo->dobj.objType = DO_SEQUENCE_SET;
3067 else
3068 tdinfo->dobj.objType = DO_TABLE_DATA;
3069
3070 /*
3071 * Note: use tableoid 0 so that this object won't be mistaken for
3072 * something that pg_depend entries apply to.
3073 */
3074 tdinfo->dobj.catId.tableoid = 0;
3075 tdinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
3076 AssignDumpId(&tdinfo->dobj);
3077 tdinfo->dobj.name = tbinfo->dobj.name;
3078 tdinfo->dobj.namespace = tbinfo->dobj.namespace;
3079 tdinfo->tdtable = tbinfo;
3080 tdinfo->filtercond = NULL; /* might get set later */
3081 addObjectDependency(&tdinfo->dobj, tbinfo->dobj.dumpId);
3082
3083 /* A TableDataInfo contains data, of course */
3084 tdinfo->dobj.components |= DUMP_COMPONENT_DATA;
3085
3086 tbinfo->dataObj = tdinfo;
3087
3088 /*
3089 * Materialized view statistics must be restored after the data, because
3090 * REFRESH MATERIALIZED VIEW replaces the storage and resets the stats.
3091 *
3092 * The dependency is added here because the statistics objects are created
3093 * first.
3094 */
3095 if (tbinfo->relkind == RELKIND_MATVIEW && tbinfo->stats != NULL)
3096 {
3097 tbinfo->stats->section = SECTION_POST_DATA;
3098 addObjectDependency(&tbinfo->stats->dobj, tdinfo->dobj.dumpId);
3099 }
3100
3101 /* Make sure that we'll collect per-column info for this table. */
3102 tbinfo->interesting = true;
3103}
3104
3105/*
3106 * The refresh for a materialized view must be dependent on the refresh for
3107 * any materialized view that this one is dependent on.
3108 *
3109 * This must be called after all the objects are created, but before they are
3110 * sorted.
3111 */
/*
 * NOTE(review): missing from this extraction: orig. line 3113 (the
 * signature; presumably "buildMatViewRefreshDependencies(Archive
 * *fout)"), orig. lines 3141 and 3152 (the closing fragments of the two
 * "relkind IN (...)" SQL pieces), orig. lines 3169-3173 (declarations
 * -- "refobjId", "tbinfo", "reftbinfo" and "refdobj" are used below but
 * never declared here), orig. line 3190 (an Assert between the two
 * lookups), orig. line 3192 (the lookup assigning "refdobj"), orig.
 * line 3197 (the cast assigning "reftbinfo"), and orig. lines 3203-3204
 * (the addObjectDependency call that records the refresh ordering).
 * Confirm against upstream.
 */
3112static void
3114{
3115 PQExpBuffer query;
3116 PGresult *res;
3117 int ntups,
3118 i;
3119 int i_classid,
3120 i_objid,
3121 i_refobjid;
3122
3123 /* No Mat Views before 9.3. */
3124 if (fout->remoteVersion < 90300)
3125 return;
3126
3127 query = createPQExpBuffer();
3128
/* recursively walk pg_depend/pg_rewrite to find matview-on-matview deps */
3129 appendPQExpBufferStr(query, "WITH RECURSIVE w AS "
3130 "( "
3131 "SELECT d1.objid, d2.refobjid, c2.relkind AS refrelkind "
3132 "FROM pg_depend d1 "
3133 "JOIN pg_class c1 ON c1.oid = d1.objid "
3134 "AND c1.relkind = " CppAsString2(RELKIND_MATVIEW)
3135 " JOIN pg_rewrite r1 ON r1.ev_class = d1.objid "
3136 "JOIN pg_depend d2 ON d2.classid = 'pg_rewrite'::regclass "
3137 "AND d2.objid = r1.oid "
3138 "AND d2.refobjid <> d1.objid "
3139 "JOIN pg_class c2 ON c2.oid = d2.refobjid "
3140 "AND c2.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
3142 "WHERE d1.classid = 'pg_class'::regclass "
3143 "UNION "
3144 "SELECT w.objid, d3.refobjid, c3.relkind "
3145 "FROM w "
3146 "JOIN pg_rewrite r3 ON r3.ev_class = w.refobjid "
3147 "JOIN pg_depend d3 ON d3.classid = 'pg_rewrite'::regclass "
3148 "AND d3.objid = r3.oid "
3149 "AND d3.refobjid <> w.refobjid "
3150 "JOIN pg_class c3 ON c3.oid = d3.refobjid "
3151 "AND c3.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
3153 ") "
3154 "SELECT 'pg_class'::regclass::oid AS classid, objid, refobjid "
3155 "FROM w "
3156 "WHERE refrelkind = " CppAsString2(RELKIND_MATVIEW));
3157
3158 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3159
3160 ntups = PQntuples(res);
3161
3162 i_classid = PQfnumber(res, "classid");
3163 i_objid = PQfnumber(res, "objid");
3164 i_refobjid = PQfnumber(res, "refobjid");
3165
3166 for (i = 0; i < ntups; i++)
3167 {
3168 CatalogId objId;
3170 DumpableObject *dobj;
3174
3175 objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
3176 objId.oid = atooid(PQgetvalue(res, i, i_objid));
3177 refobjId.tableoid = objId.tableoid;
3178 refobjId.oid = atooid(PQgetvalue(res, i, i_refobjid));
3179
3180 dobj = findObjectByCatalogId(objId);
3181 if (dobj == NULL)
3182 continue;
3183
3184 Assert(dobj->objType == DO_TABLE);
3185 tbinfo = (TableInfo *) dobj;
3186 Assert(tbinfo->relkind == RELKIND_MATVIEW);
3187 dobj = (DumpableObject *) tbinfo->dataObj;
3188 if (dobj == NULL)
3189 continue;
3191
3193 if (refdobj == NULL)
3194 continue;
3195
3196 Assert(refdobj->objType == DO_TABLE);
3198 Assert(reftbinfo->relkind == RELKIND_MATVIEW);
3199 refdobj = (DumpableObject *) reftbinfo->dataObj;
3200 if (refdobj == NULL)
3201 continue;
3202 Assert(refdobj->objType == DO_REFRESH_MATVIEW);
3205
/* propagate "not populated" so dependent matviews aren't refreshed */
3206 if (!reftbinfo->relispopulated)
3207 tbinfo->relispopulated = false;
3208 }
3209
3210 PQclear(res);
3211
3212 destroyPQExpBuffer(query);
3213}
3214
3215/*
3216 * getTableDataFKConstraints -
3217 * add dump-order dependencies reflecting foreign key constraints
3218 *
3219 * This code is executed only in a data-only dump --- in schema+data dumps
3220 * we handle foreign key issues by not creating the FK constraints until
3221 * after the data is loaded. In a data-only dump, however, we want to
3222 * order the table data objects in such a way that a table's referenced
3223 * tables are restored first. (In the presence of circular references or
3224 * self-references this may be impossible; we'll detect and complain about
3225 * that during the dependency sorting step.)
3226 */
/*
 * NOTE(review): missing from this extraction: orig. line 3228 (the
 * signature; presumably "getTableDataFKConstraints(void)"), orig. line
 * 3230 (the declaration of "dobjs", used and freed below), orig. line
 * 3235 (the call that fills "dobjs"/"numObjs" -- per the comment above
 * it, a fetch of all dumpable objects), and orig. lines 3240-3241
 * (declarations of "cinfo" and "ftable"). Confirm against upstream.
 */
3227static void
3229{
3231 int numObjs;
3232 int i;
3233
3234 /* Search through all the dumpable objects for FK constraints */
3236 for (i = 0; i < numObjs; i++)
3237 {
3238 if (dobjs[i]->objType == DO_FK_CONSTRAINT)
3239 {
3242
3243 /* Not interesting unless both tables are to be dumped */
3244 if (cinfo->contable == NULL ||
3245 cinfo->contable->dataObj == NULL)
3246 continue;
3247 ftable = findTableByOid(cinfo->confrelid);
3248 if (ftable == NULL ||
3249 ftable->dataObj == NULL)
3250 continue;
3251
3252 /*
3253 * Okay, make referencing table's TABLE_DATA object depend on the
3254 * referenced table's TABLE_DATA object.
3255 */
3256 addObjectDependency(&cinfo->contable->dataObj->dobj,
3257 ftable->dataObj->dobj.dumpId);
3258 }
3259 }
3260 free(dobjs);
3261}
3262
3263
3264/*
3265 * dumpDatabase:
3266 * dump the database definition
3267 */
/*
 * NOTE(review): this listing is an extraction of pg_dump.c with embedded
 * original line numbers; gaps in that numbering indicate source lines that
 * were dropped by the extraction (e.g. the function signature on line 3269
 * and several local declarations). Tokens below are preserved exactly as
 * found; recover the missing lines from the upstream file before compiling.
 */
3268 static void
3270 {
3271 DumpOptions *dopt = fout->dopt;
3277 PGresult *res;
3278 int i_tableoid,
3279 i_oid,
3280 i_datname,
3281 i_datdba,
3282 i_encoding,
3284 i_collate,
3285 i_ctype,
3289 i_minmxid,
3290 i_datacl,
3299 const char *datname,
3300 *dba,
3301 *encoding,
3303 *collate,
3304 *ctype,
3305 *locale,
3306 *icurules,
3308 *datconnlimit,
3309 *tablespace;
3310 uint32 frozenxid,
3311 minmxid;
3312 char *qdatname;
3313
3314 pg_log_info("saving database definition");
3315
3316 /*
3317 * Fetch the database-level properties for this database.
3318 */
3319 appendPQExpBufferStr(dbQry, "SELECT tableoid, oid, datname, "
3320 "datdba, "
3321 "pg_encoding_to_char(encoding) AS encoding, "
3322 "datcollate, datctype, datfrozenxid, "
3323 "datacl, acldefault('d', datdba) AS acldefault, "
3324 "datistemplate, datconnlimit, ");
3325 if (fout->remoteVersion >= 90300)
3326 appendPQExpBufferStr(dbQry, "datminmxid, ");
3327 else
3328 appendPQExpBufferStr(dbQry, "0 AS datminmxid, ");
3329 if (fout->remoteVersion >= 170000)
3330 appendPQExpBufferStr(dbQry, "datlocprovider, datlocale, datcollversion, ");
3331 else if (fout->remoteVersion >= 150000)
3332 appendPQExpBufferStr(dbQry, "datlocprovider, daticulocale AS datlocale, datcollversion, ");
3333 else
3334 appendPQExpBufferStr(dbQry, "'c' AS datlocprovider, NULL AS datlocale, NULL AS datcollversion, ");
3335 if (fout->remoteVersion >= 160000)
3336 appendPQExpBufferStr(dbQry, "daticurules, ");
3337 else
3338 appendPQExpBufferStr(dbQry, "NULL AS daticurules, ");
3340 "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
3341 "shobj_description(oid, 'pg_database') AS description "
3342 "FROM pg_database "
3343 "WHERE datname = current_database()");
3344
3346
/* Resolve result-set column numbers for the single row returned above */
3347 i_tableoid = PQfnumber(res, "tableoid");
3348 i_oid = PQfnumber(res, "oid");
3349 i_datname = PQfnumber(res, "datname");
3350 i_datdba = PQfnumber(res, "datdba");
3351 i_encoding = PQfnumber(res, "encoding");
3352 i_datlocprovider = PQfnumber(res, "datlocprovider");
3353 i_collate = PQfnumber(res, "datcollate");
3354 i_ctype = PQfnumber(res, "datctype");
3355 i_datlocale = PQfnumber(res, "datlocale");
3356 i_daticurules = PQfnumber(res, "daticurules");
3357 i_frozenxid = PQfnumber(res, "datfrozenxid");
3358 i_minmxid = PQfnumber(res, "datminmxid");
3359 i_datacl = PQfnumber(res, "datacl");
3360 i_acldefault = PQfnumber(res, "acldefault");
3361 i_datistemplate = PQfnumber(res, "datistemplate");
3362 i_datconnlimit = PQfnumber(res, "datconnlimit");
3363 i_datcollversion = PQfnumber(res, "datcollversion");
3364 i_tablespace = PQfnumber(res, "tablespace");
3365
/* Extract database properties; NULL-able columns get NULL sentinels */
3366 dbCatId.tableoid = atooid(PQgetvalue(res, 0, i_tableoid));
3367 dbCatId.oid = atooid(PQgetvalue(res, 0, i_oid));
3368 datname = PQgetvalue(res, 0, i_datname);
3369 dba = getRoleName(PQgetvalue(res, 0, i_datdba));
3370 encoding = PQgetvalue(res, 0, i_encoding);
3372 collate = PQgetvalue(res, 0, i_collate);
3373 ctype = PQgetvalue(res, 0, i_ctype);
3374 if (!PQgetisnull(res, 0, i_datlocale))
3375 locale = PQgetvalue(res, 0, i_datlocale);
3376 else
3377 locale = NULL;
3378 if (!PQgetisnull(res, 0, i_daticurules))
3380 else
3381 icurules = NULL;
3382 frozenxid = atooid(PQgetvalue(res, 0, i_frozenxid));
3383 minmxid = atooid(PQgetvalue(res, 0, i_minmxid));
3384 dbdacl.acl = PQgetvalue(res, 0, i_datacl);
3385 dbdacl.acldefault = PQgetvalue(res, 0, i_acldefault);
3389
3391
3392 /*
3393 * Prepare the CREATE DATABASE command. We must specify OID (if we want
3394 * to preserve that), as well as the encoding, locale, and tablespace
3395 * since those can't be altered later. Other DB properties are left to
3396 * the DATABASE PROPERTIES entry, so that they can be applied after
3397 * reconnecting to the target DB.
3398 *
3399 * For binary upgrade, we use the FILE_COPY strategy because testing has
3400 * shown it to be faster. When the server is in binary upgrade mode, it
3401 * will also skip the checkpoints this strategy ordinarily performs.
3402 */
3403 if (dopt->binary_upgrade)
3404 {
3406 "CREATE DATABASE %s WITH TEMPLATE = template0 "
3407 "OID = %u STRATEGY = FILE_COPY",
3408 qdatname, dbCatId.oid);
3409 }
3410 else
3411 {
3412 appendPQExpBuffer(creaQry, "CREATE DATABASE %s WITH TEMPLATE = template0",
3413 qdatname);
3414 }
3415 if (strlen(encoding) > 0)
3416 {
3417 appendPQExpBufferStr(creaQry, " ENCODING = ");
3419 }
3420
/* datlocprovider values: 'b' = builtin, 'c' = libc, 'i' = ICU */
3421 appendPQExpBufferStr(creaQry, " LOCALE_PROVIDER = ");
3422 if (datlocprovider[0] == 'b')
3423 appendPQExpBufferStr(creaQry, "builtin");
3424 else if (datlocprovider[0] == 'c')
3426 else if (datlocprovider[0] == 'i')
3428 else
3429 pg_fatal("unrecognized locale provider: %s",
3431
3432 if (strlen(collate) > 0 && strcmp(collate, ctype) == 0)
3433 {
3434 appendPQExpBufferStr(creaQry, " LOCALE = ");
3436 }
3437 else
3438 {
3439 if (strlen(collate) > 0)
3440 {
3441 appendPQExpBufferStr(creaQry, " LC_COLLATE = ");
3443 }
3444 if (strlen(ctype) > 0)
3445 {
3446 appendPQExpBufferStr(creaQry, " LC_CTYPE = ");
3448 }
3449 }
3450 if (locale)
3451 {
3452 if (datlocprovider[0] == 'b')
3453 appendPQExpBufferStr(creaQry, " BUILTIN_LOCALE = ");
3454 else
3455 appendPQExpBufferStr(creaQry, " ICU_LOCALE = ");
3456
3458 }
3459
3460 if (icurules)
3461 {
3462 appendPQExpBufferStr(creaQry, " ICU_RULES = ");
3464 }
3465
3466 /*
3467 * For binary upgrade, carry over the collation version. For normal
3468 * dump/restore, omit the version, so that it is computed upon restore.
3469 */
3470 if (dopt->binary_upgrade)
3471 {
3472 if (!PQgetisnull(res, 0, i_datcollversion))
3473 {
3474 appendPQExpBufferStr(creaQry, " COLLATION_VERSION = ");
3477 fout);
3478 }
3479 }
3480
3481 /*
3482 * Note: looking at dopt->outputNoTablespaces here is completely the wrong
3483 * thing; the decision whether to specify a tablespace should be left till
3484 * pg_restore, so that pg_restore --no-tablespaces applies. Ideally we'd
3485 * label the DATABASE entry with the tablespace and let the normal
3486 * tablespace selection logic work ... but CREATE DATABASE doesn't pay
3487 * attention to default_tablespace, so that won't work.
3488 */
3489 if (strlen(tablespace) > 0 && strcmp(tablespace, "pg_default") != 0 &&
3490 !dopt->outputNoTablespaces)
3491 appendPQExpBuffer(creaQry, " TABLESPACE = %s",
3492 fmtId(tablespace));
3494
3495 appendPQExpBuffer(delQry, "DROP DATABASE %s;\n",
3496 qdatname);
3497
3499
/* Emit the main DATABASE TOC entry (CREATE/DROP DATABASE) */
3501 dbCatId, /* catalog ID */
3502 dbDumpId, /* dump ID */
3503 ARCHIVE_OPTS(.tag = datname,
3504 .owner = dba,
3505 .description = "DATABASE",
3506 .section = SECTION_PRE_DATA,
3507 .createStmt = creaQry->data,
3508 .dropStmt = delQry->data));
3509
3510 /* Compute correct tag for archive entry */
3511 appendPQExpBuffer(labelq, "DATABASE %s", qdatname);
3512
3513 /* Dump DB comment if any */
3514 {
3515 /*
3516 * 8.2 and up keep comments on shared objects in a shared table, so we
3517 * cannot use the dumpComment() code used for other database objects.
3518 * Be careful that the ArchiveEntry parameters match that function.
3519 */
3520 char *comment = PQgetvalue(res, 0, PQfnumber(res, "description"));
3521
3522 if (comment && *comment && !dopt->no_comments)
3523 {
3525
3526 /*
3527 * Generates warning when loaded into a differently-named
3528 * database.
3529 */
3530 appendPQExpBuffer(dbQry, "COMMENT ON DATABASE %s IS ", qdatname);
3533
3535 ARCHIVE_OPTS(.tag = labelq->data,
3536 .owner = dba,
3537 .description = "COMMENT",
3538 .section = SECTION_NONE,
3539 .createStmt = dbQry->data,
3540 .deps = &dbDumpId,
3541 .nDeps = 1));
3542 }
3543 }
3544
3545 /* Dump DB security label, if enabled */
3546 if (!dopt->no_security_labels)
3547 {
3548 PGresult *shres;
3550
3552
3553 buildShSecLabelQuery("pg_database", dbCatId.oid, seclabelQry);
3557 if (seclabelQry->len > 0)
3559 ARCHIVE_OPTS(.tag = labelq->data,
3560 .owner = dba,
3561 .description = "SECURITY LABEL",
3562 .section = SECTION_NONE,
3563 .createStmt = seclabelQry->data,
3564 .deps = &dbDumpId,
3565 .nDeps = 1));
3567 PQclear(shres);
3568 }
3569
3570 /*
3571 * Dump ACL if any. Note that we do not support initial privileges
3572 * (pg_init_privs) on databases.
3573 */
3574 dbdacl.privtype = 0;
3575 dbdacl.initprivs = NULL;
3576
3577 dumpACL(fout, dbDumpId, InvalidDumpId, "DATABASE",
3578 qdatname, NULL, NULL,
3579 NULL, dba, &dbdacl);
3580
3581 /*
3582 * Now construct a DATABASE PROPERTIES archive entry to restore any
3583 * non-default database-level properties. (The reason this must be
3584 * separate is that we cannot put any additional commands into the TOC
3585 * entry that has CREATE DATABASE. pg_restore would execute such a group
3586 * in an implicit transaction block, and the backend won't allow CREATE
3587 * DATABASE in that context.)
3588 */
3591
3592 if (strlen(datconnlimit) > 0 && strcmp(datconnlimit, "-1") != 0)
3593 appendPQExpBuffer(creaQry, "ALTER DATABASE %s CONNECTION LIMIT = %s;\n",
3595
3596 if (strcmp(datistemplate, "t") == 0)
3597 {
3598 appendPQExpBuffer(creaQry, "ALTER DATABASE %s IS_TEMPLATE = true;\n",
3599 qdatname);
3600
3601 /*
3602 * The backend won't accept DROP DATABASE on a template database. We
3603 * can deal with that by removing the template marking before the DROP
3604 * gets issued. We'd prefer to use ALTER DATABASE IF EXISTS here, but
3605 * since no such command is currently supported, fake it with a direct
3606 * UPDATE on pg_database.
3607 */
3608 appendPQExpBufferStr(delQry, "UPDATE pg_catalog.pg_database "
3609 "SET datistemplate = false WHERE datname = ");
3612 }
3613
3614 /*
3615 * We do not restore pg_database.dathasloginevt because it is set
3616 * automatically on login event trigger creation.
3617 */
3618
3619 /* Add database-specific SET options */
3621
3622 /*
3623 * We stick this binary-upgrade query into the DATABASE PROPERTIES archive
3624 * entry, too, for lack of a better place.
3625 */
3626 if (dopt->binary_upgrade)
3627 {
3628 appendPQExpBufferStr(creaQry, "\n-- For binary upgrade, set datfrozenxid and datminmxid.\n");
3629 appendPQExpBuffer(creaQry, "UPDATE pg_catalog.pg_database\n"
3630 "SET datfrozenxid = '%u', datminmxid = '%u'\n"
3631 "WHERE datname = ",
3632 frozenxid, minmxid);
3635 }
3636
3637 if (creaQry->len > 0)
3639 ARCHIVE_OPTS(.tag = datname,
3640 .owner = dba,
3641 .description = "DATABASE PROPERTIES",
3642 .section = SECTION_PRE_DATA,
3643 .createStmt = creaQry->data,
3644 .dropStmt = delQry->data,
3645 .deps = &dbDumpId));
3646
3647 /*
3648 * pg_largeobject comes from the old system intact, so set its
3649 * relfrozenxids, relminmxids and relfilenode.
3650 *
3651 * pg_largeobject_metadata also comes from the old system intact for
3652 * upgrades from v16 and newer, so set its relfrozenxids, relminmxids, and
3653 * relfilenode, too. pg_upgrade can't copy/link the files from older
3654 * versions because aclitem (needed by pg_largeobject_metadata.lomacl)
3655 * changed its storage format in v16.
3656 */
3657 if (dopt->binary_upgrade)
3658 {
3665 int ii_relfrozenxid,
3667 ii_oid,
3669
3670 if (fout->remoteVersion >= 90300)
3671 appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid, relfilenode, oid\n"
3672 "FROM pg_catalog.pg_class\n"
3673 "WHERE oid IN (%u, %u, %u, %u);\n",
3676 else
3677 appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid, relfilenode, oid\n"
3678 "FROM pg_catalog.pg_class\n"
3679 "WHERE oid IN (%u, %u);\n",
3681
3683
3684 ii_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
3685 ii_relminmxid = PQfnumber(lo_res, "relminmxid");
3686 ii_relfilenode = PQfnumber(lo_res, "relfilenode");
3687 ii_oid = PQfnumber(lo_res, "oid");
3688
3689 appendPQExpBufferStr(loHorizonQry, "\n-- For binary upgrade, set pg_largeobject relfrozenxid and relminmxid\n");
3690 appendPQExpBufferStr(lomHorizonQry, "\n-- For binary upgrade, set pg_largeobject_metadata relfrozenxid and relminmxid\n");
3691 appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, preserve pg_largeobject and index relfilenodes\n");
3692 appendPQExpBufferStr(lomOutQry, "\n-- For binary upgrade, preserve pg_largeobject_metadata and index relfilenodes\n");
3693 for (int i = 0; i < PQntuples(lo_res); ++i)
3694 {
3695 Oid oid;
3696 RelFileNumber relfilenumber;
3699
3700 oid = atooid(PQgetvalue(lo_res, i, ii_oid));
3701 relfilenumber = atooid(PQgetvalue(lo_res, i, ii_relfilenode));
3702
/* Route each row to the pg_largeobject or pg_largeobject_metadata buffers */
3703 if (oid == LargeObjectRelationId ||
3705 {
3707 outQry = loOutQry;
3708 }
3709 else
3710 {
3712 outQry = lomOutQry;
3713 }
3714
3715 appendPQExpBuffer(horizonQry, "UPDATE pg_catalog.pg_class\n"
3716 "SET relfrozenxid = '%u', relminmxid = '%u'\n"
3717 "WHERE oid = %u;\n",
3721
3722 if (oid == LargeObjectRelationId ||
3725 "SELECT pg_catalog.binary_upgrade_set_next_heap_relfilenode('%u'::pg_catalog.oid);\n",
3726 relfilenumber);
3727 else if (oid == LargeObjectLOidPNIndexId ||
3730 "SELECT pg_catalog.binary_upgrade_set_next_index_relfilenode('%u'::pg_catalog.oid);\n",
3731 relfilenumber);
3732 }
3733
3735 "TRUNCATE pg_catalog.pg_largeobject;\n");
3737 "TRUNCATE pg_catalog.pg_largeobject_metadata;\n");
3738
3741
3743 ARCHIVE_OPTS(.tag = "pg_largeobject",
3744 .description = "pg_largeobject",
3745 .section = SECTION_PRE_DATA,
3746 .createStmt = loOutQry->data));
3747
3748 if (fout->remoteVersion >= 160000)
3750 ARCHIVE_OPTS(.tag = "pg_largeobject_metadata",
3751 .description = "pg_largeobject_metadata",
3752 .section = SECTION_PRE_DATA,
3753 .createStmt = lomOutQry->data));
3754
3755 PQclear(lo_res);
3756
3762 }
3763
3764 PQclear(res);
3765
3766 free(qdatname);
3771 }
3772
3773/*
3774 * Collect any database-specific or role-and-database-specific SET options
3775 * for this database, and append them to outbuf.
3776 */
3777static void
3779 const char *dbname, Oid dboid)
3780{
3781 PGconn *conn = GetConnection(AH);
3783 PGresult *res;
3784
3785 /* First collect database-specific options */
3786 printfPQExpBuffer(buf, "SELECT unnest(setconfig) FROM pg_db_role_setting "
3787 "WHERE setrole = 0 AND setdatabase = '%u'::oid",
3788 dboid);
3789
3790 res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);
3791
3792 for (int i = 0; i < PQntuples(res); i++)
3794 "DATABASE", dbname, NULL, NULL,
3795 outbuf);
3796
3797 PQclear(res);
3798
3799 /* Now look for role-and-database-specific options */
3800 printfPQExpBuffer(buf, "SELECT rolname, unnest(setconfig) "
3801 "FROM pg_db_role_setting s, pg_roles r "
3802 "WHERE setrole = r.oid AND setdatabase = '%u'::oid",
3803 dboid);
3804
3805 res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);
3806
3807 for (int i = 0; i < PQntuples(res); i++)
3809 "ROLE", PQgetvalue(res, i, 0),
3810 "DATABASE", dbname,
3811 outbuf);
3812
3813 PQclear(res);
3814
3816}
3817
3818/*
3819 * dumpEncoding: put the correct encoding into the archive
3820 */
3821static void
3823{
3824 const char *encname = pg_encoding_to_char(AH->encoding);
3826
3827 pg_log_info("saving encoding = %s", encname);
3828
3829 appendPQExpBufferStr(qry, "SET client_encoding = ");
3831 appendPQExpBufferStr(qry, ";\n");
3832
3834 ARCHIVE_OPTS(.tag = "ENCODING",
3835 .description = "ENCODING",
3836 .section = SECTION_PRE_DATA,
3837 .createStmt = qry->data));
3838
3839 destroyPQExpBuffer(qry);
3840}
3841
3842
3843/*
3844 * dumpStdStrings: put the correct escape string behavior into the archive
3845 */
3846static void
3848{
3849 const char *stdstrings = AH->std_strings ? "on" : "off";
3851
3852 pg_log_info("saving \"standard_conforming_strings = %s\"",
3853 stdstrings);
3854
3855 appendPQExpBuffer(qry, "SET standard_conforming_strings = '%s';\n",
3856 stdstrings);
3857
3859 ARCHIVE_OPTS(.tag = "STDSTRINGS",
3860 .description = "STDSTRINGS",
3861 .section = SECTION_PRE_DATA,
3862 .createStmt = qry->data));
3863
3864 destroyPQExpBuffer(qry);
3865}
3866
3867/*
3868 * dumpSearchPath: record the active search_path in the archive
3869 */
3870static void
3872{
3875 PGresult *res;
3876 char **schemanames = NULL;
3877 int nschemanames = 0;
3878 int i;
3879
3880 /*
3881 * We use the result of current_schemas(), not the search_path GUC,
3882 * because that might contain wildcards such as "$user", which won't
3883 * necessarily have the same value during restore. Also, this way avoids
3884 * listing schemas that may appear in search_path but not actually exist,
3885 * which seems like a prudent exclusion.
3886 */
3888 "SELECT pg_catalog.current_schemas(false)");
3889
3890 if (!parsePGArray(PQgetvalue(res, 0, 0), &schemanames, &nschemanames))
3891 pg_fatal("could not parse result of current_schemas()");
3892
3893 /*
3894 * We use set_config(), not a simple "SET search_path" command, because
3895 * the latter has less-clean behavior if the search path is empty. While
3896 * that's likely to get fixed at some point, it seems like a good idea to
3897 * be as backwards-compatible as possible in what we put into archives.
3898 */
3899 for (i = 0; i < nschemanames; i++)
3900 {
3901 if (i > 0)
3902 appendPQExpBufferStr(path, ", ");
3904 }
3905
3906 appendPQExpBufferStr(qry, "SELECT pg_catalog.set_config('search_path', ");
3907 appendStringLiteralAH(qry, path->data, AH);
3908 appendPQExpBufferStr(qry, ", false);\n");
3909
3910 pg_log_info("saving \"search_path = %s\"", path->data);
3911
3913 ARCHIVE_OPTS(.tag = "SEARCHPATH",
3914 .description = "SEARCHPATH",
3915 .section = SECTION_PRE_DATA,
3916 .createStmt = qry->data));
3917
3918 /* Also save it in AH->searchpath, in case we're doing plain text dump */
3919 AH->searchpath = pg_strdup(qry->data);
3920
3922 PQclear(res);
3923 destroyPQExpBuffer(qry);
3924 destroyPQExpBuffer(path);
3925}
3926
3927
3928/*
3929 * getLOs:
3930 * Collect schema-level data about large objects
3931 */
/*
 * NOTE(review): extraction listing with embedded original line numbers;
 * gaps in the numbering are source lines dropped by the extraction (e.g.
 * the signature on line 3933, the query-buffer setup on 3952, the LoInfo
 * initialization on 4014). Tokens below are preserved exactly as found.
 */
3932 static void
3934 {
3935 DumpOptions *dopt = fout->dopt;
3937 PGresult *res;
3938 int ntups;
3939 int i;
3940 int n;
3941 int i_oid;
3942 int i_lomowner;
3943 int i_lomacl;
3944 int i_acldefault;
3945
3946 pg_log_info("reading large objects");
3947
3948 /*
3949 * Fetch LO OIDs and owner/ACL data. Order the data so that all the blobs
3950 * with the same owner/ACL appear together.
3951 */
3953 "SELECT oid, lomowner, lomacl, "
3954 "acldefault('L', lomowner) AS acldefault "
3955 "FROM pg_largeobject_metadata ");
3956
3957 /*
3958 * For binary upgrades, we transfer pg_largeobject_metadata via COPY or by
3959 * copying/linking its files from the old cluster. On such upgrades, we
3960 * only need to consider large objects that have comments or security
3961 * labels, since we still restore those objects via COMMENT/SECURITY LABEL
3962 * commands.
3963 */
3964 if (dopt->binary_upgrade)
3966 "WHERE oid IN "
3967 "(SELECT objoid FROM pg_description "
3968 "WHERE classoid = " CppAsString2(LargeObjectRelationId) " "
3969 "UNION SELECT objoid FROM pg_seclabel "
3970 "WHERE classoid = " CppAsString2(LargeObjectRelationId) ") ");
3971
3973 "ORDER BY lomowner, lomacl::pg_catalog.text, oid");
3974
3976
3977 i_oid = PQfnumber(res, "oid");
3978 i_lomowner = PQfnumber(res, "lomowner");
3979 i_lomacl = PQfnumber(res, "lomacl");
3980 i_acldefault = PQfnumber(res, "acldefault");
3981
3982 ntups = PQntuples(res);
3983
3984 /*
3985 * Group the blobs into suitably-sized groups that have the same owner and
3986 * ACL setting, and build a metadata and a data DumpableObject for each
3987 * group. (If we supported initprivs for blobs, we'd have to insist that
3988 * groups also share initprivs settings, since the DumpableObject only has
3989 * room for one.) i is the index of the first tuple in the current group,
3990 * and n is the number of tuples we include in the group.
3991 */
3992 for (i = 0; i < ntups; i += n)
3993 {
3994 Oid thisoid = atooid(PQgetvalue(res, i, i_oid));
3995 char *thisowner = PQgetvalue(res, i, i_lomowner);
3996 char *thisacl = PQgetvalue(res, i, i_lomacl);
3997 LoInfo *loinfo;
3999 char namebuf[64];
4000
4001 /* Scan to find first tuple not to be included in group */
4002 n = 1;
4003 while (n < MAX_BLOBS_PER_ARCHIVE_ENTRY && i + n < ntups)
4004 {
4005 if (strcmp(thisowner, PQgetvalue(res, i + n, i_lomowner)) != 0 ||
4006 strcmp(thisacl, PQgetvalue(res, i + n, i_lomacl)) != 0)
4007 break;
4008 n++;
4009 }
4010
4011 /* Build the metadata DumpableObject */
4012 loinfo = (LoInfo *) pg_malloc(offsetof(LoInfo, looids) + n * sizeof(Oid));
4013
4015 loinfo->dobj.catId.tableoid = LargeObjectRelationId;
4016 loinfo->dobj.catId.oid = thisoid;
4017 AssignDumpId(&loinfo->dobj);
4018
/* Name is "firstOID..lastOID" for a multi-blob group, else just the OID */
4019 if (n > 1)
4020 snprintf(namebuf, sizeof(namebuf), "%u..%u", thisoid,
4021 atooid(PQgetvalue(res, i + n - 1, i_oid)));
4022 else
4023 snprintf(namebuf, sizeof(namebuf), "%u", thisoid);
4024 loinfo->dobj.name = pg_strdup(namebuf);
4025 loinfo->dacl.acl = pg_strdup(thisacl);
4026 loinfo->dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
4027 loinfo->dacl.privtype = 0;
4028 loinfo->dacl.initprivs = NULL;
4029 loinfo->rolname = getRoleName(thisowner);
4030 loinfo->numlos = n;
4031 loinfo->looids[0] = thisoid;
4032 /* Collect OIDs of the remaining blobs in this group */
4033 for (int k = 1; k < n; k++)
4034 {
4036
4037 loinfo->looids[k] = atooid(PQgetvalue(res, i + k, i_oid));
4038
4039 /* Make sure we can look up loinfo by any of the blobs' OIDs */
4040 extraID.tableoid = LargeObjectRelationId;
4041 extraID.oid = loinfo->looids[k];
4043 }
4044
4045 /* LOs have data */
4046 loinfo->dobj.components |= DUMP_COMPONENT_DATA;
4047
4048 /* Mark whether LO group has a non-empty ACL */
4049 if (!PQgetisnull(res, i, i_lomacl))
4050 loinfo->dobj.components |= DUMP_COMPONENT_ACL;
4051
4052 /*
4053 * In binary upgrade mode, pg_largeobject and pg_largeobject_metadata
4054 * are transferred via COPY or by copying/linking the files from the
4055 * old cluster. Thus, we do not need to dump LO data, definitions, or
4056 * ACLs.
4057 */
4058 if (dopt->binary_upgrade)
4060
4061 /*
4062 * Create a "BLOBS" data item for the group, too. This is just a
4063 * placeholder for sorting; it carries no data now.
4064 */
4066 lodata->objType = DO_LARGE_OBJECT_DATA;
4067 lodata->catId = nilCatalogId;
4069 lodata->name = pg_strdup(namebuf);
4070 lodata->components |= DUMP_COMPONENT_DATA;
4071 /* Set up explicit dependency from data to metadata */
4072 lodata->dependencies = pg_malloc_object(DumpId);
4073 lodata->dependencies[0] = loinfo->dobj.dumpId;
4074 lodata->nDeps = lodata->allocDeps = 1;
4075 }
4076
4077 PQclear(res);
4079 }
4080
4081/*
4082 * dumpLO
4083 *
4084 * dump the definition (metadata) of the given large object group
4085 */
4086static void
4088{
4090
4091 /*
4092 * The "definition" is just a newline-separated list of OIDs. We need to
4093 * put something into the dropStmt too, but it can just be a comment.
4094 */
4095 for (int i = 0; i < loinfo->numlos; i++)
4096 appendPQExpBuffer(cquery, "%u\n", loinfo->looids[i]);
4097
4098 if (loinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
4099 ArchiveEntry(fout, loinfo->dobj.catId, loinfo->dobj.dumpId,
4100 ARCHIVE_OPTS(.tag = loinfo->dobj.name,
4101 .owner = loinfo->rolname,
4102 .description = "BLOB METADATA",
4103 .section = SECTION_DATA,
4104 .createStmt = cquery->data,
4105 .dropStmt = "-- dummy"));
4106
4107 /*
4108 * Dump per-blob comments and seclabels if any. We assume these are rare
4109 * enough that it's okay to generate retail TOC entries for them.
4110 */
4111 if (loinfo->dobj.dump & (DUMP_COMPONENT_COMMENT |
4113 {
4114 for (int i = 0; i < loinfo->numlos; i++)
4115 {
4116 CatalogId catId;
4117 char namebuf[32];
4118
4119 /* Build identifying info for this blob */
4120 catId.tableoid = loinfo->dobj.catId.tableoid;
4121 catId.oid = loinfo->looids[i];
4122 snprintf(namebuf, sizeof(namebuf), "%u", loinfo->looids[i]);
4123
4124 if (loinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
4125 dumpComment(fout, "LARGE OBJECT", namebuf,
4126 NULL, loinfo->rolname,
4127 catId, 0, loinfo->dobj.dumpId);
4128
4129 if (loinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
4130 dumpSecLabel(fout, "LARGE OBJECT", namebuf,
4131 NULL, loinfo->rolname,
4132 catId, 0, loinfo->dobj.dumpId);
4133 }
4134 }
4135
4136 /*
4137 * Dump the ACLs if any (remember that all blobs in the group will have
4138 * the same ACL). If there's just one blob, dump a simple ACL entry; if
4139 * there's more, make a "LARGE OBJECTS" entry that really contains only
4140 * the ACL for the first blob. _printTocEntry() will be cued by the tag
4141 * string to emit a mutated version for each blob.
4142 */
4143 if (loinfo->dobj.dump & DUMP_COMPONENT_ACL)
4144 {
4145 char namebuf[32];
4146
4147 /* Build identifying info for the first blob */
4148 snprintf(namebuf, sizeof(namebuf), "%u", loinfo->looids[0]);
4149
4150 if (loinfo->numlos > 1)
4151 {
4152 char tagbuf[64];
4153
4154 snprintf(tagbuf, sizeof(tagbuf), "LARGE OBJECTS %u..%u",
4155 loinfo->looids[0], loinfo->looids[loinfo->numlos - 1]);
4156
4157 dumpACL(fout, loinfo->dobj.dumpId, InvalidDumpId,
4158 "LARGE OBJECT", namebuf, NULL, NULL,
4159 tagbuf, loinfo->rolname, &loinfo->dacl);
4160 }
4161 else
4162 {
4163 dumpACL(fout, loinfo->dobj.dumpId, InvalidDumpId,
4164 "LARGE OBJECT", namebuf, NULL, NULL,
4165 NULL, loinfo->rolname, &loinfo->dacl);
4166 }
4167 }
4168
4170}
4171
4172/*
4173 * dumpLOs:
4174 * dump the data contents of the large objects in the given group
4175 */
4176static int
4177dumpLOs(Archive *fout, const void *arg)
4178{
4179 const LoInfo *loinfo = (const LoInfo *) arg;
4181 char buf[LOBBUFSIZE];
4182
4183 pg_log_info("saving large objects \"%s\"", loinfo->dobj.name);
4184
4185 for (int i = 0; i < loinfo->numlos; i++)
4186 {
4187 Oid loOid = loinfo->looids[i];
4188 int loFd;
4189 int cnt;
4190
4191 /* Open the LO */
4192 loFd = lo_open(conn, loOid, INV_READ);
4193 if (loFd == -1)
4194 pg_fatal("could not open large object %u: %s",
4196
4197 StartLO(fout, loOid);
4198
4199 /* Now read it in chunks, sending data to archive */
4200 do
4201 {
4202 cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
4203 if (cnt < 0)
4204 pg_fatal("error reading large object %u: %s",
4206
4207 WriteData(fout, buf, cnt);
4208 } while (cnt > 0);
4209
4210 lo_close(conn, loFd);
4211
4212 EndLO(fout, loOid);
4213 }
4214
4215 return 1;
4216}
4217
4218/*
4219 * getPolicies
4220 * get information about all RLS policies on dumpable tables.
4221 */
/*
 * NOTE(review): extraction listing with embedded original line numbers;
 * gaps in the numbering are source lines dropped by the extraction (e.g.
 * the signature on line 4223 and the tbloids buffer setup on 4252-4253).
 * Tokens below are preserved exactly as found.
 */
4222 void
4224 {
4225 DumpOptions *dopt = fout->dopt;
4226 PQExpBuffer query;
4228 PGresult *res;
4230 int i_oid;
4231 int i_tableoid;
4232 int i_polrelid;
4233 int i_polname;
4234 int i_polcmd;
4235 int i_polpermissive;
4236 int i_polroles;
4237 int i_polqual;
4238 int i_polwithcheck;
4239 int i,
4240 j,
4241 ntups;
4242
4243 /* No policies before 9.5 */
4244 if (fout->remoteVersion < 90500)
4245 return;
4246
4247 /* Skip if --no-policies was specified */
4248 if (dopt->no_policies)
4249 return;
4250
4251 query = createPQExpBuffer();
4253
4254 /*
4255 * Identify tables of interest, and check which ones have RLS enabled.
4256 */
4258 for (i = 0; i < numTables; i++)
4259 {
4260 TableInfo *tbinfo = &tblinfo[i];
4261
4262 /* Ignore row security on tables not to be dumped */
4263 if (!(tbinfo->dobj.dump & DUMP_COMPONENT_POLICY))
4264 continue;
4265
4266 /* It can't have RLS or policies if it's not a table */
4267 if (tbinfo->relkind != RELKIND_RELATION &&
4269 continue;
4270
4271 /* Add it to the list of table OIDs to be probed below */
4272 if (tbloids->len > 1) /* do we have more than the '{'? */
4274 appendPQExpBuffer(tbloids, "%u", tbinfo->dobj.catId.oid);
4275
4276 /* Is RLS enabled? (That's separate from whether it has policies) */
4277 if (tbinfo->rowsec)
4278 {
4279 tbinfo->dobj.components |= DUMP_COMPONENT_POLICY;
4280
4281 /*
4282 * We represent RLS being enabled on a table by creating a
4283 * PolicyInfo object with null polname.
4284 *
4285 * Note: use tableoid 0 so that this object won't be mistaken for
4286 * something that pg_depend entries apply to.
4287 */
4289 polinfo->dobj.objType = DO_POLICY;
4290 polinfo->dobj.catId.tableoid = 0;
4291 polinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
4292 AssignDumpId(&polinfo->dobj);
4293 polinfo->dobj.namespace = tbinfo->dobj.namespace;
4294 polinfo->dobj.name = pg_strdup(tbinfo->dobj.name);
4295 polinfo->poltable = tbinfo;
4296 polinfo->polname = NULL;
4297 polinfo->polcmd = '\0';
4298 polinfo->polpermissive = 0;
4299 polinfo->polroles = NULL;
4300 polinfo->polqual = NULL;
4301 polinfo->polwithcheck = NULL;
4302 }
4303 }
4305
4306 /*
4307 * Now, read all RLS policies belonging to the tables of interest, and
4308 * create PolicyInfo objects for them. (Note that we must filter the
4309 * results server-side not locally, because we dare not apply pg_get_expr
4310 * to tables we don't have lock on.)
4311 */
4312 pg_log_info("reading row-level security policies");
4313
4314 printfPQExpBuffer(query,
4315 "SELECT pol.oid, pol.tableoid, pol.polrelid, pol.polname, pol.polcmd, ");
4316 if (fout->remoteVersion >= 100000)
4317 appendPQExpBufferStr(query, "pol.polpermissive, ");
4318 else
4319 appendPQExpBufferStr(query, "'t' as polpermissive, ");
4320 appendPQExpBuffer(query,
4321 "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
4322 " pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
4323 "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
4324 "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
4325 "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
4326 "JOIN pg_catalog.pg_policy pol ON (src.tbloid = pol.polrelid)",
4327 tbloids->data);
4328
4329 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4330
4331 ntups = PQntuples(res);
4332 if (ntups > 0)
4333 {
4334 i_oid = PQfnumber(res, "oid");
4335 i_tableoid = PQfnumber(res, "tableoid");
4336 i_polrelid = PQfnumber(res, "polrelid");
4337 i_polname = PQfnumber(res, "polname");
4338 i_polcmd = PQfnumber(res, "polcmd");
4339 i_polpermissive = PQfnumber(res, "polpermissive");
4340 i_polroles = PQfnumber(res, "polroles");
4341 i_polqual = PQfnumber(res, "polqual");
4342 i_polwithcheck = PQfnumber(res, "polwithcheck");
4343
4345
/* Build one PolicyInfo per pg_policy row fetched above */
4346 for (j = 0; j < ntups; j++)
4347 {
4350
4351 tbinfo->dobj.components |= DUMP_COMPONENT_POLICY;
4352
4353 polinfo[j].dobj.objType = DO_POLICY;
4354 polinfo[j].dobj.catId.tableoid =
4355 atooid(PQgetvalue(res, j, i_tableoid));
4356 polinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
4357 AssignDumpId(&polinfo[j].dobj);
4358 polinfo[j].dobj.namespace = tbinfo->dobj.namespace;
4359 polinfo[j].poltable = tbinfo;
4360 polinfo[j].polname = pg_strdup(PQgetvalue(res, j, i_polname));
4361 polinfo[j].dobj.name = pg_strdup(polinfo[j].polname);
4362
4363 polinfo[j].polcmd = *(PQgetvalue(res, j, i_polcmd));
4364 polinfo[j].polpermissive = *(PQgetvalue(res, j, i_polpermissive)) == 't';
4365
4366 if (PQgetisnull(res, j, i_polroles))
4367 polinfo[j].polroles = NULL;
4368 else
4369 polinfo[j].polroles = pg_strdup(PQgetvalue(res, j, i_polroles));
4370
4371 if (PQgetisnull(res, j, i_polqual))
4372 polinfo[j].polqual = NULL;
4373 else
4374 polinfo[j].polqual = pg_strdup(PQgetvalue(res, j, i_polqual));
4375
4376 if (PQgetisnull(res, j, i_polwithcheck))
4377 polinfo[j].polwithcheck = NULL;
4378 else
4379 polinfo[j].polwithcheck
4381 }
4382 }
4383
4384 PQclear(res);
4385
4386 destroyPQExpBuffer(query);
4388 }
4389
4390/*
4391 * dumpPolicy
4392 * dump the definition of the given policy
4393 */
4394static void
4396{
4397 DumpOptions *dopt = fout->dopt;
4398 TableInfo *tbinfo = polinfo->poltable;
4399 PQExpBuffer query;
4402 char *qtabname;
4403 const char *cmd;
4404 char *tag;
4405
4406 /* Do nothing if not dumping schema */
4407 if (!dopt->dumpSchema)
4408 return;
4409
4410 /*
4411 * If polname is NULL, then this record is just indicating that ROW LEVEL
4412 * SECURITY is enabled for the table. Dump as ALTER TABLE <table> ENABLE
4413 * ROW LEVEL SECURITY.
4414 */
4415 if (polinfo->polname == NULL)
4416 {
4417 query = createPQExpBuffer();
4418
4419 appendPQExpBuffer(query, "ALTER TABLE %s ENABLE ROW LEVEL SECURITY;",
4421
4422 /*
4423 * We must emit the ROW SECURITY object's dependency on its table
4424 * explicitly, because it will not match anything in pg_depend (unlike
4425 * the case for other PolicyInfo objects).
4426 */
4427 if (polinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
4428 ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
4429 ARCHIVE_OPTS(.tag = polinfo->dobj.name,
4430 .namespace = polinfo->dobj.namespace->dobj.name,
4431 .owner = tbinfo->rolname,
4432 .description = "ROW SECURITY",
4433 .section = SECTION_POST_DATA,
4434 .createStmt = query->data,
4435 .deps = &(tbinfo->dobj.dumpId),
4436 .nDeps = 1));
4437
4438 destroyPQExpBuffer(query);
4439 return;
4440 }
4441
4442 if (polinfo->polcmd == '*')
4443 cmd = "";
4444 else if (polinfo->polcmd == 'r')
4445 cmd = " FOR SELECT";
4446 else if (polinfo->polcmd == 'a')
4447 cmd = " FOR INSERT";
4448 else if (polinfo->polcmd == 'w')
4449 cmd = " FOR UPDATE";
4450 else if (polinfo->polcmd == 'd')
4451 cmd = " FOR DELETE";
4452 else
4453 pg_fatal("unexpected policy command type: %c",
4454 polinfo->polcmd);
4455
4456 query = createPQExpBuffer();
4459
4460 qtabname = pg_strdup(fmtId(tbinfo->dobj.name));
4461
4462 appendPQExpBuffer(query, "CREATE POLICY %s", fmtId(polinfo->polname));
4463
4464 appendPQExpBuffer(query, " ON %s%s%s", fmtQualifiedDumpable(tbinfo),
4465 !polinfo->polpermissive ? " AS RESTRICTIVE" : "", cmd);
4466
4467 if (polinfo->polroles != NULL)
4468 appendPQExpBuffer(query, " TO %s", polinfo->polroles);
4469
4470 if (polinfo->polqual != NULL)
4471 appendPQExpBuffer(query, " USING (%s)", polinfo->polqual);
4472
4473 if (polinfo->polwithcheck != NULL)
4474 appendPQExpBuffer(query, " WITH CHECK (%s)", polinfo->polwithcheck);
4475
4476 appendPQExpBufferStr(query, ";\n");
4477
4478 appendPQExpBuffer(delqry, "DROP POLICY %s", fmtId(polinfo->polname));
4480
4481 appendPQExpBuffer(polprefix, "POLICY %s ON",
4482 fmtId(polinfo->polname));
4483
4484 tag = psprintf("%s %s", tbinfo->dobj.name, polinfo->dobj.name);
4485
4486 if (polinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
4487 ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
4488 ARCHIVE_OPTS(.tag = tag,
4489 .namespace = polinfo->dobj.namespace->dobj.name,
4490 .owner = tbinfo->rolname,
4491 .description = "POLICY",
4492 .section = SECTION_POST_DATA,
4493 .createStmt = query->data,
4494 .dropStmt = delqry->data));
4495
4496 if (polinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
4498 tbinfo->dobj.namespace->dobj.name, tbinfo->rolname,
4499 polinfo->dobj.catId, 0, polinfo->dobj.dumpId);
4500
4501 free(tag);
4502 destroyPQExpBuffer(query);
4505 free(qtabname);
4506}
4507
4508/*
4509 * getPublications
4510 * get information about publications
4511 */
4512void
4514{
4515 DumpOptions *dopt = fout->dopt;
4516 PQExpBuffer query;
4517 PGresult *res;
4519 int i_tableoid;
4520 int i_oid;
4521 int i_pubname;
4522 int i_pubowner;
4523 int i_puballtables;
4525 int i_pubinsert;
4526 int i_pubupdate;
4527 int i_pubdelete;
4528 int i_pubtruncate;
4529 int i_pubviaroot;
4530 int i_pubgencols;
4531 int i,
4532 ntups;
4533
4534 if (dopt->no_publications || fout->remoteVersion < 100000)
4535 return;
4536
4537 query = createPQExpBuffer();
4538
4539 /* Get the publications. */
4540 appendPQExpBufferStr(query, "SELECT p.tableoid, p.oid, p.pubname, "
4541 "p.pubowner, p.puballtables, p.pubinsert, "
4542 "p.pubupdate, p.pubdelete, ");
4543
4544 if (fout->remoteVersion >= 110000)
4545 appendPQExpBufferStr(query, "p.pubtruncate, ");
4546 else
4547 appendPQExpBufferStr(query, "false AS pubtruncate, ");
4548
4549 if (fout->remoteVersion >= 130000)
4550 appendPQExpBufferStr(query, "p.pubviaroot, ");
4551 else
4552 appendPQExpBufferStr(query, "false AS pubviaroot, ");
4553
4554 if (fout->remoteVersion >= 180000)
4555 appendPQExpBufferStr(query, "p.pubgencols, ");
4556 else
4557 appendPQExpBuffer(query, "'%c' AS pubgencols, ", PUBLISH_GENCOLS_NONE);
4558
4559 if (fout->remoteVersion >= 190000)
4560 appendPQExpBufferStr(query, "p.puballsequences ");
4561 else
4562 appendPQExpBufferStr(query, "false AS puballsequences ");
4563
4564 appendPQExpBufferStr(query, "FROM pg_publication p");
4565
4566 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4567
4568 ntups = PQntuples(res);
4569
4570 if (ntups == 0)
4571 goto cleanup;
4572
4573 i_tableoid = PQfnumber(res, "tableoid");
4574 i_oid = PQfnumber(res, "oid");
4575 i_pubname = PQfnumber(res, "pubname");
4576 i_pubowner = PQfnumber(res, "pubowner");
4577 i_puballtables = PQfnumber(res, "puballtables");
4578 i_puballsequences = PQfnumber(res, "puballsequences");
4579 i_pubinsert = PQfnumber(res, "pubinsert");
4580 i_pubupdate = PQfnumber(res, "pubupdate");
4581 i_pubdelete = PQfnumber(res, "pubdelete");
4582 i_pubtruncate = PQfnumber(res, "pubtruncate");
4583 i_pubviaroot = PQfnumber(res, "pubviaroot");
4584 i_pubgencols = PQfnumber(res, "pubgencols");
4585
4587
4588 for (i = 0; i < ntups; i++)
4589 {
4590 pubinfo[i].dobj.objType = DO_PUBLICATION;
4591 pubinfo[i].dobj.catId.tableoid =
4592 atooid(PQgetvalue(res, i, i_tableoid));
4593 pubinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4594 AssignDumpId(&pubinfo[i].dobj);
4595 pubinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_pubname));
4596 pubinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_pubowner));
4597 pubinfo[i].puballtables =
4598 (strcmp(PQgetvalue(res, i, i_puballtables), "t") == 0);
4599 pubinfo[i].puballsequences =
4600 (strcmp(PQgetvalue(res, i, i_puballsequences), "t") == 0);
4601 pubinfo[i].pubinsert =
4602 (strcmp(PQgetvalue(res, i, i_pubinsert), "t") == 0);
4603 pubinfo[i].pubupdate =
4604 (strcmp(PQgetvalue(res, i, i_pubupdate), "t") == 0);
4605 pubinfo[i].pubdelete =
4606 (strcmp(PQgetvalue(res, i, i_pubdelete), "t") == 0);
4607 pubinfo[i].pubtruncate =
4608 (strcmp(PQgetvalue(res, i, i_pubtruncate), "t") == 0);
4609 pubinfo[i].pubviaroot =
4610 (strcmp(PQgetvalue(res, i, i_pubviaroot), "t") == 0);
4611 pubinfo[i].pubgencols_type =
4612 *(PQgetvalue(res, i, i_pubgencols));
4613 pubinfo[i].except_tables = (SimplePtrList)
4614 {
4615 NULL, NULL
4616 };
4617
4618 /* Decide whether we want to dump it */
4620
4621 /*
4622 * Get the list of tables for publications specified in the EXCEPT
4623 * TABLE clause.
4624 *
4625 * Although individual EXCEPT TABLE entries could be stored in
4626 * PublicationRelInfo, dumpPublicationTable cannot be used to emit
4627 * them, because there is no ALTER PUBLICATION ... ADD command to add
4628 * individual table entries to the EXCEPT TABLE list.
4629 *
4630 * Therefore, the approach is to dump the complete EXCEPT TABLE list
4631 * in a single CREATE PUBLICATION statement. PublicationInfo is used
4632 * to collect this information, which is then emitted by
4633 * dumpPublication().
4634 */
4635 if (fout->remoteVersion >= 190000)
4636 {
4637 int ntbls;
4639
4640 resetPQExpBuffer(query);
4641 appendPQExpBuffer(query,
4642 "SELECT prrelid\n"
4643 "FROM pg_catalog.pg_publication_rel\n"
4644 "WHERE prpubid = %u AND prexcept",
4645 pubinfo[i].dobj.catId.oid);
4646
4648
4650
4651 for (int j = 0; j < ntbls; j++)
4652 {
4653 Oid prrelid;
4655
4657
4659
4660 if (tbinfo != NULL)
4661 simple_ptr_list_append(&pubinfo[i].except_tables, tbinfo);
4662 }
4663
4665 }
4666 }
4667
4668cleanup:
4669 PQclear(res);
4670
4671 destroyPQExpBuffer(query);
4672}
4673
4674/*
4675 * dumpPublication
4676 * dump the definition of the given publication
4677 */
4678static void
4680{
4681 DumpOptions *dopt = fout->dopt;
4683 PQExpBuffer query;
4684 char *qpubname;
4685 bool first = true;
4686
4687 /* Do nothing if not dumping schema */
4688 if (!dopt->dumpSchema)
4689 return;
4690
4692 query = createPQExpBuffer();
4693
4694 qpubname = pg_strdup(fmtId(pubinfo->dobj.name));
4695
4696 appendPQExpBuffer(delq, "DROP PUBLICATION %s;\n",
4697 qpubname);
4698
4699 appendPQExpBuffer(query, "CREATE PUBLICATION %s",
4700 qpubname);
4701
4702 if (pubinfo->puballtables)
4703 {
4704 int n_except = 0;
4705
4706 appendPQExpBufferStr(query, " FOR ALL TABLES");
4707
4708 /* Include EXCEPT TABLE clause if there are except_tables. */
4709 for (SimplePtrListCell *cell = pubinfo->except_tables.head; cell; cell = cell->next)
4710 {
4711 TableInfo *tbinfo = (TableInfo *) cell->ptr;
4712
4713 if (++n_except == 1)
4714 appendPQExpBufferStr(query, " EXCEPT TABLE (");
4715 else
4716 appendPQExpBufferStr(query, ", ");
4717 appendPQExpBuffer(query, "ONLY %s", fmtQualifiedDumpable(tbinfo));
4718 }
4719 if (n_except > 0)
4720 appendPQExpBufferStr(query, ")");
4721
4722 if (pubinfo->puballsequences)
4723 appendPQExpBufferStr(query, ", ALL SEQUENCES");
4724 }
4725 else if (pubinfo->puballsequences)
4726 appendPQExpBufferStr(query, " FOR ALL SEQUENCES");
4727
4728 appendPQExpBufferStr(query, " WITH (publish = '");
4729 if (pubinfo->pubinsert)
4730 {
4731 appendPQExpBufferStr(query, "insert");
4732 first = false;
4733 }
4734
4735 if (pubinfo->pubupdate)
4736 {
4737 if (!first)
4738 appendPQExpBufferStr(query, ", ");
4739
4740 appendPQExpBufferStr(query, "update");
4741 first = false;
4742 }
4743
4744 if (pubinfo->pubdelete)
4745 {
4746 if (!first)
4747 appendPQExpBufferStr(query, ", ");
4748
4749 appendPQExpBufferStr(query, "delete");
4750 first = false;
4751 }
4752
4753 if (pubinfo->pubtruncate)
4754 {
4755 if (!first)
4756 appendPQExpBufferStr(query, ", ");
4757
4758 appendPQExpBufferStr(query, "truncate");
4759 first = false;
4760 }
4761
4762 appendPQExpBufferChar(query, '\'');
4763
4764 if (pubinfo->pubviaroot)
4765 appendPQExpBufferStr(query, ", publish_via_partition_root = true");
4766
4767 if (pubinfo->pubgencols_type == PUBLISH_GENCOLS_STORED)
4768 appendPQExpBufferStr(query, ", publish_generated_columns = stored");
4769
4770 appendPQExpBufferStr(query, ");\n");
4771
4772 if (pubinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
4773 ArchiveEntry(fout, pubinfo->dobj.catId, pubinfo->dobj.dumpId,
4774 ARCHIVE_OPTS(.tag = pubinfo->dobj.name,
4775 .owner = pubinfo->rolname,
4776 .description = "PUBLICATION",
4777 .section = SECTION_POST_DATA,
4778 .createStmt = query->data,
4779 .dropStmt = delq->data));
4780
4781 if (pubinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
4782 dumpComment(fout, "PUBLICATION", qpubname,
4783 NULL, pubinfo->rolname,
4784 pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
4785
4786 if (pubinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
4787 dumpSecLabel(fout, "PUBLICATION", qpubname,
4788 NULL, pubinfo->rolname,
4789 pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
4790
4792 destroyPQExpBuffer(query);
4793 free(qpubname);
4794}
4795
4796/*
4797 * getPublicationNamespaces
4798 * get information about publication membership for dumpable schemas.
4799 */
4800void
4802{
4803 PQExpBuffer query;
4804 PGresult *res;
4806 DumpOptions *dopt = fout->dopt;
4807 int i_tableoid;
4808 int i_oid;
4809 int i_pnpubid;
4810 int i_pnnspid;
4811 int i,
4812 j,
4813 ntups;
4814
4815 if (dopt->no_publications || fout->remoteVersion < 150000)
4816 return;
4817
4818 query = createPQExpBuffer();
4819
4820 /* Collect all publication membership info. */
4822 "SELECT tableoid, oid, pnpubid, pnnspid "
4823 "FROM pg_catalog.pg_publication_namespace");
4824 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4825
4826 ntups = PQntuples(res);
4827
4828 i_tableoid = PQfnumber(res, "tableoid");
4829 i_oid = PQfnumber(res, "oid");
4830 i_pnpubid = PQfnumber(res, "pnpubid");
4831 i_pnnspid = PQfnumber(res, "pnnspid");
4832
4833 /* this allocation may be more than we need */
4835 j = 0;
4836
4837 for (i = 0; i < ntups; i++)
4838 {
4843
4844 /*
4845 * Ignore any entries for which we aren't interested in either the
4846 * publication or the rel.
4847 */
4849 if (pubinfo == NULL)
4850 continue;
4852 if (nspinfo == NULL)
4853 continue;
4854
4855 /* OK, make a DumpableObject for this relationship */
4857 pubsinfo[j].dobj.catId.tableoid =
4858 atooid(PQgetvalue(res, i, i_tableoid));
4859 pubsinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4860 AssignDumpId(&pubsinfo[j].dobj);
4861 pubsinfo[j].dobj.namespace = nspinfo->dobj.namespace;
4862 pubsinfo[j].dobj.name = nspinfo->dobj.name;
4863 pubsinfo[j].publication = pubinfo;
4864 pubsinfo[j].pubschema = nspinfo;
4865
4866 /* Decide whether we want to dump it */
4868
4869 j++;
4870 }
4871
4872 PQclear(res);
4873 destroyPQExpBuffer(query);
4874}
4875
4876/*
4877 * getPublicationTables
4878 * get information about publication membership for dumpable tables.
4879 */
4880void
4882{
4883 PQExpBuffer query;
4884 PGresult *res;
4886 DumpOptions *dopt = fout->dopt;
4887 int i_tableoid;
4888 int i_oid;
4889 int i_prpubid;
4890 int i_prrelid;
4891 int i_prrelqual;
4892 int i_prattrs;
4893 int i,
4894 j,
4895 ntups;
4896
4897 if (dopt->no_publications || fout->remoteVersion < 100000)
4898 return;
4899
4900 query = createPQExpBuffer();
4901
4902 /* Collect all publication membership info. */
4903 if (fout->remoteVersion >= 150000)
4904 {
4906 "SELECT tableoid, oid, prpubid, prrelid, "
4907 "pg_catalog.pg_get_expr(prqual, prrelid) AS prrelqual, "
4908 "(CASE\n"
4909 " WHEN pr.prattrs IS NOT NULL THEN\n"
4910 " (SELECT array_agg(attname)\n"
4911 " FROM\n"
4912 " pg_catalog.generate_series(0, pg_catalog.array_upper(pr.prattrs::pg_catalog.int2[], 1)) s,\n"
4913 " pg_catalog.pg_attribute\n"
4914 " WHERE attrelid = pr.prrelid AND attnum = prattrs[s])\n"
4915 " ELSE NULL END) prattrs "
4916 "FROM pg_catalog.pg_publication_rel pr");
4917 if (fout->remoteVersion >= 190000)
4918 appendPQExpBufferStr(query, " WHERE NOT pr.prexcept");
4919 }
4920 else
4922 "SELECT tableoid, oid, prpubid, prrelid, "
4923 "NULL AS prrelqual, NULL AS prattrs "
4924 "FROM pg_catalog.pg_publication_rel");
4925 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4926
4927 ntups = PQntuples(res);
4928
4929 i_tableoid = PQfnumber(res, "tableoid");
4930 i_oid = PQfnumber(res, "oid");
4931 i_prpubid = PQfnumber(res, "prpubid");
4932 i_prrelid = PQfnumber(res, "prrelid");
4933 i_prrelqual = PQfnumber(res, "prrelqual");
4934 i_prattrs = PQfnumber(res, "prattrs");
4935
4936 /* this allocation may be more than we need */
4938 j = 0;
4939
4940 for (i = 0; i < ntups; i++)
4941 {
4946
4947 /*
4948 * Ignore any entries for which we aren't interested in either the
4949 * publication or the rel.
4950 */
4952 if (pubinfo == NULL)
4953 continue;
4955 if (tbinfo == NULL)
4956 continue;
4957
4958 /* OK, make a DumpableObject for this relationship */
4959 pubrinfo[j].dobj.objType = DO_PUBLICATION_REL;
4960 pubrinfo[j].dobj.catId.tableoid =
4961 atooid(PQgetvalue(res, i, i_tableoid));
4962 pubrinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4963 AssignDumpId(&pubrinfo[j].dobj);
4964 pubrinfo[j].dobj.namespace = tbinfo->dobj.namespace;
4965 pubrinfo[j].dobj.name = tbinfo->dobj.name;
4966 pubrinfo[j].publication = pubinfo;
4967 pubrinfo[j].pubtable = tbinfo;
4968 if (PQgetisnull(res, i, i_prrelqual))
4969 pubrinfo[j].pubrelqual = NULL;
4970 else
4971 pubrinfo[j].pubrelqual = pg_strdup(PQgetvalue(res, i, i_prrelqual));
4972
4973 if (!PQgetisnull(res, i, i_prattrs))
4974 {
4975 char **attnames;
4976 int nattnames;
4978
4979 if (!parsePGArray(PQgetvalue(res, i, i_prattrs),
4980 &attnames, &nattnames))
4981 pg_fatal("could not parse %s array", "prattrs");
4983 for (int k = 0; k < nattnames; k++)
4984 {
4985 if (k > 0)
4987
4988 appendPQExpBufferStr(attribs, fmtId(attnames[k]));
4989 }
4990 pubrinfo[j].pubrattrs = attribs->data;
4991 free(attribs); /* but not attribs->data */
4992 free(attnames);
4993 }
4994 else
4995 pubrinfo[j].pubrattrs = NULL;
4996
4997 /* Decide whether we want to dump it */
4999
5000 j++;
5001 }
5002
5003 PQclear(res);
5004 destroyPQExpBuffer(query);
5005}
5006
5007/*
5008 * dumpPublicationNamespace
5009 * dump the definition of the given publication schema mapping.
5010 */
5011static void
5013{
5014 DumpOptions *dopt = fout->dopt;
5015 NamespaceInfo *schemainfo = pubsinfo->pubschema;
5016 PublicationInfo *pubinfo = pubsinfo->publication;
5017 PQExpBuffer query;
5018 char *tag;
5019
5020 /* Do nothing if not dumping schema */
5021 if (!dopt->dumpSchema)
5022 return;
5023
5024 tag = psprintf("%s %s", pubinfo->dobj.name, schemainfo->dobj.name);
5025
5026 query = createPQExpBuffer();
5027
5028 appendPQExpBuffer(query, "ALTER PUBLICATION %s ", fmtId(pubinfo->dobj.name));
5029 appendPQExpBuffer(query, "ADD TABLES IN SCHEMA %s;\n", fmtId(schemainfo->dobj.name));
5030
5031 /*
5032 * There is no point in creating drop query as the drop is done by schema
5033 * drop.
5034 */
5035 if (pubsinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
5036 ArchiveEntry(fout, pubsinfo->dobj.catId, pubsinfo->dobj.dumpId,
5037 ARCHIVE_OPTS(.tag = tag,
5038 .namespace = schemainfo->dobj.name,
5039 .owner = pubinfo->rolname,
5040 .description = "PUBLICATION TABLES IN SCHEMA",
5041 .section = SECTION_POST_DATA,
5042 .createStmt = query->data));
5043
5044 /* These objects can't currently have comments or seclabels */
5045
5046 free(tag);
5047 destroyPQExpBuffer(query);
5048}
5049
5050/*
5051 * dumpPublicationTable
5052 * dump the definition of the given publication table mapping
5053 */
5054static void
5056{
5057 DumpOptions *dopt = fout->dopt;
5058 PublicationInfo *pubinfo = pubrinfo->publication;
5059 TableInfo *tbinfo = pubrinfo->pubtable;
5060 PQExpBuffer query;
5061 char *tag;
5062
5063 /* Do nothing if not dumping schema */
5064 if (!dopt->dumpSchema)
5065 return;
5066
5067 tag = psprintf("%s %s", pubinfo->dobj.name, tbinfo->dobj.name);
5068
5069 query = createPQExpBuffer();
5070
5071 appendPQExpBuffer(query, "ALTER PUBLICATION %s ADD TABLE ONLY",
5072 fmtId(pubinfo->dobj.name));
5073 appendPQExpBuffer(query, " %s",
5075
5076 if (pubrinfo->pubrattrs)
5077 appendPQExpBuffer(query, " (%s)", pubrinfo->pubrattrs);
5078
5079 if (pubrinfo->pubrelqual)
5080 {
5081 /*
5082 * It's necessary to add parentheses around the expression because
5083 * pg_get_expr won't supply the parentheses for things like WHERE
5084 * TRUE.
5085 */
5086 appendPQExpBuffer(query, " WHERE (%s)", pubrinfo->pubrelqual);
5087 }
5088 appendPQExpBufferStr(query, ";\n");
5089
5090 /*
5091 * There is no point in creating a drop query as the drop is done by table
5092 * drop. (If you think to change this, see also _printTocEntry().)
5093 * Although this object doesn't really have ownership as such, set the
5094 * owner field anyway to ensure that the command is run by the correct
5095 * role at restore time.
5096 */
5097 if (pubrinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
5098 ArchiveEntry(fout, pubrinfo->dobj.catId, pubrinfo->dobj.dumpId,
5099 ARCHIVE_OPTS(.tag = tag,
5100 .namespace = tbinfo->dobj.namespace->dobj.name,
5101 .owner = pubinfo->rolname,
5102 .description = "PUBLICATION TABLE",
5103 .section = SECTION_POST_DATA,
5104 .createStmt = query->data));
5105
5106 /* These objects can't currently have comments or seclabels */
5107
5108 free(tag);
5109 destroyPQExpBuffer(query);
5110}
5111
5112/*
5113 * Is the currently connected user a superuser?
5114 */
5115static bool
5117{
5119 const char *val;
5120
5121 val = PQparameterStatus(AH->connection, "is_superuser");
5122
5123 if (val && strcmp(val, "on") == 0)
5124 return true;
5125
5126 return false;
5127}
5128
5129/*
5130 * Set the given value to restrict_nonsystem_relation_kind value. Since
5131 * restrict_nonsystem_relation_kind is introduced in minor version releases,
5132 * the setting query is effective only where available.
5133 */
5134static void
5136{
5138 PGresult *res;
5139
5140 appendPQExpBuffer(query,
5141 "SELECT set_config(name, '%s', false) "
5142 "FROM pg_settings "
5143 "WHERE name = 'restrict_nonsystem_relation_kind'",
5144 value);
5145 res = ExecuteSqlQuery(AH, query->data, PGRES_TUPLES_OK);
5146
5147 PQclear(res);
5148 destroyPQExpBuffer(query);
5149}
5150
5151/*
5152 * getSubscriptions
5153 * get information about subscriptions
5154 */
5155void
5157{
5158 DumpOptions *dopt = fout->dopt;
5159 PQExpBuffer query;
5160 PGresult *res;
5161 SubscriptionInfo *subinfo;
5162 int i_tableoid;
5163 int i_oid;
5164 int i_subname;
5165 int i_subowner;
5166 int i_subbinary;
5167 int i_substream;
5171 int i_subrunasowner;
5172 int i_subservername;
5173 int i_subconninfo;
5174 int i_subslotname;
5175 int i_subsynccommit;
5178 int i_suborigin;
5180 int i_subenabled;
5181 int i_subfailover;
5184 int i,
5185 ntups;
5186
5187 if (dopt->no_subscriptions || fout->remoteVersion < 100000)
5188 return;
5189
5190 if (!is_superuser(fout))
5191 {
5192 int n;
5193
5194 res = ExecuteSqlQuery(fout,
5195 "SELECT count(*) FROM pg_subscription "
5196 "WHERE subdbid = (SELECT oid FROM pg_database"
5197 " WHERE datname = current_database())",
5199 n = atoi(PQgetvalue(res, 0, 0));
5200 if (n > 0)
5201 pg_log_warning("subscriptions not dumped because current user is not a superuser");
5202 PQclear(res);
5203 return;
5204 }
5205
5206 query = createPQExpBuffer();
5207
5208 /* Get the subscriptions in current database. */
5210 "SELECT s.tableoid, s.oid, s.subname,\n"
5211 " s.subowner,\n"
5212 " s.subconninfo, s.subslotname, s.subsynccommit,\n"
5213 " s.subpublications,\n");
5214
5215 if (fout->remoteVersion >= 140000)
5216 appendPQExpBufferStr(query, " s.subbinary,\n");
5217 else
5218 appendPQExpBufferStr(query, " false AS subbinary,\n");
5219
5220 if (fout->remoteVersion >= 140000)
5221 appendPQExpBufferStr(query, " s.substream,\n");
5222 else
5223 appendPQExpBufferStr(query, " 'f' AS substream,\n");
5224
5225 if (fout->remoteVersion >= 150000)
5227 " s.subtwophasestate,\n"
5228 " s.subdisableonerr,\n");
5229 else
5230 appendPQExpBuffer(query,
5231 " '%c' AS subtwophasestate,\n"
5232 " false AS subdisableonerr,\n",
5234
5235 if (fout->remoteVersion >= 160000)
5237 " s.subpasswordrequired,\n"
5238 " s.subrunasowner,\n"
5239 " s.suborigin,\n");
5240 else
5241 appendPQExpBuffer(query,
5242 " 't' AS subpasswordrequired,\n"
5243 " 't' AS subrunasowner,\n"
5244 " '%s' AS suborigin,\n",
5246
5247 if (dopt->binary_upgrade && fout->remoteVersion >= 170000)
5248 appendPQExpBufferStr(query, " o.remote_lsn AS suboriginremotelsn,\n"
5249 " s.subenabled,\n");
5250 else
5251 appendPQExpBufferStr(query, " NULL AS suboriginremotelsn,\n"
5252 " false AS subenabled,\n");
5253
5254 if (fout->remoteVersion >= 170000)
5256 " s.subfailover,\n");
5257 else
5259 " false AS subfailover,\n");
5260
5261 if (fout->remoteVersion >= 190000)
5263 " s.subretaindeadtuples,\n");
5264 else
5266 " false AS subretaindeadtuples,\n");
5267
5268 if (fout->remoteVersion >= 190000)
5270 " s.submaxretention,\n");
5271 else
5272 appendPQExpBuffer(query,
5273 " 0 AS submaxretention,\n");
5274
5275 if (fout->remoteVersion >= 190000)
5277 " s.subwalrcvtimeout,\n");
5278 else
5280 " '-1' AS subwalrcvtimeout,\n");
5281
5282 if (fout->remoteVersion >= 190000)
5283 appendPQExpBufferStr(query, " fs.srvname AS subservername\n");
5284 else
5285 appendPQExpBufferStr(query, " NULL AS subservername\n");
5286
5288 "FROM pg_subscription s\n");
5289
5290 if (fout->remoteVersion >= 190000)
5292 "LEFT JOIN pg_catalog.pg_foreign_server fs \n"
5293 " ON fs.oid = s.subserver \n");
5294
5295 if (dopt->binary_upgrade && fout->remoteVersion >= 170000)
5297 "LEFT JOIN pg_catalog.pg_replication_origin_status o \n"
5298 " ON o.external_id = 'pg_' || s.oid::text \n");
5299
5301 "WHERE s.subdbid = (SELECT oid FROM pg_database\n"
5302 " WHERE datname = current_database())");
5303
5304 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5305
5306 ntups = PQntuples(res);
5307
5308 /*
5309 * Get subscription fields. We don't include subskiplsn in the dump as
5310 * after restoring the dump this value may no longer be relevant.
5311 */
5312 i_tableoid = PQfnumber(res, "tableoid");
5313 i_oid = PQfnumber(res, "oid");
5314 i_subname = PQfnumber(res, "subname");
5315 i_subowner = PQfnumber(res, "subowner");
5316 i_subenabled = PQfnumber(res, "subenabled");
5317 i_subbinary = PQfnumber(res, "subbinary");
5318 i_substream = PQfnumber(res, "substream");
5319 i_subtwophasestate = PQfnumber(res, "subtwophasestate");
5320 i_subdisableonerr = PQfnumber(res, "subdisableonerr");
5321 i_subpasswordrequired = PQfnumber(res, "subpasswordrequired");
5322 i_subrunasowner = PQfnumber(res, "subrunasowner");
5323 i_subfailover = PQfnumber(res, "subfailover");
5324 i_subretaindeadtuples = PQfnumber(res, "subretaindeadtuples");
5325 i_submaxretention = PQfnumber(res, "submaxretention");
5326 i_subservername = PQfnumber(res, "subservername");
5327 i_subconninfo = PQfnumber(res, "subconninfo");
5328 i_subslotname = PQfnumber(res, "subslotname");
5329 i_subsynccommit = PQfnumber(res, "subsynccommit");
5330 i_subwalrcvtimeout = PQfnumber(res, "subwalrcvtimeout");
5331 i_subpublications = PQfnumber(res, "subpublications");
5332 i_suborigin = PQfnumber(res, "suborigin");
5333 i_suboriginremotelsn = PQfnumber(res, "suboriginremotelsn");
5334
5335 subinfo = pg_malloc_array(SubscriptionInfo, ntups);
5336
5337 for (i = 0; i < ntups; i++)
5338 {
5339 subinfo[i].dobj.objType = DO_SUBSCRIPTION;
5340 subinfo[i].dobj.catId.tableoid =
5341 atooid(PQgetvalue(res, i, i_tableoid));
5342 subinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5343 AssignDumpId(&subinfo[i].dobj);
5344 subinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_subname));
5345 subinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_subowner));
5346
5347 subinfo[i].subenabled =
5348 (strcmp(PQgetvalue(res, i, i_subenabled), "t") == 0);
5349 if (PQgetisnull(res, i, i_subservername))
5350 subinfo[i].subservername = NULL;
5351 else
5353 subinfo[i].subbinary =
5354 (strcmp(PQgetvalue(res, i, i_subbinary), "t") == 0);
5355 subinfo[i].substream = *(PQgetvalue(res, i, i_substream));
5356 subinfo[i].subtwophasestate = *(PQgetvalue(res, i, i_subtwophasestate));
5357 subinfo[i].subdisableonerr =
5358 (strcmp(PQgetvalue(res, i, i_subdisableonerr), "t") == 0);
5359 subinfo[i].subpasswordrequired =
5360 (strcmp(PQgetvalue(res, i, i_subpasswordrequired), "t") == 0);
5361 subinfo[i].subrunasowner =
5362 (strcmp(PQgetvalue(res, i, i_subrunasowner), "t") == 0);
5363 subinfo[i].subfailover =
5364 (strcmp(PQgetvalue(res, i, i_subfailover), "t") == 0);
5365 subinfo[i].subretaindeadtuples =
5366 (strcmp(PQgetvalue(res, i, i_subretaindeadtuples), "t") == 0);
5367 subinfo[i].submaxretention =
5369 if (PQgetisnull(res, i, i_subconninfo))
5370 subinfo[i].subconninfo = NULL;
5371 else
5372 subinfo[i].subconninfo =
5374 if (PQgetisnull(res, i, i_subslotname))
5375 subinfo[i].subslotname = NULL;
5376 else
5377 subinfo[i].subslotname =
5379 subinfo[i].subsynccommit =
5381 subinfo[i].subwalrcvtimeout =
5383 subinfo[i].subpublications =
5385 subinfo[i].suborigin = pg_strdup(PQgetvalue(res, i, i_suborigin));
5387 subinfo[i].suboriginremotelsn = NULL;
5388 else
5389 subinfo[i].suboriginremotelsn =
5391
5392 /* Decide whether we want to dump it */
5393 selectDumpableObject(&(subinfo[i].dobj), fout);
5394 }
5395 PQclear(res);
5396
5397 destroyPQExpBuffer(query);
5398}
5399
5400/*
5401 * getSubscriptionRelations
5402 * Get information about subscription membership for dumpable relations. This
5403 * will be used only in binary-upgrade mode for PG17 or later versions.
5404 */
5405void
5407{
5408 DumpOptions *dopt = fout->dopt;
5409 SubscriptionInfo *subinfo = NULL;
5411 PGresult *res;
5412 int i_srsubid;
5413 int i_srrelid;
5414 int i_srsubstate;
5415 int i_srsublsn;
5416 int ntups;
5418
5419 if (dopt->no_subscriptions || !dopt->binary_upgrade ||
5420 fout->remoteVersion < 170000)
5421 return;
5422
5423 res = ExecuteSqlQuery(fout,
5424 "SELECT srsubid, srrelid, srsubstate, srsublsn "
5425 "FROM pg_catalog.pg_subscription_rel "
5426 "ORDER BY srsubid",
5428 ntups = PQntuples(res);
5429 if (ntups == 0)
5430 goto cleanup;
5431
5432 /* Get pg_subscription_rel attributes */
5433 i_srsubid = PQfnumber(res, "srsubid");
5434 i_srrelid = PQfnumber(res, "srrelid");
5435 i_srsubstate = PQfnumber(res, "srsubstate");
5436 i_srsublsn = PQfnumber(res, "srsublsn");
5437
5439 for (int i = 0; i < ntups; i++)
5440 {
5442 Oid relid = atooid(PQgetvalue(res, i, i_srrelid));
5443 TableInfo *tblinfo;
5444
5445 /*
5446 * If we switched to a new subscription, check if the subscription
5447 * exists.
5448 */
5450 {
5452 if (subinfo == NULL)
5453 pg_fatal("subscription with OID %u does not exist", cur_srsubid);
5454
5456 }
5457
5458 tblinfo = findTableByOid(relid);
5459 if (tblinfo == NULL)
5460 pg_fatal("failed sanity check, relation with OID %u not found",
5461 relid);
5462
5463 /* OK, make a DumpableObject for this relationship */
5464 subrinfo[i].dobj.objType = DO_SUBSCRIPTION_REL;
5465 subrinfo[i].dobj.catId.tableoid = relid;
5466 subrinfo[i].dobj.catId.oid = cur_srsubid;
5467 AssignDumpId(&subrinfo[i].dobj);
5468 subrinfo[i].dobj.namespace = tblinfo->dobj.namespace;
5469 subrinfo[i].dobj.name = tblinfo->dobj.name;
5470 subrinfo[i].subinfo = subinfo;
5471 subrinfo[i].tblinfo = tblinfo;
5472 subrinfo[i].srsubstate = PQgetvalue(res, i, i_srsubstate)[0];
5473 if (PQgetisnull(res, i, i_srsublsn))
5474 subrinfo[i].srsublsn = NULL;
5475 else
5476 subrinfo[i].srsublsn = pg_strdup(PQgetvalue(res, i, i_srsublsn));
5477
5478 /* Decide whether we want to dump it */
5480 }
5481
5482cleanup:
5483 PQclear(res);
5484}
5485
5486/*
5487 * dumpSubscriptionTable
5488 * Dump the definition of the given subscription table mapping. This will be
5489 * used only in binary-upgrade mode for PG17 or later versions.
5490 */
5491static void
5493{
5494 DumpOptions *dopt = fout->dopt;
5495 SubscriptionInfo *subinfo = subrinfo->subinfo;
5496 PQExpBuffer query;
5497 char *tag;
5498
5499 /* Do nothing if not dumping schema */
5500 if (!dopt->dumpSchema)
5501 return;
5502
5504
5505 tag = psprintf("%s %s", subinfo->dobj.name, subrinfo->tblinfo->dobj.name);
5506
5507 query = createPQExpBuffer();
5508
5509 if (subinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
5510 {
5511 /*
5512 * binary_upgrade_add_sub_rel_state will add the subscription relation
5513 * to pg_subscription_rel table. This will be used only in
5514 * binary-upgrade mode.
5515 */
5517 "\n-- For binary upgrade, must preserve the subscriber table.\n");
5519 "SELECT pg_catalog.binary_upgrade_add_sub_rel_state(");
5520 appendStringLiteralAH(query, subinfo->dobj.name, fout);
5521 appendPQExpBuffer(query,
5522 ", %u, '%c'",
5523 subrinfo->tblinfo->dobj.catId.oid,
5524 subrinfo->srsubstate);
5525
5526 if (subrinfo->srsublsn && subrinfo->srsublsn[0] != '\0')
5527 appendPQExpBuffer(query, ", '%s'", subrinfo->srsublsn);
5528 else
5529 appendPQExpBufferStr(query, ", NULL");
5530
5531 appendPQExpBufferStr(query, ");\n");
5532 }
5533
5534 /*
5535 * There is no point in creating a drop query as the drop is done by table
5536 * drop. (If you think to change this, see also _printTocEntry().)
5537 * Although this object doesn't really have ownership as such, set the
5538 * owner field anyway to ensure that the command is run by the correct
5539 * role at restore time.
5540 */
5541 if (subrinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
5542 ArchiveEntry(fout, subrinfo->dobj.catId, subrinfo->dobj.dumpId,
5543 ARCHIVE_OPTS(.tag = tag,
5544 .namespace = subrinfo->tblinfo->dobj.namespace->dobj.name,
5545 .owner = subinfo->rolname,
5546 .description = "SUBSCRIPTION TABLE",
5547 .section = SECTION_POST_DATA,
5548 .createStmt = query->data));
5549
5550 /* These objects can't currently have comments or seclabels */
5551
5552 free(tag);
5553 destroyPQExpBuffer(query);
5554}
5555
5556/*
5557 * dumpSubscription
5558 * dump the definition of the given subscription
5559 */
/*
 * dumpSubscription (per the header comment above): builds the CREATE
 * SUBSCRIPTION statement for one subscription into "query", the matching
 * DROP into "delq", and registers a SECTION_POST_DATA archive entry plus
 * optional COMMENT/SECLABEL entries.
 *
 * NOTE(review): this is a doxygen-scrape listing with gaps.  The parameter
 * list (orig. line 5561), the "delq" buffer creation (5576), the
 * publication-array parse call guarding the pg_fatal below (5597), the
 * two_phase condition (5625), two appendPQExpBufferStr call heads
 * (5674/5676, 5688) and the delq cleanup (5716) were dropped by the
 * extraction and are intentionally not reconstructed here.
 */
5560 static void
5562 {
5563 DumpOptions *dopt = fout->dopt;
5565 PQExpBuffer query;
5566 PQExpBuffer publications;
5567 char *qsubname;
5568 char **pubnames = NULL;
5569 int npubnames = 0;
5570 int i;
5571
5572 /* Do nothing if not dumping schema */
5573 if (!dopt->dumpSchema)
5574 return;
5575
5577 query = createPQExpBuffer();
5578
5579 qsubname = pg_strdup(fmtId(subinfo->dobj.name));
5580
5581 appendPQExpBuffer(delq, "DROP SUBSCRIPTION %s;\n",
5582 qsubname);
5583
5584 appendPQExpBuffer(query, "CREATE SUBSCRIPTION %s ",
5585 qsubname);
/* Either a foreign SERVER or a literal CONNECTION string, never both. */
5586 if (subinfo->subservername)
5587 {
5588 appendPQExpBuffer(query, "SERVER %s", fmtId(subinfo->subservername));
5589 }
5590 else
5591 {
5592 appendPQExpBuffer(query, "CONNECTION ");
5593 appendStringLiteralAH(query, subinfo->subconninfo, fout);
5594 }
5595
5596 /* Build list of quoted publications and append them to query. */
5598 pg_fatal("could not parse %s array", "subpublications");
5599
5600 publications = createPQExpBuffer();
5601 for (i = 0; i < npubnames; i++)
5602 {
5603 if (i > 0)
5604 appendPQExpBufferStr(publications, ", ");
5605
5606 appendPQExpBufferStr(publications, fmtId(pubnames[i]));
5607 }
5608
/*
 * connect = false / explicit slot_name keep the restored subscription from
 * contacting the publisher; non-default options are appended only when set.
 */
5609 appendPQExpBuffer(query, " PUBLICATION %s WITH (connect = false, slot_name = ", publications->data);
5610 if (subinfo->subslotname)
5611 appendStringLiteralAH(query, subinfo->subslotname, fout);
5612 else
5613 appendPQExpBufferStr(query, "NONE");
5614
5615 if (subinfo->subbinary)
5616 appendPQExpBufferStr(query, ", binary = true");
5617
5618 if (subinfo->substream == LOGICALREP_STREAM_ON)
5619 appendPQExpBufferStr(query, ", streaming = on");
5620 else if (subinfo->substream == LOGICALREP_STREAM_PARALLEL)
5621 appendPQExpBufferStr(query, ", streaming = parallel");
5622 else
5623 appendPQExpBufferStr(query, ", streaming = off");
5624
5626 appendPQExpBufferStr(query, ", two_phase = on");
5627
5628 if (subinfo->subdisableonerr)
5629 appendPQExpBufferStr(query, ", disable_on_error = true");
5630
5631 if (!subinfo->subpasswordrequired)
5632 appendPQExpBufferStr(query, ", password_required = false");
5633
5634 if (subinfo->subrunasowner)
5635 appendPQExpBufferStr(query, ", run_as_owner = true");
5636
5637 if (subinfo->subfailover)
5638 appendPQExpBufferStr(query, ", failover = true");
5639
5640 if (subinfo->subretaindeadtuples)
5641 appendPQExpBufferStr(query, ", retain_dead_tuples = true");
5642
5643 if (subinfo->submaxretention)
5644 appendPQExpBuffer(query, ", max_retention_duration = %d", subinfo->submaxretention);
5645
5646 if (strcmp(subinfo->subsynccommit, "off") != 0)
5647 appendPQExpBuffer(query, ", synchronous_commit = %s", fmtId(subinfo->subsynccommit));
5648
5649 if (strcmp(subinfo->subwalrcvtimeout, "-1") != 0)
5650 appendPQExpBuffer(query, ", wal_receiver_timeout = %s", fmtId(subinfo->subwalrcvtimeout));
5651
5652 if (pg_strcasecmp(subinfo->suborigin, LOGICALREP_ORIGIN_ANY) != 0)
5653 appendPQExpBuffer(query, ", origin = %s", subinfo->suborigin);
5654
5655 appendPQExpBufferStr(query, ");\n");
5656
5657 /*
5658 * In binary-upgrade mode, we allow the replication to continue after the
5659 * upgrade.
5660 */
5661 if (dopt->binary_upgrade && fout->remoteVersion >= 170000)
5662 {
5663 if (subinfo->suboriginremotelsn)
5664 {
5665 /*
5666 * Preserve the remote_lsn for the subscriber's replication
5667 * origin. This value is required to start the replication from
5668 * the position before the upgrade. This value will be stale if
5669 * the publisher gets upgraded before the subscriber node.
5670 * However, this shouldn't be a problem as the upgrade of the
5671 * publisher ensures that all the transactions were replicated
5672 * before upgrading it.
5673 */
5675 "\n-- For binary upgrade, must preserve the remote_lsn for the subscriber's replication origin.\n");
5677 "SELECT pg_catalog.binary_upgrade_replorigin_advance(");
5678 appendStringLiteralAH(query, subinfo->dobj.name, fout);
5679 appendPQExpBuffer(query, ", '%s');\n", subinfo->suboriginremotelsn);
5680 }
5681
5682 if (subinfo->subenabled)
5683 {
5684 /*
5685 * Enable the subscription to allow the replication to continue
5686 * after the upgrade.
5687 */
5689 "\n-- For binary upgrade, must preserve the subscriber's running state.\n");
5690 appendPQExpBuffer(query, "ALTER SUBSCRIPTION %s ENABLE;\n", qsubname);
5691 }
5692 }
5693
5694 if (subinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
5695 ArchiveEntry(fout, subinfo->dobj.catId, subinfo->dobj.dumpId,
5696 ARCHIVE_OPTS(.tag = subinfo->dobj.name,
5697 .owner = subinfo->rolname,
5698 .description = "SUBSCRIPTION",
5699 .section = SECTION_POST_DATA,
5700 .createStmt = query->data,
5701 .dropStmt = delq->data));
5702
5703 if (subinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
5704 dumpComment(fout, "SUBSCRIPTION", qsubname,
5705 NULL, subinfo->rolname,
5706 subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
5707
5708 if (subinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
5709 dumpSecLabel(fout, "SUBSCRIPTION", qsubname,
5710 NULL, subinfo->rolname,
5711 subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
5712
5713 destroyPQExpBuffer(publications);
5714 free(pubnames);
5715
5717 destroyPQExpBuffer(query);
5718 free(qsubname);
5719}
5720
5721/*
5722 * Given a "create query", append as many ALTER ... DEPENDS ON EXTENSION as
5723 * the object needs.
5724 */
/*
 * Per the header comment above: for an object flagged depends_on_ext,
 * query pg_depend (deptype 'x') for the extensions it depends on and
 * append one "ALTER <keyword> <objname> DEPENDS ON EXTENSION <ext>;"
 * per row to the caller's "create" buffer.
 *
 * NOTE(review): doxygen-scrape gap -- the line carrying the function name
 * and first parameter (orig. 5726, presumably "...(Archive *fout,") was
 * dropped by the extraction; the remaining parameters are visible below.
 */
5725 static void
5727 PQExpBuffer create,
5728 const DumpableObject *dobj,
5729 const char *catalog,
5730 const char *keyword,
5731 const char *objname)
5732 {
5733 if (dobj->depends_on_ext)
5734 {
5735 char *nm;
5736 PGresult *res;
5737 PQExpBuffer query;
5738 int ntups;
5739 int i_extname;
5740 int i;
5741
5742 /* dodge fmtId() non-reentrancy */
5743 nm = pg_strdup(objname);
5744
5745 query = createPQExpBuffer();
5746 appendPQExpBuffer(query,
5747 "SELECT e.extname "
5748 "FROM pg_catalog.pg_depend d, pg_catalog.pg_extension e "
5749 "WHERE d.refobjid = e.oid AND classid = '%s'::pg_catalog.regclass "
5750 "AND objid = '%u'::pg_catalog.oid AND deptype = 'x' "
5751 "AND refclassid = 'pg_catalog.pg_extension'::pg_catalog.regclass",
5752 catalog,
5753 dobj->catId.oid);
5754 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5755 ntups = PQntuples(res);
5756 i_extname = PQfnumber(res, "extname");
5757 for (i = 0; i < ntups; i++)
5758 {
5759 appendPQExpBuffer(create, "\nALTER %s %s DEPENDS ON EXTENSION %s;",
5760 keyword, nm,
5761 fmtId(PQgetvalue(res, i, i_extname)));
5762 }
5763
5764 PQclear(res);
5765 destroyPQExpBuffer(query);
5766 pg_free(nm);
5767 }
5768}
5769
/*
 * Returns a type OID not currently present in pg_type: loops, probing a
 * candidate OID with SELECT EXISTS until the probe comes back 'f'.
 *
 * NOTE(review): doxygen-scrape gaps -- the signature (orig. 5771), the
 * static candidate-OID state (5781), the query-build/execute lines
 * (5787-5788, 5792-5793) and the return (5798) were dropped by the
 * extraction; the candidate-advance logic is therefore not visible here.
 */
5770 static Oid
5772 {
5773 /*
5774 * If the old version didn't assign an array type, but the new version
5775 * does, we must select an unused type OID to assign. This currently only
5776 * happens for domains, when upgrading pre-v11 to v11 and up.
5777 *
5778 * Note: local state here is kind of ugly, but we must have some, since we
5779 * mustn't choose the same unused OID more than once.
5780 */
5782 PGresult *res;
5783 bool is_dup;
5784
5785 do
5786 {
5789 "SELECT EXISTS(SELECT 1 "
5790 "FROM pg_catalog.pg_type "
5791 "WHERE oid = '%u'::pg_catalog.oid);",
/* 't' in column 0 means the candidate OID is already taken; retry. */
5794 is_dup = (PQgetvalue(res, 0, 0)[0] == 't');
5795 PQclear(res);
5796 } while (is_dup);
5797
5799}
5800
/*
 * Emits "binary_upgrade_set_next_*_pg_type_oid" SELECTs into
 * upgrade_buffer so the new cluster reuses the old cluster's pg_type OIDs
 * for a type, its array type, and (for range types on >= v14) its
 * multirange type and multirange-array type.
 *
 * NOTE(review): doxygen-scrape gaps -- the signature (orig. 5802-5806),
 * several local declarations (5810-5812), the findTypeByOid lookup (5820),
 * the force_array_type fallback (5824-5827), the multirange condition
 * (5841), and multiple appendPQExpBuffer call heads were dropped by the
 * extraction and are not reconstructed here.
 */
5801 static void
5805 bool force_array_type,
5807 {
5809 PGresult *res;
5813 TypeInfo *tinfo;
5814
5815 appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type oid\n");
5817 "SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('%u'::pg_catalog.oid);\n\n",
5818 pg_type_oid);
5819
/* Take the array-type OID from the catalog entry when we have one. */
5821 if (tinfo)
5822 pg_type_array_oid = tinfo->typarray;
5823 else
5825
5828
5830 {
5832 "\n-- For binary upgrade, must preserve pg_type array oid\n");
5834 "SELECT pg_catalog.binary_upgrade_set_next_array_pg_type_oid('%u'::pg_catalog.oid);\n\n",
5836 }
5837
5838 /*
5839 * Pre-set the multirange type oid and its own array type oid.
5840 */
5842 {
5843 if (fout->remoteVersion >= 140000)
5844 {
5846 "SELECT t.oid, t.typarray "
5847 "FROM pg_catalog.pg_type t "
5848 "JOIN pg_catalog.pg_range r "
5849 "ON t.oid = r.rngmultitypid "
5850 "WHERE r.rngtypid = '%u'::pg_catalog.oid;",
5851 pg_type_oid);
5852
5854
5855 pg_type_multirange_oid = atooid(PQgetvalue(res, 0, PQfnumber(res, "oid")));
5856 pg_type_multirange_array_oid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typarray")));
5857
5858 PQclear(res);
5859 }
5860 else
5861 {
5864 }
5865
5867 "\n-- For binary upgrade, must preserve multirange pg_type oid\n");
5869 "SELECT pg_catalog.binary_upgrade_set_next_multirange_pg_type_oid('%u'::pg_catalog.oid);\n\n",
5872 "\n-- For binary upgrade, must preserve multirange pg_type array oid\n");
5874 "SELECT pg_catalog.binary_upgrade_set_next_multirange_array_pg_type_oid('%u'::pg_catalog.oid);\n\n",
5876 }
5877
5879}
5880
5881static void
5892
5893/*
5894 * bsearch() comparator for BinaryUpgradeClassOidItem
5895 */
/*
 * bsearch()/qsort comparator over BinaryUpgradeClassOidItem, ordering by
 * unsigned OID via pg_cmp_u32.
 *
 * NOTE(review): doxygen-scrape gap -- the local v1/v2 declarations
 * (orig. 5899-5900, presumably dereferencing p1/p2) were dropped by the
 * extraction.
 */
5896 static int
5897 BinaryUpgradeClassOidItemCmp(const void *p1, const void *p2)
5898 {
5901
5902 return pg_cmp_u32(v1.oid, v2.oid);
5903}
5904
5905/*
5906 * collectBinaryUpgradeClassOids
5907 *
5908 * Construct a table of pg_class information required for
5909 * binary_upgrade_set_pg_class_oids(). The table is sorted by OID for speed in
5910 * lookup.
5911 */
/*
 * Per the header comment above: fetch pg_class OIDs/relfilenodes (plus
 * TOAST table and TOAST-index info) ordered by OID, to build the table
 * consulted by binary_upgrade_set_pg_class_oids() via bsearch.
 *
 * NOTE(review): doxygen-scrape gaps -- the signature (orig. 5913), the
 * array allocation / nbinaryUpgradeClassOids assignment (5928-5930), and
 * the per-row field loads inside the loop (5934-5940) were dropped by the
 * extraction.
 */
5912 static void
5914 {
5915 PGresult *res;
5916 const char *query;
5917
5918 query = "SELECT c.oid, c.relkind, c.relfilenode, c.reltoastrelid, "
5919 "ct.relfilenode, i.indexrelid, cti.relfilenode "
5920 "FROM pg_catalog.pg_class c LEFT JOIN pg_catalog.pg_index i "
5921 "ON (c.reltoastrelid = i.indrelid AND i.indisvalid) "
5922 "LEFT JOIN pg_catalog.pg_class ct ON (c.reltoastrelid = ct.oid) "
5923 "LEFT JOIN pg_catalog.pg_class AS cti ON (i.indexrelid = cti.oid) "
5924 "ORDER BY c.oid;";
5925
5926 res = ExecuteSqlQuery(fout, query, PGRES_TUPLES_OK);
5927
5931
5932 for (int i = 0; i < nbinaryUpgradeClassOids; i++)
5933 {
5941 }
5942
5943 PQclear(res);
5944}
5945
/*
 * Emits "binary_upgrade_set_next_*_pg_class_oid/relfilenode" SELECTs for
 * one relation: heap path (plus optional TOAST table and its index) for
 * non-index relkinds, or the index path otherwise.
 *
 * NOTE(review): doxygen-scrape gaps -- the signature (orig. 5947-5948),
 * the entry declaration (5951), the bsearch lookup into the
 * collectBinaryUpgradeClassOids table (presumed at 5967-5969), the second
 * halves of several conditions (5975, 5987, 5997), and most
 * appendPQExpBuffer call heads were dropped by the extraction.
 */
5946 static void
5949 {
5950 BinaryUpgradeClassOidItem key = {0};
5952
5954
5955 /*
5956 * Preserve the OID and relfilenumber of the table, table's index, table's
5957 * toast table and toast table's index if any.
5958 *
5959 * One complexity is that the current table definition might not require
5960 * the creation of a TOAST table, but the old database might have a TOAST
5961 * table that was created earlier, before some wide columns were dropped.
5962 * By setting the TOAST oid we force creation of the TOAST heap and index
5963 * by the new backend, so we can copy the files during binary upgrade
5964 * without worrying about this case.
5965 */
5966 key.oid = pg_class_oid;
5970
5972 "\n-- For binary upgrade, must preserve pg_class oids and relfilenodes\n");
5973
5974 if (entry->relkind != RELKIND_INDEX &&
5976 {
5978 "SELECT pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('%u'::pg_catalog.oid);\n",
5979 pg_class_oid);
5980
5981 /*
5982 * Not every relation has storage. Also, in a pre-v12 database,
5983 * partitioned tables have a relfilenumber, which should not be
5984 * preserved when upgrading.
5985 */
5986 if (RelFileNumberIsValid(entry->relfilenumber) &&
5989 "SELECT pg_catalog.binary_upgrade_set_next_heap_relfilenode('%u'::pg_catalog.oid);\n",
5990 entry->relfilenumber);
5991
5992 /*
5993 * In a pre-v12 database, partitioned tables might be marked as having
5994 * toast tables, but we should ignore them if so.
5995 */
5996 if (OidIsValid(entry->toast_oid) &&
5998 {
6000 "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('%u'::pg_catalog.oid);\n",
6001 entry->toast_oid);
6003 "SELECT pg_catalog.binary_upgrade_set_next_toast_relfilenode('%u'::pg_catalog.oid);\n",
6004 entry->toast_relfilenumber);
6005
6006 /* every toast table has an index */
6008 "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
6009 entry->toast_index_oid);
6011 "SELECT pg_catalog.binary_upgrade_set_next_index_relfilenode('%u'::pg_catalog.oid);\n",
6013 }
6014 }
6015 else
6016 {
6017 /* Preserve the OID and relfilenumber of the index */
6019 "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
6020 pg_class_oid);
6022 "SELECT pg_catalog.binary_upgrade_set_next_index_relfilenode('%u'::pg_catalog.oid);\n",
6023 entry->relfilenumber);
6024 }
6025
6027}
6028
6029/*
6030 * If the DumpableObject is a member of an extension, add a suitable
6031 * ALTER EXTENSION ADD command to the creation commands in upgrade_buffer.
6032 *
6033 * For somewhat historical reasons, objname should already be quoted,
6034 * but not objnamespace (if any).
6035 */
/*
 * Per the header comment above: if dobj belongs to an extension, locate
 * the parent extension among dobj's dependencies and emit
 * "ALTER EXTENSION <ext> ADD <objtype> [<namespace>.]<objname>;" into
 * upgrade_buffer.  objname is expected pre-quoted; objnamespace is not.
 *
 * NOTE(review): doxygen-scrape gaps -- the signature's first line
 * (orig. 6037), the extobj declaration (6043), the per-dependency lookup
 * (6057), the appendPQExpBufferStr call head (6066) and the namespace
 * qualification append (6072) were dropped by the extraction.
 */
6036 static void
6038 const DumpableObject *dobj,
6039 const char *objtype,
6040 const char *objname,
6041 const char *objnamespace)
6042 {
6044 int i;
6045
6046 if (!dobj->ext_member)
6047 return;
6048
6049 /*
6050 * Find the parent extension. We could avoid this search if we wanted to
6051 * add a link field to DumpableObject, but the space costs of that would
6052 * be considerable. We assume that member objects could only have a
6053 * direct dependency on their own extension, not any others.
6054 */
6055 for (i = 0; i < dobj->nDeps; i++)
6056 {
6058 if (extobj && extobj->objType == DO_EXTENSION)
6059 break;
6060 extobj = NULL;
6061 }
6062 if (extobj == NULL)
6063 pg_fatal("could not find parent extension for %s %s",
6064 objtype, objname);
6065
6067 "\n-- For binary upgrade, handle extension membership the hard way\n");
6068 appendPQExpBuffer(upgrade_buffer, "ALTER EXTENSION %s ADD %s ",
6069 fmtId(extobj->name),
6070 objtype);
6071 if (objnamespace && *objnamespace)
6073 appendPQExpBuffer(upgrade_buffer, "%s;\n", objname);
6074}
6075
6076/*
6077 * getNamespaces:
6078 * get information about all namespaces in the system catalogs
6079 */
/*
 * getNamespaces (per the header comment above): read every row of
 * pg_namespace into NamespaceInfo entries, assign dump IDs, record
 * ACL/owner info, and special-case the "public" schema's initprivs.
 *
 * NOTE(review): doxygen-scrape gaps -- the signature (orig. 6081), the
 * nsinfo array declaration/allocation (6087, 6111), the
 * selectDumpable... call (6138), and the public-schema ACL-array
 * construction block (6162-6163, 6166-6175, 6181-6182) were dropped by
 * the extraction.
 */
6080 void
6082 {
6083 PGresult *res;
6084 int ntups;
6085 int i;
6086 PQExpBuffer query;
6088 int i_tableoid;
6089 int i_oid;
6090 int i_nspname;
6091 int i_nspowner;
6092 int i_nspacl;
6093 int i_acldefault;
6094
6095 query = createPQExpBuffer();
6096
6097 /*
6098 * we fetch all namespaces including system ones, so that every object we
6099 * read in can be linked to a containing namespace.
6100 */
6101 appendPQExpBufferStr(query, "SELECT n.tableoid, n.oid, n.nspname, "
6102 "n.nspowner, "
6103 "n.nspacl, "
6104 "acldefault('n', n.nspowner) AS acldefault "
6105 "FROM pg_namespace n");
6106
6107 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6108
6109 ntups = PQntuples(res);
6110
6112
6113 i_tableoid = PQfnumber(res, "tableoid");
6114 i_oid = PQfnumber(res, "oid");
6115 i_nspname = PQfnumber(res, "nspname");
6116 i_nspowner = PQfnumber(res, "nspowner");
6117 i_nspacl = PQfnumber(res, "nspacl");
6118 i_acldefault = PQfnumber(res, "acldefault");
6119
6120 for (i = 0; i < ntups; i++)
6121 {
6122 const char *nspowner;
6123
6124 nsinfo[i].dobj.objType = DO_NAMESPACE;
6125 nsinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6126 nsinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6127 AssignDumpId(&nsinfo[i].dobj);
6128 nsinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_nspname));
6129 nsinfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_nspacl));
6130 nsinfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
6131 nsinfo[i].dacl.privtype = 0;
6132 nsinfo[i].dacl.initprivs = NULL;
6133 nspowner = PQgetvalue(res, i, i_nspowner);
6134 nsinfo[i].nspowner = atooid(nspowner);
6135 nsinfo[i].rolname = getRoleName(nspowner);
6136
6137 /* Decide whether to dump this namespace */
6139
6140 /* Mark whether namespace has an ACL */
6141 if (!PQgetisnull(res, i, i_nspacl))
6142 nsinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
6143
6144 /*
6145 * We ignore any pg_init_privs.initprivs entry for the public schema
6146 * and assume a predetermined default, for several reasons. First,
6147 * dropping and recreating the schema removes its pg_init_privs entry,
6148 * but an empty destination database starts with this ACL nonetheless.
6149 * Second, we support dump/reload of public schema ownership changes.
6150 * ALTER SCHEMA OWNER filters nspacl through aclnewowner(), but
6151 * initprivs continues to reflect the initial owner. Hence,
6152 * synthesize the value that nspacl will have after the restore's
6153 * ALTER SCHEMA OWNER. Third, this makes the destination database
6154 * match the source's ACL, even if the latter was an initdb-default
6155 * ACL, which changed in v15. An upgrade pulls in changes to most
6156 * system object ACLs that the DBA had not customized. We've made the
6157 * public schema depart from that, because changing its ACL so easily
6158 * breaks applications.
6159 */
6160 if (strcmp(nsinfo[i].dobj.name, "public") == 0)
6161 {
6164
6165 /* Standard ACL as of v15 is {owner=UC/owner,=U/owner} */
6176
6177 nsinfo[i].dacl.privtype = 'i';
6178 nsinfo[i].dacl.initprivs = pstrdup(aclarray->data);
6179 nsinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
6180
6183 }
6184 }
6185
6186 PQclear(res);
6187 destroyPQExpBuffer(query);
6188}
6189
6190/*
6191 * findNamespace:
6192 * given a namespace OID, look up the info read by getNamespaces
6193 */
/*
 * findNamespace (per the header comment above): resolve a namespace OID
 * to the NamespaceInfo read by getNamespaces; fatal error if absent.
 *
 * NOTE(review): doxygen-scrape gaps -- the signature (orig. 6195) and the
 * nsinfo lookup line (presumed at 6197-6199) were dropped by the
 * extraction.
 */
6194 static NamespaceInfo *
6196 {
6198
6200 if (nsinfo == NULL)
6201 pg_fatal("schema with OID %u does not exist", nsoid);
6202 return nsinfo;
6203}
6204
6205/*
6206 * getExtensions:
6207 * read all extensions in the system catalogs and return them in the
6208 * ExtensionInfo* structure
6209 *
6210 * numExtensions is set to the number of extensions read in
6211 */
6214{
6215 DumpOptions *dopt = fout->dopt;
6216 PGresult *res;
6217 int ntups;
6218 int i;
6219 PQExpBuffer query;
6221 int i_tableoid;
6222 int i_oid;
6223 int i_extname;
6224 int i_nspname;
6225 int i_extrelocatable;
6226 int i_extversion;
6227 int i_extconfig;
6228 int i_extcondition;
6229
6230 query = createPQExpBuffer();
6231
6232 appendPQExpBufferStr(query, "SELECT x.tableoid, x.oid, "
6233 "x.extname, n.nspname, x.extrelocatable, x.extversion, x.extconfig, x.extcondition "
6234 "FROM pg_extension x "
6235 "JOIN pg_namespace n ON n.oid = x.extnamespace");
6236
6237 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6238
6239 ntups = PQntuples(res);
6240 if (ntups == 0)
6241 goto cleanup;
6242
6244
6245 i_tableoid = PQfnumber(res, "tableoid");
6246 i_oid = PQfnumber(res, "oid");
6247 i_extname = PQfnumber(res, "extname");
6248 i_nspname = PQfnumber(res, "nspname");
6249 i_extrelocatable = PQfnumber(res, "extrelocatable");
6250 i_extversion = PQfnumber(res, "extversion");
6251 i_extconfig = PQfnumber(res, "extconfig");
6252 i_extcondition = PQfnumber(res, "extcondition");
6253
6254 for (i = 0; i < ntups; i++)
6255 {
6256 extinfo[i].dobj.objType = DO_EXTENSION;
6257 extinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6258 extinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6259 AssignDumpId(&extinfo[i].dobj);
6260 extinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_extname));
6261 extinfo[i].namespace = pg_strdup(PQgetvalue(res, i, i_nspname));
6262 extinfo[i].relocatable = *(PQgetvalue(res, i, i_extrelocatable)) == 't';
6263 extinfo[i].extversion = pg_strdup(PQgetvalue(res, i, i_extversion));
6264 extinfo[i].extconfig = pg_strdup(PQgetvalue(res, i, i_extconfig));
6265 extinfo[i].extcondition = pg_strdup(PQgetvalue(res, i, i_extcondition));
6266
6267 /* Decide whether we want to dump it */
6269 }
6270
6271cleanup:
6272 PQclear(res);
6273 destroyPQExpBuffer(query);
6274
6275 *numExtensions = ntups;
6276
6277 return extinfo;
6278}
6279
6280/*
6281 * getTypes:
6282 * get information about all types in the system catalogs
6283 *
6284 * NB: this must run after getFuncs() because we assume we can do
6285 * findFuncByOid().
6286 */
/*
 * getTypes (per the header comment above): read all pg_type rows
 * (including built-ins), populate TypeInfo entries, fetch domain
 * constraints for dumpable domains, and create shell-type
 * DumpableObjects for base and range types.
 *
 * NOTE(review): doxygen-scrape gaps -- the signature (orig. 6288), the
 * tyinfo/query declarations (6293-6295), the allocation (6342), the
 * findNamespace call completing line 6366-7, the selectDumpableType call
 * (6398), the getDomainConstraints call (6412) and the stinfo allocation
 * (6428) were dropped by the extraction.
 */
6287 void
6289 {
6290 PGresult *res;
6291 int ntups;
6292 int i;
6296 int i_tableoid;
6297 int i_oid;
6298 int i_typname;
6299 int i_typnamespace;
6300 int i_typacl;
6301 int i_acldefault;
6302 int i_typowner;
6303 int i_typelem;
6304 int i_typrelid;
6305 int i_typrelkind;
6306 int i_typtype;
6307 int i_typisdefined;
6308 int i_isarray;
6309 int i_typarray;
6310
6311 /*
6312 * we include even the built-in types because those may be used as array
6313 * elements by user-defined types
6314 *
6315 * we filter out the built-in types when we dump out the types
6316 *
6317 * same approach for undefined (shell) types and array types
6318 *
6319 * Note: as of 8.3 we can reliably detect whether a type is an
6320 * auto-generated array type by checking the element type's typarray.
6321 * (Before that the test is capable of generating false positives.) We
6322 * still check for name beginning with '_', though, so as to avoid the
6323 * cost of the subselect probe for all standard types. This would have to
6324 * be revisited if the backend ever allows renaming of array types.
6325 */
6326 appendPQExpBufferStr(query, "SELECT tableoid, oid, typname, "
6327 "typnamespace, typacl, "
6328 "acldefault('T', typowner) AS acldefault, "
6329 "typowner, "
6330 "typelem, typrelid, typarray, "
6331 "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
6332 "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
6333 "typtype, typisdefined, "
6334 "typname[0] = '_' AND typelem != 0 AND "
6335 "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
6336 "FROM pg_type");
6337
6338 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6339
6340 ntups = PQntuples(res);
6341
6343
6344 i_tableoid = PQfnumber(res, "tableoid");
6345 i_oid = PQfnumber(res, "oid");
6346 i_typname = PQfnumber(res, "typname");
6347 i_typnamespace = PQfnumber(res, "typnamespace");
6348 i_typacl = PQfnumber(res, "typacl");
6349 i_acldefault = PQfnumber(res, "acldefault");
6350 i_typowner = PQfnumber(res, "typowner");
6351 i_typelem = PQfnumber(res, "typelem");
6352 i_typrelid = PQfnumber(res, "typrelid");
6353 i_typrelkind = PQfnumber(res, "typrelkind");
6354 i_typtype = PQfnumber(res, "typtype");
6355 i_typisdefined = PQfnumber(res, "typisdefined");
6356 i_isarray = PQfnumber(res, "isarray");
6357 i_typarray = PQfnumber(res, "typarray");
6358
6359 for (i = 0; i < ntups; i++)
6360 {
6361 tyinfo[i].dobj.objType = DO_TYPE;
6362 tyinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6363 tyinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6364 AssignDumpId(&tyinfo[i].dobj);
6365 tyinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_typname));
6366 tyinfo[i].dobj.namespace =
6368 tyinfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_typacl));
6369 tyinfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
6370 tyinfo[i].dacl.privtype = 0;
6371 tyinfo[i].dacl.initprivs = NULL;
6372 tyinfo[i].ftypname = NULL; /* may get filled later */
6373 tyinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_typowner));
6374 tyinfo[i].typelem = atooid(PQgetvalue(res, i, i_typelem));
6375 tyinfo[i].typrelid = atooid(PQgetvalue(res, i, i_typrelid));
6376 tyinfo[i].typrelkind = *PQgetvalue(res, i, i_typrelkind);
6377 tyinfo[i].typtype = *PQgetvalue(res, i, i_typtype);
6378 tyinfo[i].shellType = NULL;
6379
6380 if (strcmp(PQgetvalue(res, i, i_typisdefined), "t") == 0)
6381 tyinfo[i].isDefined = true;
6382 else
6383 tyinfo[i].isDefined = false;
6384
6385 if (strcmp(PQgetvalue(res, i, i_isarray), "t") == 0)
6386 tyinfo[i].isArray = true;
6387 else
6388 tyinfo[i].isArray = false;
6389
6390 tyinfo[i].typarray = atooid(PQgetvalue(res, i, i_typarray));
6391
6392 if (tyinfo[i].typtype == TYPTYPE_MULTIRANGE)
6393 tyinfo[i].isMultirange = true;
6394 else
6395 tyinfo[i].isMultirange = false;
6396
6397 /* Decide whether we want to dump it */
6399
6400 /* Mark whether type has an ACL */
6401 if (!PQgetisnull(res, i, i_typacl))
6402 tyinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
6403
6404 /*
6405 * If it's a domain, fetch info about its constraints, if any
6406 */
6407 tyinfo[i].nDomChecks = 0;
6408 tyinfo[i].domChecks = NULL;
6409 tyinfo[i].notnull = NULL;
6410 if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
6411 tyinfo[i].typtype == TYPTYPE_DOMAIN)
6413
6414 /*
6415 * If it's a base type, make a DumpableObject representing a shell
6416 * definition of the type. We will need to dump that ahead of the I/O
6417 * functions for the type. Similarly, range types need a shell
6418 * definition in case they have a canonicalize function.
6419 *
6420 * Note: the shell type doesn't have a catId. You might think it
6421 * should copy the base type's catId, but then it might capture the
6422 * pg_depend entries for the type, which we don't want.
6423 */
6424 if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
6425 (tyinfo[i].typtype == TYPTYPE_BASE ||
6426 tyinfo[i].typtype == TYPTYPE_RANGE))
6427 {
6429 stinfo->dobj.objType = DO_SHELL_TYPE;
6430 stinfo->dobj.catId = nilCatalogId;
6431 AssignDumpId(&stinfo->dobj);
6432 stinfo->dobj.name = pg_strdup(tyinfo[i].dobj.name);
6433 stinfo->dobj.namespace = tyinfo[i].dobj.namespace;
6434 stinfo->baseType = &(tyinfo[i]);
6435 tyinfo[i].shellType = stinfo;
6436
6437 /*
6438 * Initially mark the shell type as not to be dumped. We'll only
6439 * dump it if the I/O or canonicalize functions need to be dumped;
6440 * this is taken care of while sorting dependencies.
6441 */
6442 stinfo->dobj.dump = DUMP_COMPONENT_NONE;
6443 }
6444 }
6445
6446 PQclear(res);
6447
6448 destroyPQExpBuffer(query);
6449}
6450
6451/*
6452 * getOperators:
6453 * get information about all operators in the system catalogs
6454 */
/*
 * getOperators (per the header comment above): read every pg_operator
 * row (built-ins included; filtering happens at dump-out time) into
 * OprInfo entries and decide dumpability.
 *
 * NOTE(review): doxygen-scrape gaps -- the signature (orig. 6456), the
 * oprinfo/query declarations (6461-6462), the allocation (6491), the
 * findNamespace call completing 6510-11, and the selectDumpableObject
 * call (6519) were dropped by the extraction.
 */
6455 void
6457 {
6458 PGresult *res;
6459 int ntups;
6460 int i;
6463 int i_tableoid;
6464 int i_oid;
6465 int i_oprname;
6466 int i_oprnamespace;
6467 int i_oprowner;
6468 int i_oprkind;
6469 int i_oprleft;
6470 int i_oprright;
6471 int i_oprcode;
6472
6473 /*
6474 * find all operators, including builtin operators; we filter out
6475 * system-defined operators at dump-out time.
6476 */
6477
6478 appendPQExpBufferStr(query, "SELECT tableoid, oid, oprname, "
6479 "oprnamespace, "
6480 "oprowner, "
6481 "oprkind, "
6482 "oprleft, "
6483 "oprright, "
6484 "oprcode::oid AS oprcode "
6485 "FROM pg_operator");
6486
6487 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6488
6489 ntups = PQntuples(res);
6490
6492
6493 i_tableoid = PQfnumber(res, "tableoid");
6494 i_oid = PQfnumber(res, "oid");
6495 i_oprname = PQfnumber(res, "oprname");
6496 i_oprnamespace = PQfnumber(res, "oprnamespace");
6497 i_oprowner = PQfnumber(res, "oprowner");
6498 i_oprkind = PQfnumber(res, "oprkind");
6499 i_oprleft = PQfnumber(res, "oprleft");
6500 i_oprright = PQfnumber(res, "oprright");
6501 i_oprcode = PQfnumber(res, "oprcode");
6502
6503 for (i = 0; i < ntups; i++)
6504 {
6505 oprinfo[i].dobj.objType = DO_OPERATOR;
6506 oprinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6507 oprinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6508 AssignDumpId(&oprinfo[i].dobj);
6509 oprinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oprname));
6510 oprinfo[i].dobj.namespace =
6512 oprinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_oprowner));
6513 oprinfo[i].oprkind = (PQgetvalue(res, i, i_oprkind))[0];
6514 oprinfo[i].oprleft = atooid(PQgetvalue(res, i, i_oprleft));
6515 oprinfo[i].oprright = atooid(PQgetvalue(res, i, i_oprright));
6516 oprinfo[i].oprcode = atooid(PQgetvalue(res, i, i_oprcode));
6517
6518 /* Decide whether we want to dump it */
6520 }
6521
6522 PQclear(res);
6523
6524 destroyPQExpBuffer(query);
6525}
6526
6527/*
6528 * getCollations:
6529 * get information about all collations in the system catalogs
6530 */
/*
 * getCollations (per the header comment above): read every pg_collation
 * row into CollInfo entries; system-defined collations are filtered out
 * later, at dump-out time.
 *
 * NOTE(review): doxygen-scrape gaps -- the signature (orig. 6532), the
 * collinfo declaration/allocation (6538, 6563), the findNamespace call
 * completing 6579-80, and the selectDumpableObject call (6585) were
 * dropped by the extraction.
 */
6531 void
6533 {
6534 PGresult *res;
6535 int ntups;
6536 int i;
6537 PQExpBuffer query;
6539 int i_tableoid;
6540 int i_oid;
6541 int i_collname;
6542 int i_collnamespace;
6543 int i_collowner;
6544 int i_collencoding;
6545
6546 query = createPQExpBuffer();
6547
6548 /*
6549 * find all collations, including builtin collations; we filter out
6550 * system-defined collations at dump-out time.
6551 */
6552
6553 appendPQExpBufferStr(query, "SELECT tableoid, oid, collname, "
6554 "collnamespace, "
6555 "collowner, "
6556 "collencoding "
6557 "FROM pg_collation");
6558
6559 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6560
6561 ntups = PQntuples(res);
6562
6564
6565 i_tableoid = PQfnumber(res, "tableoid");
6566 i_oid = PQfnumber(res, "oid");
6567 i_collname = PQfnumber(res, "collname");
6568 i_collnamespace = PQfnumber(res, "collnamespace");
6569 i_collowner = PQfnumber(res, "collowner");
6570 i_collencoding = PQfnumber(res, "collencoding");
6571
6572 for (i = 0; i < ntups; i++)
6573 {
6574 collinfo[i].dobj.objType = DO_COLLATION;
6575 collinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6576 collinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6577 AssignDumpId(&collinfo[i].dobj);
6578 collinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_collname));
6579 collinfo[i].dobj.namespace =
6581 collinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_collowner));
6582 collinfo[i].collencoding = atoi(PQgetvalue(res, i, i_collencoding));
6583
6584 /* Decide whether we want to dump it */
6586 }
6587
6588 PQclear(res);
6589
6590 destroyPQExpBuffer(query);
6591}
6592
6593/*
6594 * getConversions:
6595 * get information about all conversions in the system catalogs
6596 */
/*
 * getConversions (per the header comment above): read every pg_conversion
 * row into ConvInfo entries; system-defined conversions are filtered out
 * later, at dump-out time.
 *
 * NOTE(review): doxygen-scrape gaps -- the signature (orig. 6598), the
 * convinfo declaration/allocation (6604, 6627), the findNamespace call
 * completing 6642-43, and the selectDumpableObject call (6647) were
 * dropped by the extraction.
 */
6597 void
6599 {
6600 PGresult *res;
6601 int ntups;
6602 int i;
6603 PQExpBuffer query;
6605 int i_tableoid;
6606 int i_oid;
6607 int i_conname;
6608 int i_connamespace;
6609 int i_conowner;
6610
6611 query = createPQExpBuffer();
6612
6613 /*
6614 * find all conversions, including builtin conversions; we filter out
6615 * system-defined conversions at dump-out time.
6616 */
6617
6618 appendPQExpBufferStr(query, "SELECT tableoid, oid, conname, "
6619 "connamespace, "
6620 "conowner "
6621 "FROM pg_conversion");
6622
6623 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6624
6625 ntups = PQntuples(res);
6626
6628
6629 i_tableoid = PQfnumber(res, "tableoid");
6630 i_oid = PQfnumber(res, "oid");
6631 i_conname = PQfnumber(res, "conname");
6632 i_connamespace = PQfnumber(res, "connamespace");
6633 i_conowner = PQfnumber(res, "conowner");
6634
6635 for (i = 0; i < ntups; i++)
6636 {
6637 convinfo[i].dobj.objType = DO_CONVERSION;
6638 convinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6639 convinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6640 AssignDumpId(&convinfo[i].dobj);
6641 convinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_conname));
6642 convinfo[i].dobj.namespace =
6644 convinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_conowner));
6645
6646 /* Decide whether we want to dump it */
6648 }
6649
6650 PQclear(res);
6651
6652 destroyPQExpBuffer(query);
6653}
6654
6655/*
6656 * getAccessMethods:
6657 * get information about all user-defined access methods
6658 */
/*
 * getAccessMethods (per the header comment above): read pg_am into
 * AccessMethodInfo entries, synthesizing amtype/amhandler for pre-9.6
 * servers which predate CREATE ACCESS METHOD and the amhandler column.
 *
 * NOTE(review): doxygen-scrape gaps -- the signature (orig. 6660), the
 * aminfo declaration/allocation (6666, 6700), the two
 * appendPQExpBufferStr call heads inside the version branch (6687, 6691),
 * and the selectDumpableAccessMethod call (6720) were dropped by the
 * extraction.
 */
6659 void
6661 {
6662 PGresult *res;
6663 int ntups;
6664 int i;
6665 PQExpBuffer query;
6667 int i_tableoid;
6668 int i_oid;
6669 int i_amname;
6670 int i_amhandler;
6671 int i_amtype;
6672
6673 query = createPQExpBuffer();
6674
6675 /*
6676 * Select all access methods from pg_am table. v9.6 introduced CREATE
6677 * ACCESS METHOD, so earlier versions usually have only built-in access
6678 * methods. v9.6 also changed the access method API, replacing dozens of
6679 * pg_am columns with amhandler. Even if a user created an access method
6680 * by "INSERT INTO pg_am", we have no way to translate pre-v9.6 pg_am
6681 * columns to a v9.6+ CREATE ACCESS METHOD. Hence, before v9.6, read
6682 * pg_am just to facilitate findAccessMethodByOid() providing the
6683 * OID-to-name mapping.
6684 */
6685 appendPQExpBufferStr(query, "SELECT tableoid, oid, amname, ");
6686 if (fout->remoteVersion >= 90600)
6688 "amtype, "
6689 "amhandler::pg_catalog.regproc AS amhandler ");
6690 else
6692 "'i'::pg_catalog.\"char\" AS amtype, "
6693 "'-'::pg_catalog.regproc AS amhandler ");
6694 appendPQExpBufferStr(query, "FROM pg_am");
6695
6696 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6697
6698 ntups = PQntuples(res);
6699
6701
6702 i_tableoid = PQfnumber(res, "tableoid");
6703 i_oid = PQfnumber(res, "oid");
6704 i_amname = PQfnumber(res, "amname");
6705 i_amhandler = PQfnumber(res, "amhandler");
6706 i_amtype = PQfnumber(res, "amtype");
6707
6708 for (i = 0; i < ntups; i++)
6709 {
6710 aminfo[i].dobj.objType = DO_ACCESS_METHOD;
6711 aminfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6712 aminfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6713 AssignDumpId(&aminfo[i].dobj);
6714 aminfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_amname));
6715 aminfo[i].dobj.namespace = NULL;
6716 aminfo[i].amhandler = pg_strdup(PQgetvalue(res, i, i_amhandler));
6717 aminfo[i].amtype = *(PQgetvalue(res, i, i_amtype));
6718
6719 /* Decide whether we want to dump it */
6721 }
6722
6723 PQclear(res);
6724
6725 destroyPQExpBuffer(query);
6726}
6727
6728
6729/*
6730 * getOpclasses:
6731 * get information about all opclasses in the system catalogs
6732 *
 * Reads every row of pg_opclass (builtin ones included; the visible header
 * comment says system-defined opclasses are filtered at dump-out time),
 * builds one dumpable object per row, and records method OID and owner.
 *
 * NOTE(review): this extract drops several upstream lines (the embedded
 * line numbers jump, e.g. 6733->6735, 6761->6763, 6778->6780) — the
 * function signature, the query-buffer declaration/creation, the opcinfo
 * array allocation, the namespace lookup argument, and the
 * selectDumpableObject() call are missing here. Restore from upstream
 * before treating this text as compilable.
6732 */
6733void
6735{
6736 PGresult *res;
6737 int ntups;
6738 int i;
6741 int i_tableoid;
6742 int i_oid;
6743 int i_opcmethod;
6744 int i_opcname;
6745 int i_opcnamespace;
6746 int i_opcowner;
6747
6748 /*
6749 * find all opclasses, including builtin opclasses; we filter out
6750 * system-defined opclasses at dump-out time.
6751 */
6752
6753 appendPQExpBufferStr(query, "SELECT tableoid, oid, opcmethod, opcname, "
6754 "opcnamespace, "
6755 "opcowner "
6756 "FROM pg_opclass");
6757
6758 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6759
6760 ntups = PQntuples(res);
6761
6763
 /* Resolve result-column indexes once, before the per-row loop */
6764 i_tableoid = PQfnumber(res, "tableoid");
6765 i_oid = PQfnumber(res, "oid");
6766 i_opcmethod = PQfnumber(res, "opcmethod");
6767 i_opcname = PQfnumber(res, "opcname");
6768 i_opcnamespace = PQfnumber(res, "opcnamespace");
6769 i_opcowner = PQfnumber(res, "opcowner");
6770
6771 for (i = 0; i < ntups; i++)
6772 {
6773 opcinfo[i].dobj.objType = DO_OPCLASS;
6774 opcinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6775 opcinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6776 AssignDumpId(&opcinfo[i].dobj);
6777 opcinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opcname));
6778 opcinfo[i].dobj.namespace =
6780 opcinfo[i].opcmethod = atooid(PQgetvalue(res, i, i_opcmethod));
6781 opcinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_opcowner));
6782
6783 /* Decide whether we want to dump it */
6785 }
6786
6787 PQclear(res);
6788
6789 destroyPQExpBuffer(query);
6790}
6791
6792/*
6793 * getOpfamilies:
6794 * get information about all opfamilies in the system catalogs
6795 *
 * Mirrors getOpclasses(): scans all of pg_opfamily (builtin rows too,
 * filtered at dump-out time per the comment below), creating one
 * DO_OPFAMILY dumpable object per row with its method OID and owner.
 *
 * NOTE(review): extraction gaps here too (line numbers jump 6796->6798,
 * 6802->6804, 6826->6828, 6843->6845, 6848->6850): the signature line,
 * the opfinfo array allocation, the namespace lookup argument, and the
 * selectDumpableObject() call were dropped from this extract.
6795 */
6796void
6798{
6799 PGresult *res;
6800 int ntups;
6801 int i;
6802 PQExpBuffer query;
6804 int i_tableoid;
6805 int i_oid;
6806 int i_opfmethod;
6807 int i_opfname;
6808 int i_opfnamespace;
6809 int i_opfowner;
6810
6811 query = createPQExpBuffer();
6812
6813 /*
6814 * find all opfamilies, including builtin opfamilies; we filter out
6815 * system-defined opfamilies at dump-out time.
6816 */
6817
6818 appendPQExpBufferStr(query, "SELECT tableoid, oid, opfmethod, opfname, "
6819 "opfnamespace, "
6820 "opfowner "
6821 "FROM pg_opfamily");
6822
6823 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6824
6825 ntups = PQntuples(res);
6826
6828
 /* Resolve result-column indexes once, before the per-row loop */
6829 i_tableoid = PQfnumber(res, "tableoid");
6830 i_oid = PQfnumber(res, "oid");
6831 i_opfname = PQfnumber(res, "opfname");
6832 i_opfmethod = PQfnumber(res, "opfmethod");
6833 i_opfnamespace = PQfnumber(res, "opfnamespace");
6834 i_opfowner = PQfnumber(res, "opfowner");
6835
6836 for (i = 0; i < ntups; i++)
6837 {
6838 opfinfo[i].dobj.objType = DO_OPFAMILY;
6839 opfinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6840 opfinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6841 AssignDumpId(&opfinfo[i].dobj);
6842 opfinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opfname));
6843 opfinfo[i].dobj.namespace =
6845 opfinfo[i].opfmethod = atooid(PQgetvalue(res, i, i_opfmethod));
6846 opfinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_opfowner));
6847
6848 /* Decide whether we want to dump it */
6850 }
6851
6852 PQclear(res);
6853
6854 destroyPQExpBuffer(query);
6855}
6856
6857/*
6858 * getAggregates:
6859 * get information about all user-defined aggregates in the system catalogs
6860 *
 * Builds a version-dependent pg_proc query: on servers >= 9.6 it joins
 * pg_init_privs so that pg_catalog aggregates whose ACL differs from their
 * initial privileges are still picked up; on older servers only
 * non-pg_catalog aggregates are selected. In binary-upgrade mode both
 * branches additionally include aggregates that belong to extensions
 * (pg_depend deptype 'e'). Each result row becomes a DO_AGG dumpable
 * object carrying name, namespace, ACL info, owner, and argument types.
 *
 * NOTE(review): extraction gaps (number jumps 6861->6863, 6867->6870,
 * 6909->6911, 6944->6946, 6964->6966, 6978->6980): the signature line,
 * query-buffer declaration/creation, two appendPQExpBufferStr call heads,
 * the agginfo allocation, the namespace lookup argument, and the
 * parseOidArray call head are missing from this extract.
6860 */
6861void
6863{
6864 DumpOptions *dopt = fout->dopt;
6865 PGresult *res;
6866 int ntups;
6867 int i;
6870 int i_tableoid;
6871 int i_oid;
6872 int i_aggname;
6873 int i_aggnamespace;
6874 int i_pronargs;
6875 int i_proargtypes;
6876 int i_proowner;
6877 int i_aggacl;
6878 int i_acldefault;
6879
6880 /*
6881 * Find all interesting aggregates. See comment in getFuncs() for the
6882 * rationale behind the filtering logic.
6883 */
6884 if (fout->remoteVersion >= 90600)
6885 {
6886 const char *agg_check;
6887
 /* v11 renamed proisagg into the three-way prokind column */
6888 agg_check = (fout->remoteVersion >= 110000 ? "p.prokind = 'a'"
6889 : "p.proisagg");
6890
6891 appendPQExpBuffer(query, "SELECT p.tableoid, p.oid, "
6892 "p.proname AS aggname, "
6893 "p.pronamespace AS aggnamespace, "
6894 "p.pronargs, p.proargtypes, "
6895 "p.proowner, "
6896 "p.proacl AS aggacl, "
6897 "acldefault('f', p.proowner) AS acldefault "
6898 "FROM pg_proc p "
6899 "LEFT JOIN pg_init_privs pip ON "
6900 "(p.oid = pip.objoid "
6901 "AND pip.classoid = 'pg_proc'::regclass "
6902 "AND pip.objsubid = 0) "
6903 "WHERE %s AND ("
6904 "p.pronamespace != "
6905 "(SELECT oid FROM pg_namespace "
6906 "WHERE nspname = 'pg_catalog') OR "
6907 "p.proacl IS DISTINCT FROM pip.initprivs",
6908 agg_check);
6909 if (dopt->binary_upgrade)
6911 " OR EXISTS(SELECT 1 FROM pg_depend WHERE "
6912 "classid = 'pg_proc'::regclass AND "
6913 "objid = p.oid AND "
6914 "refclassid = 'pg_extension'::regclass AND "
6915 "deptype = 'e')");
6916 appendPQExpBufferChar(query, ')');
6917 }
6918 else
6919 {
6920 appendPQExpBufferStr(query, "SELECT tableoid, oid, proname AS aggname, "
6921 "pronamespace AS aggnamespace, "
6922 "pronargs, proargtypes, "
6923 "proowner, "
6924 "proacl AS aggacl, "
6925 "acldefault('f', proowner) AS acldefault "
6926 "FROM pg_proc p "
6927 "WHERE proisagg AND ("
6928 "pronamespace != "
6929 "(SELECT oid FROM pg_namespace "
6930 "WHERE nspname = 'pg_catalog')");
6931 if (dopt->binary_upgrade)
6933 " OR EXISTS(SELECT 1 FROM pg_depend WHERE "
6934 "classid = 'pg_proc'::regclass AND "
6935 "objid = p.oid AND "
6936 "refclassid = 'pg_extension'::regclass AND "
6937 "deptype = 'e')");
6938 appendPQExpBufferChar(query, ')');
6939 }
6940
6941 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6942
6943 ntups = PQntuples(res);
6944
6946
6947 i_tableoid = PQfnumber(res, "tableoid");
6948 i_oid = PQfnumber(res, "oid");
6949 i_aggname = PQfnumber(res, "aggname");
6950 i_aggnamespace = PQfnumber(res, "aggnamespace");
6951 i_pronargs = PQfnumber(res, "pronargs");
6952 i_proargtypes = PQfnumber(res, "proargtypes");
6953 i_proowner = PQfnumber(res, "proowner");
6954 i_aggacl = PQfnumber(res, "aggacl");
6955 i_acldefault = PQfnumber(res, "acldefault");
6956
6957 for (i = 0; i < ntups; i++)
6958 {
6959 agginfo[i].aggfn.dobj.objType = DO_AGG;
6960 agginfo[i].aggfn.dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6961 agginfo[i].aggfn.dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6962 AssignDumpId(&agginfo[i].aggfn.dobj);
6963 agginfo[i].aggfn.dobj.name = pg_strdup(PQgetvalue(res, i, i_aggname));
6964 agginfo[i].aggfn.dobj.namespace =
6966 agginfo[i].aggfn.dacl.acl = pg_strdup(PQgetvalue(res, i, i_aggacl));
6967 agginfo[i].aggfn.dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
6968 agginfo[i].aggfn.dacl.privtype = 0;
6969 agginfo[i].aggfn.dacl.initprivs = NULL;
6970 agginfo[i].aggfn.rolname = getRoleName(PQgetvalue(res, i, i_proowner));
6971 agginfo[i].aggfn.lang = InvalidOid; /* not currently interesting */
6972 agginfo[i].aggfn.prorettype = InvalidOid; /* not saved */
6973 agginfo[i].aggfn.nargs = atoi(PQgetvalue(res, i, i_pronargs));
6974 if (agginfo[i].aggfn.nargs == 0)
6975 agginfo[i].aggfn.argtypes = NULL;
6976 else
6977 {
6978 agginfo[i].aggfn.argtypes = pg_malloc_array(Oid, agginfo[i].aggfn.nargs);
6980 agginfo[i].aggfn.argtypes,
6981 agginfo[i].aggfn.nargs);
6982 }
6983 agginfo[i].aggfn.postponed_def = false; /* might get set during sort */
6984
6985 /* Decide whether we want to dump it */
6986 selectDumpableObject(&(agginfo[i].aggfn.dobj), fout);
6987
6988 /* Mark whether aggregate has an ACL */
6989 if (!PQgetisnull(res, i, i_aggacl))
6990 agginfo[i].aggfn.dobj.components |= DUMP_COMPONENT_ACL;
6991 }
6992
6993 PQclear(res);
6994
6995 destroyPQExpBuffer(query);
6996}
6997
6998/*
6999 * getFuncs:
7000 * get information about all user-defined functions in the system catalogs
7001 *
 * Collects FuncInfo entries from pg_proc, excluding aggregates and
 * internally-dependent functions (deptype 'i' — e.g. range-type
 * constructors, per the comment below). pg_catalog functions are normally
 * skipped, except those referenced by casts/transforms, extension members
 * in binary-upgrade mode, or (>= 9.6) those whose ACL differs from
 * pg_init_privs. Each row becomes a DO_FUNC dumpable object.
 *
 * NOTE(review): extraction gaps (number jumps 7002->7004, 7008->7010,
 * 7077->7081, 7087->7089, 7111->7113, 7119->7121, 7122->7124,
 * 7157->7159, 7159->7161, 7171->7173, 7181->7183): the signature line,
 * the query-buffer declaration, the OID printf arguments for the %u
 * placeholders, two appendPQExpBufferStr call heads, the namespace lookup
 * argument, the acldefault assignment, the parseOidArray call head, and
 * the DUMP_COMPONENT_ACL assignment are missing from this extract.
7001 */
7002void
7004{
7005 DumpOptions *dopt = fout->dopt;
7006 PGresult *res;
7007 int ntups;
7008 int i;
7010 FuncInfo *finfo;
7011 int i_tableoid;
7012 int i_oid;
7013 int i_proname;
7014 int i_pronamespace;
7015 int i_proowner;
7016 int i_prolang;
7017 int i_pronargs;
7018 int i_proargtypes;
7019 int i_prorettype;
7020 int i_proacl;
7021 int i_acldefault;
7022
7023 /*
7024 * Find all interesting functions. This is a bit complicated:
7025 *
7026 * 1. Always exclude aggregates; those are handled elsewhere.
7027 *
7028 * 2. Always exclude functions that are internally dependent on something
7029 * else, since presumably those will be created as a result of creating
7030 * the something else. This currently acts only to suppress constructor
7031 * functions for range types. Note this is OK only because the
7032 * constructors don't have any dependencies the range type doesn't have;
7033 * otherwise we might not get creation ordering correct.
7034 *
7035 * 3. Otherwise, we normally exclude functions in pg_catalog. However, if
7036 * they're members of extensions and we are in binary-upgrade mode then
7037 * include them, since we want to dump extension members individually in
7038 * that mode. Also, if they are used by casts or transforms then we need
7039 * to gather the information about them, though they won't be dumped if
7040 * they are built-in. Also, in 9.6 and up, include functions in
7041 * pg_catalog if they have an ACL different from what's shown in
7042 * pg_init_privs (so we have to join to pg_init_privs; annoying).
7043 */
7044 if (fout->remoteVersion >= 90600)
7045 {
7046 const char *not_agg_check;
7047
 /* v11 renamed proisagg into the three-way prokind column */
7048 not_agg_check = (fout->remoteVersion >= 110000 ? "p.prokind <> 'a'"
7049 : "NOT p.proisagg");
7050
7051 appendPQExpBuffer(query,
7052 "SELECT p.tableoid, p.oid, p.proname, p.prolang, "
7053 "p.pronargs, p.proargtypes, p.prorettype, "
7054 "p.proacl, "
7055 "acldefault('f', p.proowner) AS acldefault, "
7056 "p.pronamespace, "
7057 "p.proowner "
7058 "FROM pg_proc p "
7059 "LEFT JOIN pg_init_privs pip ON "
7060 "(p.oid = pip.objoid "
7061 "AND pip.classoid = 'pg_proc'::regclass "
7062 "AND pip.objsubid = 0) "
7063 "WHERE %s"
7064 "\n AND NOT EXISTS (SELECT 1 FROM pg_depend "
7065 "WHERE classid = 'pg_proc'::regclass AND "
7066 "objid = p.oid AND deptype = 'i')"
7067 "\n AND ("
7068 "\n pronamespace != "
7069 "(SELECT oid FROM pg_namespace "
7070 "WHERE nspname = 'pg_catalog')"
7071 "\n OR EXISTS (SELECT 1 FROM pg_cast"
7072 "\n WHERE pg_cast.oid > %u "
7073 "\n AND p.oid = pg_cast.castfunc)"
7074 "\n OR EXISTS (SELECT 1 FROM pg_transform"
7075 "\n WHERE pg_transform.oid > %u AND "
7076 "\n (p.oid = pg_transform.trffromsql"
7077 "\n OR p.oid = pg_transform.trftosql))",
7081 if (dopt->binary_upgrade)
7083 "\n OR EXISTS(SELECT 1 FROM pg_depend WHERE "
7084 "classid = 'pg_proc'::regclass AND "
7085 "objid = p.oid AND "
7086 "refclassid = 'pg_extension'::regclass AND "
7087 "deptype = 'e')");
7089 "\n OR p.proacl IS DISTINCT FROM pip.initprivs");
7090 appendPQExpBufferChar(query, ')');
7091 }
7092 else
7093 {
7094 appendPQExpBuffer(query,
7095 "SELECT tableoid, oid, proname, prolang, "
7096 "pronargs, proargtypes, prorettype, proacl, "
7097 "acldefault('f', proowner) AS acldefault, "
7098 "pronamespace, "
7099 "proowner "
7100 "FROM pg_proc p "
7101 "WHERE NOT proisagg"
7102 "\n AND NOT EXISTS (SELECT 1 FROM pg_depend "
7103 "WHERE classid = 'pg_proc'::regclass AND "
7104 "objid = p.oid AND deptype = 'i')"
7105 "\n AND ("
7106 "\n pronamespace != "
7107 "(SELECT oid FROM pg_namespace "
7108 "WHERE nspname = 'pg_catalog')"
7109 "\n OR EXISTS (SELECT 1 FROM pg_cast"
7110 "\n WHERE pg_cast.oid > '%u'::oid"
7111 "\n AND p.oid = pg_cast.castfunc)",
7113
 /* pg_transform only exists on servers >= 9.5 */
7114 if (fout->remoteVersion >= 90500)
7115 appendPQExpBuffer(query,
7116 "\n OR EXISTS (SELECT 1 FROM pg_transform"
7117 "\n WHERE pg_transform.oid > '%u'::oid"
7118 "\n AND (p.oid = pg_transform.trffromsql"
7119 "\n OR p.oid = pg_transform.trftosql))",
7121
7122 if (dopt->binary_upgrade)
7124 "\n OR EXISTS(SELECT 1 FROM pg_depend WHERE "
7125 "classid = 'pg_proc'::regclass AND "
7126 "objid = p.oid AND "
7127 "refclassid = 'pg_extension'::regclass AND "
7128 "deptype = 'e')");
7129 appendPQExpBufferChar(query, ')');
7130 }
7131
7132 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
7133
7134 ntups = PQntuples(res);
7135
7136 finfo = pg_malloc0_array(FuncInfo, ntups);
7137
7138 i_tableoid = PQfnumber(res, "tableoid");
7139 i_oid = PQfnumber(res, "oid");
7140 i_proname = PQfnumber(res, "proname");
7141 i_pronamespace = PQfnumber(res, "pronamespace");
7142 i_proowner = PQfnumber(res, "proowner");
7143 i_prolang = PQfnumber(res, "prolang");
7144 i_pronargs = PQfnumber(res, "pronargs");
7145 i_proargtypes = PQfnumber(res, "proargtypes");
7146 i_prorettype = PQfnumber(res, "prorettype");
7147 i_proacl = PQfnumber(res, "proacl");
7148 i_acldefault = PQfnumber(res, "acldefault");
7149
7150 for (i = 0; i < ntups; i++)
7151 {
7152 finfo[i].dobj.objType = DO_FUNC;
7153 finfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
7154 finfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
7155 AssignDumpId(&finfo[i].dobj);
7156 finfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_proname));
7157 finfo[i].dobj.namespace =
7159 finfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_proacl));
7161 finfo[i].dacl.privtype = 0;
7162 finfo[i].dacl.initprivs = NULL;
7163 finfo[i].rolname = getRoleName(PQgetvalue(res, i, i_proowner));
7164 finfo[i].lang = atooid(PQgetvalue(res, i, i_prolang));
7165 finfo[i].prorettype = atooid(PQgetvalue(res, i, i_prorettype));
7166 finfo[i].nargs = atoi(PQgetvalue(res, i, i_pronargs));
7167 if (finfo[i].nargs == 0)
7168 finfo[i].argtypes = NULL;
7169 else
7170 {
7171 finfo[i].argtypes = pg_malloc_array(Oid, finfo[i].nargs);
7173 finfo[i].argtypes, finfo[i].nargs);
7174 }
7175 finfo[i].postponed_def = false; /* might get set during sort */
7176
7177 /* Decide whether we want to dump it */
7178 selectDumpableObject(&(finfo[i].dobj), fout);
7179
7180 /* Mark whether function has an ACL */
7181 if (!PQgetisnull(res, i, i_proacl))
7183 }
7184
7185 PQclear(res);
7186
7187 destroyPQExpBuffer(query);
7188}
7189
7190/*
7191 * getRelationStatistics
7192 * register the statistics object as a dependent of the relation.
7193 *
7194 * reltuples is passed as a string to avoid complexities in converting from/to
7195 * floating point.
 *
 * Returns NULL when statistics dumping is disabled or the relkind is not
 * one of the supported kinds; otherwise returns a new DO_REL_STATS object
 * whose single dependency is the owning relation's dump ID.
 *
 * NOTE(review): extraction gaps (number jumps 7197->7199, 7212->7214,
 * 7219->7221, 7223->7225, 7247->7249, 7249->7251, 7253->7255): the first
 * parameter line (presumably the Archive and DumpableObject/relpages
 * arguments — confirm against upstream), the info allocation, the
 * dependencies-array allocation, and several case labels in the switch
 * are missing from this extract.
7196 */
7197static RelStatsInfo *
7199 char *reltuples, int32 relallvisible,
7200 int32 relallfrozen, char relkind,
7201 char **indAttNames, int nindAttNames)
7202{
 /* Nothing to register unless --with-statistics (or equivalent) is on */
7203 if (!fout->dopt->dumpStatistics)
7204 return NULL;
7205
7206 if ((relkind == RELKIND_RELATION) ||
7207 (relkind == RELKIND_PARTITIONED_TABLE) ||
7208 (relkind == RELKIND_INDEX) ||
7209 (relkind == RELKIND_PARTITIONED_INDEX) ||
7210 (relkind == RELKIND_MATVIEW ||
7211 relkind == RELKIND_FOREIGN_TABLE))
7212 {
7214 DumpableObject *dobj = &info->dobj;
7215
 /* Synthetic object: no catalog identity of its own */
7216 dobj->objType = DO_REL_STATS;
7217 dobj->catId.tableoid = 0;
7218 dobj->catId.oid = 0;
7219 AssignDumpId(dobj);
7221 dobj->dependencies[0] = rel->dumpId;
7222 dobj->nDeps = 1;
7223 dobj->allocDeps = 1;
7225 dobj->name = pg_strdup(rel->name);
7226 dobj->namespace = rel->namespace;
7227 info->relpages = relpages;
7228 info->reltuples = pstrdup(reltuples);
7229 info->relallvisible = relallvisible;
7230 info->relallfrozen = relallfrozen;
7231 info->relkind = relkind;
7232 info->indAttNames = indAttNames;
7233 info->nindAttNames = nindAttNames;
7234
7235 /*
7236 * Ordinarily, stats go in SECTION_DATA for tables and
7237 * SECTION_POST_DATA for indexes.
7238 *
7239 * However, the section may be updated later for materialized view
7240 * stats. REFRESH MATERIALIZED VIEW replaces the storage and resets
7241 * the stats, so the stats must be restored after the data. Also, the
7242 * materialized view definition may be postponed to SECTION_POST_DATA
7243 * (see repairMatViewBoundaryMultiLoop()).
7244 */
7245 switch (info->relkind)
7246 {
7247 case RELKIND_RELATION:
7249 case RELKIND_MATVIEW:
7251 info->section = SECTION_DATA;
7252 break;
7253 case RELKIND_INDEX:
7255 info->section = SECTION_POST_DATA;
7256 break;
7257 default:
 /* Unreachable given the relkind filter above, but be loud if hit */
7258 pg_fatal("cannot dump statistics for relation kind \"%c\"",
7259 info->relkind);
7260 }
7261
7262 return info;
7263 }
7264 return NULL;
7265}
7266
7267/*
7268 * getTables
7269 * read all the tables (no indexes) in the system catalogs,
7270 * and return them as an array of TableInfo structures
7271 *
7272 * *numTables is set to the number of tables read in
 *
 * Phase overview visible below: (1) build one big version-adaptive query
 * over pg_class with left joins to pg_depend, pg_tablespace, pg_am, and
 * the toast pg_class row; (2) allocate and fill a TableInfo array; (3)
 * decide dumpability per table and register optional statistics objects;
 * (4) batch LOCK TABLE ... IN ACCESS SHARE MODE statements for dumpable
 * plain/partitioned tables, optionally under a statement_timeout.
 *
 * NOTE(review): extraction gaps throughout (number jumps such as
 * 7280->7282, 7314->7316, 7318->7320, 7340->7342, 7480->7488,
 * 7558->7560, 7601->7603, 7651->7655, 7659->7661): the signature line,
 * the query-buffer declaration, the i_toastreloptions and
 * i_is_identity_sequence declarations, several appendPQExpBufferStr call
 * heads, the relkind IN (...) list, the timeout-value append, and a few
 * per-row assignments are missing from this extract.
7273 */
7274TableInfo *
7276{
7277 DumpOptions *dopt = fout->dopt;
7278 PGresult *res;
7279 int ntups;
7280 int i;
7282 TableInfo *tblinfo;
7283 int i_reltableoid;
7284 int i_reloid;
7285 int i_relname;
7286 int i_relnamespace;
7287 int i_relkind;
7288 int i_reltype;
7289 int i_relowner;
7290 int i_relchecks;
7291 int i_relhasindex;
7292 int i_relhasrules;
7293 int i_relpages;
7294 int i_reltuples;
7295 int i_relallvisible;
7296 int i_relallfrozen;
7297 int i_toastpages;
7298 int i_owning_tab;
7299 int i_owning_col;
7300 int i_reltablespace;
7301 int i_relhasoids;
7302 int i_relhastriggers;
7303 int i_relpersistence;
7304 int i_relispopulated;
7305 int i_relreplident;
7306 int i_relrowsec;
7307 int i_relforcerowsec;
7308 int i_relfrozenxid;
7309 int i_toastfrozenxid;
7310 int i_toastoid;
7311 int i_relminmxid;
7312 int i_toastminmxid;
7313 int i_reloptions;
7314 int i_checkoption;
7316 int i_reloftype;
7317 int i_foreignserver;
7318 int i_amname;
7320 int i_relacl;
7321 int i_acldefault;
7322 int i_ispartition;
7323
7324 /*
7325 * Find all the tables and table-like objects.
7326 *
7327 * We must fetch all tables in this phase because otherwise we cannot
7328 * correctly identify inherited columns, owned sequences, etc.
7329 *
7330 * We include system catalogs, so that we can work if a user table is
7331 * defined to inherit from a system catalog (pretty weird, but...)
7332 *
7333 * Note: in this phase we should collect only a minimal amount of
7334 * information about each table, basically just enough to decide if it is
7335 * interesting. In particular, since we do not yet have lock on any user
7336 * table, we MUST NOT invoke any server-side data collection functions
7337 * (for instance, pg_get_partkeydef()). Those are likely to fail or give
7338 * wrong answers if any concurrent DDL is happening.
7339 */
7340
7342 "SELECT c.tableoid, c.oid, c.relname, "
7343 "c.relnamespace, c.relkind, c.reltype, "
7344 "c.relowner, "
7345 "c.relchecks, "
7346 "c.relhasindex, c.relhasrules, c.relpages, "
7347 "c.reltuples, c.relallvisible, ");
7348
 /* relallfrozen column only exists on servers >= 18 */
7349 if (fout->remoteVersion >= 180000)
7350 appendPQExpBufferStr(query, "c.relallfrozen, ");
7351 else
7352 appendPQExpBufferStr(query, "0 AS relallfrozen, ");
7353
7355 "c.relhastriggers, c.relpersistence, "
7356 "c.reloftype, "
7357 "c.relacl, "
7358 "acldefault(CASE WHEN c.relkind = " CppAsString2(RELKIND_SEQUENCE)
7359 " THEN 's'::\"char\" ELSE 'r'::\"char\" END, c.relowner) AS acldefault, "
7360 "CASE WHEN c.relkind = " CppAsString2(RELKIND_FOREIGN_TABLE) " THEN "
7361 "(SELECT ftserver FROM pg_catalog.pg_foreign_table WHERE ftrelid = c.oid) "
7362 "ELSE 0 END AS foreignserver, "
7363 "c.relfrozenxid, tc.relfrozenxid AS tfrozenxid, "
7364 "tc.oid AS toid, "
7365 "tc.relpages AS toastpages, "
7366 "tc.reloptions AS toast_reloptions, "
7367 "d.refobjid AS owning_tab, "
7368 "d.refobjsubid AS owning_col, "
7369 "tsp.spcname AS reltablespace, ");
7370
 /* WITH OIDS support was removed in v12 */
7371 if (fout->remoteVersion >= 120000)
7373 "false AS relhasoids, ");
7374 else
7376 "c.relhasoids, ");
7377
7378 if (fout->remoteVersion >= 90300)
7380 "c.relispopulated, ");
7381 else
7383 "'t' as relispopulated, ");
7384
7385 if (fout->remoteVersion >= 90400)
7387 "c.relreplident, ");
7388 else
7390 "'d' AS relreplident, ");
7391
 /* Row-level security columns appeared in 9.5 */
7392 if (fout->remoteVersion >= 90500)
7394 "c.relrowsecurity, c.relforcerowsecurity, ");
7395 else
7397 "false AS relrowsecurity, "
7398 "false AS relforcerowsecurity, ");
7399
7400 if (fout->remoteVersion >= 90300)
7402 "c.relminmxid, tc.relminmxid AS tminmxid, ");
7403 else
7405 "0 AS relminmxid, 0 AS tminmxid, ");
7406
 /* Strip check_option from reloptions; it is re-emitted as WITH CHECK OPTION */
7407 if (fout->remoteVersion >= 90300)
7409 "array_remove(array_remove(c.reloptions,'check_option=local'),'check_option=cascaded') AS reloptions, "
7410 "CASE WHEN 'check_option=local' = ANY (c.reloptions) THEN 'LOCAL'::text "
7411 "WHEN 'check_option=cascaded' = ANY (c.reloptions) THEN 'CASCADED'::text ELSE NULL END AS checkoption, ");
7412 else
7414 "c.reloptions, NULL AS checkoption, ");
7415
7416 if (fout->remoteVersion >= 90600)
7418 "am.amname, ");
7419 else
7421 "NULL AS amname, ");
7422
 /* deptype 'i' on the owning-column dependency marks an identity sequence */
7423 if (fout->remoteVersion >= 90600)
7425 "(d.deptype = 'i') IS TRUE AS is_identity_sequence, ");
7426 else
7428 "false AS is_identity_sequence, ");
7429
7430 if (fout->remoteVersion >= 100000)
7432 "c.relispartition AS ispartition ");
7433 else
7435 "false AS ispartition ");
7436
7437 /*
7438 * Left join to pg_depend to pick up dependency info linking sequences to
7439 * their owning column, if any (note this dependency is AUTO except for
7440 * identity sequences, where it's INTERNAL). Also join to pg_tablespace to
7441 * collect the spcname.
7442 */
7444 "\nFROM pg_class c\n"
7445 "LEFT JOIN pg_depend d ON "
7446 "(c.relkind = " CppAsString2(RELKIND_SEQUENCE) " AND "
7447 "d.classid = 'pg_class'::regclass AND d.objid = c.oid AND "
7448 "d.objsubid = 0 AND "
7449 "d.refclassid = 'pg_class'::regclass AND d.deptype IN ('a', 'i'))\n"
7450 "LEFT JOIN pg_tablespace tsp ON (tsp.oid = c.reltablespace)\n");
7451
7452 /*
7453 * In 9.6 and up, left join to pg_am to pick up the amname.
7454 */
7455 if (fout->remoteVersion >= 90600)
7457 "LEFT JOIN pg_am am ON (c.relam = am.oid)\n");
7458
7459 /*
7460 * We purposefully ignore toast OIDs for partitioned tables; the reason is
7461 * that versions 10 and 11 have them, but later versions do not, so
7462 * emitting them causes the upgrade to fail.
7463 */
7465 "LEFT JOIN pg_class tc ON (c.reltoastrelid = tc.oid"
7466 " AND tc.relkind = " CppAsString2(RELKIND_TOASTVALUE)
7467 " AND c.relkind <> " CppAsString2(RELKIND_PARTITIONED_TABLE) ")\n");
7468
7469 /*
7470 * Restrict to interesting relkinds (in particular, not indexes). Not all
7471 * relkinds are possible in older servers, but it's not worth the trouble
7472 * to emit a version-dependent list.
7473 *
7474 * Composite-type table entries won't be dumped as such, but we have to
7475 * make a DumpableObject for them so that we can track dependencies of the
7476 * composite type (pg_depend entries for columns of the composite type
7477 * link to the pg_class entry not the pg_type entry).
7478 */
7480 "WHERE c.relkind IN ("
7488 "ORDER BY c.oid");
7489
7490 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
7491
7492 ntups = PQntuples(res);
7493
7494 *numTables = ntups;
7495
7496 /*
7497 * Extract data from result and lock dumpable tables. We do the locking
7498 * before anything else, to minimize the window wherein a table could
7499 * disappear under us.
7500 *
7501 * Note that we have to save info about all tables here, even when dumping
7502 * only one, because we don't yet know which tables might be inheritance
7503 * ancestors of the target table.
7504 */
7505 tblinfo = pg_malloc0_array(TableInfo, ntups);
7506
7507 i_reltableoid = PQfnumber(res, "tableoid");
7508 i_reloid = PQfnumber(res, "oid");
7509 i_relname = PQfnumber(res, "relname");
7510 i_relnamespace = PQfnumber(res, "relnamespace");
7511 i_relkind = PQfnumber(res, "relkind");
7512 i_reltype = PQfnumber(res, "reltype");
7513 i_relowner = PQfnumber(res, "relowner");
7514 i_relchecks = PQfnumber(res, "relchecks");
7515 i_relhasindex = PQfnumber(res, "relhasindex");
7516 i_relhasrules = PQfnumber(res, "relhasrules");
7517 i_relpages = PQfnumber(res, "relpages");
7518 i_reltuples = PQfnumber(res, "reltuples");
7519 i_relallvisible = PQfnumber(res, "relallvisible");
7520 i_relallfrozen = PQfnumber(res, "relallfrozen");
7521 i_toastpages = PQfnumber(res, "toastpages");
7522 i_owning_tab = PQfnumber(res, "owning_tab");
7523 i_owning_col = PQfnumber(res, "owning_col");
7524 i_reltablespace = PQfnumber(res, "reltablespace");
7525 i_relhasoids = PQfnumber(res, "relhasoids");
7526 i_relhastriggers = PQfnumber(res, "relhastriggers");
7527 i_relpersistence = PQfnumber(res, "relpersistence");
7528 i_relispopulated = PQfnumber(res, "relispopulated");
7529 i_relreplident = PQfnumber(res, "relreplident");
7530 i_relrowsec = PQfnumber(res, "relrowsecurity");
7531 i_relforcerowsec = PQfnumber(res, "relforcerowsecurity");
7532 i_relfrozenxid = PQfnumber(res, "relfrozenxid");
7533 i_toastfrozenxid = PQfnumber(res, "tfrozenxid");
7534 i_toastoid = PQfnumber(res, "toid");
7535 i_relminmxid = PQfnumber(res, "relminmxid");
7536 i_toastminmxid = PQfnumber(res, "tminmxid");
7537 i_reloptions = PQfnumber(res, "reloptions");
7538 i_checkoption = PQfnumber(res, "checkoption");
7539 i_toastreloptions = PQfnumber(res, "toast_reloptions");
7540 i_reloftype = PQfnumber(res, "reloftype");
7541 i_foreignserver = PQfnumber(res, "foreignserver");
7542 i_amname = PQfnumber(res, "amname");
7543 i_is_identity_sequence = PQfnumber(res, "is_identity_sequence");
7544 i_relacl = PQfnumber(res, "relacl");
7545 i_acldefault = PQfnumber(res, "acldefault");
7546 i_ispartition = PQfnumber(res, "ispartition");
7547
7548 if (dopt->lockWaitTimeout)
7549 {
7550 /*
7551 * Arrange to fail instead of waiting forever for a table lock.
7552 *
7553 * NB: this coding assumes that the only queries issued within the
7554 * following loop are LOCK TABLEs; else the timeout may be undesirably
7555 * applied to other things too.
7556 */
7557 resetPQExpBuffer(query);
7558 appendPQExpBufferStr(query, "SET statement_timeout = ");
7560 ExecuteSqlStatement(fout, query->data);
7561 }
7562
 /* query buffer is reused below to accumulate batched LOCK TABLE statements */
7563 resetPQExpBuffer(query);
7564
7565 for (i = 0; i < ntups; i++)
7566 {
7567 int32 relallvisible = atoi(PQgetvalue(res, i, i_relallvisible));
7568 int32 relallfrozen = atoi(PQgetvalue(res, i, i_relallfrozen));
7569
7570 tblinfo[i].dobj.objType = DO_TABLE;
7571 tblinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_reltableoid));
7572 tblinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_reloid));
7573 AssignDumpId(&tblinfo[i].dobj);
7574 tblinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_relname));
7575 tblinfo[i].dobj.namespace =
7577 tblinfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_relacl));
7578 tblinfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
7579 tblinfo[i].dacl.privtype = 0;
7580 tblinfo[i].dacl.initprivs = NULL;
7581 tblinfo[i].relkind = *(PQgetvalue(res, i, i_relkind));
7582 tblinfo[i].reltype = atooid(PQgetvalue(res, i, i_reltype));
7583 tblinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_relowner));
7584 tblinfo[i].ncheck = atoi(PQgetvalue(res, i, i_relchecks));
7585 tblinfo[i].hasindex = (strcmp(PQgetvalue(res, i, i_relhasindex), "t") == 0);
7586 tblinfo[i].hasrules = (strcmp(PQgetvalue(res, i, i_relhasrules), "t") == 0);
7587 tblinfo[i].relpages = atoi(PQgetvalue(res, i, i_relpages));
7588 if (PQgetisnull(res, i, i_toastpages))
7589 tblinfo[i].toastpages = 0;
7590 else
7591 tblinfo[i].toastpages = atoi(PQgetvalue(res, i, i_toastpages));
7592 if (PQgetisnull(res, i, i_owning_tab))
7593 {
7594 tblinfo[i].owning_tab = InvalidOid;
7595 tblinfo[i].owning_col = 0;
7596 }
7597 else
7598 {
7599 tblinfo[i].owning_tab = atooid(PQgetvalue(res, i, i_owning_tab));
7600 tblinfo[i].owning_col = atoi(PQgetvalue(res, i, i_owning_col));
7601 }
7603 tblinfo[i].hasoids = (strcmp(PQgetvalue(res, i, i_relhasoids), "t") == 0);
7604 tblinfo[i].hastriggers = (strcmp(PQgetvalue(res, i, i_relhastriggers), "t") == 0);
7605 tblinfo[i].relpersistence = *(PQgetvalue(res, i, i_relpersistence));
7606 tblinfo[i].relispopulated = (strcmp(PQgetvalue(res, i, i_relispopulated), "t") == 0);
7607 tblinfo[i].relreplident = *(PQgetvalue(res, i, i_relreplident));
7608 tblinfo[i].rowsec = (strcmp(PQgetvalue(res, i, i_relrowsec), "t") == 0);
7609 tblinfo[i].forcerowsec = (strcmp(PQgetvalue(res, i, i_relforcerowsec), "t") == 0);
7610 tblinfo[i].frozenxid = atooid(PQgetvalue(res, i, i_relfrozenxid));
7612 tblinfo[i].toast_oid = atooid(PQgetvalue(res, i, i_toastoid));
7613 tblinfo[i].minmxid = atooid(PQgetvalue(res, i, i_relminmxid));
7614 tblinfo[i].toast_minmxid = atooid(PQgetvalue(res, i, i_toastminmxid));
7615 tblinfo[i].reloptions = pg_strdup(PQgetvalue(res, i, i_reloptions));
7616 if (PQgetisnull(res, i, i_checkoption))
7617 tblinfo[i].checkoption = NULL;
7618 else
7619 tblinfo[i].checkoption = pg_strdup(PQgetvalue(res, i, i_checkoption));
7621 tblinfo[i].reloftype = atooid(PQgetvalue(res, i, i_reloftype));
7623 if (PQgetisnull(res, i, i_amname))
7624 tblinfo[i].amname = NULL;
7625 else
7626 tblinfo[i].amname = pg_strdup(PQgetvalue(res, i, i_amname));
7627 tblinfo[i].is_identity_sequence = (strcmp(PQgetvalue(res, i, i_is_identity_sequence), "t") == 0);
7628 tblinfo[i].ispartition = (strcmp(PQgetvalue(res, i, i_ispartition), "t") == 0);
7629
7630 /* other fields were zeroed above */
7631
7632 /*
7633 * Decide whether we want to dump this table.
7634 */
7635 if (tblinfo[i].relkind == RELKIND_COMPOSITE_TYPE)
7636 tblinfo[i].dobj.dump = DUMP_COMPONENT_NONE;
7637 else
7638 selectDumpableTable(&tblinfo[i], fout);
7639
7640 /*
7641 * Now, consider the table "interesting" if we need to dump its
7642 * definition, data or its statistics. Later on, we'll skip a lot of
7643 * data collection for uninteresting tables.
7644 *
7645 * Note: the "interesting" flag will also be set by flagInhTables for
7646 * parents of interesting tables, so that we collect necessary
7647 * inheritance info even when the parents are not themselves being
7648 * dumped. This is the main reason why we need an "interesting" flag
7649 * that's separate from the components-to-dump bitmask.
7650 */
7651 tblinfo[i].interesting = (tblinfo[i].dobj.dump &
7655
7656 tblinfo[i].dummy_view = false; /* might get set during sort */
7657 tblinfo[i].postponed_def = false; /* might get set during sort */
7658
7659 /* Tables have data */
7661
7662 /* Mark whether table has an ACL */
7663 if (!PQgetisnull(res, i, i_relacl))
7664 tblinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
7665 tblinfo[i].hascolumnACLs = false; /* may get set later */
7666
7667 /* Add statistics */
7668 if (tblinfo[i].interesting)
7669 {
7670 RelStatsInfo *stats;
7671
7672 stats = getRelationStatistics(fout, &tblinfo[i].dobj,
7673 tblinfo[i].relpages,
7674 PQgetvalue(res, i, i_reltuples),
7675 relallvisible, relallfrozen,
7676 tblinfo[i].relkind, NULL, 0);
 /* matview stats may need their section fixed up later; remember them */
7677 if (tblinfo[i].relkind == RELKIND_MATVIEW)
7678 tblinfo[i].stats = stats;
7679 }
7680
7681 /*
7682 * Read-lock target tables to make sure they aren't DROPPED or altered
7683 * in schema before we get around to dumping them.
7684 *
7685 * Note that we don't explicitly lock parents of the target tables; we
7686 * assume our lock on the child is enough to prevent schema
7687 * alterations to parent tables.
7688 *
7689 * NOTE: it'd be kinda nice to lock other relations too, not only
7690 * plain or partitioned tables, but the backend doesn't presently
7691 * allow that.
7692 *
7693 * We only need to lock the table for certain components; see
7694 * pg_dump.h
7695 */
7696 if ((tblinfo[i].dobj.dump & DUMP_COMPONENTS_REQUIRING_LOCK) &&
7697 (tblinfo[i].relkind == RELKIND_RELATION ||
7698 tblinfo[i].relkind == RELKIND_PARTITIONED_TABLE))
7699 {
7700 /*
7701 * Tables are locked in batches. When dumping from a remote
7702 * server this can save a significant amount of time by reducing
7703 * the number of round trips.
7704 */
7705 if (query->len == 0)
7706 appendPQExpBuffer(query, "LOCK TABLE %s",
7707 fmtQualifiedDumpable(&tblinfo[i]));
7708 else
7709 {
7710 appendPQExpBuffer(query, ", %s",
7711 fmtQualifiedDumpable(&tblinfo[i]));
7712
7713 /* Arbitrarily end a batch when query length reaches 100K. */
7714 if (query->len >= 100000)
7715 {
7716 /* Lock another batch of tables. */
7717 appendPQExpBufferStr(query, " IN ACCESS SHARE MODE");
7718 ExecuteSqlStatement(fout, query->data);
7719 resetPQExpBuffer(query);
7720 }
7721 }
7722 }
7723 }
7724
7725 if (query->len != 0)
7726 {
7727 /* Lock the tables in the last batch. */
7728 appendPQExpBufferStr(query, " IN ACCESS SHARE MODE");
7729 ExecuteSqlStatement(fout, query->data);
7730 }
7731
7732 if (dopt->lockWaitTimeout)
7733 {
 /* Undo the temporary statement_timeout set before the lock loop */
7734 ExecuteSqlStatement(fout, "SET statement_timeout = 0");
7735 }
7736
7737 PQclear(res);
7738
7739 destroyPQExpBuffer(query);
7740
7741 return tblinfo;
7742}
7743
7744/*
7745 * getOwnedSeqs
7746 * identify owned sequences and mark them as dumpable if owning table is
7747 *
7748 * We used to do this in getTables(), but it's better to do it after the
7749 * index used by findTableByOid() has been set up.
7750 */
/*
 * NOTE(review): this listing is an HTML/doxygen extraction of pg_dump.c;
 * each line is prefixed with its upstream line number, and lines made of
 * hyperlinked identifiers were dropped (gaps in the numbering).  Here the
 * parameter list on upstream line 7752 is missing — presumably
 * getOwnedSeqs(Archive *fout, TableInfo tblinfo[], int numTables); confirm
 * against upstream pg_dump.h.  Code is kept byte-for-byte; comments only.
 */
7751void
7753{
7754 int i;
7755
7756 /*
7757 * Force sequences that are "owned" by table columns to be dumped whenever
7758 * their owning table is being dumped.
7759 */
7760 for (i = 0; i < numTables; i++)
7761 {
7762 TableInfo *seqinfo = &tblinfo[i];
7763 TableInfo *owning_tab;
7764
7765 if (!OidIsValid(seqinfo->owning_tab))
7766 continue; /* not an owned sequence */
7767
/* The owning table must already be in the findTableByOid() index. */
7768 owning_tab = findTableByOid(seqinfo->owning_tab);
7769 if (owning_tab == NULL)
7770 pg_fatal("failed sanity check, parent table with OID %u of sequence with OID %u not found",
7771 seqinfo->owning_tab, seqinfo->dobj.catId.oid);
7772
7773 /*
7774 * For an identity sequence, dump exactly the same components for the
7775 * sequence as for the owning table. This is important because we
7776 * treat the identity sequence as an integral part of the table. For
7777 * example, there is not any DDL command that allows creation of such
7778 * a sequence independently of the table.
7779 *
7780 * For other owned sequences such as serial sequences, we need to dump
7781 * the components that are being dumped for the table and any
7782 * components that the sequence is explicitly marked with.
7783 *
7784 * We can't simply use the set of components which are being dumped
7785 * for the table as the table might be in an extension (and only the
7786 * non-extension components, eg: ACLs if changed, security labels, and
7787 * policies, are being dumped) while the sequence is not (and
7788 * therefore the definition and other components should also be
7789 * dumped).
7790 *
7791 * If the sequence is part of the extension then it should be properly
7792 * marked by checkExtensionMembership() and this will be a no-op as
7793 * the table will be equivalently marked.
7794 */
/* Identity sequence: copy the mask; other owned sequence: OR it in. */
7795 if (seqinfo->is_identity_sequence)
7796 seqinfo->dobj.dump = owning_tab->dobj.dump;
7797 else
7798 seqinfo->dobj.dump |= owning_tab->dobj.dump;
7799
7800 /* Make sure that necessary data is available if we're dumping it */
7801 if (seqinfo->dobj.dump != DUMP_COMPONENT_NONE)
7802 {
7803 seqinfo->interesting = true;
7804 owning_tab->interesting = true;
7805 }
7806 }
7807}
7808
7809/*
7810 * getInherits
7811 * read all the inheritance information
7812 * from the system catalogs return them in the InhInfo* structure
7813 *
7814 * numInherits is set to the number of pairs read in
7815 */
/*
 * NOTE(review): extraction dropped upstream lines 7817 (the parameter list,
 * presumably (Archive *fout, int *numInherits)), 7822-7823 (the
 * createPQExpBuffer() for 'query'), and 7837 (the pg_malloc of 'inhinfo').
 * Code is kept byte-for-byte; comments only.
 */
7816InhInfo *
7818{
7819 PGresult *res;
7820 int ntups;
7821 int i;
7824
7825 int i_inhrelid;
7826 int i_inhparent;
7827
7828 /* find all the inheritance information */
7829 appendPQExpBufferStr(query, "SELECT inhrelid, inhparent FROM pg_inherits");
7830
7831 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
7832
7833 ntups = PQntuples(res);
7834
/* Report the pair count to the caller via the out-parameter. */
7835 *numInherits = ntups;
7836
7838
7839 i_inhrelid = PQfnumber(res, "inhrelid");
7840 i_inhparent = PQfnumber(res, "inhparent");
7841
/* Copy each (child, parent) OID pair into the result array. */
7842 for (i = 0; i < ntups; i++)
7843 {
7844 inhinfo[i].inhrelid = atooid(PQgetvalue(res, i, i_inhrelid));
7845 inhinfo[i].inhparent = atooid(PQgetvalue(res, i, i_inhparent));
7846 }
7847
7848 PQclear(res);
7849
7850 destroyPQExpBuffer(query);
7851
7852 return inhinfo;
7853}
7854
7855/*
7856 * getPartitioningInfo
7857 * get information about partitioning
7858 *
7859 * For the most part, we only collect partitioning info about tables we
7860 * intend to dump. However, this function has to consider all partitioned
7861 * tables in the database, because we need to know about parents of partitions
7862 * we are going to dump even if the parents themselves won't be dumped.
7863 *
7864 * Specifically, what we need to know is whether each partitioned table
7865 * has an "unsafe" partitioning scheme that requires us to force
7866 * load-via-partition-root mode for its children. Currently the only case
7867 * for which we force that is hash partitioning on enum columns, since the
7868 * hash codes depend on enum value OIDs which won't be replicated across
7869 * dump-and-reload. There are other cases in which load-via-partition-root
7870 * might be necessary, but we expect users to cope with them.
7871 */
/*
 * NOTE(review): extraction dropped upstream lines 7873 (the parameter list,
 * presumably (Archive *fout)), 7896 (the appendPQExpBufferStr(query, ...)
 * opener for the SQL below), 7911 (the TableInfo *tbinfo declaration), and
 * 7913 (presumably tbinfo = findTableByOid(tabrelid)).  Code is kept
 * byte-for-byte; comments only.
 */
7872void
7874{
7875 PQExpBuffer query;
7876 PGresult *res;
7877 int ntups;
7878
7879 /* hash partitioning didn't exist before v11 */
7880 if (fout->remoteVersion < 110000)
7881 return;
7882 /* needn't bother if not dumping data */
7883 if (!fout->dopt->dumpData)
7884 return;
7885
7886 query = createPQExpBuffer();
7887
7888 /*
7889 * Unsafe partitioning schemes are exactly those for which hash enum_ops
7890 * appears among the partition opclasses. We needn't check partstrat.
7891 *
7892 * Note that this query may well retrieve info about tables we aren't
7893 * going to dump and hence have no lock on. That's okay since we need not
7894 * invoke any unsafe server-side functions.
7895 */
7897 "SELECT partrelid FROM pg_partitioned_table WHERE\n"
7898 "(SELECT c.oid FROM pg_opclass c JOIN pg_am a "
7899 "ON c.opcmethod = a.oid\n"
7900 "WHERE opcname = 'enum_ops' "
7901 "AND opcnamespace = 'pg_catalog'::regnamespace "
7902 "AND amname = 'hash') = ANY(partclass)");
7903
7904 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
7905
7906 ntups = PQntuples(res);
7907
/* Flag every partitioned table whose scheme requires load-via-root. */
7908 for (int i = 0; i < ntups; i++)
7909 {
7910 Oid tabrelid = atooid(PQgetvalue(res, i, 0));
7912
7914 if (tbinfo == NULL)
7915 pg_fatal("failed sanity check, table OID %u appearing in pg_partitioned_table not found",
7916 tabrelid);
7917 tbinfo->unsafe_partitions = true;
7918 }
7919
7920 PQclear(res);
7921
7922 destroyPQExpBuffer(query);
7923}
7924
7925/*
7926 * getIndexes
7927 * get information about every index on a dumpable table
7928 *
7929 * Note: index data is not returned directly to the caller, but it
7930 * does get entered into the DumpableObject tables.
7931 */
/*
 * NOTE(review): extraction dropped many upstream lines here: 7933 (parameter
 * list, presumably (Archive *fout, TableInfo tblinfo[], int numTables)),
 * 7935-7936 (query/tbloids buffer declarations), several members of the
 * i_* index-variable declarator list, 7979/7995/7998/8000 (tbloids setup and
 * separator/SELECT openers), the appendPQExpBufferStr(query, ...) openers
 * before each version-dependent fragment (8010, 8028, 8031, 8035, 8048,
 * 8056, 8059, 8063, 8066), 8149 (the indxinfo allocation), 8158-8159 (loop
 * locals, incl. 'indrelid' and 'tbinfo'), 8195 (relstats declaration), 8216
 * (parseOidArray call opener), 8228/8230 (indexkind assignments), 8234
 * (parsePGArray call opener), and 8251/8253 (ConstraintInfo allocation).
 * Code is kept byte-for-byte; comments only.
 */
7932void
7934{
7937 PGresult *res;
7938 int ntups;
7939 int curtblindx;
7941 int i_tableoid,
7942 i_oid,
7943 i_indrelid,
7945 i_relpages,
7950 i_indexdef,
7952 i_indnatts,
7953 i_indkey,
7957 i_contype,
7958 i_conname,
7963 i_conoid,
7964 i_condef,
7970
7971 /*
7972 * We want to perform just one query against pg_index. However, we
7973 * mustn't try to select every row of the catalog and then sort it out on
7974 * the client side, because some of the server-side functions we need
7975 * would be unsafe to apply to tables we don't have lock on. Hence, we
7976 * build an array of the OIDs of tables we care about (and now have lock
7977 * on!), and use a WHERE clause to constrain which rows are selected.
7978 */
7980 for (int i = 0; i < numTables; i++)
7981 {
7982 TableInfo *tbinfo = &tblinfo[i];
7983
7984 if (!tbinfo->hasindex)
7985 continue;
7986
7987 /*
7988 * We can ignore indexes of uninteresting tables.
7989 */
7990 if (!tbinfo->interesting)
7991 continue;
7992
7993 /* OK, we need info for this table */
7994 if (tbloids->len > 1) /* do we have more than the '{'? */
7996 appendPQExpBuffer(tbloids, "%u", tbinfo->dobj.catId.oid);
7997 }
7999
8001 "SELECT t.tableoid, t.oid, i.indrelid, "
8002 "t.relname AS indexname, "
8003 "t.relpages, t.reltuples, t.relallvisible, ");
8004
/* relallfrozen column only exists on v18+ servers. */
8005 if (fout->remoteVersion >= 180000)
8006 appendPQExpBufferStr(query, "t.relallfrozen, ");
8007 else
8008 appendPQExpBufferStr(query, "0 AS relallfrozen, ");
8009
8011 "pg_catalog.pg_get_indexdef(i.indexrelid) AS indexdef, "
8012 "i.indkey, i.indisclustered, "
8013 "c.contype, c.conname, "
8014 "c.condeferrable, c.condeferred, "
8015 "c.tableoid AS contableoid, "
8016 "c.oid AS conoid, "
8017 "pg_catalog.pg_get_constraintdef(c.oid, false) AS condef, "
8018 "CASE WHEN i.indexprs IS NOT NULL THEN "
8019 "(SELECT pg_catalog.array_agg(attname ORDER BY attnum)"
8020 " FROM pg_catalog.pg_attribute "
8021 " WHERE attrelid = i.indexrelid) "
8022 "ELSE NULL END AS indattnames, "
8023 "(SELECT spcname FROM pg_catalog.pg_tablespace s WHERE s.oid = t.reltablespace) AS tablespace, "
8024 "t.reloptions AS indreloptions, ");
8025
8026
8027 if (fout->remoteVersion >= 90400)
8029 "i.indisreplident, ");
8030 else
8032 "false AS indisreplident, ");
8033
8034 if (fout->remoteVersion >= 110000)
8036 "inh.inhparent AS parentidx, "
8037 "i.indnkeyatts AS indnkeyatts, "
8038 "i.indnatts AS indnatts, "
8039 "(SELECT pg_catalog.array_agg(attnum ORDER BY attnum) "
8040 " FROM pg_catalog.pg_attribute "
8041 " WHERE attrelid = i.indexrelid AND "
8042 " attstattarget >= 0) AS indstatcols, "
8043 "(SELECT pg_catalog.array_agg(attstattarget ORDER BY attnum) "
8044 " FROM pg_catalog.pg_attribute "
8045 " WHERE attrelid = i.indexrelid AND "
8046 " attstattarget >= 0) AS indstatvals, ");
8047 else
8049 "0 AS parentidx, "
8050 "i.indnatts AS indnkeyatts, "
8051 "i.indnatts AS indnatts, "
8052 "'' AS indstatcols, "
8053 "'' AS indstatvals, ");
8054
8055 if (fout->remoteVersion >= 150000)
8057 "i.indnullsnotdistinct, ");
8058 else
8060 "false AS indnullsnotdistinct, ");
8061
8062 if (fout->remoteVersion >= 180000)
8064 "c.conperiod ");
8065 else
8067 "NULL AS conperiod ");
8068
8069 /*
8070 * The point of the messy-looking outer join is to find a constraint that
8071 * is related by an internal dependency link to the index. If we find one,
8072 * create a CONSTRAINT entry linked to the INDEX entry. We assume an
8073 * index won't have more than one internal dependency.
8074 *
8075 * Note: the check on conrelid is redundant, but useful because that
8076 * column is indexed while conindid is not.
8077 */
8078 if (fout->remoteVersion >= 110000)
8079 {
8080 appendPQExpBuffer(query,
8081 "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
8082 "JOIN pg_catalog.pg_index i ON (src.tbloid = i.indrelid) "
8083 "JOIN pg_catalog.pg_class t ON (t.oid = i.indexrelid) "
8084 "JOIN pg_catalog.pg_class t2 ON (t2.oid = i.indrelid) "
8085 "LEFT JOIN pg_catalog.pg_constraint c "
8086 "ON (i.indrelid = c.conrelid AND "
8087 "i.indexrelid = c.conindid AND "
8088 "c.contype IN ('p','u','x')) "
8089 "LEFT JOIN pg_catalog.pg_inherits inh "
8090 "ON (inh.inhrelid = indexrelid) "
8091 "WHERE (i.indisvalid OR t2.relkind = 'p') "
8092 "AND i.indisready "
8093 "ORDER BY i.indrelid, indexname",
8094 tbloids->data);
8095 }
8096 else
8097 {
8098 /*
8099 * the test on indisready is necessary in 9.2, and harmless in
8100 * earlier/later versions
8101 */
8102 appendPQExpBuffer(query,
8103 "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
8104 "JOIN pg_catalog.pg_index i ON (src.tbloid = i.indrelid) "
8105 "JOIN pg_catalog.pg_class t ON (t.oid = i.indexrelid) "
8106 "LEFT JOIN pg_catalog.pg_constraint c "
8107 "ON (i.indrelid = c.conrelid AND "
8108 "i.indexrelid = c.conindid AND "
8109 "c.contype IN ('p','u','x')) "
8110 "WHERE i.indisvalid AND i.indisready "
8111 "ORDER BY i.indrelid, indexname",
8112 tbloids->data);
8113 }
8114
8115 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
8116
8117 ntups = PQntuples(res);
8118
/* Resolve result-column indexes once, before the per-row loop. */
8119 i_tableoid = PQfnumber(res, "tableoid");
8120 i_oid = PQfnumber(res, "oid");
8121 i_indrelid = PQfnumber(res, "indrelid");
8122 i_indexname = PQfnumber(res, "indexname");
8123 i_relpages = PQfnumber(res, "relpages");
8124 i_reltuples = PQfnumber(res, "reltuples");
8125 i_relallvisible = PQfnumber(res, "relallvisible");
8126 i_relallfrozen = PQfnumber(res, "relallfrozen");
8127 i_parentidx = PQfnumber(res, "parentidx");
8128 i_indexdef = PQfnumber(res, "indexdef");
8129 i_indnkeyatts = PQfnumber(res, "indnkeyatts");
8130 i_indnatts = PQfnumber(res, "indnatts");
8131 i_indkey = PQfnumber(res, "indkey");
8132 i_indisclustered = PQfnumber(res, "indisclustered");
8133 i_indisreplident = PQfnumber(res, "indisreplident");
8134 i_indnullsnotdistinct = PQfnumber(res, "indnullsnotdistinct");
8135 i_contype = PQfnumber(res, "contype");
8136 i_conname = PQfnumber(res, "conname");
8137 i_condeferrable = PQfnumber(res, "condeferrable");
8138 i_condeferred = PQfnumber(res, "condeferred");
8139 i_conperiod = PQfnumber(res, "conperiod");
8140 i_contableoid = PQfnumber(res, "contableoid");
8141 i_conoid = PQfnumber(res, "conoid");
8142 i_condef = PQfnumber(res, "condef");
8143 i_indattnames = PQfnumber(res, "indattnames");
8144 i_tablespace = PQfnumber(res, "tablespace");
8145 i_indreloptions = PQfnumber(res, "indreloptions");
8146 i_indstatcols = PQfnumber(res, "indstatcols");
8147 i_indstatvals = PQfnumber(res, "indstatvals");
8148
8150
8151 /*
8152 * Outer loop iterates once per table, not once per row. Incrementing of
8153 * j is handled by the inner loop.
8154 */
8155 curtblindx = -1;
8156 for (int j = 0; j < ntups;)
8157 {
8160 char **indAttNames = NULL;
8161 int nindAttNames = 0;
8162 int numinds;
8163
8164 /* Count rows for this table */
8165 for (numinds = 1; numinds < ntups - j; numinds++)
8166 if (atooid(PQgetvalue(res, j + numinds, i_indrelid)) != indrelid)
8167 break;
8168
8169 /*
8170 * Locate the associated TableInfo; we rely on tblinfo[] being in OID
8171 * order.
8172 */
8173 while (++curtblindx < numTables)
8174 {
8175 tbinfo = &tblinfo[curtblindx];
8176 if (tbinfo->dobj.catId.oid == indrelid)
8177 break;
8178 }
8179 if (curtblindx >= numTables)
8180 pg_fatal("unrecognized table OID %u", indrelid);
8181 /* cross-check that we only got requested tables */
8182 if (!tbinfo->hasindex ||
8183 !tbinfo->interesting)
8184 pg_fatal("unexpected index data for table \"%s\"",
8185 tbinfo->dobj.name);
8186
8187 /* Save data for this table */
8188 tbinfo->indexes = indxinfo + j;
8189 tbinfo->numIndexes = numinds;
8190
/* Inner loop: one pass per index row belonging to this table. */
8191 for (int c = 0; c < numinds; c++, j++)
8192 {
8193 char contype;
8194 char indexkind;
8196 int32 relpages = atoi(PQgetvalue(res, j, i_relpages));
8197 int32 relallvisible = atoi(PQgetvalue(res, j, i_relallvisible));
8198 int32 relallfrozen = atoi(PQgetvalue(res, j, i_relallfrozen));
8199
8200 indxinfo[j].dobj.objType = DO_INDEX;
8201 indxinfo[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_tableoid));
8202 indxinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
8203 AssignDumpId(&indxinfo[j].dobj);
8204 indxinfo[j].dobj.dump = tbinfo->dobj.dump;
8205 indxinfo[j].dobj.name = pg_strdup(PQgetvalue(res, j, i_indexname));
8206 indxinfo[j].dobj.namespace = tbinfo->dobj.namespace;
8207 indxinfo[j].indextable = tbinfo;
8208 indxinfo[j].indexdef = pg_strdup(PQgetvalue(res, j, i_indexdef));
8209 indxinfo[j].indnkeyattrs = atoi(PQgetvalue(res, j, i_indnkeyatts));
8210 indxinfo[j].indnattrs = atoi(PQgetvalue(res, j, i_indnatts));
8211 indxinfo[j].tablespace = pg_strdup(PQgetvalue(res, j, i_tablespace));
8212 indxinfo[j].indreloptions = pg_strdup(PQgetvalue(res, j, i_indreloptions));
8213 indxinfo[j].indstatcols = pg_strdup(PQgetvalue(res, j, i_indstatcols));
8214 indxinfo[j].indstatvals = pg_strdup(PQgetvalue(res, j, i_indstatvals));
8215 indxinfo[j].indkeys = pg_malloc_array(Oid, indxinfo[j].indnattrs);
8217 indxinfo[j].indkeys, indxinfo[j].indnattrs);
8218 indxinfo[j].indisclustered = (PQgetvalue(res, j, i_indisclustered)[0] == 't');
8219 indxinfo[j].indisreplident = (PQgetvalue(res, j, i_indisreplident)[0] == 't');
8220 indxinfo[j].indnullsnotdistinct = (PQgetvalue(res, j, i_indnullsnotdistinct)[0] == 't');
8221 indxinfo[j].parentidx = atooid(PQgetvalue(res, j, i_parentidx));
8222 indxinfo[j].partattaches = (SimplePtrList)
8223 {
8224 NULL, NULL
8225 };
8226
/* Dropped lines 8228/8230 presumably set indexkind per parentidx. */
8227 if (indxinfo[j].parentidx == 0)
8229 else
8231
8232 if (!PQgetisnull(res, j, i_indattnames))
8233 {
8235 &indAttNames, &nindAttNames))
8236 pg_fatal("could not parse %s array", "indattnames");
8237 }
8238
8239 relstats = getRelationStatistics(fout, &indxinfo[j].dobj, relpages,
8240 PQgetvalue(res, j, i_reltuples),
8241 relallvisible, relallfrozen, indexkind,
8242 indAttNames, nindAttNames);
8243
8244 contype = *(PQgetvalue(res, j, i_contype));
8245 if (contype == 'p' || contype == 'u' || contype == 'x')
8246 {
8247 /*
8248 * If we found a constraint matching the index, create an
8249 * entry for it.
8250 */
8252
8254 constrinfo->dobj.objType = DO_CONSTRAINT;
8255 constrinfo->dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_contableoid));
8256 constrinfo->dobj.catId.oid = atooid(PQgetvalue(res, j, i_conoid));
8257 AssignDumpId(&constrinfo->dobj);
8258 constrinfo->dobj.dump = tbinfo->dobj.dump;
8259 constrinfo->dobj.name = pg_strdup(PQgetvalue(res, j, i_conname));
8260 constrinfo->dobj.namespace = tbinfo->dobj.namespace;
8261 constrinfo->contable = tbinfo;
8262 constrinfo->condomain = NULL;
8263 constrinfo->contype = contype;
8264 if (contype == 'x')
8265 constrinfo->condef = pg_strdup(PQgetvalue(res, j, i_condef));
8266 else
8267 constrinfo->condef = NULL;
8268 constrinfo->confrelid = InvalidOid;
8269 constrinfo->conindex = indxinfo[j].dobj.dumpId;
8270 constrinfo->condeferrable = *(PQgetvalue(res, j, i_condeferrable)) == 't';
8271 constrinfo->condeferred = *(PQgetvalue(res, j, i_condeferred)) == 't';
8272 constrinfo->conperiod = *(PQgetvalue(res, j, i_conperiod)) == 't';
8273 constrinfo->conislocal = true;
8274 constrinfo->separate = true;
8275
8276 indxinfo[j].indexconstraint = constrinfo->dobj.dumpId;
8277 if (relstats != NULL)
8278 addObjectDependency(&relstats->dobj, constrinfo->dobj.dumpId);
8279 }
8280 else
8281 {
8282 /* Plain secondary index */
8283 indxinfo[j].indexconstraint = 0;
8284 }
8285 }
8286 }
8287
8288 PQclear(res);
8289
8290 destroyPQExpBuffer(query);
8292}
8293
8294/*
8295 * getExtendedStatistics
8296 * get information about extended-statistics objects.
8297 *
8298 * Note: extended statistics data is not returned directly to the caller, but
8299 * it does get entered into the DumpableObject tables.
8300 */
/*
 * NOTE(review): extraction dropped upstream lines 8302 (parameter list,
 * presumably (Archive *fout)), 8306 (StatsExtInfo declaration), 8344-8345
 * (the statsextinfo allocation), 8354 and 8357 (the right-hand sides of the
 * namespace/stattable assignments, presumably findNamespace()/
 * findTableByOid() calls), and 8364-8365 (the dumpable-object decision
 * call).  Code is kept byte-for-byte; comments only.
 */
8301void
8303{
8304 PQExpBuffer query;
8305 PGresult *res;
8307 int ntups;
8308 int i_tableoid;
8309 int i_oid;
8310 int i_stxname;
8311 int i_stxnamespace;
8312 int i_stxowner;
8313 int i_stxrelid;
8314 int i_stattarget;
8315 int i;
8316
8317 /* Extended statistics were new in v10 */
8318 if (fout->remoteVersion < 100000)
8319 return;
8320
8321 query = createPQExpBuffer();
8322
/* stxstattarget column only exists on v13+ servers; select NULL before. */
8323 if (fout->remoteVersion < 130000)
8324 appendPQExpBufferStr(query, "SELECT tableoid, oid, stxname, "
8325 "stxnamespace, stxowner, stxrelid, NULL AS stxstattarget "
8326 "FROM pg_catalog.pg_statistic_ext");
8327 else
8328 appendPQExpBufferStr(query, "SELECT tableoid, oid, stxname, "
8329 "stxnamespace, stxowner, stxrelid, stxstattarget "
8330 "FROM pg_catalog.pg_statistic_ext");
8331
8332 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
8333
8334 ntups = PQntuples(res);
8335
8336 i_tableoid = PQfnumber(res, "tableoid");
8337 i_oid = PQfnumber(res, "oid");
8338 i_stxname = PQfnumber(res, "stxname");
8339 i_stxnamespace = PQfnumber(res, "stxnamespace");
8340 i_stxowner = PQfnumber(res, "stxowner");
8341 i_stxrelid = PQfnumber(res, "stxrelid");
8342 i_stattarget = PQfnumber(res, "stxstattarget");
8343
8345
8346 for (i = 0; i < ntups; i++)
8347 {
8348 statsextinfo[i].dobj.objType = DO_STATSEXT;
8349 statsextinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
8350 statsextinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
8351 AssignDumpId(&statsextinfo[i].dobj);
8352 statsextinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_stxname));
8353 statsextinfo[i].dobj.namespace =
8355 statsextinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_stxowner));
8356 statsextinfo[i].stattable =
/* NULL stxstattarget (pre-v13 or unset) maps to -1 = "default". */
8358 if (PQgetisnull(res, i, i_stattarget))
8359 statsextinfo[i].stattarget = -1;
8360 else
8361 statsextinfo[i].stattarget = atoi(PQgetvalue(res, i, i_stattarget));
8362
8363 /* Decide whether we want to dump it */
8365
8366 if (fout->dopt->dumpStatistics)
8367 statsextinfo[i].dobj.components |= DUMP_COMPONENT_STATISTICS;
8368 }
8369
8370 PQclear(res);
8371 destroyPQExpBuffer(query);
8372}
8373
8374/*
8375 * getConstraints
8376 *
8377 * Get info about constraints on dumpable tables.
8378 *
8379 * Currently handles foreign keys only.
8380 * Unique and primary key constraints are handled with indexes,
8381 * while check constraints are processed in getTableAttrs().
8382 */
/*
 * NOTE(review): extraction dropped upstream lines 8384 (parameter list,
 * presumably (Archive *fout, TableInfo tblinfo[], int numTables)),
 * 8386-8387 (query/tbloids buffer declarations), 8391-8392 (loop locals,
 * incl. 'tbinfo'), part of the i_* declarator list (8397, i_confrelid),
 * 8409 (tbloids createPQExpBuffer), 8419 (the separator append), 8422/8424
 * (closing brace append and SELECT opener), 8438/8440 (appendPQExpBufferStr
 * openers), 8455 (constrinfo allocation), 8461 (TableInfo *reftable
 * declaration — presumably, see use below), 8511 (IndxInfo *refidx
 * declaration), and 8518 (the addConstrChildIdxDeps call opener).  Code is
 * kept byte-for-byte; comments only.
 */
8383void
8385{
8388 PGresult *res;
8389 int ntups;
8390 int curtblindx;
8393 int i_contableoid,
8394 i_conoid,
8395 i_conrelid,
8396 i_conname,
8398 i_conindid,
8399 i_condef;
8400
8401 /*
8402 * We want to perform just one query against pg_constraint. However, we
8403 * mustn't try to select every row of the catalog and then sort it out on
8404 * the client side, because some of the server-side functions we need
8405 * would be unsafe to apply to tables we don't have lock on. Hence, we
8406 * build an array of the OIDs of tables we care about (and now have lock
8407 * on!), and use a WHERE clause to constrain which rows are selected.
8408 */
8410 for (int i = 0; i < numTables; i++)
8411 {
8412 TableInfo *tinfo = &tblinfo[i];
8413
8414 if (!(tinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
8415 continue;
8416
8417 /* OK, we need info for this table */
8418 if (tbloids->len > 1) /* do we have more than the '{'? */
8420 appendPQExpBuffer(tbloids, "%u", tinfo->dobj.catId.oid);
8421 }
8423
8425 "SELECT c.tableoid, c.oid, "
8426 "conrelid, conname, confrelid, ");
/* conindid column only exists on v11+ servers. */
8427 if (fout->remoteVersion >= 110000)
8428 appendPQExpBufferStr(query, "conindid, ");
8429 else
8430 appendPQExpBufferStr(query, "0 AS conindid, ");
8431 appendPQExpBuffer(query,
8432 "pg_catalog.pg_get_constraintdef(c.oid) AS condef\n"
8433 "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
8434 "JOIN pg_catalog.pg_constraint c ON (src.tbloid = c.conrelid)\n"
8435 "WHERE contype = 'f' ",
8436 tbloids->data);
8437 if (fout->remoteVersion >= 110000)
8439 "AND conparentid = 0 ");
8441 "ORDER BY conrelid, conname");
8442
8443 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
8444
8445 ntups = PQntuples(res);
8446
8447 i_contableoid = PQfnumber(res, "tableoid");
8448 i_conoid = PQfnumber(res, "oid");
8449 i_conrelid = PQfnumber(res, "conrelid");
8450 i_conname = PQfnumber(res, "conname");
8451 i_confrelid = PQfnumber(res, "confrelid");
8452 i_conindid = PQfnumber(res, "conindid");
8453 i_condef = PQfnumber(res, "condef");
8454
8456
8457 curtblindx = -1;
8458 for (int j = 0; j < ntups; j++)
8459 {
8460 Oid conrelid = atooid(PQgetvalue(res, j, i_conrelid));
8462
8463 /*
8464 * Locate the associated TableInfo; we rely on tblinfo[] being in OID
8465 * order.
8466 */
8467 if (tbinfo == NULL || tbinfo->dobj.catId.oid != conrelid)
8468 {
8469 while (++curtblindx < numTables)
8470 {
8471 tbinfo = &tblinfo[curtblindx];
8472 if (tbinfo->dobj.catId.oid == conrelid)
8473 break;
8474 }
8475 if (curtblindx >= numTables)
8476 pg_fatal("unrecognized table OID %u", conrelid);
8477 }
8478
8479 constrinfo[j].dobj.objType = DO_FK_CONSTRAINT;
8480 constrinfo[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_contableoid));
8481 constrinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_conoid));
8482 AssignDumpId(&constrinfo[j].dobj);
8483 constrinfo[j].dobj.name = pg_strdup(PQgetvalue(res, j, i_conname));
8484 constrinfo[j].dobj.namespace = tbinfo->dobj.namespace;
8485 constrinfo[j].contable = tbinfo;
8486 constrinfo[j].condomain = NULL;
8487 constrinfo[j].contype = 'f';
8488 constrinfo[j].condef = pg_strdup(PQgetvalue(res, j, i_condef));
8489 constrinfo[j].confrelid = atooid(PQgetvalue(res, j, i_confrelid));
8490 constrinfo[j].conindex = 0;
8491 constrinfo[j].condeferrable = false;
8492 constrinfo[j].condeferred = false;
8493 constrinfo[j].conislocal = true;
8494 constrinfo[j].separate = true;
8495
8496 /*
8497 * Restoring an FK that points to a partitioned table requires that
8498 * all partition indexes have been attached beforehand. Ensure that
8499 * happens by making the constraint depend on each index partition
8500 * attach object.
8501 */
8502 reftable = findTableByOid(constrinfo[j].confrelid);
8503 if (reftable && reftable->relkind == RELKIND_PARTITIONED_TABLE)
8504 {
8505 Oid indexOid = atooid(PQgetvalue(res, j, i_conindid));
8506
8507 if (indexOid != InvalidOid)
8508 {
8509 for (int k = 0; k < reftable->numIndexes; k++)
8510 {
8512
8513 /* not our index? */
8514 if (reftable->indexes[k].dobj.catId.oid != indexOid)
8515 continue;
8516
8517 refidx = &reftable->indexes[k];
8519 break;
8520 }
8521 }
8522 }
8523 }
8524
8525 PQclear(res);
8526
8527 destroyPQExpBuffer(query);
8529}
8530
8531/*
8532 * addConstrChildIdxDeps
8533 *
8534 * Recursive subroutine for getConstraints
8535 *
8536 * Given an object representing a foreign key constraint and an index on the
8537 * partitioned table it references, mark the constraint object as dependent
8538 * on the DO_INDEX_ATTACH object of each index partition, recursively
8539 * drilling down to their partitions if any. This ensures that the FK is not
8540 * restored until the index is fully marked valid.
8541 */
/*
 * NOTE(review): extraction dropped upstream lines 8543 (parameter list,
 * presumably (DumpableObject *dobj, const IndxInfo *refidx)), 8547
 * (presumably an Assert on dobj's type), and 8551 (the per-cell
 * declaration/assignment of 'attach' from cell->ptr).  Code is kept
 * byte-for-byte; comments only.
 */
8542static void
8544{
8545 SimplePtrListCell *cell;
8546
8548
/* Walk this index's attach list; recurse into sub-partitioned indexes. */
8549 for (cell = refidx->partattaches.head; cell; cell = cell->next)
8550 {
8552
8553 addObjectDependency(dobj, attach->dobj.dumpId);
8554
8555 if (attach->partitionIdx->partattaches.head != NULL)
8556 addConstrChildIdxDeps(dobj, attach->partitionIdx);
8557 }
8558}
8559
8560/*
8561 * getDomainConstraints
8562 *
8563 * Get info about constraints on a domain.
8564 */
/*
 * NOTE(review): extraction dropped upstream lines 8566 (parameter list,
 * presumably (Archive *fout, TypeInfo *tyinfo)), 8568-8569 (query buffer
 * declaration/creation), 8575 (i_convalidated in the declarator list), 8579
 * (the have-we-prepared-yet condition guarding the PREPARE block), 8598
 * (presumably the flag recording that the statement is prepared), and 8616
 * (the constrinfo allocation, sized for ntups entries).  Code is kept
 * byte-for-byte; comments only.
 */
8565static void
8567{
8570 PGresult *res;
8571 int i_tableoid,
8572 i_oid,
8573 i_conname,
8574 i_consrc,
8576 i_contype;
8577 int ntups;
8578
8580 {
8581 /*
8582 * Set up query for constraint-specific details. For servers 17 and
8583 * up, domains have constraints of type 'n' as well as 'c', otherwise
8584 * just the latter.
8585 */
8586 appendPQExpBuffer(query,
8587 "PREPARE getDomainConstraints(pg_catalog.oid) AS\n"
8588 "SELECT tableoid, oid, conname, "
8589 "pg_catalog.pg_get_constraintdef(oid) AS consrc, "
8590 "convalidated, contype "
8591 "FROM pg_catalog.pg_constraint "
8592 "WHERE contypid = $1 AND contype IN (%s) "
8593 "ORDER BY conname",
8594 fout->remoteVersion < 170000 ? "'c'" : "'c', 'n'");
8595
8596 ExecuteSqlStatement(fout, query->data);
8597
8599 }
8600
/* Execute the (once-)prepared statement for this particular domain. */
8601 printfPQExpBuffer(query,
8602 "EXECUTE getDomainConstraints('%u')",
8603 tyinfo->dobj.catId.oid);
8604
8605 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
8606
8607 ntups = PQntuples(res);
8608
8609 i_tableoid = PQfnumber(res, "tableoid");
8610 i_oid = PQfnumber(res, "oid");
8611 i_conname = PQfnumber(res, "conname");
8612 i_consrc = PQfnumber(res, "consrc");
8613 i_convalidated = PQfnumber(res, "convalidated");
8614 i_contype = PQfnumber(res, "contype");
8615
8617 tyinfo->domChecks = constrinfo;
8618
8619 /* 'i' tracks result rows; 'j' counts CHECK constraints */
8620 for (int i = 0, j = 0; i < ntups; i++)
8621 {
8622 bool validated = PQgetvalue(res, i, i_convalidated)[0] == 't';
8623 char contype = (PQgetvalue(res, i, i_contype))[0];
8624 ConstraintInfo *constraint;
8625
8626 if (contype == CONSTRAINT_CHECK)
8627 {
8628 constraint = &constrinfo[j++];
8629 tyinfo->nDomChecks++;
8630 }
8631 else
8632 {
8633 Assert(contype == CONSTRAINT_NOTNULL);
8634 Assert(tyinfo->notnull == NULL);
8635 /* use last item in array for the not-null constraint */
8636 tyinfo->notnull = &(constrinfo[ntups - 1]);
8637 constraint = tyinfo->notnull;
8638 }
8639
8640 constraint->dobj.objType = DO_CONSTRAINT;
8641 constraint->dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
8642 constraint->dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
8643 AssignDumpId(&(constraint->dobj));
8644 constraint->dobj.name = pg_strdup(PQgetvalue(res, i, i_conname));
8645 constraint->dobj.namespace = tyinfo->dobj.namespace;
8646 constraint->contable = NULL;
8647 constraint->condomain = tyinfo;
8648 constraint->contype = contype;
8649 constraint->condef = pg_strdup(PQgetvalue(res, i, i_consrc));
8650 constraint->confrelid = InvalidOid;
8651 constraint->conindex = 0;
8652 constraint->condeferrable = false;
8653 constraint->condeferred = false;
8654 constraint->conislocal = true;
8655
/* Unvalidated constraints are dumped separately, after the domain. */
8656 constraint->separate = !validated;
8657
8658 /*
8659 * Make the domain depend on the constraint, ensuring it won't be
8660 * output till any constraint dependencies are OK. If the constraint
8661 * has not been validated, it's going to be dumped after the domain
8662 * anyway, so this doesn't matter.
8663 */
8664 if (validated)
8665 addObjectDependency(&tyinfo->dobj, constraint->dobj.dumpId);
8666 }
8667
8668 PQclear(res);
8669
8670 destroyPQExpBuffer(query);
8671}
8672
8673/*
8674 * getRules
8675 * get basic information about every rule in the system
8676 */
/*
 * NOTE(review): extraction dropped upstream lines 8678 (parameter list,
 * presumably (Archive *fout)), 8683-8684 (RuleInfo pointer and query buffer
 * declarations), 8704 (the ruleinfo allocation), 8716 (the per-row
 * 'ruletableoid' declaration/assignment — see its use below), 8723
 * (presumably the atooid() extraction feeding findTableByOid), and 8753
 * (the addObjectDependency(&ruleinfo[i].dobj, ... opener for the
 * rule-after-table dependency).  Code is kept byte-for-byte; comments only.
 */
8677void
8679{
8680 PGresult *res;
8681 int ntups;
8682 int i;
8685 int i_tableoid;
8686 int i_oid;
8687 int i_rulename;
8688 int i_ruletable;
8689 int i_ev_type;
8690 int i_is_instead;
8691 int i_ev_enabled;
8692
8693 appendPQExpBufferStr(query, "SELECT "
8694 "tableoid, oid, rulename, "
8695 "ev_class AS ruletable, ev_type, is_instead, "
8696 "ev_enabled "
8697 "FROM pg_rewrite "
8698 "ORDER BY oid");
8699
8700 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
8701
8702 ntups = PQntuples(res);
8703
8705
8706 i_tableoid = PQfnumber(res, "tableoid");
8707 i_oid = PQfnumber(res, "oid");
8708 i_rulename = PQfnumber(res, "rulename");
8709 i_ruletable = PQfnumber(res, "ruletable");
8710 i_ev_type = PQfnumber(res, "ev_type");
8711 i_is_instead = PQfnumber(res, "is_instead");
8712 i_ev_enabled = PQfnumber(res, "ev_enabled");
8713
8714 for (i = 0; i < ntups; i++)
8715 {
8717
8718 ruleinfo[i].dobj.objType = DO_RULE;
8719 ruleinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
8720 ruleinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
8721 AssignDumpId(&ruleinfo[i].dobj);
8722 ruleinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_rulename));
8724 ruleinfo[i].ruletable = findTableByOid(ruletableoid);
8725 if (ruleinfo[i].ruletable == NULL)
8726 pg_fatal("failed sanity check, parent table with OID %u of pg_rewrite entry with OID %u not found",
8727 ruletableoid, ruleinfo[i].dobj.catId.oid);
/* Rule inherits namespace and dump mask from its owning table. */
8728 ruleinfo[i].dobj.namespace = ruleinfo[i].ruletable->dobj.namespace;
8729 ruleinfo[i].dobj.dump = ruleinfo[i].ruletable->dobj.dump;
8730 ruleinfo[i].ev_type = *(PQgetvalue(res, i, i_ev_type));
8731 ruleinfo[i].is_instead = *(PQgetvalue(res, i, i_is_instead)) == 't';
8732 ruleinfo[i].ev_enabled = *(PQgetvalue(res, i, i_ev_enabled));
8733 if (ruleinfo[i].ruletable)
8734 {
8735 /*
8736 * If the table is a view or materialized view, force its ON
8737 * SELECT rule to be sorted before the view itself --- this
8738 * ensures that any dependencies for the rule affect the table's
8739 * positioning. Other rules are forced to appear after their
8740 * table.
8741 */
8742 if ((ruleinfo[i].ruletable->relkind == RELKIND_VIEW ||
8743 ruleinfo[i].ruletable->relkind == RELKIND_MATVIEW) &&
8744 ruleinfo[i].ev_type == '1' && ruleinfo[i].is_instead)
8745 {
8746 addObjectDependency(&ruleinfo[i].ruletable->dobj,
8747 ruleinfo[i].dobj.dumpId);
8748 /* We'll merge the rule into CREATE VIEW, if possible */
8749 ruleinfo[i].separate = false;
8750 }
8751 else
8752 {
8754 ruleinfo[i].ruletable->dobj.dumpId);
8755 ruleinfo[i].separate = true;
8756 }
8757 }
8758 else
8759 ruleinfo[i].separate = true;
8760 }
8761
8762 PQclear(res);
8763
8764 destroyPQExpBuffer(query);
8765}
8766
8767/*
8768 * getTriggers
8769 * get information about every trigger on a dumpable table
8770 *
8771 * Note: trigger data is not returned directly to the caller, but it
8772 * does get entered into the DumpableObject tables.
8773 */
8774void
8776{
8779 PGresult *res;
8780 int ntups;
8781 int curtblindx;
8783 int i_tableoid,
8784 i_oid,
8785 i_tgrelid,
8786 i_tgname,
8789 i_tgdef;
8790
8791 /*
8792 * We want to perform just one query against pg_trigger. However, we
8793 * mustn't try to select every row of the catalog and then sort it out on
8794 * the client side, because some of the server-side functions we need
8795 * would be unsafe to apply to tables we don't have lock on. Hence, we
8796 * build an array of the OIDs of tables we care about (and now have lock
8797 * on!), and use a WHERE clause to constrain which rows are selected.
8798 */
8800 for (int i = 0; i < numTables; i++)
8801 {
8802 TableInfo *tbinfo = &tblinfo[i];
8803
8804 if (!tbinfo->hastriggers ||
8805 !(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
8806 continue;
8807
8808 /* OK, we need info for this table */
8809 if (tbloids->len > 1) /* do we have more than the '{'? */
8811 appendPQExpBuffer(tbloids, "%u", tbinfo->dobj.catId.oid);
8812 }
8814
8815 if (fout->remoteVersion >= 150000)
8816 {
8817 /*
8818 * NB: think not to use pretty=true in pg_get_triggerdef. It could
8819 * result in non-forward-compatible dumps of WHEN clauses due to
8820 * under-parenthesization.
8821 *
8822 * NB: We need to see partition triggers in case the tgenabled flag
8823 * has been changed from the parent.
8824 */
8825 appendPQExpBuffer(query,
8826 "SELECT t.tgrelid, t.tgname, "
8827 "pg_catalog.pg_get_triggerdef(t.oid, false) AS tgdef, "
8828 "t.tgenabled, t.tableoid, t.oid, "
8829 "t.tgparentid <> 0 AS tgispartition\n"
8830 "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
8831 "JOIN pg_catalog.pg_trigger t ON (src.tbloid = t.tgrelid) "
8832 "LEFT JOIN pg_catalog.pg_trigger u ON (u.oid = t.tgparentid) "
8833 "WHERE ((NOT t.tgisinternal AND t.tgparentid = 0) "
8834 "OR t.tgenabled != u.tgenabled) "
8835 "ORDER BY t.tgrelid, t.tgname",
8836 tbloids->data);
8837 }
8838 else if (fout->remoteVersion >= 130000)
8839 {
8840 /*
8841 * NB: think not to use pretty=true in pg_get_triggerdef. It could
8842 * result in non-forward-compatible dumps of WHEN clauses due to
8843 * under-parenthesization.
8844 *
8845 * NB: We need to see tgisinternal triggers in partitions, in case the
8846 * tgenabled flag has been changed from the parent.
8847 */
8848 appendPQExpBuffer(query,
8849 "SELECT t.tgrelid, t.tgname, "
8850 "pg_catalog.pg_get_triggerdef(t.oid, false) AS tgdef, "
8851 "t.tgenabled, t.tableoid, t.oid, t.tgisinternal as tgispartition\n"
8852 "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
8853 "JOIN pg_catalog.pg_trigger t ON (src.tbloid = t.tgrelid) "
8854 "LEFT JOIN pg_catalog.pg_trigger u ON (u.oid = t.tgparentid) "
8855 "WHERE (NOT t.tgisinternal OR t.tgenabled != u.tgenabled) "
8856 "ORDER BY t.tgrelid, t.tgname",
8857 tbloids->data);
8858 }
8859 else if (fout->remoteVersion >= 110000)
8860 {
8861 /*
8862 * NB: We need to see tgisinternal triggers in partitions, in case the
8863 * tgenabled flag has been changed from the parent. No tgparentid in
8864 * version 11-12, so we have to match them via pg_depend.
8865 *
8866 * See above about pretty=true in pg_get_triggerdef.
8867 */
8868 appendPQExpBuffer(query,
8869 "SELECT t.tgrelid, t.tgname, "
8870 "pg_catalog.pg_get_triggerdef(t.oid, false) AS tgdef, "
8871 "t.tgenabled, t.tableoid, t.oid, t.tgisinternal as tgispartition "
8872 "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
8873 "JOIN pg_catalog.pg_trigger t ON (src.tbloid = t.tgrelid) "
8874 "LEFT JOIN pg_catalog.pg_depend AS d ON "
8875 " d.classid = 'pg_catalog.pg_trigger'::pg_catalog.regclass AND "
8876 " d.refclassid = 'pg_catalog.pg_trigger'::pg_catalog.regclass AND "
8877 " d.objid = t.oid "
8878 "LEFT JOIN pg_catalog.pg_trigger AS pt ON pt.oid = refobjid "
8879 "WHERE (NOT t.tgisinternal OR t.tgenabled != pt.tgenabled) "
8880 "ORDER BY t.tgrelid, t.tgname",
8881 tbloids->data);
8882 }
8883 else
8884 {
8885 /* See above about pretty=true in pg_get_triggerdef */
8886 appendPQExpBuffer(query,
8887 "SELECT t.tgrelid, t.tgname, "
8888 "pg_catalog.pg_get_triggerdef(t.oid, false) AS tgdef, "
8889 "t.tgenabled, false as tgispartition, "
8890 "t.tableoid, t.oid "
8891 "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
8892 "JOIN pg_catalog.pg_trigger t ON (src.tbloid = t.tgrelid) "
8893 "WHERE NOT tgisinternal "
8894 "ORDER BY t.tgrelid, t.tgname",
8895 tbloids->data);
8896 }
8897
8898 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
8899
8900 ntups = PQntuples(res);
8901
8902 i_tableoid = PQfnumber(res, "tableoid");
8903 i_oid = PQfnumber(res, "oid");
8904 i_tgrelid = PQfnumber(res, "tgrelid");
8905 i_tgname = PQfnumber(res, "tgname");
8906 i_tgenabled = PQfnumber(res, "tgenabled");
8907 i_tgispartition = PQfnumber(res, "tgispartition");
8908 i_tgdef = PQfnumber(res, "tgdef");
8909
8911
8912 /*
8913 * Outer loop iterates once per table, not once per row. Incrementing of
8914 * j is handled by the inner loop.
8915 */
8916 curtblindx = -1;
8917 for (int j = 0; j < ntups;)
8918 {
8921 int numtrigs;
8922
8923 /* Count rows for this table */
8924 for (numtrigs = 1; numtrigs < ntups - j; numtrigs++)
8925 if (atooid(PQgetvalue(res, j + numtrigs, i_tgrelid)) != tgrelid)
8926 break;
8927
8928 /*
8929 * Locate the associated TableInfo; we rely on tblinfo[] being in OID
8930 * order.
8931 */
8932 while (++curtblindx < numTables)
8933 {
8934 tbinfo = &tblinfo[curtblindx];
8935 if (tbinfo->dobj.catId.oid == tgrelid)
8936 break;
8937 }
8938 if (curtblindx >= numTables)
8939 pg_fatal("unrecognized table OID %u", tgrelid);
8940
8941 /* Save data for this table */
8942 tbinfo->triggers = tginfo + j;
8943 tbinfo->numTriggers = numtrigs;
8944
8945 for (int c = 0; c < numtrigs; c++, j++)
8946 {
8947 tginfo[j].dobj.objType = DO_TRIGGER;
8948 tginfo[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_tableoid));
8949 tginfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
8950 AssignDumpId(&tginfo[j].dobj);
8951 tginfo[j].dobj.name = pg_strdup(PQgetvalue(res, j, i_tgname));
8952 tginfo[j].dobj.namespace = tbinfo->dobj.namespace;
8953 tginfo[j].tgtable = tbinfo;
8954 tginfo[j].tgenabled = *(PQgetvalue(res, j, i_tgenabled));
8955 tginfo[j].tgispartition = *(PQgetvalue(res, j, i_tgispartition)) == 't';
8956 tginfo[j].tgdef = pg_strdup(PQgetvalue(res, j, i_tgdef));
8957 }
8958 }
8959
8960 PQclear(res);
8961
8962 destroyPQExpBuffer(query);
8964}
8965
8966/*
8967 * getEventTriggers
8968 * get information about event triggers
8969 */
8970void
8972{
8973 int i;
8974 PQExpBuffer query;
8975 PGresult *res;
8977 int i_tableoid,
8978 i_oid,
8979 i_evtname,
8980 i_evtevent,
8981 i_evtowner,
8982 i_evttags,
8983 i_evtfname,
8985 int ntups;
8986
8987 /* Before 9.3, there are no event triggers */
8988 if (fout->remoteVersion < 90300)
8989 return;
8990
8991 query = createPQExpBuffer();
8992
8994 "SELECT e.tableoid, e.oid, evtname, evtenabled, "
8995 "evtevent, evtowner, "
8996 "array_to_string(array("
8997 "select quote_literal(x) "
8998 " from unnest(evttags) as t(x)), ', ') as evttags, "
8999 "e.evtfoid::regproc as evtfname "
9000 "FROM pg_event_trigger e "
9001 "ORDER BY e.oid");
9002
9003 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
9004
9005 ntups = PQntuples(res);
9006
9008
9009 i_tableoid = PQfnumber(res, "tableoid");
9010 i_oid = PQfnumber(res, "oid");
9011 i_evtname = PQfnumber(res, "evtname");
9012 i_evtevent = PQfnumber(res, "evtevent");
9013 i_evtowner = PQfnumber(res, "evtowner");
9014 i_evttags = PQfnumber(res, "evttags");
9015 i_evtfname = PQfnumber(res, "evtfname");
9016 i_evtenabled = PQfnumber(res, "evtenabled");
9017
9018 for (i = 0; i < ntups; i++)
9019 {
9020 evtinfo[i].dobj.objType = DO_EVENT_TRIGGER;
9021 evtinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
9022 evtinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
9023 AssignDumpId(&evtinfo[i].dobj);
9024 evtinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_evtname));
9025 evtinfo[i].evtname = pg_strdup(PQgetvalue(res, i, i_evtname));
9026 evtinfo[i].evtevent = pg_strdup(PQgetvalue(res, i, i_evtevent));
9027 evtinfo[i].evtowner = getRoleName(PQgetvalue(res, i, i_evtowner));
9028 evtinfo[i].evttags = pg_strdup(PQgetvalue(res, i, i_evttags));
9029 evtinfo[i].evtfname = pg_strdup(PQgetvalue(res, i, i_evtfname));
9030 evtinfo[i].evtenabled = *(PQgetvalue(res, i, i_evtenabled));
9031
9032 /* Decide whether we want to dump it */
9034 }
9035
9036 PQclear(res);
9037
9038 destroyPQExpBuffer(query);
9039}
9040
9041/*
9042 * getProcLangs
9043 * get basic information about every procedural language in the system
9044 *
9045 * NB: this must run after getFuncs() because we assume we can do
9046 * findFuncByOid().
9047 */
9048void
9050{
9051 PGresult *res;
9052 int ntups;
9053 int i;
9056 int i_tableoid;
9057 int i_oid;
9058 int i_lanname;
9059 int i_lanpltrusted;
9060 int i_lanplcallfoid;
9061 int i_laninline;
9062 int i_lanvalidator;
9063 int i_lanacl;
9064 int i_acldefault;
9065 int i_lanowner;
9066
9067 appendPQExpBufferStr(query, "SELECT tableoid, oid, "
9068 "lanname, lanpltrusted, lanplcallfoid, "
9069 "laninline, lanvalidator, "
9070 "lanacl, "
9071 "acldefault('l', lanowner) AS acldefault, "
9072 "lanowner "
9073 "FROM pg_language "
9074 "WHERE lanispl "
9075 "ORDER BY oid");
9076
9077 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
9078
9079 ntups = PQntuples(res);
9080
9082
9083 i_tableoid = PQfnumber(res, "tableoid");
9084 i_oid = PQfnumber(res, "oid");
9085 i_lanname = PQfnumber(res, "lanname");
9086 i_lanpltrusted = PQfnumber(res, "lanpltrusted");
9087 i_lanplcallfoid = PQfnumber(res, "lanplcallfoid");
9088 i_laninline = PQfnumber(res, "laninline");
9089 i_lanvalidator = PQfnumber(res, "lanvalidator");
9090 i_lanacl = PQfnumber(res, "lanacl");
9091 i_acldefault = PQfnumber(res, "acldefault");
9092 i_lanowner = PQfnumber(res, "lanowner");
9093
9094 for (i = 0; i < ntups; i++)
9095 {
9096 planginfo[i].dobj.objType = DO_PROCLANG;
9097 planginfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
9098 planginfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
9099 AssignDumpId(&planginfo[i].dobj);
9100
9101 planginfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_lanname));
9102 planginfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_lanacl));
9103 planginfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
9104 planginfo[i].dacl.privtype = 0;
9105 planginfo[i].dacl.initprivs = NULL;
9106 planginfo[i].lanpltrusted = *(PQgetvalue(res, i, i_lanpltrusted)) == 't';
9107 planginfo[i].lanplcallfoid = atooid(PQgetvalue(res, i, i_lanplcallfoid));
9108 planginfo[i].laninline = atooid(PQgetvalue(res, i, i_laninline));
9109 planginfo[i].lanvalidator = atooid(PQgetvalue(res, i, i_lanvalidator));
9110 planginfo[i].lanowner = getRoleName(PQgetvalue(res, i, i_lanowner));
9111
9112 /* Decide whether we want to dump it */
9114
9115 /* Mark whether language has an ACL */
9116 if (!PQgetisnull(res, i, i_lanacl))
9117 planginfo[i].dobj.components |= DUMP_COMPONENT_ACL;
9118 }
9119
9120 PQclear(res);
9121
9122 destroyPQExpBuffer(query);
9123}
9124
9125/*
9126 * getCasts
9127 * get basic information about most casts in the system
9128 *
9129 * Skip casts from a range to its multirange, since we'll create those
9130 * automatically.
9131 */
9132void
9134{
9135 PGresult *res;
9136 int ntups;
9137 int i;
9140 int i_tableoid;
9141 int i_oid;
9142 int i_castsource;
9143 int i_casttarget;
9144 int i_castfunc;
9145 int i_castcontext;
9146 int i_castmethod;
9147
9148 if (fout->remoteVersion >= 140000)
9149 {
9150 appendPQExpBufferStr(query, "SELECT tableoid, oid, "
9151 "castsource, casttarget, castfunc, castcontext, "
9152 "castmethod "
9153 "FROM pg_cast c "
9154 "WHERE NOT EXISTS ( "
9155 "SELECT 1 FROM pg_range r "
9156 "WHERE c.castsource = r.rngtypid "
9157 "AND c.casttarget = r.rngmultitypid "
9158 ") "
9159 "ORDER BY 3,4");
9160 }
9161 else
9162 {
9163 appendPQExpBufferStr(query, "SELECT tableoid, oid, "
9164 "castsource, casttarget, castfunc, castcontext, "
9165 "castmethod "
9166 "FROM pg_cast ORDER BY 3,4");
9167 }
9168
9169 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
9170
9171 ntups = PQntuples(res);
9172
9174
9175 i_tableoid = PQfnumber(res, "tableoid");
9176 i_oid = PQfnumber(res, "oid");
9177 i_castsource = PQfnumber(res, "castsource");
9178 i_casttarget = PQfnumber(res, "casttarget");
9179 i_castfunc = PQfnumber(res, "castfunc");
9180 i_castcontext = PQfnumber(res, "castcontext");
9181 i_castmethod = PQfnumber(res, "castmethod");
9182
9183 for (i = 0; i < ntups; i++)
9184 {
9188
9190 castinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
9191 castinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
9192 AssignDumpId(&castinfo[i].dobj);
9193 castinfo[i].castsource = atooid(PQgetvalue(res, i, i_castsource));
9194 castinfo[i].casttarget = atooid(PQgetvalue(res, i, i_casttarget));
9195 castinfo[i].castfunc = atooid(PQgetvalue(res, i, i_castfunc));
9196 castinfo[i].castcontext = *(PQgetvalue(res, i, i_castcontext));
9197 castinfo[i].castmethod = *(PQgetvalue(res, i, i_castmethod));
9198
9199 /*
9200 * Try to name cast as concatenation of typnames. This is only used
9201 * for purposes of sorting. If we fail to find either type, the name
9202 * will be an empty string.
9203 */
9205 sTypeInfo = findTypeByOid(castinfo[i].castsource);
9206 tTypeInfo = findTypeByOid(castinfo[i].casttarget);
9207 if (sTypeInfo && tTypeInfo)
9208 appendPQExpBuffer(&namebuf, "%s %s",
9209 sTypeInfo->dobj.name, tTypeInfo->dobj.name);
9210 castinfo[i].dobj.name = namebuf.data;
9211
9212 /* Decide whether we want to dump it */
9214 }
9215
9216 PQclear(res);
9217
9218 destroyPQExpBuffer(query);
9219}
9220
9221static char *
9223{
9224 PQExpBuffer query;
9225 PGresult *res;
9226 char *lanname;
9227
9228 query = createPQExpBuffer();
9229 appendPQExpBuffer(query, "SELECT lanname FROM pg_language WHERE oid = %u", langid);
9230 res = ExecuteSqlQueryForSingleRow(fout, query->data);
9231 lanname = pg_strdup(fmtId(PQgetvalue(res, 0, 0)));
9232 destroyPQExpBuffer(query);
9233 PQclear(res);
9234
9235 return lanname;
9236}
9237
9238/*
9239 * getTransforms
9240 * get basic information about every transform in the system
9241 */
9242void
9244{
9245 PGresult *res;
9246 int ntups;
9247 int i;
9248 PQExpBuffer query;
9250 int i_tableoid;
9251 int i_oid;
9252 int i_trftype;
9253 int i_trflang;
9254 int i_trffromsql;
9255 int i_trftosql;
9256
9257 /* Transforms didn't exist pre-9.5 */
9258 if (fout->remoteVersion < 90500)
9259 return;
9260
9261 query = createPQExpBuffer();
9262
9263 appendPQExpBufferStr(query, "SELECT tableoid, oid, "
9264 "trftype, trflang, trffromsql::oid, trftosql::oid "
9265 "FROM pg_transform "
9266 "ORDER BY 3,4");
9267
9268 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
9269
9270 ntups = PQntuples(res);
9271
9273
9274 i_tableoid = PQfnumber(res, "tableoid");
9275 i_oid = PQfnumber(res, "oid");
9276 i_trftype = PQfnumber(res, "trftype");
9277 i_trflang = PQfnumber(res, "trflang");
9278 i_trffromsql = PQfnumber(res, "trffromsql");
9279 i_trftosql = PQfnumber(res, "trftosql");
9280
9281 for (i = 0; i < ntups; i++)
9282 {
9285 char *lanname;
9286
9288 transforminfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
9289 transforminfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
9291 transforminfo[i].trftype = atooid(PQgetvalue(res, i, i_trftype));
9292 transforminfo[i].trflang = atooid(PQgetvalue(res, i, i_trflang));
9293 transforminfo[i].trffromsql = atooid(PQgetvalue(res, i, i_trffromsql));
9294 transforminfo[i].trftosql = atooid(PQgetvalue(res, i, i_trftosql));
9295
9296 /*
9297 * Try to name transform as concatenation of type and language name.
9298 * This is only used for purposes of sorting. If we fail to find
9299 * either, the name will be an empty string.
9300 */
9304 if (typeInfo && lanname)
9305 appendPQExpBuffer(&namebuf, "%s %s",
9306 typeInfo->dobj.name, lanname);
9307 transforminfo[i].dobj.name = namebuf.data;
9308 free(lanname);
9309
9310 /* Decide whether we want to dump it */
9312 }
9313
9314 PQclear(res);
9315
9316 destroyPQExpBuffer(query);
9317}
9318
9319/*
9320 * getTableAttrs -
9321 * for each interesting table, read info about its attributes
9322 * (names, types, default values, CHECK constraints, etc)
9323 *
9324 * modifies tblinfo
9325 */
9326void
9328{
9329 DumpOptions *dopt = fout->dopt;
9334 PGresult *res;
9335 int ntups;
9336 int curtblindx;
9337 int i_attrelid;
9338 int i_attnum;
9339 int i_attname;
9340 int i_atttypname;
9341 int i_attstattarget;
9342 int i_attstorage;
9343 int i_typstorage;
9344 int i_attidentity;
9345 int i_attgenerated;
9346 int i_attisdropped;
9347 int i_attlen;
9348 int i_attalign;
9349 int i_attislocal;
9350 int i_notnull_name;
9355 int i_attoptions;
9356 int i_attcollation;
9357 int i_attcompression;
9358 int i_attfdwoptions;
9359 int i_attmissingval;
9360 int i_atthasdef;
9361
9362 /*
9363 * We want to perform just one query against pg_attribute, and then just
9364 * one against pg_attrdef (for DEFAULTs) and two against pg_constraint
9365 * (for CHECK constraints and for NOT NULL constraints). However, we
9366 * mustn't try to select every row of those catalogs and then sort it out
9367 * on the client side, because some of the server-side functions we need
9368 * would be unsafe to apply to tables we don't have lock on. Hence, we
9369 * build an array of the OIDs of tables we care about (and now have lock
9370 * on!), and use a WHERE clause to constrain which rows are selected.
9371 */
9374 for (int i = 0; i < numTables; i++)
9375 {
9376 TableInfo *tbinfo = &tblinfo[i];
9377
9378 /* Don't bother to collect info for sequences */
9379 if (tbinfo->relkind == RELKIND_SEQUENCE)
9380 continue;
9381
9382 /*
9383 * Don't bother with uninteresting tables, either. For binary
9384 * upgrades, this is bypassed for pg_largeobject_metadata and
9385 * pg_shdepend so that the columns names are collected for the
9386 * corresponding COPY commands. Restoring the data for those catalogs
9387 * is faster than restoring the equivalent set of large object
9388 * commands.
9389 */
9390 if (!tbinfo->interesting &&
9391 !(fout->dopt->binary_upgrade &&
9392 (tbinfo->dobj.catId.oid == LargeObjectMetadataRelationId ||
9393 tbinfo->dobj.catId.oid == SharedDependRelationId)))
9394 continue;
9395
9396 /* OK, we need info for this table */
9397 if (tbloids->len > 1) /* do we have more than the '{'? */
9399 appendPQExpBuffer(tbloids, "%u", tbinfo->dobj.catId.oid);
9400
9401 if (tbinfo->ncheck > 0)
9402 {
9403 /* Also make a list of the ones with check constraints */
9404 if (checkoids->len > 1) /* do we have more than the '{'? */
9406 appendPQExpBuffer(checkoids, "%u", tbinfo->dobj.catId.oid);
9407 }
9408 }
9411
9412 /*
9413 * Find all the user attributes and their types.
9414 *
9415 * Since we only want to dump COLLATE clauses for attributes whose
9416 * collation is different from their type's default, we use a CASE here to
9417 * suppress uninteresting attcollations cheaply.
9418 */
9420 "SELECT\n"
9421 "a.attrelid,\n"
9422 "a.attnum,\n"
9423 "a.attname,\n"
9424 "a.attstattarget,\n"
9425 "a.attstorage,\n"
9426 "t.typstorage,\n"
9427 "a.atthasdef,\n"
9428 "a.attisdropped,\n"
9429 "a.attlen,\n"
9430 "a.attalign,\n"
9431 "a.attislocal,\n"
9432 "pg_catalog.format_type(t.oid, a.atttypmod) AS atttypname,\n"
9433 "array_to_string(a.attoptions, ', ') AS attoptions,\n"
9434 "CASE WHEN a.attcollation <> t.typcollation "
9435 "THEN a.attcollation ELSE 0 END AS attcollation,\n"
9436 "pg_catalog.array_to_string(ARRAY("
9437 "SELECT pg_catalog.quote_ident(option_name) || "
9438 "' ' || pg_catalog.quote_literal(option_value) "
9439 "FROM pg_catalog.pg_options_to_table(attfdwoptions) "
9440 "ORDER BY option_name"
9441 "), E',\n ') AS attfdwoptions,\n");
9442
9443 /*
9444 * Find out any NOT NULL markings for each column. In 18 and up we read
9445 * pg_constraint to obtain the constraint name, and for valid constraints
9446 * also pg_description to obtain its comment. notnull_noinherit is set
9447 * according to the NO INHERIT property. For versions prior to 18, we
9448 * store an empty string as the name when a constraint is marked as
9449 * attnotnull (this cues dumpTableSchema to print the NOT NULL clause
9450 * without a name); also, such cases are never NO INHERIT.
9451 *
9452 * For invalid constraints, we need to store their OIDs for processing
9453 * elsewhere, so we bring the pg_constraint.oid value when the constraint
9454 * is invalid, and NULL otherwise. Their comments are handled not here
9455 * but by collectComments, because they're their own dumpable object.
9456 *
9457 * We track in notnull_islocal whether the constraint was defined directly
9458 * in this table or via an ancestor, for binary upgrade. flagInhAttrs
9459 * might modify this later.
9460 */
9461 if (fout->remoteVersion >= 180000)
9463 "co.conname AS notnull_name,\n"
9464 "CASE WHEN co.convalidated THEN pt.description"
9465 " ELSE NULL END AS notnull_comment,\n"
9466 "CASE WHEN NOT co.convalidated THEN co.oid "
9467 "ELSE NULL END AS notnull_invalidoid,\n"
9468 "co.connoinherit AS notnull_noinherit,\n"
9469 "co.conislocal AS notnull_islocal,\n");
9470 else
9472 "CASE WHEN a.attnotnull THEN '' ELSE NULL END AS notnull_name,\n"
9473 "NULL AS notnull_comment,\n"
9474 "NULL AS notnull_invalidoid,\n"
9475 "false AS notnull_noinherit,\n"
9476 "CASE WHEN a.attislocal THEN true\n"
9477 " WHEN a.attnotnull AND NOT a.attislocal THEN true\n"
9478 " ELSE false\n"
9479 "END AS notnull_islocal,\n");
9480
9481 if (fout->remoteVersion >= 140000)
9483 "a.attcompression AS attcompression,\n");
9484 else
9486 "'' AS attcompression,\n");
9487
9488 if (fout->remoteVersion >= 100000)
9490 "a.attidentity,\n");
9491 else
9493 "'' AS attidentity,\n");
9494
9495 if (fout->remoteVersion >= 110000)
9497 "CASE WHEN a.atthasmissing AND NOT a.attisdropped "
9498 "THEN a.attmissingval ELSE null END AS attmissingval,\n");
9499 else
9501 "NULL AS attmissingval,\n");
9502
9503 if (fout->remoteVersion >= 120000)
9505 "a.attgenerated\n");
9506 else
9508 "'' AS attgenerated\n");
9509
9510 /* need left join to pg_type to not fail on dropped columns ... */
9512 "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
9513 "JOIN pg_catalog.pg_attribute a ON (src.tbloid = a.attrelid) "
9514 "LEFT JOIN pg_catalog.pg_type t "
9515 "ON (a.atttypid = t.oid)\n",
9516 tbloids->data);
9517
9518 /*
9519 * In versions 18 and up, we need pg_constraint for explicit NOT NULL
9520 * entries and pg_description to get their comments.
9521 */
9522 if (fout->remoteVersion >= 180000)
9524 " LEFT JOIN pg_catalog.pg_constraint co ON "
9525 "(a.attrelid = co.conrelid\n"
9526 " AND co.contype = 'n' AND "
9527 "co.conkey = array[a.attnum])\n"
9528 " LEFT JOIN pg_catalog.pg_description pt ON "
9529 "(pt.classoid = co.tableoid AND pt.objoid = co.oid)\n");
9530
9532 "WHERE a.attnum > 0::pg_catalog.int2\n");
9533
9534 /*
9535 * For binary upgrades from <v12, be sure to pick up
9536 * pg_largeobject_metadata's oid column.
9537 */
9538 if (fout->dopt->binary_upgrade && fout->remoteVersion < 120000)
9540 "OR (a.attnum = -2::pg_catalog.int2 AND src.tbloid = "
9542
9544 "ORDER BY a.attrelid, a.attnum");
9545
9547
9548 ntups = PQntuples(res);
9549
9550 i_attrelid = PQfnumber(res, "attrelid");
9551 i_attnum = PQfnumber(res, "attnum");
9552 i_attname = PQfnumber(res, "attname");
9553 i_atttypname = PQfnumber(res, "atttypname");
9554 i_attstattarget = PQfnumber(res, "attstattarget");
9555 i_attstorage = PQfnumber(res, "attstorage");
9556 i_typstorage = PQfnumber(res, "typstorage");
9557 i_attidentity = PQfnumber(res, "attidentity");
9558 i_attgenerated = PQfnumber(res, "attgenerated");
9559 i_attisdropped = PQfnumber(res, "attisdropped");
9560 i_attlen = PQfnumber(res, "attlen");
9561 i_attalign = PQfnumber(res, "attalign");
9562 i_attislocal = PQfnumber(res, "attislocal");
9563 i_notnull_name = PQfnumber(res, "notnull_name");
9564 i_notnull_comment = PQfnumber(res, "notnull_comment");
9565 i_notnull_invalidoid = PQfnumber(res, "notnull_invalidoid");
9566 i_notnull_noinherit = PQfnumber(res, "notnull_noinherit");
9567 i_notnull_islocal = PQfnumber(res, "notnull_islocal");
9568 i_attoptions = PQfnumber(res, "attoptions");
9569 i_attcollation = PQfnumber(res, "attcollation");
9570 i_attcompression = PQfnumber(res, "attcompression");
9571 i_attfdwoptions = PQfnumber(res, "attfdwoptions");
9572 i_attmissingval = PQfnumber(res, "attmissingval");
9573 i_atthasdef = PQfnumber(res, "atthasdef");
9574
9575 /* Within the next loop, we'll accumulate OIDs of tables with defaults */
9578
9579 /*
9580 * Outer loop iterates once per table, not once per row. Incrementing of
9581 * r is handled by the inner loop.
9582 */
9583 curtblindx = -1;
9584 for (int r = 0; r < ntups;)
9585 {
9586 Oid attrelid = atooid(PQgetvalue(res, r, i_attrelid));
9588 int numatts;
9589 bool hasdefaults;
9590
9591 /* Count rows for this table */
9592 for (numatts = 1; numatts < ntups - r; numatts++)
9593 if (atooid(PQgetvalue(res, r + numatts, i_attrelid)) != attrelid)
9594 break;
9595
9596 /*
9597 * Locate the associated TableInfo; we rely on tblinfo[] being in OID
9598 * order.
9599 */
9600 while (++curtblindx < numTables)
9601 {
9602 tbinfo = &tblinfo[curtblindx];
9603 if (tbinfo->dobj.catId.oid == attrelid)
9604 break;
9605 }
9606 if (curtblindx >= numTables)
9607 pg_fatal("unrecognized table OID %u", attrelid);
9608 /* cross-check that we only got requested tables */
9609 if (tbinfo->relkind == RELKIND_SEQUENCE ||
9610 (!tbinfo->interesting &&
9611 !(fout->dopt->binary_upgrade &&
9612 (tbinfo->dobj.catId.oid == LargeObjectMetadataRelationId ||
9613 tbinfo->dobj.catId.oid == SharedDependRelationId))))
9614 pg_fatal("unexpected column data for table \"%s\"",
9615 tbinfo->dobj.name);
9616
9617 /* Save data for this table */
9618 tbinfo->numatts = numatts;
9619 tbinfo->attnames = pg_malloc_array(char *, numatts);
9620 tbinfo->atttypnames = pg_malloc_array(char *, numatts);
9621 tbinfo->attstattarget = pg_malloc_array(int, numatts);
9622 tbinfo->attstorage = pg_malloc_array(char, numatts);
9623 tbinfo->typstorage = pg_malloc_array(char, numatts);
9624 tbinfo->attidentity = pg_malloc_array(char, numatts);
9625 tbinfo->attgenerated = pg_malloc_array(char, numatts);
9626 tbinfo->attisdropped = pg_malloc_array(bool, numatts);
9627 tbinfo->attlen = pg_malloc_array(int, numatts);
9628 tbinfo->attalign = pg_malloc_array(char, numatts);
9629 tbinfo->attislocal = pg_malloc_array(bool, numatts);
9630 tbinfo->attoptions = pg_malloc_array(char *, numatts);
9631 tbinfo->attcollation = pg_malloc_array(Oid, numatts);
9632 tbinfo->attcompression = pg_malloc_array(char, numatts);
9633 tbinfo->attfdwoptions = pg_malloc_array(char *, numatts);
9634 tbinfo->attmissingval = pg_malloc_array(char *, numatts);
9635 tbinfo->notnull_constrs = pg_malloc_array(char *, numatts);
9636 tbinfo->notnull_comment = pg_malloc_array(char *, numatts);
9637 tbinfo->notnull_invalid = pg_malloc_array(bool, numatts);
9638 tbinfo->notnull_noinh = pg_malloc_array(bool, numatts);
9639 tbinfo->notnull_islocal = pg_malloc_array(bool, numatts);
9640 tbinfo->attrdefs = pg_malloc_array(AttrDefInfo *, numatts);
9641 hasdefaults = false;
9642
9643 for (int j = 0; j < numatts; j++, r++)
9644 {
9645 if (j + 1 != atoi(PQgetvalue(res, r, i_attnum)) &&
9646 !(fout->dopt->binary_upgrade && fout->remoteVersion < 120000 &&
9647 tbinfo->dobj.catId.oid == LargeObjectMetadataRelationId))
9648 pg_fatal("invalid column numbering in table \"%s\"",
9649 tbinfo->dobj.name);
9650 tbinfo->attnames[j] = pg_strdup(PQgetvalue(res, r, i_attname));
9651 tbinfo->atttypnames[j] = pg_strdup(PQgetvalue(res, r, i_atttypname));
9652 if (PQgetisnull(res, r, i_attstattarget))
9653 tbinfo->attstattarget[j] = -1;
9654 else
9655 tbinfo->attstattarget[j] = atoi(PQgetvalue(res, r, i_attstattarget));
9656 tbinfo->attstorage[j] = *(PQgetvalue(res, r, i_attstorage));
9657 tbinfo->typstorage[j] = *(PQgetvalue(res, r, i_typstorage));
9658 tbinfo->attidentity[j] = *(PQgetvalue(res, r, i_attidentity));
9659 tbinfo->attgenerated[j] = *(PQgetvalue(res, r, i_attgenerated));
9660 tbinfo->needs_override = tbinfo->needs_override || (tbinfo->attidentity[j] == ATTRIBUTE_IDENTITY_ALWAYS);
9661 tbinfo->attisdropped[j] = (PQgetvalue(res, r, i_attisdropped)[0] == 't');
9662 tbinfo->attlen[j] = atoi(PQgetvalue(res, r, i_attlen));
9663 tbinfo->attalign[j] = *(PQgetvalue(res, r, i_attalign));
9664 tbinfo->attislocal[j] = (PQgetvalue(res, r, i_attislocal)[0] == 't');
9665
9666 /* Handle not-null constraint name and flags */
9668 tbinfo, j,
9675
9676 tbinfo->notnull_comment[j] = PQgetisnull(res, r, i_notnull_comment) ?
9678 tbinfo->attoptions[j] = pg_strdup(PQgetvalue(res, r, i_attoptions));
9679 tbinfo->attcollation[j] = atooid(PQgetvalue(res, r, i_attcollation));
9680 tbinfo->attcompression[j] = *(PQgetvalue(res, r, i_attcompression));
9681 tbinfo->attfdwoptions[j] = pg_strdup(PQgetvalue(res, r, i_attfdwoptions));
9682 tbinfo->attmissingval[j] = pg_strdup(PQgetvalue(res, r, i_attmissingval));
9683 tbinfo->attrdefs[j] = NULL; /* fix below */
9684 if (PQgetvalue(res, r, i_atthasdef)[0] == 't')
9685 hasdefaults = true;
9686 }
9687
9688 if (hasdefaults)
9689 {
9690 /* Collect OIDs of interesting tables that have defaults */
9691 if (tbloids->len > 1) /* do we have more than the '{'? */
9693 appendPQExpBuffer(tbloids, "%u", tbinfo->dobj.catId.oid);
9694 }
9695 }
9696
9697 /* If invalidnotnulloids has any data, finalize it */
9698 if (invalidnotnulloids != NULL)
9700
9701 PQclear(res);
9702
9703 /*
9704 * Now get info about column defaults. This is skipped for a data-only
9705 * dump, as it is only needed for table schemas.
9706 */
9707 if (dopt->dumpSchema && tbloids->len > 1)
9708 {
9709 AttrDefInfo *attrdefs;
9710 int numDefaults;
9712
9713 pg_log_info("finding table default expressions");
9714
9716
9717 printfPQExpBuffer(q, "SELECT a.tableoid, a.oid, adrelid, adnum, "
9718 "pg_catalog.pg_get_expr(adbin, adrelid) AS adsrc\n"
9719 "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
9720 "JOIN pg_catalog.pg_attrdef a ON (src.tbloid = a.adrelid)\n"
9721 "ORDER BY a.adrelid, a.adnum",
9722 tbloids->data);
9723
9725
9726 numDefaults = PQntuples(res);
9728
9729 curtblindx = -1;
9730 for (int j = 0; j < numDefaults; j++)
9731 {
9732 Oid adtableoid = atooid(PQgetvalue(res, j, 0));
9733 Oid adoid = atooid(PQgetvalue(res, j, 1));
9734 Oid adrelid = atooid(PQgetvalue(res, j, 2));
9735 int adnum = atoi(PQgetvalue(res, j, 3));
9736 char *adsrc = PQgetvalue(res, j, 4);
9737
9738 /*
9739 * Locate the associated TableInfo; we rely on tblinfo[] being in
9740 * OID order.
9741 */
9742 if (tbinfo == NULL || tbinfo->dobj.catId.oid != adrelid)
9743 {
9744 while (++curtblindx < numTables)
9745 {
9746 tbinfo = &tblinfo[curtblindx];
9747 if (tbinfo->dobj.catId.oid == adrelid)
9748 break;
9749 }
9750 if (curtblindx >= numTables)
9751 pg_fatal("unrecognized table OID %u", adrelid);
9752 }
9753
9754 if (adnum <= 0 || adnum > tbinfo->numatts)
9755 pg_fatal("invalid adnum value %d for table \"%s\"",
9756 adnum, tbinfo->dobj.name);
9757
9758 /*
9759 * dropped columns shouldn't have defaults, but just in case,
9760 * ignore 'em
9761 */
9762 if (tbinfo->attisdropped[adnum - 1])
9763 continue;
9764
9765 attrdefs[j].dobj.objType = DO_ATTRDEF;
9766 attrdefs[j].dobj.catId.tableoid = adtableoid;
9767 attrdefs[j].dobj.catId.oid = adoid;
9768 AssignDumpId(&attrdefs[j].dobj);
9769 attrdefs[j].adtable = tbinfo;
9770 attrdefs[j].adnum = adnum;
9771 attrdefs[j].adef_expr = pg_strdup(adsrc);
9772
9773 attrdefs[j].dobj.name = pg_strdup(tbinfo->dobj.name);
9774 attrdefs[j].dobj.namespace = tbinfo->dobj.namespace;
9775
9776 attrdefs[j].dobj.dump = tbinfo->dobj.dump;
9777
9778 /*
9779 * Figure out whether the default/generation expression should be
9780 * dumped as part of the main CREATE TABLE (or similar) command or
9781 * as a separate ALTER TABLE (or similar) command. The preference
9782 * is to put it into the CREATE command, but in some cases that's
9783 * not possible.
9784 */
9785 if (tbinfo->attgenerated[adnum - 1])
9786 {
9787 /*
9788 * Column generation expressions cannot be dumped separately,
9789 * because there is no syntax for it. By setting separate to
9790 * false here we prevent the "default" from being processed as
9791 * its own dumpable object. Later, flagInhAttrs() will mark
9792 * it as not to be dumped at all, if possible (that is, if it
9793 * can be inherited from a parent).
9794 */
9795 attrdefs[j].separate = false;
9796 }
9797 else if (tbinfo->relkind == RELKIND_VIEW)
9798 {
9799 /*
9800 * Defaults on a VIEW must always be dumped as separate ALTER
9801 * TABLE commands.
9802 */
9803 attrdefs[j].separate = true;
9804 }
9805 else if (!shouldPrintColumn(dopt, tbinfo, adnum - 1))
9806 {
9807 /* column will be suppressed, print default separately */
9808 attrdefs[j].separate = true;
9809 }
9810 else
9811 {
9812 attrdefs[j].separate = false;
9813 }
9814
9815 if (!attrdefs[j].separate)
9816 {
9817 /*
9818 * Mark the default as needing to appear before the table, so
9819 * that any dependencies it has must be emitted before the
9820 * CREATE TABLE. If this is not possible, we'll change to
9821 * "separate" mode while sorting dependencies.
9822 */
9824 attrdefs[j].dobj.dumpId);
9825 }
9826
9827 tbinfo->attrdefs[adnum - 1] = &attrdefs[j];
9828 }
9829
9830 PQclear(res);
9831 }
9832
9833 /*
9834 * Get info about NOT NULL NOT VALID constraints. This is skipped for a
9835 * data-only dump, as it is only needed for table schemas.
9836 */
9837 if (dopt->dumpSchema && invalidnotnulloids)
9838 {
9840 int numConstrs;
9841 int i_tableoid;
9842 int i_oid;
9843 int i_conrelid;
9844 int i_conname;
9845 int i_consrc;
9846 int i_conislocal;
9847
9848 pg_log_info("finding invalid not-null constraints");
9849
9852 "SELECT c.tableoid, c.oid, conrelid, conname, "
9853 "pg_catalog.pg_get_constraintdef(c.oid) AS consrc, "
9854 "conislocal, convalidated "
9855 "FROM unnest('%s'::pg_catalog.oid[]) AS src(conoid)\n"
9856 "JOIN pg_catalog.pg_constraint c ON (src.conoid = c.oid)\n"
9857 "ORDER BY c.conrelid, c.conname",
9859
9861
9862 numConstrs = PQntuples(res);
9864
9865 i_tableoid = PQfnumber(res, "tableoid");
9866 i_oid = PQfnumber(res, "oid");
9867 i_conrelid = PQfnumber(res, "conrelid");
9868 i_conname = PQfnumber(res, "conname");
9869 i_consrc = PQfnumber(res, "consrc");
9870 i_conislocal = PQfnumber(res, "conislocal");
9871
9872 /* As above, this loop iterates once per table, not once per row */
9873 curtblindx = -1;
9874 for (int j = 0; j < numConstrs;)
9875 {
9876 Oid conrelid = atooid(PQgetvalue(res, j, i_conrelid));
9878 int numcons;
9879
9880 /* Count rows for this table */
9881 for (numcons = 1; numcons < numConstrs - j; numcons++)
9882 if (atooid(PQgetvalue(res, j + numcons, i_conrelid)) != conrelid)
9883 break;
9884
9885 /*
9886 * Locate the associated TableInfo; we rely on tblinfo[] being in
9887 * OID order.
9888 */
9889 while (++curtblindx < numTables)
9890 {
9891 tbinfo = &tblinfo[curtblindx];
9892 if (tbinfo->dobj.catId.oid == conrelid)
9893 break;
9894 }
9895 if (curtblindx >= numTables)
9896 pg_fatal("unrecognized table OID %u", conrelid);
9897
9898 for (int c = 0; c < numcons; c++, j++)
9899 {
9900 constrs[j].dobj.objType = DO_CONSTRAINT;
9901 constrs[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_tableoid));
9902 constrs[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
9903 AssignDumpId(&constrs[j].dobj);
9904 constrs[j].dobj.name = pg_strdup(PQgetvalue(res, j, i_conname));
9905 constrs[j].dobj.namespace = tbinfo->dobj.namespace;
9906 constrs[j].contable = tbinfo;
9907 constrs[j].condomain = NULL;
9908 constrs[j].contype = 'n';
9909 constrs[j].condef = pg_strdup(PQgetvalue(res, j, i_consrc));
9910 constrs[j].confrelid = InvalidOid;
9911 constrs[j].conindex = 0;
9912 constrs[j].condeferrable = false;
9913 constrs[j].condeferred = false;
9914 constrs[j].conislocal = (PQgetvalue(res, j, i_conislocal)[0] == 't');
9915
9916 /*
9917 * All invalid not-null constraints must be dumped separately,
9918 * because CREATE TABLE would not create them as invalid, and
9919 * also because they must be created after potentially
9920 * violating data has been loaded.
9921 */
9922 constrs[j].separate = true;
9923
9924 constrs[j].dobj.dump = tbinfo->dobj.dump;
9925 }
9926 }
9927 PQclear(res);
9928 }
9929
9930 /*
9931 * Get info about table CHECK constraints. This is skipped for a
9932 * data-only dump, as it is only needed for table schemas.
9933 */
9934 if (dopt->dumpSchema && checkoids->len > 2)
9935 {
9937 int numConstrs;
9938 int i_tableoid;
9939 int i_oid;
9940 int i_conrelid;
9941 int i_conname;
9942 int i_consrc;
9943 int i_conislocal;
9944 int i_convalidated;
9945
9946 pg_log_info("finding table check constraints");
9947
9950 "SELECT c.tableoid, c.oid, conrelid, conname, "
9951 "pg_catalog.pg_get_constraintdef(c.oid) AS consrc, "
9952 "conislocal, convalidated "
9953 "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
9954 "JOIN pg_catalog.pg_constraint c ON (src.tbloid = c.conrelid)\n"
9955 "WHERE contype = 'c' "
9956 "ORDER BY c.conrelid, c.conname",
9957 checkoids->data);
9958
9960
9961 numConstrs = PQntuples(res);
9963
9964 i_tableoid = PQfnumber(res, "tableoid");
9965 i_oid = PQfnumber(res, "oid");
9966 i_conrelid = PQfnumber(res, "conrelid");
9967 i_conname = PQfnumber(res, "conname");
9968 i_consrc = PQfnumber(res, "consrc");
9969 i_conislocal = PQfnumber(res, "conislocal");
9970 i_convalidated = PQfnumber(res, "convalidated");
9971
9972 /* As above, this loop iterates once per table, not once per row */
9973 curtblindx = -1;
9974 for (int j = 0; j < numConstrs;)
9975 {
9976 Oid conrelid = atooid(PQgetvalue(res, j, i_conrelid));
9978 int numcons;
9979
9980 /* Count rows for this table */
9981 for (numcons = 1; numcons < numConstrs - j; numcons++)
9982 if (atooid(PQgetvalue(res, j + numcons, i_conrelid)) != conrelid)
9983 break;
9984
9985 /*
9986 * Locate the associated TableInfo; we rely on tblinfo[] being in
9987 * OID order.
9988 */
9989 while (++curtblindx < numTables)
9990 {
9991 tbinfo = &tblinfo[curtblindx];
9992 if (tbinfo->dobj.catId.oid == conrelid)
9993 break;
9994 }
9995 if (curtblindx >= numTables)
9996 pg_fatal("unrecognized table OID %u", conrelid);
9997
9998 if (numcons != tbinfo->ncheck)
9999 {
10000 pg_log_error(ngettext("expected %d check constraint on table \"%s\" but found %d",
10001 "expected %d check constraints on table \"%s\" but found %d",
10002 tbinfo->ncheck),
10003 tbinfo->ncheck, tbinfo->dobj.name, numcons);
10004 pg_log_error_hint("The system catalogs might be corrupted.");
10005 exit_nicely(1);
10006 }
10007
10008 tbinfo->checkexprs = constrs + j;
10009
10010 for (int c = 0; c < numcons; c++, j++)
10011 {
10012 bool validated = PQgetvalue(res, j, i_convalidated)[0] == 't';
10013
10014 constrs[j].dobj.objType = DO_CONSTRAINT;
10015 constrs[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_tableoid));
10016 constrs[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
10017 AssignDumpId(&constrs[j].dobj);
10018 constrs[j].dobj.name = pg_strdup(PQgetvalue(res, j, i_conname));
10019 constrs[j].dobj.namespace = tbinfo->dobj.namespace;
10020 constrs[j].contable = tbinfo;
10021 constrs[j].condomain = NULL;
10022 constrs[j].contype = 'c';
10023 constrs[j].condef = pg_strdup(PQgetvalue(res, j, i_consrc));
10024 constrs[j].confrelid = InvalidOid;
10025 constrs[j].conindex = 0;
10026 constrs[j].condeferrable = false;
10027 constrs[j].condeferred = false;
10028 constrs[j].conislocal = (PQgetvalue(res, j, i_conislocal)[0] == 't');
10029
10030 /*
10031 * An unvalidated constraint needs to be dumped separately, so
10032 * that potentially-violating existing data is loaded before
10033 * the constraint.
10034 */
10035 constrs[j].separate = !validated;
10036
10037 constrs[j].dobj.dump = tbinfo->dobj.dump;
10038
10039 /*
10040 * Mark the constraint as needing to appear before the table
10041 * --- this is so that any other dependencies of the
10042 * constraint will be emitted before we try to create the
10043 * table. If the constraint is to be dumped separately, it
10044 * will be dumped after data is loaded anyway, so don't do it.
10045 * (There's an automatic dependency in the opposite direction
10046 * anyway, so don't need to add one manually here.)
10047 */
10048 if (!constrs[j].separate)
10050 constrs[j].dobj.dumpId);
10051
10052 /*
10053 * We will detect later whether the constraint must be split
10054 * out from the table definition.
10055 */
10056 }
10057 }
10058
10059 PQclear(res);
10060 }
10061
10065}
10066
10067/*
10068 * Based on the getTableAttrs query's row corresponding to one column, set
10069 * the name and flags to handle a not-null constraint for that column in
10070 * the tbinfo struct.
10071 *
10072 * Result row 'r' is for tbinfo's attribute 'j'.
10073 *
10074 * There are four possibilities:
10075 * 1) the column has no not-null constraints. In that case, ->notnull_constrs
10076 * (the constraint name) remains NULL.
10077 * 2) The column has a constraint with no name (this is the case when
10078 * constraints come from pre-18 servers). In this case, ->notnull_constrs
10079 * is set to the empty string; dumpTableSchema will print just "NOT NULL".
10080 * 3) The column has an invalid not-null constraint. This must be treated
10081 * as a separate object (because it must be created after the table data
10082 * is loaded). So we add its OID to invalidnotnulloids for processing
10083 * elsewhere and do nothing further with it here. We distinguish this
10084 * case because the "notnull_invalidoid" column has been set to a non-NULL
10085 * value, which is the constraint OID. Valid constraints have a null OID.
10086 * 4) The column has a constraint with a known name; in that case
10087 * notnull_constrs carries that name and dumpTableSchema will print
10088 * "CONSTRAINT the_name NOT NULL". However, if the name is the default
10089 * (table_column_not_null) and there's no comment on the constraint,
10090 * there's no need to print that name in the dump, so notnull_constrs
10091 * is set to the empty string and it behaves as case 2.
10092 *
10093 * In a child table that inherits from a parent already containing NOT NULL
10094 * constraints and the columns in the child don't have their own NOT NULL
10095 * declarations, we suppress printing constraints in the child: the
10096 * constraints are acquired at the point where the child is attached to the
10097 * parent. This is tracked in ->notnull_islocal; for servers pre-18 this is
10098 * set not here but in flagInhAttrs. That flag is also used when the
 10099 * constraint was validated in a child but all its parents have it as NOT
10100 * VALID.
10101 *
10102 * Any of these constraints might have the NO INHERIT bit. If so we set
10103 * ->notnull_noinh and NO INHERIT will be printed by dumpTableSchema.
10104 *
10105 * In case 4 above, the name comparison is a bit of a hack; it actually fails
10106 * to do the right thing in all but the trivial case. However, the downside
10107 * of getting it wrong is simply that the name is printed rather than
10108 * suppressed, so it's not a big deal.
10109 *
10110 * invalidnotnulloids is expected to be given as NULL; if any invalid not-null
10111 * constraints are found, it is initialized and filled with the array of
10112 * OIDs of such constraints, for later processing.
10113 */
/*
 * NOTE(review): doxygen-extracted text; the rest of the signature and a few
 * statements (PQExpBuffer allocation/append of invalid-constraint OIDs,
 * pg_strdup of the constraint name) were lost in extraction — the jumps in
 * the embedded line numbers mark the gaps.  Compare with upstream pg_dump.c
 * before editing.
 */
10114static void
10116 TableInfo *tbinfo, int j,
10117 int i_notnull_name,
10123{
10124 DumpOptions *dopt = fout->dopt;
10125
10126 /*
10127 * If this not-null constraint is not valid, list its OID in
10128 * invalidnotnulloids and do nothing further. It'll be processed
10129 * elsewhere later.
10130 *
10131 * Because invalid not-null constraints are rare, we don't want to malloc
10132 * invalidnotnulloids until we're sure we're going to need it, which
10133 * happens here.
10134 */
10135 if (!PQgetisnull(res, r, i_notnull_invalidoid))
10136 {
10137 char *constroid = PQgetvalue(res, r, i_notnull_invalidoid);
10138
10139 if (*invalidnotnulloids == NULL)
10140 {
                /* NOTE(review): first-time buffer creation/append lost here */
10144 }
10145 else
                /* NOTE(review): append of ",<oid>" to the list lost here */
10147
10148 /*
10149 * Track when a parent constraint is invalid for the cases where a
10150 * child constraint has been validated independently.
10151 */
10152 tbinfo->notnull_invalid[j] = true;
10153
10154 /* nothing else to do */
10155 tbinfo->notnull_constrs[j] = NULL;
10156 return;
10157 }
10158
10159 /*
10160 * notnull_noinh is straight from the query result. notnull_islocal also,
10161 * though flagInhAttrs may change that one later.
10162 */
10163 tbinfo->notnull_noinh[j] = PQgetvalue(res, r, i_notnull_noinherit)[0] == 't';
10164 tbinfo->notnull_islocal[j] = PQgetvalue(res, r, i_notnull_islocal)[0] == 't';
10165 tbinfo->notnull_invalid[j] = false;
10166
10167 /*
10168 * Determine a constraint name to use. If the column is not marked not-
10169 * null, we set NULL which cues dumpTableSchema to do nothing. An empty
10170 * string says to print an unnamed NOT NULL, and anything else is a
10171 * constraint name to use.
10172 */
10173 if (fout->remoteVersion < 180000)
10174 {
10175 /*
10176 * < 18 doesn't have not-null names, so an unnamed constraint is
10177 * sufficient.
10178 */
10179 if (PQgetisnull(res, r, i_notnull_name))
10180 tbinfo->notnull_constrs[j] = NULL;
10181 else
10182 tbinfo->notnull_constrs[j] = "";
10183 }
10184 else
10185 {
10186 if (PQgetisnull(res, r, i_notnull_name))
10187 tbinfo->notnull_constrs[j] = NULL;
10188 else
10189 {
10190 /*
10191 * In binary upgrade of inheritance child tables, must have a
10192 * constraint name that we can UPDATE later; same if there's a
10193 * comment on the constraint.
10194 */
10195 if ((dopt->binary_upgrade &&
10196 !tbinfo->ispartition &&
10197 !tbinfo->notnull_islocal[j]) ||
10199 {
10200 tbinfo->notnull_constrs[j] =
                    /* NOTE(review): pg_strdup of queried name lost here */
10202 }
10203 else
10204 {
10205 char *default_name;
10206
10207 /* XXX should match ChooseConstraintName better */
10208 default_name = psprintf("%s_%s_not_null", tbinfo->dobj.name,
10209 tbinfo->attnames[j]);
10210 if (strcmp(default_name,
10211 PQgetvalue(res, r, i_notnull_name)) == 0)
10212 tbinfo->notnull_constrs[j] = "";
10213 else
10214 {
10215 tbinfo->notnull_constrs[j] =
                        /* NOTE(review): pg_strdup of queried name lost here */
10217 }
10219 }
10220 }
10221 }
10222}
10223
10224/*
10225 * Test whether a column should be printed as part of table's CREATE TABLE.
10226 * Column number is zero-based.
10227 *
10228 * Normally this is always true, but it's false for dropped columns, as well
10229 * as those that were inherited without any local definition. (If we print
10230 * such a column it will mistakenly get pg_attribute.attislocal set to true.)
10231 * For partitions, it's always true, because we want the partitions to be
10232 * created independently and ATTACH PARTITION used afterwards.
10233 *
10234 * In binary_upgrade mode, we must print all columns and fix the attislocal/
10235 * attisdropped state later, so as to keep control of the physical column
10236 * order.
10237 *
10238 * This function exists because there are scattered nonobvious places that
10239 * must be kept in sync with this decision.
10240 */
10241bool
10242shouldPrintColumn(const DumpOptions *dopt, const TableInfo *tbinfo, int colno)
10243{
10244 if (dopt->binary_upgrade)
10245 return true;
10246 if (tbinfo->attisdropped[colno])
10247 return false;
10248 return (tbinfo->attislocal[colno] || tbinfo->ispartition);
10249}
10250
10251
10252/*
10253 * getTSParsers:
10254 * get information about all text search parsers in the system catalogs
10255 */
/*
 * NOTE(review): doxygen-extracted text; the parameter line, the prsinfo[]
 * allocation, the findNamespace() assignment, and the selectDumpable call
 * were lost in extraction (see embedded line-number gaps).  Compare with
 * upstream pg_dump.c before editing.
 */
10256void
10258{
10259 PGresult *res;
10260 int ntups;
10261 int i;
10262 PQExpBuffer query;
10264 int i_tableoid;
10265 int i_oid;
10266 int i_prsname;
10267 int i_prsnamespace;
10268 int i_prsstart;
10269 int i_prstoken;
10270 int i_prsend;
10271 int i_prsheadline;
10272 int i_prslextype;
10273
10274 query = createPQExpBuffer();
10275
10276 /*
10277 * find all text search objects, including builtin ones; we filter out
10278 * system-defined objects at dump-out time.
10279 */
10280
10281 appendPQExpBufferStr(query, "SELECT tableoid, oid, prsname, prsnamespace, "
10282 "prsstart::oid, prstoken::oid, "
10283 "prsend::oid, prsheadline::oid, prslextype::oid "
10284 "FROM pg_ts_parser");
10285
10286 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10287
10288 ntups = PQntuples(res);
10289
     /* NOTE(review): allocation of the prsinfo array was lost here */
10291
10292 i_tableoid = PQfnumber(res, "tableoid");
10293 i_oid = PQfnumber(res, "oid");
10294 i_prsname = PQfnumber(res, "prsname");
10295 i_prsnamespace = PQfnumber(res, "prsnamespace");
10296 i_prsstart = PQfnumber(res, "prsstart");
10297 i_prstoken = PQfnumber(res, "prstoken");
10298 i_prsend = PQfnumber(res, "prsend");
10299 i_prsheadline = PQfnumber(res, "prsheadline");
10300 i_prslextype = PQfnumber(res, "prslextype");
10301
     /* Build one DumpableObject per pg_ts_parser row */
10302 for (i = 0; i < ntups; i++)
10303 {
10304 prsinfo[i].dobj.objType = DO_TSPARSER;
10305 prsinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
10306 prsinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
10307 AssignDumpId(&prsinfo[i].dobj);
10308 prsinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_prsname));
10309 prsinfo[i].dobj.namespace =
         /* NOTE(review): findNamespace(...) call lost here */
10311 prsinfo[i].prsstart = atooid(PQgetvalue(res, i, i_prsstart));
10312 prsinfo[i].prstoken = atooid(PQgetvalue(res, i, i_prstoken));
10313 prsinfo[i].prsend = atooid(PQgetvalue(res, i, i_prsend));
10314 prsinfo[i].prsheadline = atooid(PQgetvalue(res, i, i_prsheadline));
10315 prsinfo[i].prslextype = atooid(PQgetvalue(res, i, i_prslextype));
10316
10317 /* Decide whether we want to dump it */
10319 }
10320
10321 PQclear(res);
10322
10323 destroyPQExpBuffer(query);
10324}
10325
10326/*
10327 * getTSDictionaries:
10328 * get information about all text search dictionaries in the system catalogs
10329 */
/*
 * NOTE(review): doxygen-extracted text; the parameter line, the dictinfo[]
 * allocation, the findNamespace() assignment, and the selectDumpable call
 * were lost in extraction.  Compare with upstream pg_dump.c before editing.
 */
10330void
10332{
10333 PGresult *res;
10334 int ntups;
10335 int i;
10336 PQExpBuffer query;
10338 int i_tableoid;
10339 int i_oid;
10340 int i_dictname;
10341 int i_dictnamespace;
10342 int i_dictowner;
10343 int i_dicttemplate;
10344 int i_dictinitoption;
10345
10346 query = createPQExpBuffer();
10347
10348 appendPQExpBufferStr(query, "SELECT tableoid, oid, dictname, "
10349 "dictnamespace, dictowner, "
10350 "dicttemplate, dictinitoption "
10351 "FROM pg_ts_dict");
10352
10353 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10354
10355 ntups = PQntuples(res);
10356
     /* NOTE(review): allocation of the dictinfo array was lost here */
10358
10359 i_tableoid = PQfnumber(res, "tableoid");
10360 i_oid = PQfnumber(res, "oid");
10361 i_dictname = PQfnumber(res, "dictname");
10362 i_dictnamespace = PQfnumber(res, "dictnamespace");
10363 i_dictowner = PQfnumber(res, "dictowner");
10364 i_dictinitoption = PQfnumber(res, "dictinitoption");
10365 i_dicttemplate = PQfnumber(res, "dicttemplate");
10366
     /* Build one DumpableObject per pg_ts_dict row */
10367 for (i = 0; i < ntups; i++)
10368 {
10369 dictinfo[i].dobj.objType = DO_TSDICT;
10370 dictinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
10371 dictinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
10372 AssignDumpId(&dictinfo[i].dobj);
10373 dictinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_dictname));
10374 dictinfo[i].dobj.namespace =
         /* NOTE(review): findNamespace(...) call lost here */
10376 dictinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_dictowner));
10377 dictinfo[i].dicttemplate = atooid(PQgetvalue(res, i, i_dicttemplate));
         /* dictinitoption is nullable in the catalog; preserve the NULL */
10378 if (PQgetisnull(res, i, i_dictinitoption))
10379 dictinfo[i].dictinitoption = NULL;
10380 else
10381 dictinfo[i].dictinitoption = pg_strdup(PQgetvalue(res, i, i_dictinitoption));
10382
10383 /* Decide whether we want to dump it */
10385 }
10386
10387 PQclear(res);
10388
10389 destroyPQExpBuffer(query);
10390}
10391
10392/*
10393 * getTSTemplates:
10394 * get information about all text search templates in the system catalogs
10395 */
/*
 * NOTE(review): doxygen-extracted text; the parameter line, the tmplinfo[]
 * allocation, the findNamespace() assignment, and the selectDumpable call
 * were lost in extraction.  Compare with upstream pg_dump.c before editing.
 */
10396void
10398{
10399 PGresult *res;
10400 int ntups;
10401 int i;
10402 PQExpBuffer query;
10404 int i_tableoid;
10405 int i_oid;
10406 int i_tmplname;
10407 int i_tmplnamespace;
10408 int i_tmplinit;
10409 int i_tmpllexize;
10410
10411 query = createPQExpBuffer();
10412
10413 appendPQExpBufferStr(query, "SELECT tableoid, oid, tmplname, "
10414 "tmplnamespace, tmplinit::oid, tmpllexize::oid "
10415 "FROM pg_ts_template");
10416
10417 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10418
10419 ntups = PQntuples(res);
10420
     /* NOTE(review): allocation of the tmplinfo array was lost here */
10422
10423 i_tableoid = PQfnumber(res, "tableoid");
10424 i_oid = PQfnumber(res, "oid");
10425 i_tmplname = PQfnumber(res, "tmplname");
10426 i_tmplnamespace = PQfnumber(res, "tmplnamespace");
10427 i_tmplinit = PQfnumber(res, "tmplinit");
10428 i_tmpllexize = PQfnumber(res, "tmpllexize");
10429
     /* Build one DumpableObject per pg_ts_template row */
10430 for (i = 0; i < ntups; i++)
10431 {
10432 tmplinfo[i].dobj.objType = DO_TSTEMPLATE;
10433 tmplinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
10434 tmplinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
10435 AssignDumpId(&tmplinfo[i].dobj);
10436 tmplinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_tmplname));
10437 tmplinfo[i].dobj.namespace =
         /* NOTE(review): findNamespace(...) call lost here */
10439 tmplinfo[i].tmplinit = atooid(PQgetvalue(res, i, i_tmplinit));
10440 tmplinfo[i].tmpllexize = atooid(PQgetvalue(res, i, i_tmpllexize));
10441
10442 /* Decide whether we want to dump it */
10444 }
10445
10446 PQclear(res);
10447
10448 destroyPQExpBuffer(query);
10449}
10450
10451/*
10452 * getTSConfigurations:
10453 * get information about all text search configurations
10454 */
/*
 * NOTE(review): doxygen-extracted text; the parameter line, the cfginfo[]
 * allocation, the findNamespace() assignment, and the selectDumpable call
 * were lost in extraction.  Compare with upstream pg_dump.c before editing.
 */
10455void
10457{
10458 PGresult *res;
10459 int ntups;
10460 int i;
10461 PQExpBuffer query;
10463 int i_tableoid;
10464 int i_oid;
10465 int i_cfgname;
10466 int i_cfgnamespace;
10467 int i_cfgowner;
10468 int i_cfgparser;
10469
10470 query = createPQExpBuffer();
10471
10472 appendPQExpBufferStr(query, "SELECT tableoid, oid, cfgname, "
10473 "cfgnamespace, cfgowner, cfgparser "
10474 "FROM pg_ts_config");
10475
10476 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10477
10478 ntups = PQntuples(res);
10479
     /* NOTE(review): allocation of the cfginfo array was lost here */
10481
10482 i_tableoid = PQfnumber(res, "tableoid");
10483 i_oid = PQfnumber(res, "oid");
10484 i_cfgname = PQfnumber(res, "cfgname");
10485 i_cfgnamespace = PQfnumber(res, "cfgnamespace");
10486 i_cfgowner = PQfnumber(res, "cfgowner");
10487 i_cfgparser = PQfnumber(res, "cfgparser");
10488
     /* Build one DumpableObject per pg_ts_config row */
10489 for (i = 0; i < ntups; i++)
10490 {
10491 cfginfo[i].dobj.objType = DO_TSCONFIG;
10492 cfginfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
10493 cfginfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
10494 AssignDumpId(&cfginfo[i].dobj);
10495 cfginfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_cfgname));
10496 cfginfo[i].dobj.namespace =
         /* NOTE(review): findNamespace(...) call lost here */
10498 cfginfo[i].rolname = getRoleName(PQgetvalue(res, i, i_cfgowner));
10499 cfginfo[i].cfgparser = atooid(PQgetvalue(res, i, i_cfgparser));
10500
10501 /* Decide whether we want to dump it */
10503 }
10504
10505 PQclear(res);
10506
10507 destroyPQExpBuffer(query);
10508}
10509
10510/*
10511 * getForeignDataWrappers:
10512 * get information about all foreign-data wrappers in the system catalogs
10513 */
/*
 * NOTE(review): doxygen-extracted text; the parameter line, the fdwinfo[]
 * allocation, and the selectDumpable call were lost in extraction.  Compare
 * with upstream pg_dump.c before editing.
 */
10514void
10516{
10517 PGresult *res;
10518 int ntups;
10519 int i;
10520 PQExpBuffer query;
10522 int i_tableoid;
10523 int i_oid;
10524 int i_fdwname;
10525 int i_fdwowner;
10526 int i_fdwhandler;
10527 int i_fdwvalidator;
10528 int i_fdwacl;
10529 int i_acldefault;
10530 int i_fdwoptions;
10531
10532 query = createPQExpBuffer();
10533
     /* fdwoptions are flattened into one quoted, comma-separated string */
10534 appendPQExpBufferStr(query, "SELECT tableoid, oid, fdwname, "
10535 "fdwowner, "
10536 "fdwhandler::pg_catalog.regproc, "
10537 "fdwvalidator::pg_catalog.regproc, "
10538 "fdwacl, "
10539 "acldefault('F', fdwowner) AS acldefault, "
10540 "array_to_string(ARRAY("
10541 "SELECT quote_ident(option_name) || ' ' || "
10542 "quote_literal(option_value) "
10543 "FROM pg_options_to_table(fdwoptions) "
10544 "ORDER BY option_name"
10545 "), E',\n    ') AS fdwoptions "
10546 "FROM pg_foreign_data_wrapper");
10547
10548 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10549
10550 ntups = PQntuples(res);
10551
     /* NOTE(review): allocation of the fdwinfo array was lost here */
10553
10554 i_tableoid = PQfnumber(res, "tableoid");
10555 i_oid = PQfnumber(res, "oid");
10556 i_fdwname = PQfnumber(res, "fdwname");
10557 i_fdwowner = PQfnumber(res, "fdwowner");
10558 i_fdwhandler = PQfnumber(res, "fdwhandler");
10559 i_fdwvalidator = PQfnumber(res, "fdwvalidator");
10560 i_fdwacl = PQfnumber(res, "fdwacl");
10561 i_acldefault = PQfnumber(res, "acldefault");
10562 i_fdwoptions = PQfnumber(res, "fdwoptions");
10563
     /* Build one DumpableObject per foreign-data wrapper */
10564 for (i = 0; i < ntups; i++)
10565 {
10566 fdwinfo[i].dobj.objType = DO_FDW;
10567 fdwinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
10568 fdwinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
10569 AssignDumpId(&fdwinfo[i].dobj);
10570 fdwinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_fdwname));
         /* FDWs are not schema-qualified objects */
10571 fdwinfo[i].dobj.namespace = NULL;
10572 fdwinfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_fdwacl));
10573 fdwinfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
10574 fdwinfo[i].dacl.privtype = 0;
10575 fdwinfo[i].dacl.initprivs = NULL;
10576 fdwinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_fdwowner));
10577 fdwinfo[i].fdwhandler = pg_strdup(PQgetvalue(res, i, i_fdwhandler));
10578 fdwinfo[i].fdwvalidator = pg_strdup(PQgetvalue(res, i, i_fdwvalidator));
10579 fdwinfo[i].fdwoptions = pg_strdup(PQgetvalue(res, i, i_fdwoptions));
10580
10581 /* Decide whether we want to dump it */
10583
10584 /* Mark whether FDW has an ACL */
10585 if (!PQgetisnull(res, i, i_fdwacl))
10586 fdwinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
10587 }
10588
10589 PQclear(res);
10590
10591 destroyPQExpBuffer(query);
10592}
10593
10594/*
10595 * getForeignServers:
10596 * get information about all foreign servers in the system catalogs
10597 */
/*
 * NOTE(review): doxygen-extracted text; the parameter line, the srvinfo[]
 * allocation, and the selectDumpable call were lost in extraction.  Compare
 * with upstream pg_dump.c before editing.
 */
10598void
10600{
10601 PGresult *res;
10602 int ntups;
10603 int i;
10604 PQExpBuffer query;
10606 int i_tableoid;
10607 int i_oid;
10608 int i_srvname;
10609 int i_srvowner;
10610 int i_srvfdw;
10611 int i_srvtype;
10612 int i_srvversion;
10613 int i_srvacl;
10614 int i_acldefault;
10615 int i_srvoptions;
10616
10617 query = createPQExpBuffer();
10618
     /* srvoptions are flattened into one quoted, comma-separated string */
10619 appendPQExpBufferStr(query, "SELECT tableoid, oid, srvname, "
10620 "srvowner, "
10621 "srvfdw, srvtype, srvversion, srvacl, "
10622 "acldefault('S', srvowner) AS acldefault, "
10623 "array_to_string(ARRAY("
10624 "SELECT quote_ident(option_name) || ' ' || "
10625 "quote_literal(option_value) "
10626 "FROM pg_options_to_table(srvoptions) "
10627 "ORDER BY option_name"
10628 "), E',\n    ') AS srvoptions "
10629 "FROM pg_foreign_server");
10630
10631 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10632
10633 ntups = PQntuples(res);
10634
     /* NOTE(review): allocation of the srvinfo array was lost here */
10636
10637 i_tableoid = PQfnumber(res, "tableoid");
10638 i_oid = PQfnumber(res, "oid");
10639 i_srvname = PQfnumber(res, "srvname");
10640 i_srvowner = PQfnumber(res, "srvowner");
10641 i_srvfdw = PQfnumber(res, "srvfdw");
10642 i_srvtype = PQfnumber(res, "srvtype");
10643 i_srvversion = PQfnumber(res, "srvversion");
10644 i_srvacl = PQfnumber(res, "srvacl");
10645 i_acldefault = PQfnumber(res, "acldefault");
10646 i_srvoptions = PQfnumber(res, "srvoptions");
10647
     /* Build one DumpableObject per foreign server */
10648 for (i = 0; i < ntups; i++)
10649 {
10650 srvinfo[i].dobj.objType = DO_FOREIGN_SERVER;
10651 srvinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
10652 srvinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
10653 AssignDumpId(&srvinfo[i].dobj);
10654 srvinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_srvname));
         /* foreign servers are not schema-qualified objects */
10655 srvinfo[i].dobj.namespace = NULL;
10656 srvinfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_srvacl));
10657 srvinfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
10658 srvinfo[i].dacl.privtype = 0;
10659 srvinfo[i].dacl.initprivs = NULL;
10660 srvinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_srvowner));
10661 srvinfo[i].srvfdw = atooid(PQgetvalue(res, i, i_srvfdw));
10662 srvinfo[i].srvtype = pg_strdup(PQgetvalue(res, i, i_srvtype));
10663 srvinfo[i].srvversion = pg_strdup(PQgetvalue(res, i, i_srvversion));
10664 srvinfo[i].srvoptions = pg_strdup(PQgetvalue(res, i, i_srvoptions));
10665
10666 /* Decide whether we want to dump it */
10668
10669 /* Servers have user mappings */
10670 srvinfo[i].dobj.components |= DUMP_COMPONENT_USERMAP;
10671
10672 /* Mark whether server has an ACL */
10673 if (!PQgetisnull(res, i, i_srvacl))
10674 srvinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
10675 }
10676
10677 PQclear(res);
10678
10679 destroyPQExpBuffer(query);
10680}
10681
10682/*
10683 * getDefaultACLs:
10684 * get information about all default ACL information in the system catalogs
10685 */
/*
 * NOTE(review): doxygen-extracted text; the parameter line, the daclinfo[]
 * allocation, the query-issuing call, the defaclnamespace fetch, and the
 * selectDumpable call were lost in extraction.  Compare with upstream
 * pg_dump.c before editing.
 */
10686void
10688{
     /* dopt is presumably consumed by the lost selectDumpable call — verify */
10689 DumpOptions *dopt = fout->dopt;
10691 PQExpBuffer query;
10692 PGresult *res;
10693 int i_oid;
10694 int i_tableoid;
10695 int i_defaclrole;
10697 int i_defaclobjtype;
10698 int i_defaclacl;
10699 int i_acldefault;
10700 int i,
10701 ntups;
10702
10703 query = createPQExpBuffer();
10704
10705 /*
10706 * Global entries (with defaclnamespace=0) replace the hard-wired default
10707 * ACL for their object type. We should dump them as deltas from the
10708 * default ACL, since that will be used as a starting point for
10709 * interpreting the ALTER DEFAULT PRIVILEGES commands. On the other hand,
10710 * non-global entries can only add privileges not revoke them. We must
10711 * dump those as-is (i.e., as deltas from an empty ACL).
10712 *
10713 * We can use defaclobjtype as the object type for acldefault(), except
10714 * for the case of 'S' (DEFACLOBJ_SEQUENCE) which must be converted to
10715 * 's'.
10716 */
10718 "SELECT oid, tableoid, "
10719 "defaclrole, "
10720 "defaclnamespace, "
10721 "defaclobjtype, "
10722 "defaclacl, "
10723 "CASE WHEN defaclnamespace = 0 THEN "
10724 "acldefault(CASE WHEN defaclobjtype = 'S' "
10725 "THEN 's'::\"char\" ELSE defaclobjtype END, "
10726 "defaclrole) ELSE '{}' END AS acldefault "
10727 "FROM pg_default_acl");
10728
10729 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10730
10731 ntups = PQntuples(res);
10732
     /* NOTE(review): allocation of the daclinfo array was lost here */
10734
10735 i_oid = PQfnumber(res, "oid");
10736 i_tableoid = PQfnumber(res, "tableoid");
10737 i_defaclrole = PQfnumber(res, "defaclrole");
10738 i_defaclnamespace = PQfnumber(res, "defaclnamespace");
10739 i_defaclobjtype = PQfnumber(res, "defaclobjtype");
10740 i_defaclacl = PQfnumber(res, "defaclacl");
10741 i_acldefault = PQfnumber(res, "acldefault");
10742
     /* Build one DumpableObject per pg_default_acl row */
10743 for (i = 0; i < ntups; i++)
10744 {
         /* NOTE(review): nspid fetch (defaclnamespace) was lost here */
10746
10747 daclinfo[i].dobj.objType = DO_DEFAULT_ACL;
10748 daclinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
10749 daclinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
10750 AssignDumpId(&daclinfo[i].dobj);
10751 /* cheesy ... is it worth coming up with a better object name? */
10752 daclinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_defaclobjtype));
10753
         /* global entries (defaclnamespace = 0) carry no namespace */
10754 if (nspid != InvalidOid)
10755 daclinfo[i].dobj.namespace = findNamespace(nspid);
10756 else
10757 daclinfo[i].dobj.namespace = NULL;
10758
10759 daclinfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_defaclacl));
10760 daclinfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
10761 daclinfo[i].dacl.privtype = 0;
10762 daclinfo[i].dacl.initprivs = NULL;
10763 daclinfo[i].defaclrole = getRoleName(PQgetvalue(res, i, i_defaclrole));
10764 daclinfo[i].defaclobjtype = *(PQgetvalue(res, i, i_defaclobjtype));
10765
10766 /* Default ACLs are ACLs, of course */
10767 daclinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
10768
10769 /* Decide whether we want to dump it */
10771 }
10772
10773 PQclear(res);
10774
10775 destroyPQExpBuffer(query);
10776}
10777
10778/*
10779 * getRoleName -- look up the name of a role, given its OID
10780 *
10781 * In current usage, we don't expect failures, so error out for a bad OID.
10782 */
10783static const char *
10785{
10786 Oid roleoid = atooid(roleoid_str);
10787
10788 /*
10789 * Do binary search to find the appropriate item.
10790 */
10791 if (nrolenames > 0)
10792 {
10793 RoleNameItem *low = &rolenames[0];
10794 RoleNameItem *high = &rolenames[nrolenames - 1];
10795
10796 while (low <= high)
10797 {
10798 RoleNameItem *middle = low + (high - low) / 2;
10799
10800 if (roleoid < middle->roleoid)
10801 high = middle - 1;
10802 else if (roleoid > middle->roleoid)
10803 low = middle + 1;
10804 else
10805 return middle->rolename; /* found a match */
10806 }
10807 }
10808
10809 pg_fatal("role with OID %u does not exist", roleoid);
10810 return NULL; /* keep compiler quiet */
10811}
10812
10813/*
10814 * collectRoleNames --
10815 *
10816 * Construct a table of all known roles.
10817 * The table is sorted by OID for speed in lookup.
10818 */
10819static void
10821{
10822 PGresult *res;
10823 const char *query;
10824 int i;
10825
10826 query = "SELECT oid, rolname FROM pg_catalog.pg_roles ORDER BY 1";
10827
10828 res = ExecuteSqlQuery(fout, query, PGRES_TUPLES_OK);
10829
10830 nrolenames = PQntuples(res);
10831
10833
10834 for (i = 0; i < nrolenames; i++)
10835 {
10836 rolenames[i].roleoid = atooid(PQgetvalue(res, i, 0));
10838 }
10839
10840 PQclear(res);
10841}
10842
10843/*
10844 * getAdditionalACLs
10845 *
10846 * We have now created all the DumpableObjects, and collected the ACL data
10847 * that appears in the directly-associated catalog entries. However, there's
10848 * more ACL-related info to collect. If any of a table's columns have ACLs,
10849 * we must set the TableInfo's DUMP_COMPONENT_ACL components flag, as well as
10850 * its hascolumnACLs flag (we won't store the ACLs themselves here, though).
10851 * Also, in versions having the pg_init_privs catalog, read that and load the
10852 * information into the relevant DumpableObjects.
10853 */
10854static void
10856{
10858 PGresult *res;
10859 int ntups,
10860 i;
10861
10862 /* Check for per-column ACLs */
10864 "SELECT DISTINCT attrelid FROM pg_attribute "
10865 "WHERE attacl IS NOT NULL");
10866
10867 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10868
10869 ntups = PQntuples(res);
10870 for (i = 0; i < ntups; i++)
10871 {
10872 Oid relid = atooid(PQgetvalue(res, i, 0));
10873 TableInfo *tblinfo;
10874
10875 tblinfo = findTableByOid(relid);
10876 /* OK to ignore tables we haven't got a DumpableObject for */
10877 if (tblinfo)
10878 {
10880 tblinfo->hascolumnACLs = true;
10881 }
10882 }
10883 PQclear(res);
10884
10885 /* Fetch initial-privileges data */
10886 if (fout->remoteVersion >= 90600)
10887 {
10888 printfPQExpBuffer(query,
10889 "SELECT objoid, classoid, objsubid, privtype, initprivs "
10890 "FROM pg_init_privs");
10891
10892 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10893
10894 ntups = PQntuples(res);
10895 for (i = 0; i < ntups; i++)
10896 {
10897 Oid objoid = atooid(PQgetvalue(res, i, 0));
10898 Oid classoid = atooid(PQgetvalue(res, i, 1));
10899 int objsubid = atoi(PQgetvalue(res, i, 2));
10900 char privtype = *(PQgetvalue(res, i, 3));
10901 char *initprivs = PQgetvalue(res, i, 4);
10902 CatalogId objId;
10903 DumpableObject *dobj;
10904
10905 objId.tableoid = classoid;
10906 objId.oid = objoid;
10907 dobj = findObjectByCatalogId(objId);
10908 /* OK to ignore entries we haven't got a DumpableObject for */
10909 if (dobj)
10910 {
10911 /* Cope with sub-object initprivs */
10912 if (objsubid != 0)
10913 {
10914 if (dobj->objType == DO_TABLE)
10915 {
10916 /* For a column initprivs, set the table's ACL flags */
10918 ((TableInfo *) dobj)->hascolumnACLs = true;
10919 }
10920 else
10921 pg_log_warning("unsupported pg_init_privs entry: %u %u %d",
10922 classoid, objoid, objsubid);
10923 continue;
10924 }
10925
10926 /*
10927 * We ignore any pg_init_privs.initprivs entry for the public
10928 * schema, as explained in getNamespaces().
10929 */
10930 if (dobj->objType == DO_NAMESPACE &&
10931 strcmp(dobj->name, "public") == 0)
10932 continue;
10933
10934 /* Else it had better be of a type we think has ACLs */
10935 if (dobj->objType == DO_NAMESPACE ||
10936 dobj->objType == DO_TYPE ||
10937 dobj->objType == DO_FUNC ||
10938 dobj->objType == DO_AGG ||
10939 dobj->objType == DO_TABLE ||
10940 dobj->objType == DO_PROCLANG ||
10941 dobj->objType == DO_FDW ||
10942 dobj->objType == DO_FOREIGN_SERVER)
10943 {
10945
10946 daobj->dacl.privtype = privtype;
10947 daobj->dacl.initprivs = pstrdup(initprivs);
10948 }
10949 else
10950 pg_log_warning("unsupported pg_init_privs entry: %u %u %d",
10951 classoid, objoid, objsubid);
10952 }
10953 }
10954 PQclear(res);
10955 }
10956
10957 destroyPQExpBuffer(query);
10958}
10959
10960/*
10961 * dumpCommentExtended --
10962 *
10963 * This routine is used to dump any comments associated with the
10964 * object handed to this routine. The routine takes the object type
10965 * and object name (ready to print, except for schema decoration), plus
10966 * the namespace and owner of the object (for labeling the ArchiveEntry),
10967 * plus catalog ID and subid which are the lookup key for pg_description,
10968 * plus the dump ID for the object (for setting a dependency).
10969 * If a matching pg_description entry is found, it is dumped.
10970 *
10971 * Note: in some cases, such as comments for triggers and rules, the "type"
10972 * string really looks like, e.g., "TRIGGER name ON". This is a bit of a hack
10973 * but it doesn't seem worth complicating the API for all callers to make
10974 * it cleaner.
10975 *
10976 * Note: although this routine takes a dumpId for dependency purposes,
10977 * that purpose is just to mark the dependency in the emitted dump file
10978 * for possible future use by pg_restore. We do NOT use it for determining
10979 * ordering of the comment in the dump file, because this routine is called
10980 * after dependency sorting occurs. This routine should be called just after
10981 * calling ArchiveEntry() for the specified object.
10982 */
10983static void
10985 const char *name, const char *namespace,
10986 const char *owner, CatalogId catalogId,
10987 int subid, DumpId dumpId,
10988 const char *initdb_comment)
10989{
10990 DumpOptions *dopt = fout->dopt;
10992 int ncomments;
10993
10994 /* do nothing, if --no-comments is supplied */
10995 if (dopt->no_comments)
10996 return;
10997
10998 /* Comments are schema not data ... except LO comments are data */
10999 if (strcmp(type, "LARGE OBJECT") != 0)
11000 {
11001 if (!dopt->dumpSchema)
11002 return;
11003 }
11004 else
11005 {
11006 /* We do dump LO comments in binary-upgrade mode */
11007 if (!dopt->dumpData && !dopt->binary_upgrade)
11008 return;
11009 }
11010
11011 /* Search for comments associated with catalogId, using table */
11012 ncomments = findComments(catalogId.tableoid, catalogId.oid,
11013 &comments);
11014
11015 /* Is there one matching the subid? */
11016 while (ncomments > 0)
11017 {
11018 if (comments->objsubid == subid)
11019 break;
11020 comments++;
11021 ncomments--;
11022 }
11023
11024 if (initdb_comment != NULL)
11025 {
11026 static CommentItem empty_comment = {.descr = ""};
11027
11028 /*
11029 * initdb creates this object with a comment. Skip dumping the
11030 * initdb-provided comment, which would complicate matters for
11031 * non-superuser use of pg_dump. When the DBA has removed initdb's
11032 * comment, replicate that.
11033 */
11034 if (ncomments == 0)
11035 {
11037 ncomments = 1;
11038 }
11039 else if (strcmp(comments->descr, initdb_comment) == 0)
11040 ncomments = 0;
11041 }
11042
11043 /* If a comment exists, build COMMENT ON statement */
11044 if (ncomments > 0)
11045 {
11048
11049 appendPQExpBuffer(query, "COMMENT ON %s ", type);
11050 if (namespace && *namespace)
11051 appendPQExpBuffer(query, "%s.", fmtId(namespace));
11052 appendPQExpBuffer(query, "%s IS ", name);
11054 appendPQExpBufferStr(query, ";\n");
11055
11056 appendPQExpBuffer(tag, "%s %s", type, name);
11057
11058 /*
11059 * We mark comments as SECTION_NONE because they really belong in the
11060 * same section as their parent, whether that is pre-data or
11061 * post-data.
11062 */
11064 ARCHIVE_OPTS(.tag = tag->data,
11065 .namespace = namespace,
11066 .owner = owner,
11067 .description = "COMMENT",
11068 .section = SECTION_NONE,
11069 .createStmt = query->data,
11070 .deps = &dumpId,
11071 .nDeps = 1));
11072
11073 destroyPQExpBuffer(query);
11074 destroyPQExpBuffer(tag);
11075 }
11076}
11077
11078/*
11079 * dumpComment --
11080 *
11081 * Typical simplification of the above function.
11082 */
11083static inline void
11085 const char *name, const char *namespace,
11086 const char *owner, CatalogId catalogId,
11087 int subid, DumpId dumpId)
11088{
11089 dumpCommentExtended(fout, type, name, namespace, owner,
11090 catalogId, subid, dumpId, NULL);
11091}
11092
11093/*
11094 * appendNamedArgument --
11095 *
11096 * Convenience routine for constructing parameters of the form:
11097 * 'paraname', 'value'::type
11098 */
11099static void
11100appendNamedArgument(PQExpBuffer out, Archive *fout, const char *argname,
11101 const char *argtype, const char *argval)
11102{
11103 appendPQExpBufferStr(out, ",\n\t");
11104
11105 appendStringLiteralAH(out, argname, fout);
11106 appendPQExpBufferStr(out, ", ");
11107
11109 appendPQExpBuffer(out, "::%s", argtype);
11110}
11111
11112/*
11113 * fetchAttributeStats --
11114 *
11115 * Fetch next batch of attribute statistics for dumpRelationStats_dumper().
11116 */
11117static PGresult *
11119{
11123 int count = 0;
11124 PGresult *res = NULL;
11125 static TocEntry *te;
11126 static bool restarted;
11128
11129 /*
11130 * Our query for retrieving statistics for multiple relations uses WITH
11131 * ORDINALITY and multi-argument UNNEST(), both of which were introduced
11132 * in v9.4. For older versions, we resort to gathering statistics for a
11133 * single relation at a time.
11134 */
11135 if (fout->remoteVersion < 90400)
11136 max_rels = 1;
11137
11138 /* If we're just starting, set our TOC pointer. */
11139 if (!te)
11140 te = AH->toc->next;
11141
11142 /*
11143 * We can't easily avoid a second TOC scan for the tar format because it
11144 * writes restore.sql separately, which means we must execute the queries
11145 * twice. This feels risky, but there is no known reason it should
11146 * generate different output than the first pass. Even if it does, the
11147 * worst-case scenario is that restore.sql might have different statistics
11148 * data than the archive.
11149 */
11150 if (!restarted && te == AH->toc && AH->format == archTar)
11151 {
11152 te = AH->toc->next;
11153 restarted = true;
11154 }
11155
11158
11159 /*
11160 * Scan the TOC for the next set of relevant stats entries. We assume
11161 * that statistics are dumped in the order they are listed in the TOC.
11162 * This is perhaps not the sturdiest assumption, so we verify it matches
11163 * reality in dumpRelationStats_dumper().
11164 */
11165 for (; te != AH->toc && count < max_rels; te = te->next)
11166 {
11167 if ((te->reqs & REQ_STATS) != 0 &&
11168 strcmp(te->desc, "STATISTICS DATA") == 0)
11169 {
11170 appendPGArray(nspnames, te->namespace);
11172 count++;
11173 }
11174 }
11175
11178
11179 /* Execute the query for the next batch of relations. */
11180 if (count > 0)
11181 {
11183
11184 appendPQExpBufferStr(query, "EXECUTE getAttributeStats(");
11186 appendPQExpBufferStr(query, "::pg_catalog.name[],");
11188 appendPQExpBufferStr(query, "::pg_catalog.name[])");
11189 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
11190 destroyPQExpBuffer(query);
11191 }
11192
11195 return res;
11196}
11197
11198/*
11199 * dumpRelationStats_dumper --
11200 *
11201 * Generate command to import stats into the relation on the new database.
11202 * This routine is called by the Archiver when it wants the statistics to be
11203 * dumped.
11204 */
11205static char *
11207{
11208 const RelStatsInfo *rsinfo = userArg;
11209 static PGresult *res;
11210 static int rownum;
11211 PQExpBuffer query;
11213 PQExpBuffer out = &out_data;
11214 int i_schemaname;
11215 int i_tablename;
11216 int i_attname;
11217 int i_inherited;
11218 int i_null_frac;
11219 int i_avg_width;
11220 int i_n_distinct;
11224 int i_correlation;
11231 static TocEntry *expected_te;
11232
11233 /*
11234 * fetchAttributeStats() assumes that the statistics are dumped in the
11235 * order they are listed in the TOC. We verify that here for safety.
11236 */
11237 if (!expected_te)
11238 expected_te = ((ArchiveHandle *) fout)->toc;
11239
11241 while ((expected_te->reqs & REQ_STATS) == 0 ||
11242 strcmp(expected_te->desc, "STATISTICS DATA") != 0)
11244
11245 if (te != expected_te)
11246 pg_fatal("statistics dumped out of order (current: %d %s %s, expected: %d %s %s)",
11247 te->dumpId, te->desc, te->tag,
11248 expected_te->dumpId, expected_te->desc, expected_te->tag);
11249
11250 query = createPQExpBuffer();
11252 {
11254 "PREPARE getAttributeStats(pg_catalog.name[], pg_catalog.name[]) AS\n"
11255 "SELECT s.schemaname, s.tablename, s.attname, s.inherited, "
11256 "s.null_frac, s.avg_width, s.n_distinct, "
11257 "s.most_common_vals, s.most_common_freqs, "
11258 "s.histogram_bounds, s.correlation, "
11259 "s.most_common_elems, s.most_common_elem_freqs, "
11260 "s.elem_count_histogram, ");
11261
11262 if (fout->remoteVersion >= 170000)
11264 "s.range_length_histogram, "
11265 "s.range_empty_frac, "
11266 "s.range_bounds_histogram ");
11267 else
11269 "NULL AS range_length_histogram,"
11270 "NULL AS range_empty_frac,"
11271 "NULL AS range_bounds_histogram ");
11272
11273 /*
11274 * The results must be in the order of the relations supplied in the
11275 * parameters to ensure we remain in sync as we walk through the TOC.
11276 * The redundant filter clause on s.tablename = ANY(...) seems
11277 * sufficient to convince the planner to use
11278 * pg_class_relname_nsp_index, which avoids a full scan of pg_stats.
11279 * This may not work for all versions.
11280 *
11281 * Our query for retrieving statistics for multiple relations uses
11282 * WITH ORDINALITY and multi-argument UNNEST(), both of which were
11283 * introduced in v9.4. For older versions, we resort to gathering
11284 * statistics for a single relation at a time.
11285 */
11286 if (fout->remoteVersion >= 90400)
11288 "FROM pg_catalog.pg_stats s "
11289 "JOIN unnest($1, $2) WITH ORDINALITY AS u (schemaname, tablename, ord) "
11290 "ON s.schemaname = u.schemaname "
11291 "AND s.tablename = u.tablename "
11292 "WHERE s.tablename = ANY($2) "
11293 "ORDER BY u.ord, s.attname, s.inherited");
11294 else
11296 "FROM pg_catalog.pg_stats s "
11297 "WHERE s.schemaname = $1[1] "
11298 "AND s.tablename = $2[1] "
11299 "ORDER BY s.attname, s.inherited");
11300
11301 ExecuteSqlStatement(fout, query->data);
11302
11304 resetPQExpBuffer(query);
11305 }
11306
11307 initPQExpBuffer(out);
11308
11309 /* restore relation stats */
11310 appendPQExpBufferStr(out, "SELECT * FROM pg_catalog.pg_restore_relation_stats(\n");
11311 appendPQExpBuffer(out, "\t'version', '%d'::integer,\n",
11313 appendPQExpBufferStr(out, "\t'schemaname', ");
11314 appendStringLiteralAH(out, rsinfo->dobj.namespace->dobj.name, fout);
11315 appendPQExpBufferStr(out, ",\n");
11316 appendPQExpBufferStr(out, "\t'relname', ");
11317 appendStringLiteralAH(out, rsinfo->dobj.name, fout);
11318 appendPQExpBufferStr(out, ",\n");
11319 appendPQExpBuffer(out, "\t'relpages', '%d'::integer,\n", rsinfo->relpages);
11320
11321 /*
11322 * Before v14, a reltuples value of 0 was ambiguous: it could either mean
11323 * the relation is empty, or it could mean that it hadn't yet been
11324 * vacuumed or analyzed. (Newer versions use -1 for the latter case.)
11325 * This ambiguity allegedly can cause the planner to choose inefficient
11326 * plans after restoring to v18 or newer. To deal with this, let's just
11327 * set reltuples to -1 in that case.
11328 */
11329 if (fout->remoteVersion < 140000 && strcmp("0", rsinfo->reltuples) == 0)
11330 appendPQExpBufferStr(out, "\t'reltuples', '-1'::real,\n");
11331 else
11332 appendPQExpBuffer(out, "\t'reltuples', '%s'::real,\n", rsinfo->reltuples);
11333
11334 appendPQExpBuffer(out, "\t'relallvisible', '%d'::integer",
11335 rsinfo->relallvisible);
11336
11337 if (fout->remoteVersion >= 180000)
11338 appendPQExpBuffer(out, ",\n\t'relallfrozen', '%d'::integer", rsinfo->relallfrozen);
11339
11340 appendPQExpBufferStr(out, "\n);\n");
11341
11342 /* Fetch the next batch of attribute statistics if needed. */
11343 if (rownum >= PQntuples(res))
11344 {
11345 PQclear(res);
11347 rownum = 0;
11348 }
11349
11350 i_schemaname = PQfnumber(res, "schemaname");
11351 i_tablename = PQfnumber(res, "tablename");
11352 i_attname = PQfnumber(res, "attname");
11353 i_inherited = PQfnumber(res, "inherited");
11354 i_null_frac = PQfnumber(res, "null_frac");
11355 i_avg_width = PQfnumber(res, "avg_width");
11356 i_n_distinct = PQfnumber(res, "n_distinct");
11357 i_most_common_vals = PQfnumber(res, "most_common_vals");
11358 i_most_common_freqs = PQfnumber(res, "most_common_freqs");
11359 i_histogram_bounds = PQfnumber(res, "histogram_bounds");
11360 i_correlation = PQfnumber(res, "correlation");
11361 i_most_common_elems = PQfnumber(res, "most_common_elems");
11362 i_most_common_elem_freqs = PQfnumber(res, "most_common_elem_freqs");
11363 i_elem_count_histogram = PQfnumber(res, "elem_count_histogram");
11364 i_range_length_histogram = PQfnumber(res, "range_length_histogram");
11365 i_range_empty_frac = PQfnumber(res, "range_empty_frac");
11366 i_range_bounds_histogram = PQfnumber(res, "range_bounds_histogram");
11367
11368 /* restore attribute stats */
11369 for (; rownum < PQntuples(res); rownum++)
11370 {
11371 const char *attname;
11372
11373 /* Stop if the next stat row in our cache isn't for this relation. */
11374 if (strcmp(te->tag, PQgetvalue(res, rownum, i_tablename)) != 0 ||
11375 strcmp(te->namespace, PQgetvalue(res, rownum, i_schemaname)) != 0)
11376 break;
11377
11378 appendPQExpBufferStr(out, "SELECT * FROM pg_catalog.pg_restore_attribute_stats(\n");
11379 appendPQExpBuffer(out, "\t'version', '%d'::integer,\n",
11381 appendPQExpBufferStr(out, "\t'schemaname', ");
11382 appendStringLiteralAH(out, rsinfo->dobj.namespace->dobj.name, fout);
11383 appendPQExpBufferStr(out, ",\n\t'relname', ");
11384 appendStringLiteralAH(out, rsinfo->dobj.name, fout);
11385
11386 if (PQgetisnull(res, rownum, i_attname))
11387 pg_fatal("unexpected null attname");
11388 attname = PQgetvalue(res, rownum, i_attname);
11389
11390 /*
11391 * Indexes look up attname in indAttNames to derive attnum, all others
11392 * use attname directly. We must specify attnum for indexes, since
11393 * their attnames are not necessarily stable across dump/reload.
11394 */
11395 if (rsinfo->nindAttNames == 0)
11396 {
11397 appendPQExpBufferStr(out, ",\n\t'attname', ");
11399 }
11400 else
11401 {
11402 bool found = false;
11403
11404 for (int i = 0; i < rsinfo->nindAttNames; i++)
11405 {
11406 if (strcmp(attname, rsinfo->indAttNames[i]) == 0)
11407 {
11408 appendPQExpBuffer(out, ",\n\t'attnum', '%d'::smallint",
11409 i + 1);
11410 found = true;
11411 break;
11412 }
11413 }
11414
11415 if (!found)
11416 pg_fatal("could not find index attname \"%s\"", attname);
11417 }
11418
11419 if (!PQgetisnull(res, rownum, i_inherited))
11420 appendNamedArgument(out, fout, "inherited", "boolean",
11421 PQgetvalue(res, rownum, i_inherited));
11422 if (!PQgetisnull(res, rownum, i_null_frac))
11423 appendNamedArgument(out, fout, "null_frac", "real",
11424 PQgetvalue(res, rownum, i_null_frac));
11425 if (!PQgetisnull(res, rownum, i_avg_width))
11426 appendNamedArgument(out, fout, "avg_width", "integer",
11427 PQgetvalue(res, rownum, i_avg_width));
11428 if (!PQgetisnull(res, rownum, i_n_distinct))
11429 appendNamedArgument(out, fout, "n_distinct", "real",
11430 PQgetvalue(res, rownum, i_n_distinct));
11431 if (!PQgetisnull(res, rownum, i_most_common_vals))
11432 appendNamedArgument(out, fout, "most_common_vals", "text",
11433 PQgetvalue(res, rownum, i_most_common_vals));
11434 if (!PQgetisnull(res, rownum, i_most_common_freqs))
11435 appendNamedArgument(out, fout, "most_common_freqs", "real[]",
11436 PQgetvalue(res, rownum, i_most_common_freqs));
11437 if (!PQgetisnull(res, rownum, i_histogram_bounds))
11438 appendNamedArgument(out, fout, "histogram_bounds", "text",
11439 PQgetvalue(res, rownum, i_histogram_bounds));
11440 if (!PQgetisnull(res, rownum, i_correlation))
11441 appendNamedArgument(out, fout, "correlation", "real",
11442 PQgetvalue(res, rownum, i_correlation));
11443 if (!PQgetisnull(res, rownum, i_most_common_elems))
11444 appendNamedArgument(out, fout, "most_common_elems", "text",
11445 PQgetvalue(res, rownum, i_most_common_elems));
11446 if (!PQgetisnull(res, rownum, i_most_common_elem_freqs))
11447 appendNamedArgument(out, fout, "most_common_elem_freqs", "real[]",
11448 PQgetvalue(res, rownum, i_most_common_elem_freqs));
11449 if (!PQgetisnull(res, rownum, i_elem_count_histogram))
11450 appendNamedArgument(out, fout, "elem_count_histogram", "real[]",
11451 PQgetvalue(res, rownum, i_elem_count_histogram));
11452 if (fout->remoteVersion >= 170000)
11453 {
11454 if (!PQgetisnull(res, rownum, i_range_length_histogram))
11455 appendNamedArgument(out, fout, "range_length_histogram", "text",
11456 PQgetvalue(res, rownum, i_range_length_histogram));
11457 if (!PQgetisnull(res, rownum, i_range_empty_frac))
11458 appendNamedArgument(out, fout, "range_empty_frac", "real",
11459 PQgetvalue(res, rownum, i_range_empty_frac));
11460 if (!PQgetisnull(res, rownum, i_range_bounds_histogram))
11461 appendNamedArgument(out, fout, "range_bounds_histogram", "text",
11462 PQgetvalue(res, rownum, i_range_bounds_histogram));
11463 }
11464 appendPQExpBufferStr(out, "\n);\n");
11465 }
11466
11467 destroyPQExpBuffer(query);
11468 return out->data;
11469}
11470
11471/*
11472 * dumpRelationStats --
11473 *
11474 * Make an ArchiveEntry for the relation statistics. The Archiver will take
11475 * care of gathering the statistics and generating the restore commands when
11476 * they are needed.
11477 */
11478static void
11480{
11481 const DumpableObject *dobj = &rsinfo->dobj;
11482
11483 /* nothing to do if we are not dumping statistics */
11484 if (!fout->dopt->dumpStatistics)
11485 return;
11486
11488 ARCHIVE_OPTS(.tag = dobj->name,
11489 .namespace = dobj->namespace->dobj.name,
11490 .description = "STATISTICS DATA",
11491 .section = rsinfo->section,
11492 .defnFn = dumpRelationStats_dumper,
11493 .defnArg = rsinfo,
11494 .deps = dobj->dependencies,
11495 .nDeps = dobj->nDeps));
11496}
11497
11498/*
11499 * dumpTableComment --
11500 *
11501 * As above, but dump comments for both the specified table (or view)
11502 * and its columns.
11503 */
11504static void
11506 const char *reltypename)
11507{
11508 DumpOptions *dopt = fout->dopt;
11510 int ncomments;
11511 PQExpBuffer query;
11512 PQExpBuffer tag;
11513
11514 /* do nothing, if --no-comments is supplied */
11515 if (dopt->no_comments)
11516 return;
11517
11518 /* Comments are SCHEMA not data */
11519 if (!dopt->dumpSchema)
11520 return;
11521
11522 /* Search for comments associated with relation, using table */
11523 ncomments = findComments(tbinfo->dobj.catId.tableoid,
11524 tbinfo->dobj.catId.oid,
11525 &comments);
11526
11527 /* If comments exist, build COMMENT ON statements */
11528 if (ncomments <= 0)
11529 return;
11530
11531 query = createPQExpBuffer();
11532 tag = createPQExpBuffer();
11533
11534 while (ncomments > 0)
11535 {
11536 const char *descr = comments->descr;
11537 int objsubid = comments->objsubid;
11538
11539 if (objsubid == 0)
11540 {
11541 resetPQExpBuffer(tag);
11542 appendPQExpBuffer(tag, "%s %s", reltypename,
11543 fmtId(tbinfo->dobj.name));
11544
11545 resetPQExpBuffer(query);
11546 appendPQExpBuffer(query, "COMMENT ON %s %s IS ", reltypename,
11548 appendStringLiteralAH(query, descr, fout);
11549 appendPQExpBufferStr(query, ";\n");
11550
11552 ARCHIVE_OPTS(.tag = tag->data,
11553 .namespace = tbinfo->dobj.namespace->dobj.name,
11554 .owner = tbinfo->rolname,
11555 .description = "COMMENT",
11556 .section = SECTION_NONE,
11557 .createStmt = query->data,
11558 .deps = &(tbinfo->dobj.dumpId),
11559 .nDeps = 1));
11560 }
11561 else if (objsubid > 0 && objsubid <= tbinfo->numatts)
11562 {
11563 resetPQExpBuffer(tag);
11564 appendPQExpBuffer(tag, "COLUMN %s.",
11565 fmtId(tbinfo->dobj.name));
11566 appendPQExpBufferStr(tag, fmtId(tbinfo->attnames[objsubid - 1]));
11567
11568 resetPQExpBuffer(query);
11569 appendPQExpBuffer(query, "COMMENT ON COLUMN %s.",
11571 appendPQExpBuffer(query, "%s IS ",
11572 fmtId(tbinfo->attnames[objsubid - 1]));
11573 appendStringLiteralAH(query, descr, fout);
11574 appendPQExpBufferStr(query, ";\n");
11575
11577 ARCHIVE_OPTS(.tag = tag->data,
11578 .namespace = tbinfo->dobj.namespace->dobj.name,
11579 .owner = tbinfo->rolname,
11580 .description = "COMMENT",
11581 .section = SECTION_NONE,
11582 .createStmt = query->data,
11583 .deps = &(tbinfo->dobj.dumpId),
11584 .nDeps = 1));
11585 }
11586
11587 comments++;
11588 ncomments--;
11589 }
11590
11591 destroyPQExpBuffer(query);
11592 destroyPQExpBuffer(tag);
11593}
11594
11595/*
11596 * findComments --
11597 *
11598 * Find the comment(s), if any, associated with the given object. All the
11599 * objsubid values associated with the given classoid/objoid are found with
11600 * one search.
11601 */
11602static int
11604{
11606 CommentItem *low;
11607 CommentItem *high;
11608 int nmatch;
11609
11610 /*
11611 * Do binary search to find some item matching the object.
11612 */
11613 low = &comments[0];
11614 high = &comments[ncomments - 1];
11615 while (low <= high)
11616 {
11617 middle = low + (high - low) / 2;
11618
11619 if (classoid < middle->classoid)
11620 high = middle - 1;
11621 else if (classoid > middle->classoid)
11622 low = middle + 1;
11623 else if (objoid < middle->objoid)
11624 high = middle - 1;
11625 else if (objoid > middle->objoid)
11626 low = middle + 1;
11627 else
11628 break; /* found a match */
11629 }
11630
11631 if (low > high) /* no matches */
11632 {
11633 *items = NULL;
11634 return 0;
11635 }
11636
11637 /*
11638 * Now determine how many items match the object. The search loop
11639 * invariant still holds: only items between low and high inclusive could
11640 * match.
11641 */
11642 nmatch = 1;
11643 while (middle > low)
11644 {
11645 if (classoid != middle[-1].classoid ||
11646 objoid != middle[-1].objoid)
11647 break;
11648 middle--;
11649 nmatch++;
11650 }
11651
11652 *items = middle;
11653
11654 middle += nmatch;
11655 while (middle <= high)
11656 {
11657 if (classoid != middle->classoid ||
11658 objoid != middle->objoid)
11659 break;
11660 middle++;
11661 nmatch++;
11662 }
11663
11664 return nmatch;
11665}
11666
11667/*
11668 * collectComments --
11669 *
11670 * Construct a table of all comments available for database objects;
11671 * also set the has-comment component flag for each relevant object.
11672 *
11673 * We used to do per-object queries for the comments, but it's much faster
11674 * to pull them all over at once, and on most databases the memory cost
11675 * isn't high.
11676 *
11677 * The table is sorted by classoid/objid/objsubid for speed in lookup.
11678 */
11679static void
11681{
11682 PGresult *res;
11683 PQExpBuffer query;
11684 int i_description;
11685 int i_classoid;
11686 int i_objoid;
11687 int i_objsubid;
11688 int ntups;
11689 int i;
11690 DumpableObject *dobj;
11691
11692 query = createPQExpBuffer();
11693
11694 appendPQExpBufferStr(query, "SELECT description, classoid, objoid, objsubid "
11695 "FROM pg_catalog.pg_description "
11696 "ORDER BY classoid, objoid, objsubid");
11697
11698 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
11699
11700 /* Construct lookup table containing OIDs in numeric form */
11701
11702 i_description = PQfnumber(res, "description");
11703 i_classoid = PQfnumber(res, "classoid");
11704 i_objoid = PQfnumber(res, "objoid");
11705 i_objsubid = PQfnumber(res, "objsubid");
11706
11707 ntups = PQntuples(res);
11708
11710 ncomments = 0;
11711 dobj = NULL;
11712
11713 for (i = 0; i < ntups; i++)
11714 {
11715 CatalogId objId;
11716 int subid;
11717
11718 objId.tableoid = atooid(PQgetvalue(res, i, i_classoid));
11719 objId.oid = atooid(PQgetvalue(res, i, i_objoid));
11720 subid = atoi(PQgetvalue(res, i, i_objsubid));
11721
11722 /* We needn't remember comments that don't match any dumpable object */
11723 if (dobj == NULL ||
11724 dobj->catId.tableoid != objId.tableoid ||
11725 dobj->catId.oid != objId.oid)
11726 dobj = findObjectByCatalogId(objId);
11727 if (dobj == NULL)
11728 continue;
11729
11730 /*
11731 * Comments on columns of composite types are linked to the type's
11732 * pg_class entry, but we need to set the DUMP_COMPONENT_COMMENT flag
11733 * in the type's own DumpableObject.
11734 */
11735 if (subid != 0 && dobj->objType == DO_TABLE &&
11736 ((TableInfo *) dobj)->relkind == RELKIND_COMPOSITE_TYPE)
11737 {
11739
11740 cTypeInfo = findTypeByOid(((TableInfo *) dobj)->reltype);
11741 if (cTypeInfo)
11742 cTypeInfo->dobj.components |= DUMP_COMPONENT_COMMENT;
11743 }
11744 else
11745 dobj->components |= DUMP_COMPONENT_COMMENT;
11746
11749 comments[ncomments].objoid = objId.oid;
11750 comments[ncomments].objsubid = subid;
11751 ncomments++;
11752 }
11753
11754 PQclear(res);
11755 destroyPQExpBuffer(query);
11756}
11757
11758/*
11759 * dumpDumpableObject
11760 *
11761 * This routine and its subsidiaries are responsible for creating
11762 * ArchiveEntries (TOC objects) for each object to be dumped.
11763 */
11764static void
11766{
11767 /*
11768 * Clear any dump-request bits for components that don't exist for this
11769 * object. (This makes it safe to initially use DUMP_COMPONENT_ALL as the
11770 * request for every kind of object.)
11771 */
11772 dobj->dump &= dobj->components;
11773
11774 /* Now, short-circuit if there's nothing to be done here. */
11775 if (dobj->dump == 0)
11776 return;
11777
11778 switch (dobj->objType)
11779 {
11780 case DO_NAMESPACE:
11781 dumpNamespace(fout, (const NamespaceInfo *) dobj);
11782 break;
11783 case DO_EXTENSION:
11784 dumpExtension(fout, (const ExtensionInfo *) dobj);
11785 break;
11786 case DO_TYPE:
11787 dumpType(fout, (const TypeInfo *) dobj);
11788 break;
11789 case DO_SHELL_TYPE:
11790 dumpShellType(fout, (const ShellTypeInfo *) dobj);
11791 break;
11792 case DO_FUNC:
11793 dumpFunc(fout, (const FuncInfo *) dobj);
11794 break;
11795 case DO_AGG:
11796 dumpAgg(fout, (const AggInfo *) dobj);
11797 break;
11798 case DO_OPERATOR:
11799 dumpOpr(fout, (const OprInfo *) dobj);
11800 break;
11801 case DO_ACCESS_METHOD:
11802 dumpAccessMethod(fout, (const AccessMethodInfo *) dobj);
11803 break;
11804 case DO_OPCLASS:
11805 dumpOpclass(fout, (const OpclassInfo *) dobj);
11806 break;
11807 case DO_OPFAMILY:
11808 dumpOpfamily(fout, (const OpfamilyInfo *) dobj);
11809 break;
11810 case DO_COLLATION:
11811 dumpCollation(fout, (const CollInfo *) dobj);
11812 break;
11813 case DO_CONVERSION:
11814 dumpConversion(fout, (const ConvInfo *) dobj);
11815 break;
11816 case DO_TABLE:
11817 dumpTable(fout, (const TableInfo *) dobj);
11818 break;
11819 case DO_TABLE_ATTACH:
11820 dumpTableAttach(fout, (const TableAttachInfo *) dobj);
11821 break;
11822 case DO_ATTRDEF:
11823 dumpAttrDef(fout, (const AttrDefInfo *) dobj);
11824 break;
11825 case DO_INDEX:
11826 dumpIndex(fout, (const IndxInfo *) dobj);
11827 break;
11828 case DO_INDEX_ATTACH:
11829 dumpIndexAttach(fout, (const IndexAttachInfo *) dobj);
11830 break;
11831 case DO_STATSEXT:
11832 dumpStatisticsExt(fout, (const StatsExtInfo *) dobj);
11833 dumpStatisticsExtStats(fout, (const StatsExtInfo *) dobj);
11834 break;
11835 case DO_REFRESH_MATVIEW:
11836 refreshMatViewData(fout, (const TableDataInfo *) dobj);
11837 break;
11838 case DO_RULE:
11839 dumpRule(fout, (const RuleInfo *) dobj);
11840 break;
11841 case DO_TRIGGER:
11842 dumpTrigger(fout, (const TriggerInfo *) dobj);
11843 break;
11844 case DO_EVENT_TRIGGER:
11845 dumpEventTrigger(fout, (const EventTriggerInfo *) dobj);
11846 break;
11847 case DO_CONSTRAINT:
11848 dumpConstraint(fout, (const ConstraintInfo *) dobj);
11849 break;
11850 case DO_FK_CONSTRAINT:
11851 dumpConstraint(fout, (const ConstraintInfo *) dobj);
11852 break;
11853 case DO_PROCLANG:
11854 dumpProcLang(fout, (const ProcLangInfo *) dobj);
11855 break;
11856 case DO_CAST:
11857 dumpCast(fout, (const CastInfo *) dobj);
11858 break;
11859 case DO_TRANSFORM:
11860 dumpTransform(fout, (const TransformInfo *) dobj);
11861 break;
11862 case DO_SEQUENCE_SET:
11863 dumpSequenceData(fout, (const TableDataInfo *) dobj);
11864 break;
11865 case DO_TABLE_DATA:
11866 dumpTableData(fout, (const TableDataInfo *) dobj);
11867 break;
11868 case DO_DUMMY_TYPE:
11869 /* table rowtypes and array types are never dumped separately */
11870 break;
11871 case DO_TSPARSER:
11872 dumpTSParser(fout, (const TSParserInfo *) dobj);
11873 break;
11874 case DO_TSDICT:
11875 dumpTSDictionary(fout, (const TSDictInfo *) dobj);
11876 break;
11877 case DO_TSTEMPLATE:
11878 dumpTSTemplate(fout, (const TSTemplateInfo *) dobj);
11879 break;
11880 case DO_TSCONFIG:
11881 dumpTSConfig(fout, (const TSConfigInfo *) dobj);
11882 break;
11883 case DO_FDW:
11884 dumpForeignDataWrapper(fout, (const FdwInfo *) dobj);
11885 break;
11886 case DO_FOREIGN_SERVER:
11887 dumpForeignServer(fout, (const ForeignServerInfo *) dobj);
11888 break;
11889 case DO_DEFAULT_ACL:
11890 dumpDefaultACL(fout, (const DefaultACLInfo *) dobj);
11891 break;
11892 case DO_LARGE_OBJECT:
11893 dumpLO(fout, (const LoInfo *) dobj);
11894 break;
11896 if (dobj->dump & DUMP_COMPONENT_DATA)
11897 {
11898 LoInfo *loinfo;
11899 TocEntry *te;
11900
11901 loinfo = (LoInfo *) findObjectByDumpId(dobj->dependencies[0]);
11902 if (loinfo == NULL)
11903 pg_fatal("missing metadata for large objects \"%s\"",
11904 dobj->name);
11905
11906 te = ArchiveEntry(fout, dobj->catId, dobj->dumpId,
11907 ARCHIVE_OPTS(.tag = dobj->name,
11908 .owner = loinfo->rolname,
11909 .description = "BLOBS",
11910 .section = SECTION_DATA,
11911 .deps = dobj->dependencies,
11912 .nDeps = dobj->nDeps,
11913 .dumpFn = dumpLOs,
11914 .dumpArg = loinfo));
11915
11916 /*
11917 * Set the TocEntry's dataLength in case we are doing a
11918 * parallel dump and want to order dump jobs by table size.
11919 * (We need some size estimate for every TocEntry with a
11920 * DataDumper function.) We don't currently have any cheap
11921 * way to estimate the size of LOs, but fortunately it doesn't
11922 * matter too much as long as we get large batches of LOs
11923 * processed reasonably early. Assume 8K per blob.
11924 */
11925 te->dataLength = loinfo->numlos * (pgoff_t) 8192;
11926 }
11927 break;
11928 case DO_POLICY:
11929 dumpPolicy(fout, (const PolicyInfo *) dobj);
11930 break;
11931 case DO_PUBLICATION:
11932 dumpPublication(fout, (const PublicationInfo *) dobj);
11933 break;
11934 case DO_PUBLICATION_REL:
11936 break;
11939 (const PublicationSchemaInfo *) dobj);
11940 break;
11941 case DO_SUBSCRIPTION:
11942 dumpSubscription(fout, (const SubscriptionInfo *) dobj);
11943 break;
11945 dumpSubscriptionTable(fout, (const SubRelInfo *) dobj);
11946 break;
11947 case DO_REL_STATS:
11948 dumpRelationStats(fout, (const RelStatsInfo *) dobj);
11949 break;
11952 /* never dumped, nothing to do */
11953 break;
11954 }
11955}
11956
11957/*
11958 * dumpNamespace
11959 * writes out to fout the queries to recreate a user-defined namespace
11960 *
11961 * Emits DROP/CREATE SCHEMA (or explanatory comments for initdb-created
11961 * schemas), then the schema's comment, security label, and ACL entries,
11961 * gated by the object's DUMP_COMPONENT_* flags.
11960 */
11961static void
/* NOTE(review): extraction dropped line 11962 (the function signature; presumably dumpNamespace(Archive *fout, const NamespaceInfo *nspinfo), given the fout/nspinfo uses below) — verify against upstream pg_dump.c. */
11963{
11964 DumpOptions *dopt = fout->dopt;
11965 PQExpBuffer q;
/* NOTE(review): line 11966 dropped — presumably "PQExpBuffer delq;" since delq is used below; confirm. */
11967 char *qnspname;
11968
11969 /* Do nothing if not dumping schema */
11970 if (!dopt->dumpSchema)
11971 return;
11972
11973 q = createPQExpBuffer();
/* NOTE(review): line 11974 dropped — presumably "delq = createPQExpBuffer();"; confirm. */
11975
11976 qnspname = pg_strdup(fmtId(nspinfo->dobj.name));
11977
11978 if (nspinfo->create)
11979 {
11980 appendPQExpBuffer(delq, "DROP SCHEMA %s;\n", qnspname);
11981 appendPQExpBuffer(q, "CREATE SCHEMA %s;\n", qnspname);
11982 }
11983 else
11984 {
11985 /* see selectDumpableNamespace() */
/* NOTE(review): lines 11986 and 11988 dropped — presumably the appendPQExpBufferStr(delq, ...) / appendPQExpBufferStr(q, ...) call heads for the two string arguments below; confirm. */
11987 "-- *not* dropping schema, since initdb creates it\n");
11989 "-- *not* creating schema, since initdb creates it\n");
11990 }
11991
11992 if (dopt->binary_upgrade)
/* NOTE(review): line 11993 dropped — the call whose trailing arguments appear on the next line (presumably binary_upgrade_extension_member(q, &nspinfo->dobj, ...)); confirm. */
11994 "SCHEMA", qnspname, NULL);
11995
11996 if (nspinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
11997 ArchiveEntry(fout, nspinfo->dobj.catId, nspinfo->dobj.dumpId,
11998 ARCHIVE_OPTS(.tag = nspinfo->dobj.name,
11999 .owner = nspinfo->rolname,
12000 .description = "SCHEMA",
12001 .section = SECTION_PRE_DATA,
12002 .createStmt = q->data,
12003 .dropStmt = delq->data));
12004
12005 /* Dump Schema Comments and Security Labels */
12006 if (nspinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
12007 {
12008 const char *initdb_comment = NULL;
12009
12010 if (!nspinfo->create && strcmp(qnspname, "public") == 0)
12011 initdb_comment = "standard public schema";
/* NOTE(review): lines 12012 and 12015 dropped — presumably the dumpCommentExtended(...) call head and its initdb_comment trailing argument; confirm. */
12013 NULL, nspinfo->rolname,
12014 nspinfo->dobj.catId, 0, nspinfo->dobj.dumpId,
12016 }
12017
12018 if (nspinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
12019 dumpSecLabel(fout, "SCHEMA", qnspname,
12020 NULL, nspinfo->rolname,
12021 nspinfo->dobj.catId, 0, nspinfo->dobj.dumpId);
12022
12023 if (nspinfo->dobj.dump & DUMP_COMPONENT_ACL)
12024 dumpACL(fout, nspinfo->dobj.dumpId, InvalidDumpId, "SCHEMA",
12025 qnspname, NULL, NULL,
12026 NULL, nspinfo->rolname, &nspinfo->dacl);
12027
12028 free(qnspname);
12029
/* NOTE(review): lines 12030-12031 dropped — presumably destroyPQExpBuffer(q) and destroyPQExpBuffer(delq); confirm. */
12032}
12033
12034/*
12035 * dumpExtension
12036 * writes out to fout the queries to recreate an extension
12037 *
12036 * Regular dumps emit CREATE EXTENSION IF NOT EXISTS (no version, so the
12036 * target's default is used); binary-upgrade dumps instead create an empty
12036 * extension via binary_upgrade_create_empty_extension() and let member
12036 * objects be attached individually.
12037 */
12038static void
/* NOTE(review): extraction dropped line 12039 (the function signature; presumably dumpExtension(Archive *fout, const ExtensionInfo *extinfo)) — verify against upstream pg_dump.c. */
12040{
12041 DumpOptions *dopt = fout->dopt;
12042 PQExpBuffer q;
/* NOTE(review): line 12043 dropped — presumably "PQExpBuffer delq;" since delq is used below; confirm. */
12044 char *qextname;
12045
12046 /* Do nothing if not dumping schema */
12047 if (!dopt->dumpSchema)
12048 return;
12049
12050 q = createPQExpBuffer();
/* NOTE(review): line 12051 dropped — presumably "delq = createPQExpBuffer();"; confirm. */
12052
12053 qextname = pg_strdup(fmtId(extinfo->dobj.name));
12054
12055 appendPQExpBuffer(delq, "DROP EXTENSION %s;\n", qextname);
12056
12057 if (!dopt->binary_upgrade)
12058 {
12059 /*
12060 * In a regular dump, we simply create the extension, intentionally
12061 * not specifying a version, so that the destination installation's
12062 * default version is used.
12063 *
12064 * Use of IF NOT EXISTS here is unlike our behavior for other object
12065 * types; but there are various scenarios in which it's convenient to
12066 * manually create the desired extension before restoring, so we
12067 * prefer to allow it to exist already.
12068 */
12069 appendPQExpBuffer(q, "CREATE EXTENSION IF NOT EXISTS %s WITH SCHEMA %s;\n",
12070 qextname, fmtId(extinfo->namespace));
12071 }
12072 else
12073 {
12074 /*
12075 * In binary-upgrade mode, it's critical to reproduce the state of the
12076 * database exactly, so our procedure is to create an empty extension,
12077 * restore all the contained objects normally, and add them to the
12078 * extension one by one. This function performs just the first of
12079 * those steps. binary_upgrade_extension_member() takes care of
12080 * adding member objects as they're created.
12081 */
12082 int i;
12083 int n;
12084
12085 appendPQExpBufferStr(q, "-- For binary upgrade, create an empty extension and insert objects into it\n");
12086
12087 /*
12088 * We unconditionally create the extension, so we must drop it if it
12089 * exists. This could happen if the user deleted 'plpgsql' and then
12090 * readded it, causing its oid to be greater than g_last_builtin_oid.
12091 */
12092 appendPQExpBuffer(q, "DROP EXTENSION IF EXISTS %s;\n", qextname);
12093
/* NOTE(review): line 12094 dropped — presumably the appendPQExpBufferStr(q, ...) call head for the SQL fragment on the next line; confirm. */
12095 "SELECT pg_catalog.binary_upgrade_create_empty_extension(");
12096 appendStringLiteralAH(q, extinfo->dobj.name, fout);
12097 appendPQExpBufferStr(q, ", ");
12098 appendStringLiteralAH(q, extinfo->namespace, fout);
12099 appendPQExpBufferStr(q, ", ");
12100 appendPQExpBuffer(q, "%s, ", extinfo->relocatable ? "true" : "false");
12101 appendStringLiteralAH(q, extinfo->extversion, fout);
12102 appendPQExpBufferStr(q, ", ");
12103
12104 /*
12105 * Note that we're pushing extconfig (an OID array) back into
12106 * pg_extension exactly as-is. This is OK because pg_class OIDs are
12107 * preserved in binary upgrade.
12108 */
12109 if (strlen(extinfo->extconfig) > 2)
12110 appendStringLiteralAH(q, extinfo->extconfig, fout);
12111 else
12112 appendPQExpBufferStr(q, "NULL");
12113 appendPQExpBufferStr(q, ", ");
12114 if (strlen(extinfo->extcondition) > 2)
12115 appendStringLiteralAH(q, extinfo->extcondition, fout);
12116 else
12117 appendPQExpBufferStr(q, "NULL");
12118 appendPQExpBufferStr(q, ", ");
12119 appendPQExpBufferStr(q, "ARRAY[");
12120 n = 0;
12121 for (i = 0; i < extinfo->dobj.nDeps; i++)
12122 {
/* NOTE(review): line 12123 dropped — presumably "DumpableObject *extobj;" given the use just below; confirm. */
12124
12125 extobj = findObjectByDumpId(extinfo->dobj.dependencies[i]);
12126 if (extobj && extobj->objType == DO_EXTENSION)
12127 {
12128 if (n++ > 0)
12129 appendPQExpBufferChar(q, ',');
/* NOTE(review): line 12130 dropped — presumably appendStringLiteralAH(q, extobj->name, fout) to emit the required-extension name; confirm. */
12131 }
12132 }
12133 appendPQExpBufferStr(q, "]::pg_catalog.text[]");
12134 appendPQExpBufferStr(q, ");\n");
12135 }
12136
12137 if (extinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
12138 ArchiveEntry(fout, extinfo->dobj.catId, extinfo->dobj.dumpId,
12139 ARCHIVE_OPTS(.tag = extinfo->dobj.name,
12140 .description = "EXTENSION",
12141 .section = SECTION_PRE_DATA,
12142 .createStmt = q->data,
12143 .dropStmt = delq->data));
12144
12145 /* Dump Extension Comments */
12146 if (extinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
12147 dumpComment(fout, "EXTENSION", qextname,
12148 NULL, "",
12149 extinfo->dobj.catId, 0, extinfo->dobj.dumpId);
12150
12151 free(qextname);
12152
/* NOTE(review): lines 12153-12154 dropped — presumably destroyPQExpBuffer(q) and destroyPQExpBuffer(delq); confirm. */
12155}
12156
12157/*
12158 * dumpType
12159 * writes out to fout the queries to recreate a user-defined type
12158 *
12159 * Dispatcher: inspects tyinfo->typtype and delegates to the style-specific
12159 * helper (base/domain/composite/enum/range/undefined-shell).
12160 */
12161static void
/* NOTE(review): extraction dropped line 12162 (the function signature; presumably dumpType(Archive *fout, const TypeInfo *tyinfo)) — verify against upstream pg_dump.c. */
12163{
12164 DumpOptions *dopt = fout->dopt;
12165
12166 /* Do nothing if not dumping schema */
12167 if (!dopt->dumpSchema)
12168 return;
12169
12170 /* Dump out in proper style */
12171 if (tyinfo->typtype == TYPTYPE_BASE)
/* NOTE(review): the call line for every branch below was dropped by extraction (lines 12172, 12174, 12176, 12178, 12180, 12182) — presumably dumpBaseType, dumpDomain, dumpCompositeType, dumpEnumType, dumpRangeType, and dumpUndefinedType respectively, matching the helpers defined later in this file; confirm against upstream. */
12173 else if (tyinfo->typtype == TYPTYPE_DOMAIN)
12175 else if (tyinfo->typtype == TYPTYPE_COMPOSITE)
12177 else if (tyinfo->typtype == TYPTYPE_ENUM)
12179 else if (tyinfo->typtype == TYPTYPE_RANGE)
12181 else if (tyinfo->typtype == TYPTYPE_PSEUDO && !tyinfo->isDefined)
12183 else
12184 pg_log_warning("typtype of data type \"%s\" appears to be invalid",
12185 tyinfo->dobj.name);
12186}
12187
12188/*
12189 * dumpEnumType
12190 * writes out to fout the queries to recreate a user-defined enum type
12189 *
12190 * Queries pg_enum for the labels (in enumsortorder). In normal dumps the
12190 * labels are emitted inline in CREATE TYPE ... AS ENUM; in binary-upgrade
12190 * mode each label's OID is pinned via
12190 * binary_upgrade_set_next_pg_enum_oid() followed by ALTER TYPE ADD VALUE.
12191 */
12192static void
/* NOTE(review): extraction dropped line 12193 (the function signature; presumably dumpEnumType(Archive *fout, const TypeInfo *tyinfo)) — verify against upstream pg_dump.c. */
12194{
12195 DumpOptions *dopt = fout->dopt;
/* NOTE(review): lines 12196-12198 dropped — presumably the PQExpBuffer declarations for q, delq and query, all of which are used below; confirm. */
12199 PGresult *res;
12200 int num,
12201 i;
12202 Oid enum_oid;
12203 char *qtypname;
12204 char *qualtypname;
12205 char *label;
12206 int i_enumlabel;
12207 int i_oid;
12208
/* NOTE(review): line 12209 dropped — presumably the once-per-connection guard for preparing the statement (and buffer creation likely sits nearby); confirm. */
12210 {
12211 /* Set up query for enum-specific details */
/* NOTE(review): line 12212 dropped — presumably the appendPQExpBufferStr(query, ...) call head for the PREPARE text below; confirm. */
12213 "PREPARE dumpEnumType(pg_catalog.oid) AS\n"
12214 "SELECT oid, enumlabel "
12215 "FROM pg_catalog.pg_enum "
12216 "WHERE enumtypid = $1 "
12217 "ORDER BY enumsortorder");
12218
12219 ExecuteSqlStatement(fout, query->data);
12220
/* NOTE(review): line 12221 dropped — presumably marks the statement as prepared for this connection; confirm. */
12222 }
12223
12224 printfPQExpBuffer(query,
12225 "EXECUTE dumpEnumType('%u')",
12226 tyinfo->dobj.catId.oid);
12227
12228 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
12229
12230 num = PQntuples(res);
12231
12232 qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
/* NOTE(review): line 12233 dropped — presumably qualtypname = pg_strdup(fmtQualifiedDumpable(tyinfo)), given its use below; confirm. */
12234
12235 /*
12236 * CASCADE shouldn't be required here as for normal types since the I/O
12237 * functions are generic and do not get dropped.
12238 */
12239 appendPQExpBuffer(delq, "DROP TYPE %s;\n", qualtypname);
12240
12241 if (dopt->binary_upgrade)
/* NOTE(review): line 12242 dropped — the call head whose trailing arguments follow (presumably binary_upgrade_set_type_oids_by_type_oid(fout, q, ...)); confirm. */
12243 tyinfo->dobj.catId.oid,
12244 false, false);
12245
12246 appendPQExpBuffer(q, "CREATE TYPE %s AS ENUM (",
12247 qualtypname);
12248
12249 if (!dopt->binary_upgrade)
12250 {
12251 i_enumlabel = PQfnumber(res, "enumlabel");
12252
12253 /* Labels with server-assigned oids */
12254 for (i = 0; i < num; i++)
12255 {
12256 label = PQgetvalue(res, i, i_enumlabel);
12257 if (i > 0)
12258 appendPQExpBufferChar(q, ',');
12259 appendPQExpBufferStr(q, "\n ");
/* NOTE(review): line 12260 dropped — presumably appendStringLiteralAH(q, label, fout) to quote the label; confirm. */
12261 }
12262 }
12263
12264 appendPQExpBufferStr(q, "\n);\n");
12265
12266 if (dopt->binary_upgrade)
12267 {
12268 i_oid = PQfnumber(res, "oid");
12269 i_enumlabel = PQfnumber(res, "enumlabel");
12270
12271 /* Labels with dump-assigned (preserved) oids */
12272 for (i = 0; i < num; i++)
12273 {
12274 enum_oid = atooid(PQgetvalue(res, i, i_oid));
12275 label = PQgetvalue(res, i, i_enumlabel);
12276
12277 if (i == 0)
12278 appendPQExpBufferStr(q, "\n-- For binary upgrade, must preserve pg_enum oids\n");
/* NOTE(review): line 12279 dropped — presumably the appendPQExpBuffer(q, ...) call head for the SELECT below; similarly line 12283 (after the ALTER TYPE line) presumably appends the quoted label; confirm. */
12280 "SELECT pg_catalog.binary_upgrade_set_next_pg_enum_oid('%u'::pg_catalog.oid);\n",
12281 enum_oid);
12282 appendPQExpBuffer(q, "ALTER TYPE %s ADD VALUE ", qualtypname);
12284 appendPQExpBufferStr(q, ";\n\n");
12285 }
12286 }
12287
12288 if (dopt->binary_upgrade)
/* NOTE(review): line 12289 dropped — the call head whose trailing arguments follow (presumably binary_upgrade_extension_member(q, &tyinfo->dobj, ...)); confirm. */
12290 "TYPE", qtypname,
12291 tyinfo->dobj.namespace->dobj.name);
12292
12293 if (tyinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
12294 ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
12295 ARCHIVE_OPTS(.tag = tyinfo->dobj.name,
12296 .namespace = tyinfo->dobj.namespace->dobj.name,
12297 .owner = tyinfo->rolname,
12298 .description = "TYPE",
12299 .section = SECTION_PRE_DATA,
12300 .createStmt = q->data,
12301 .dropStmt = delq->data));
12302
12303 /* Dump Type Comments and Security Labels */
12304 if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
12305 dumpComment(fout, "TYPE", qtypname,
12306 tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12307 tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12308
12309 if (tyinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
12310 dumpSecLabel(fout, "TYPE", qtypname,
12311 tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12312 tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12313
12314 if (tyinfo->dobj.dump & DUMP_COMPONENT_ACL)
12315 dumpACL(fout, tyinfo->dobj.dumpId, InvalidDumpId, "TYPE",
12316 qtypname, NULL,
12317 tyinfo->dobj.namespace->dobj.name,
12318 NULL, tyinfo->rolname, &tyinfo->dacl);
12319
12320 PQclear(res);
/* NOTE(review): lines 12321-12322 dropped — presumably destroyPQExpBuffer(q) and destroyPQExpBuffer(delq); line 12325 likewise presumably free(qualtypname); confirm. */
12323 destroyPQExpBuffer(query);
12324 free(qtypname);
12326}
12327
12328/*
12329 * dumpRangeType
12330 * writes out to fout the queries to recreate a user-defined range type
12329 *
12330 * Queries pg_range (joined to the subtype's pg_type row and the subtype
12330 * opclass) and emits CREATE TYPE ... AS RANGE with subtype, optional
12330 * multirange_type_name (server >= 14), non-default opclass, non-default
12330 * collation, and canonical/subtype_diff functions when set.
12331 */
12332static void
/* NOTE(review): extraction dropped line 12333 (the function signature; presumably dumpRangeType(Archive *fout, const TypeInfo *tyinfo)) — verify against upstream pg_dump.c. */
12334{
12335 DumpOptions *dopt = fout->dopt;
/* NOTE(review): lines 12336-12338 and 12340 dropped — presumably the PQExpBuffer declarations (q, delq, query) and "Oid collationOid;", all used below; confirm. */
12339 PGresult *res;
12341 char *qtypname;
12342 char *qualtypname;
12343 char *procname;
12344
/* NOTE(review): line 12345 dropped — presumably the once-per-connection prepare guard; confirm. */
12346 {
12347 /* Set up query for range-specific details */
/* NOTE(review): lines 12348, 12351, 12355, 12358, 12361 dropped — presumably the appendPQExpBufferStr(query, ...) call heads for the SQL fragments that remain below; confirm. */
12349 "PREPARE dumpRangeType(pg_catalog.oid) AS\n");
12350
12352 "SELECT ");
12353
12354 if (fout->remoteVersion >= 140000)
12356 "pg_catalog.format_type(rngmultitypid, NULL) AS rngmultitype, ");
12357 else
12359 "NULL AS rngmultitype, ");
12360
12362 "pg_catalog.format_type(rngsubtype, NULL) AS rngsubtype, "
12363 "opc.opcname AS opcname, "
12364 "(SELECT nspname FROM pg_catalog.pg_namespace nsp "
12365 " WHERE nsp.oid = opc.opcnamespace) AS opcnsp, "
12366 "opc.opcdefault, "
12367 "CASE WHEN rngcollation = st.typcollation THEN 0 "
12368 " ELSE rngcollation END AS collation, "
12369 "rngcanonical, rngsubdiff "
12370 "FROM pg_catalog.pg_range r, pg_catalog.pg_type st, "
12371 " pg_catalog.pg_opclass opc "
12372 "WHERE st.oid = rngsubtype AND opc.oid = rngsubopc AND "
12373 "rngtypid = $1");
12374
12375 ExecuteSqlStatement(fout, query->data);
12376
/* NOTE(review): line 12377 dropped — presumably marks the statement as prepared; confirm. */
12378 }
12379
12380 printfPQExpBuffer(query,
12381 "EXECUTE dumpRangeType('%u')",
12382 tyinfo->dobj.catId.oid);
12383
12384 res = ExecuteSqlQueryForSingleRow(fout, query->data);
12385
12386 qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
/* NOTE(review): line 12387 dropped — presumably qualtypname = pg_strdup(fmtQualifiedDumpable(tyinfo)); confirm. */
12388
12389 /*
12390 * CASCADE shouldn't be required here as for normal types since the I/O
12391 * functions are generic and do not get dropped.
12392 */
12393 appendPQExpBuffer(delq, "DROP TYPE %s;\n", qualtypname);
12394
12395 if (dopt->binary_upgrade)
/* NOTE(review): line 12396 dropped — the call head whose trailing arguments follow (presumably binary_upgrade_set_type_oids_by_type_oid(fout, q, ...), with true for the multirange flag given line 12398); confirm. */
12397 tyinfo->dobj.catId.oid,
12398 false, true);
12399
12400 appendPQExpBuffer(q, "CREATE TYPE %s AS RANGE (",
12401 qualtypname);
12402
12403 appendPQExpBuffer(q, "\n subtype = %s",
12404 PQgetvalue(res, 0, PQfnumber(res, "rngsubtype")));
12405
12406 if (!PQgetisnull(res, 0, PQfnumber(res, "rngmultitype")))
12407 appendPQExpBuffer(q, ",\n multirange_type_name = %s",
12408 PQgetvalue(res, 0, PQfnumber(res, "rngmultitype")));
12409
12410 /* print subtype_opclass only if not default for subtype */
12411 if (PQgetvalue(res, 0, PQfnumber(res, "opcdefault"))[0] != 't')
12412 {
12413 char *opcname = PQgetvalue(res, 0, PQfnumber(res, "opcname"));
12414 char *nspname = PQgetvalue(res, 0, PQfnumber(res, "opcnsp"));
12415
12416 appendPQExpBuffer(q, ",\n subtype_opclass = %s.",
12417 fmtId(nspname));
/* NOTE(review): line 12418 dropped — presumably appendPQExpBufferStr(q, fmtId(opcname)) to finish the qualified opclass name; confirm. */
12419 }
12420
12421 collationOid = atooid(PQgetvalue(res, 0, PQfnumber(res, "collation")));
/* NOTE(review): line 12422 dropped — presumably "if (OidIsValid(collationOid))" guarding this block, and line 12424 presumably "CollInfo *coll = findCollationByOid(collationOid);" given the use of coll below; line 12428 presumably fmtQualifiedDumpable(coll) as the format argument; confirm. */
12423 {
12425
12426 if (coll)
12427 appendPQExpBuffer(q, ",\n collation = %s",
12429 }
12430
12431 procname = PQgetvalue(res, 0, PQfnumber(res, "rngcanonical"));
12432 if (strcmp(procname, "-") != 0)
12433 appendPQExpBuffer(q, ",\n canonical = %s", procname);
12434
12435 procname = PQgetvalue(res, 0, PQfnumber(res, "rngsubdiff"));
12436 if (strcmp(procname, "-") != 0)
12437 appendPQExpBuffer(q, ",\n subtype_diff = %s", procname);
12438
12439 appendPQExpBufferStr(q, "\n);\n");
12440
12441 if (dopt->binary_upgrade)
/* NOTE(review): line 12442 dropped — presumably binary_upgrade_extension_member(q, &tyinfo->dobj, ...); confirm. */
12443 "TYPE", qtypname,
12444 tyinfo->dobj.namespace->dobj.name);
12445
12446 if (tyinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
12447 ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
12448 ARCHIVE_OPTS(.tag = tyinfo->dobj.name,
12449 .namespace = tyinfo->dobj.namespace->dobj.name,
12450 .owner = tyinfo->rolname,
12451 .description = "TYPE",
12452 .section = SECTION_PRE_DATA,
12453 .createStmt = q->data,
12454 .dropStmt = delq->data));
12455
12456 /* Dump Type Comments and Security Labels */
12457 if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
12458 dumpComment(fout, "TYPE", qtypname,
12459 tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12460 tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12461
12462 if (tyinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
12463 dumpSecLabel(fout, "TYPE", qtypname,
12464 tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12465 tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12466
12467 if (tyinfo->dobj.dump & DUMP_COMPONENT_ACL)
12468 dumpACL(fout, tyinfo->dobj.dumpId, InvalidDumpId, "TYPE",
12469 qtypname, NULL,
12470 tyinfo->dobj.namespace->dobj.name,
12471 NULL, tyinfo->rolname, &tyinfo->dacl);
12472
12473 PQclear(res);
/* NOTE(review): lines 12474-12475 and 12478 dropped — presumably destroyPQExpBuffer(q), destroyPQExpBuffer(delq) and free(qualtypname); confirm. */
12476 destroyPQExpBuffer(query);
12477 free(qtypname);
12479}
12480
12481/*
12482 * dumpUndefinedType
12483 * writes out to fout the queries to recreate a !typisdefined type
12484 *
12485 * This is a shell type, but we use different terminology to distinguish
12486 * this case from where we have to emit a shell type definition to break
12487 * circular dependencies. An undefined type shouldn't ever have anything
12488 * depending on it.
12489 */
12490static void
/* NOTE(review): extraction dropped line 12491 (the function signature; presumably dumpUndefinedType(Archive *fout, const TypeInfo *tyinfo)) — verify against upstream pg_dump.c. */
12492{
12493 DumpOptions *dopt = fout->dopt;
/* NOTE(review): lines 12494-12495 dropped — presumably the PQExpBuffer declarations/creations for q and delq, both used below; confirm. */
12496 char *qtypname;
12497 char *qualtypname;
12498
12499 qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
/* NOTE(review): line 12500 dropped — presumably qualtypname = pg_strdup(fmtQualifiedDumpable(tyinfo)); confirm. */
12501
12502 appendPQExpBuffer(delq, "DROP TYPE %s;\n", qualtypname);
12503
12504 if (dopt->binary_upgrade)
/* NOTE(review): line 12505 dropped — the call head whose trailing arguments follow (presumably binary_upgrade_set_type_oids_by_type_oid(fout, q, ...)); confirm. */
12506 tyinfo->dobj.catId.oid,
12507 false, false);
12508
12509 appendPQExpBuffer(q, "CREATE TYPE %s;\n",
12510 qualtypname);
12511
12512 if (dopt->binary_upgrade)
/* NOTE(review): line 12513 dropped — presumably binary_upgrade_extension_member(q, &tyinfo->dobj, ...); confirm. */
12514 "TYPE", qtypname,
12515 tyinfo->dobj.namespace->dobj.name);
12516
12517 if (tyinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
12518 ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
12519 ARCHIVE_OPTS(.tag = tyinfo->dobj.name,
12520 .namespace = tyinfo->dobj.namespace->dobj.name,
12521 .owner = tyinfo->rolname,
12522 .description = "TYPE",
12523 .section = SECTION_PRE_DATA,
12524 .createStmt = q->data,
12525 .dropStmt = delq->data));
12526
12527 /* Dump Type Comments and Security Labels */
12528 if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
12529 dumpComment(fout, "TYPE", qtypname,
12530 tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12531 tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12532
12533 if (tyinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
12534 dumpSecLabel(fout, "TYPE", qtypname,
12535 tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12536 tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12537
12538 if (tyinfo->dobj.dump & DUMP_COMPONENT_ACL)
12539 dumpACL(fout, tyinfo->dobj.dumpId, InvalidDumpId, "TYPE",
12540 qtypname, NULL,
12541 tyinfo->dobj.namespace->dobj.name,
12542 NULL, tyinfo->rolname, &tyinfo->dacl);
12543
/* NOTE(review): lines 12544-12545 and 12547 dropped — presumably destroyPQExpBuffer(q), destroyPQExpBuffer(delq) and free(qualtypname); confirm. */
12546 free(qtypname);
12548}
12549
12550/*
12551 * dumpBaseType
12552 * writes out to fout the queries to recreate a user-defined base type
12551 *
12552 * Fetches the pg_type row (prepared statement, with a >= 14 branch for
12552 * typsubscript) and emits CREATE TYPE with every non-default property:
12552 * I/O and support functions, default, delimiter, category, alignment,
12552 * storage, pass-by-value, etc. The DROP uses CASCADE because of the
12552 * type <-> I/O-function circular dependency (see comment below).
12553 */
12554static void
/* NOTE(review): extraction dropped line 12555 (the function signature; presumably dumpBaseType(Archive *fout, const TypeInfo *tyinfo)) — verify against upstream pg_dump.c. */
12556{
12557 DumpOptions *dopt = fout->dopt;
/* NOTE(review): lines 12558-12560 dropped — presumably the PQExpBuffer declarations for q, delq and query; lines 12573-12578 below likewise dropped — presumably the Oid declarations (typreceiveoid, typsendoid, typmodinoid, typmodoutoid, typanalyzeoid, typsubscriptoid) assigned later; confirm. */
12561 PGresult *res;
12562 char *qtypname;
12563 char *qualtypname;
12564 char *typlen;
12565 char *typinput;
12566 char *typoutput;
12567 char *typreceive;
12568 char *typsend;
12569 char *typmodin;
12570 char *typmodout;
12571 char *typanalyze;
12572 char *typsubscript;
12579 char *typcategory;
12580 char *typispreferred;
12581 char *typdelim;
12582 char *typbyval;
12583 char *typalign;
12584 char *typstorage;
12585 char *typcollatable;
12586 char *typdefault;
12587 bool typdefault_is_literal = false;
12588
/* NOTE(review): line 12589 dropped — presumably the once-per-connection prepare guard; line 12592 presumably the appendPQExpBufferStr(query, ...) call head; confirm. */
12590 {
12591 /* Set up query for type-specific details */
12593 "PREPARE dumpBaseType(pg_catalog.oid) AS\n"
12594 "SELECT typlen, "
12595 "typinput, typoutput, typreceive, typsend, "
12596 "typreceive::pg_catalog.oid AS typreceiveoid, "
12597 "typsend::pg_catalog.oid AS typsendoid, "
12598 "typanalyze, "
12599 "typanalyze::pg_catalog.oid AS typanalyzeoid, "
12600 "typdelim, typbyval, typalign, typstorage, "
12601 "typmodin, typmodout, "
12602 "typmodin::pg_catalog.oid AS typmodinoid, "
12603 "typmodout::pg_catalog.oid AS typmodoutoid, "
12604 "typcategory, typispreferred, "
12605 "(typcollation <> 0) AS typcollatable, "
12606 "pg_catalog.pg_get_expr(typdefaultbin, 0) AS typdefaultbin, typdefault, ");
12607
12608 if (fout->remoteVersion >= 140000)
/* NOTE(review): lines 12609 and 12613 dropped — presumably the appendPQExpBufferStr(query, ...) call heads for the two version branches; confirm. */
12610 "typsubscript, "
12611 "typsubscript::pg_catalog.oid AS typsubscriptoid ");
12612 else
12614 "'-' AS typsubscript, 0 AS typsubscriptoid ");
12615
12616 appendPQExpBufferStr(query, "FROM pg_catalog.pg_type "
12617 "WHERE oid = $1");
12618
12619 ExecuteSqlStatement(fout, query->data);
12620
/* NOTE(review): line 12621 dropped — presumably marks the statement as prepared; confirm. */
12622 }
12623
12624 printfPQExpBuffer(query,
12625 "EXECUTE dumpBaseType('%u')",
12626 tyinfo->dobj.catId.oid);
12627
12628 res = ExecuteSqlQueryForSingleRow(fout, query->data);
12629
12630 typlen = PQgetvalue(res, 0, PQfnumber(res, "typlen"));
12631 typinput = PQgetvalue(res, 0, PQfnumber(res, "typinput"));
12632 typoutput = PQgetvalue(res, 0, PQfnumber(res, "typoutput"));
12633 typreceive = PQgetvalue(res, 0, PQfnumber(res, "typreceive"));
12634 typsend = PQgetvalue(res, 0, PQfnumber(res, "typsend"));
12635 typmodin = PQgetvalue(res, 0, PQfnumber(res, "typmodin"));
12636 typmodout = PQgetvalue(res, 0, PQfnumber(res, "typmodout"));
12637 typanalyze = PQgetvalue(res, 0, PQfnumber(res, "typanalyze"));
12638 typsubscript = PQgetvalue(res, 0, PQfnumber(res, "typsubscript"));
12639 typreceiveoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typreceiveoid")));
12640 typsendoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typsendoid")));
12641 typmodinoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typmodinoid")));
12642 typmodoutoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typmodoutoid")));
12643 typanalyzeoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typanalyzeoid")));
12644 typsubscriptoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typsubscriptoid")));
12645 typcategory = PQgetvalue(res, 0, PQfnumber(res, "typcategory"));
12646 typispreferred = PQgetvalue(res, 0, PQfnumber(res, "typispreferred"));
12647 typdelim = PQgetvalue(res, 0, PQfnumber(res, "typdelim"));
12648 typbyval = PQgetvalue(res, 0, PQfnumber(res, "typbyval"));
12649 typalign = PQgetvalue(res, 0, PQfnumber(res, "typalign"));
12650 typstorage = PQgetvalue(res, 0, PQfnumber(res, "typstorage"));
12651 typcollatable = PQgetvalue(res, 0, PQfnumber(res, "typcollatable"));
12652 if (!PQgetisnull(res, 0, PQfnumber(res, "typdefaultbin")))
12653 typdefault = PQgetvalue(res, 0, PQfnumber(res, "typdefaultbin"));
12654 else if (!PQgetisnull(res, 0, PQfnumber(res, "typdefault")))
12655 {
12656 typdefault = PQgetvalue(res, 0, PQfnumber(res, "typdefault"));
12657 typdefault_is_literal = true; /* it needs quotes */
12658 }
12659 else
12660 typdefault = NULL;
12661
12662 qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
/* NOTE(review): line 12663 dropped — presumably qualtypname = pg_strdup(fmtQualifiedDumpable(tyinfo)); confirm. */
12664
12665 /*
12666 * The reason we include CASCADE is that the circular dependency between
12667 * the type and its I/O functions makes it impossible to drop the type any
12668 * other way.
12669 */
12670 appendPQExpBuffer(delq, "DROP TYPE %s CASCADE;\n", qualtypname);
12671
12672 /*
12673 * We might already have a shell type, but setting pg_type_oid is
12674 * harmless, and in any case we'd better set the array type OID.
12675 */
12676 if (dopt->binary_upgrade)
/* NOTE(review): line 12677 dropped — the call head whose trailing arguments follow (presumably binary_upgrade_set_type_oids_by_type_oid(fout, q, ...)); confirm. */
12678 tyinfo->dobj.catId.oid,
12679 false, false);
12680
/* NOTE(review): lines 12681 and 12684 dropped — presumably the appendPQExpBuffer(q, ...) call head and the qualtypname argument for the CREATE TYPE below; confirm. */
12682 "CREATE TYPE %s (\n"
12683 " INTERNALLENGTH = %s",
12685 (strcmp(typlen, "-1") == 0) ? "variable" : typlen);
12686
12687 /* regproc result is sufficiently quoted already */
12688 appendPQExpBuffer(q, ",\n INPUT = %s", typinput);
12689 appendPQExpBuffer(q, ",\n OUTPUT = %s", typoutput);
/* NOTE(review): lines 12690, 12692, 12694, 12696, 12698 dropped — presumably the OidIsValid(...) guards (typreceiveoid, typsendoid, typmodinoid, typmodoutoid, typanalyzeoid) in front of the corresponding optional clauses below; confirm. */
12691 appendPQExpBuffer(q, ",\n RECEIVE = %s", typreceive);
12693 appendPQExpBuffer(q, ",\n SEND = %s", typsend);
12695 appendPQExpBuffer(q, ",\n TYPMOD_IN = %s", typmodin);
12697 appendPQExpBuffer(q, ",\n TYPMOD_OUT = %s", typmodout);
12699 appendPQExpBuffer(q, ",\n ANALYZE = %s", typanalyze);
12700
12701 if (strcmp(typcollatable, "t") == 0)
12702 appendPQExpBufferStr(q, ",\n COLLATABLE = true");
12703
12704 if (typdefault != NULL)
12705 {
12706 appendPQExpBufferStr(q, ",\n DEFAULT = ");
/* NOTE(review): lines 12707-12708 and 12710 dropped — presumably the typdefault_is_literal branch appending the quoted literal vs. the raw expression; confirm. */
12709 else
12711 }
12712
/* NOTE(review): line 12713 dropped — presumably an OidIsValid(typsubscriptoid) guard for the SUBSCRIPT clause; line 12718 presumably the getFormattedTypeName(...) call head matching the zeroIsError argument below; line 12724 presumably appendStringLiteralAH(q, typcategory, fout); confirm. */
12714 appendPQExpBuffer(q, ",\n SUBSCRIPT = %s", typsubscript);
12715
12716 if (OidIsValid(tyinfo->typelem))
12717 appendPQExpBuffer(q, ",\n ELEMENT = %s",
12719 zeroIsError));
12720
12721 if (strcmp(typcategory, "U") != 0)
12722 {
12723 appendPQExpBufferStr(q, ",\n CATEGORY = ");
12725 }
12726
12727 if (strcmp(typispreferred, "t") == 0)
12728 appendPQExpBufferStr(q, ",\n PREFERRED = true");
12729
12730 if (typdelim && strcmp(typdelim, ",") != 0)
12731 {
12732 appendPQExpBufferStr(q, ",\n DELIMITER = ");
12733 appendStringLiteralAH(q, typdelim, fout);
12734 }
12735
12736 if (*typalign == TYPALIGN_CHAR)
12737 appendPQExpBufferStr(q, ",\n ALIGNMENT = char");
12738 else if (*typalign == TYPALIGN_SHORT)
12739 appendPQExpBufferStr(q, ",\n ALIGNMENT = int2");
12740 else if (*typalign == TYPALIGN_INT)
12741 appendPQExpBufferStr(q, ",\n ALIGNMENT = int4");
12742 else if (*typalign == TYPALIGN_DOUBLE)
12743 appendPQExpBufferStr(q, ",\n ALIGNMENT = double");
12744
12745 if (*typstorage == TYPSTORAGE_PLAIN)
12746 appendPQExpBufferStr(q, ",\n STORAGE = plain");
12747 else if (*typstorage == TYPSTORAGE_EXTERNAL)
12748 appendPQExpBufferStr(q, ",\n STORAGE = external");
12749 else if (*typstorage == TYPSTORAGE_EXTENDED)
12750 appendPQExpBufferStr(q, ",\n STORAGE = extended");
12751 else if (*typstorage == TYPSTORAGE_MAIN)
12752 appendPQExpBufferStr(q, ",\n STORAGE = main");
12753
12754 if (strcmp(typbyval, "t") == 0)
12755 appendPQExpBufferStr(q, ",\n PASSEDBYVALUE");
12756
12757 appendPQExpBufferStr(q, "\n);\n");
12758
12759 if (dopt->binary_upgrade)
/* NOTE(review): line 12760 dropped — presumably binary_upgrade_extension_member(q, &tyinfo->dobj, ...); confirm. */
12761 "TYPE", qtypname,
12762 tyinfo->dobj.namespace->dobj.name);
12763
12764 if (tyinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
12765 ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
12766 ARCHIVE_OPTS(.tag = tyinfo->dobj.name,
12767 .namespace = tyinfo->dobj.namespace->dobj.name,
12768 .owner = tyinfo->rolname,
12769 .description = "TYPE",
12770 .section = SECTION_PRE_DATA,
12771 .createStmt = q->data,
12772 .dropStmt = delq->data));
12773
12774 /* Dump Type Comments and Security Labels */
12775 if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
12776 dumpComment(fout, "TYPE", qtypname,
12777 tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12778 tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12779
12780 if (tyinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
12781 dumpSecLabel(fout, "TYPE", qtypname,
12782 tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12783 tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12784
12785 if (tyinfo->dobj.dump & DUMP_COMPONENT_ACL)
12786 dumpACL(fout, tyinfo->dobj.dumpId, InvalidDumpId, "TYPE",
12787 qtypname, NULL,
12788 tyinfo->dobj.namespace->dobj.name,
12789 NULL, tyinfo->rolname, &tyinfo->dacl);
12790
12791 PQclear(res);
/* NOTE(review): lines 12792-12793 and 12796 dropped — presumably destroyPQExpBuffer(q), destroyPQExpBuffer(delq) and free(qualtypname); confirm. */
12794 destroyPQExpBuffer(query);
12795 free(qtypname);
12797}
12798
12799/*
12800 * dumpDomain
12801 * writes out to fout the queries to recreate a user-defined domain
12800 *
12801 * Fetches the domain's pg_type row and emits CREATE DOMAIN with its base
12801 * type, non-default collation, not-null constraint (named form only on
12801 * servers >= 17, and only when the name differs from the generated
12801 * "<type>_not_null"), default, and inline CHECK constraints; then dumps
12801 * comments/seclabels/ACLs and per-constraint comments.
12802 */
12803static void
/* NOTE(review): extraction dropped line 12804 (the function signature; presumably dumpDomain(Archive *fout, const TypeInfo *tyinfo)) — verify against upstream pg_dump.c. */
12805{
12806 DumpOptions *dopt = fout->dopt;
/* NOTE(review): lines 12807-12809 dropped — presumably the PQExpBuffer declarations for q, delq and query, all used below; confirm. */
12810 PGresult *res;
12811 int i;
12812 char *qtypname;
12813 char *qualtypname;
12814 char *typnotnull;
12815 char *typdefn;
12816 char *typdefault;
12817 Oid typcollation;
12818 bool typdefault_is_literal = false;
12819
/* NOTE(review): line 12820 dropped — presumably the once-per-connection prepare guard; line 12823 presumably the appendPQExpBufferStr(query, ...) call head for the PREPARE text; confirm. */
12821 {
12822 /* Set up query for domain-specific details */
12824 "PREPARE dumpDomain(pg_catalog.oid) AS\n");
12825
12826 appendPQExpBufferStr(query, "SELECT t.typnotnull, "
12827 "pg_catalog.format_type(t.typbasetype, t.typtypmod) AS typdefn, "
12828 "pg_catalog.pg_get_expr(t.typdefaultbin, 'pg_catalog.pg_type'::pg_catalog.regclass) AS typdefaultbin, "
12829 "t.typdefault, "
12830 "CASE WHEN t.typcollation <> u.typcollation "
12831 "THEN t.typcollation ELSE 0 END AS typcollation "
12832 "FROM pg_catalog.pg_type t "
12833 "LEFT JOIN pg_catalog.pg_type u ON (t.typbasetype = u.oid) "
12834 "WHERE t.oid = $1");
12835
12836 ExecuteSqlStatement(fout, query->data);
12837
/* NOTE(review): line 12838 dropped — presumably marks the statement as prepared; confirm. */
12839 }
12840
12841 printfPQExpBuffer(query,
12842 "EXECUTE dumpDomain('%u')",
12843 tyinfo->dobj.catId.oid);
12844
12845 res = ExecuteSqlQueryForSingleRow(fout, query->data);
12846
12847 typnotnull = PQgetvalue(res, 0, PQfnumber(res, "typnotnull"));
12848 typdefn = PQgetvalue(res, 0, PQfnumber(res, "typdefn"));
12849 if (!PQgetisnull(res, 0, PQfnumber(res, "typdefaultbin")))
12850 typdefault = PQgetvalue(res, 0, PQfnumber(res, "typdefaultbin"));
12851 else if (!PQgetisnull(res, 0, PQfnumber(res, "typdefault")))
12852 {
12853 typdefault = PQgetvalue(res, 0, PQfnumber(res, "typdefault"));
12854 typdefault_is_literal = true; /* it needs quotes */
12855 }
12856 else
12857 typdefault = NULL;
12858 typcollation = atooid(PQgetvalue(res, 0, PQfnumber(res, "typcollation")));
12859
12860 if (dopt->binary_upgrade)
/* NOTE(review): line 12861 dropped — the call head whose trailing arguments follow (presumably binary_upgrade_set_type_oids_by_type_oid(fout, q, ...)); confirm. */
12862 tyinfo->dobj.catId.oid,
12863 true, /* force array type */
12864 false); /* force multirange type */
12865
12866 qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
/* NOTE(review): line 12867 dropped — presumably qualtypname = pg_strdup(fmtQualifiedDumpable(tyinfo)); lines 12869 and 12871 presumably the appendPQExpBuffer(q, ...) call head and qualtypname argument for the CREATE DOMAIN below; confirm. */
12868
12870 "CREATE DOMAIN %s AS %s",
12872 typdefn);
12873
12874 /* Print collation only if different from base type's collation */
12875 if (OidIsValid(typcollation))
12876 {
12877 CollInfo *coll;
12878
12879 coll = findCollationByOid(typcollation);
12880 if (coll)
12881 appendPQExpBuffer(q, " COLLATE %s", fmtQualifiedDumpable(coll));
12882 }
12883
12884 /*
12885 * Print a not-null constraint if there's one. In servers older than 17
12886 * these don't have names, so just print it unadorned; in newer ones they
12887 * do, but most of the time it's going to be the standard generated one,
12888 * so omit the name in that case also.
12889 */
12890 if (typnotnull[0] == 't')
12891 {
12892 if (fout->remoteVersion < 170000 || tyinfo->notnull == NULL)
12893 appendPQExpBufferStr(q, " NOT NULL");
12894 else
12895 {
12896 ConstraintInfo *notnull = tyinfo->notnull;
12897
12898 if (!notnull->separate)
12899 {
12900 char *default_name;
12901
12902 /* XXX should match ChooseConstraintName better */
12903 default_name = psprintf("%s_not_null", tyinfo->dobj.name);
12904
12905 if (strcmp(default_name, notnull->dobj.name) == 0)
12906 appendPQExpBufferStr(q, " NOT NULL");
12907 else
12908 appendPQExpBuffer(q, " CONSTRAINT %s %s",
12909 fmtId(notnull->dobj.name), notnull->condef);
/* NOTE(review): line 12910 dropped — presumably free(default_name); confirm. */
12911 }
12912 }
12913 }
12914
12915 if (typdefault != NULL)
12916 {
12917 appendPQExpBufferStr(q, " DEFAULT ");
/* NOTE(review): lines 12918-12919 and 12921 dropped — presumably the typdefault_is_literal branch appending the quoted literal vs. the raw expression; confirm. */
12920 else
12922 }
12923
12924 PQclear(res);
12925
12926 /*
12927 * Add any CHECK constraints for the domain
12928 */
12929 for (i = 0; i < tyinfo->nDomChecks; i++)
12930 {
12931 ConstraintInfo *domcheck = &(tyinfo->domChecks[i]);
12932
12933 if (!domcheck->separate && domcheck->contype == 'c')
12934 appendPQExpBuffer(q, "\n\tCONSTRAINT %s %s",
12935 fmtId(domcheck->dobj.name), domcheck->condef);
12936 }
12937
12938 appendPQExpBufferStr(q, ";\n");
12939
12940 appendPQExpBuffer(delq, "DROP DOMAIN %s;\n", qualtypname);
12941
12942 if (dopt->binary_upgrade)
/* NOTE(review): line 12943 dropped — presumably binary_upgrade_extension_member(q, &tyinfo->dobj, ...); confirm. */
12944 "DOMAIN", qtypname,
12945 tyinfo->dobj.namespace->dobj.name);
12946
12947 if (tyinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
12948 ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
12949 ARCHIVE_OPTS(.tag = tyinfo->dobj.name,
12950 .namespace = tyinfo->dobj.namespace->dobj.name,
12951 .owner = tyinfo->rolname,
12952 .description = "DOMAIN",
12953 .section = SECTION_PRE_DATA,
12954 .createStmt = q->data,
12955 .dropStmt = delq->data));
12956
12957 /* Dump Domain Comments and Security Labels */
12958 if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
12959 dumpComment(fout, "DOMAIN", qtypname,
12960 tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12961 tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12962
12963 if (tyinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
12964 dumpSecLabel(fout, "DOMAIN", qtypname,
12965 tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12966 tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12967
12968 if (tyinfo->dobj.dump & DUMP_COMPONENT_ACL)
12969 dumpACL(fout, tyinfo->dobj.dumpId, InvalidDumpId, "TYPE",
12970 qtypname, NULL,
12971 tyinfo->dobj.namespace->dobj.name,
12972 NULL, tyinfo->rolname, &tyinfo->dacl);
12973
12974 /* Dump any per-constraint comments */
12975 for (i = 0; i < tyinfo->nDomChecks; i++)
12976 {
12977 ConstraintInfo *domcheck = &(tyinfo->domChecks[i]);
/* NOTE(review): line 12978 dropped — presumably "PQExpBuffer conprefix = createPQExpBuffer();" given the uses below; line 12984 likewise appears dropped; lines 12989 and 12994 presumably the dumpComment(fout, conprefix->data, ...) call head and destroyPQExpBuffer(conprefix); confirm. */
12979
12980 /* but only if the constraint itself was dumped here */
12981 if (domcheck->separate)
12982 continue;
12983
12985 appendPQExpBuffer(conprefix, "CONSTRAINT %s ON DOMAIN",
12986 fmtId(domcheck->dobj.name));
12987
12988 if (domcheck->dobj.dump & DUMP_COMPONENT_COMMENT)
12990 tyinfo->dobj.namespace->dobj.name,
12991 tyinfo->rolname,
12992 domcheck->dobj.catId, 0, tyinfo->dobj.dumpId);
12993
12995 }
12996
12997 /*
12998 * And a comment on the not-null constraint, if there's one -- but only if
12999 * the constraint itself was dumped here
13000 */
13001 if (tyinfo->notnull != NULL && !tyinfo->notnull->separate)
13002 {
/* NOTE(review): line 13003 dropped — presumably "PQExpBuffer conprefix = createPQExpBuffer();"; lines 13009 and 13013 presumably the dumpComment(fout, conprefix->data, ...) call head and destroyPQExpBuffer(conprefix); confirm. */
13004
13005 appendPQExpBuffer(conprefix, "CONSTRAINT %s ON DOMAIN",
13006 fmtId(tyinfo->notnull->dobj.name));
13007
13008 if (tyinfo->notnull->dobj.dump & DUMP_COMPONENT_COMMENT)
13010 tyinfo->dobj.namespace->dobj.name,
13011 tyinfo->rolname,
13012 tyinfo->notnull->dobj.catId, 0, tyinfo->dobj.dumpId);
13014 }
13015
/* NOTE(review): lines 13016-13017 and 13020 dropped — presumably destroyPQExpBuffer(q), destroyPQExpBuffer(delq) and free(qualtypname); confirm. */
13018 destroyPQExpBuffer(query);
13019 free(qtypname);
13021}
13022
13023/*
13024 * dumpCompositeType
13025 * writes out to fout the queries to recreate a user-defined stand-alone
13026 * composite type
13027 */
13028static void
13030{
/*
 * NOTE(review): this extract is missing several source lines (e.g. the
 * parameter list at 13029 and some declarations/queries); surviving code
 * is left byte-identical.  Expected parameters: (Archive *fout,
 * const TypeInfo *tyinfo) -- TODO confirm against the full source.
 */
13031 DumpOptions *dopt = fout->dopt;
/* "dropped" collects catalog fixups emitted after the CREATE TYPE body. */
13033 PQExpBuffer dropped = createPQExpBuffer();
13036 PGresult *res;
13037 char *qtypname;
13038 char *qualtypname;
13039 int ntups;
13040 int i_attname;
13041 int i_atttypdefn;
13042 int i_attlen;
13043 int i_attalign;
13044 int i_attisdropped;
13045 int i_attcollation;
13046 int i;
13047 int actual_atts;
13048
13050 {
13051 /*
13052 * Set up query for type-specific details.
13053 *
13054 * Since we only want to dump COLLATE clauses for attributes whose
13055 * collation is different from their type's default, we use a CASE
13056 * here to suppress uninteresting attcollations cheaply. atttypid
13057 * will be 0 for dropped columns; collation does not matter for those.
13058 */
13060 "PREPARE dumpCompositeType(pg_catalog.oid) AS\n"
13061 "SELECT a.attname, a.attnum, "
13062 "pg_catalog.format_type(a.atttypid, a.atttypmod) AS atttypdefn, "
13063 "a.attlen, a.attalign, a.attisdropped, "
13064 "CASE WHEN a.attcollation <> at.typcollation "
13065 "THEN a.attcollation ELSE 0 END AS attcollation "
13066 "FROM pg_catalog.pg_type ct "
13067 "JOIN pg_catalog.pg_attribute a ON a.attrelid = ct.typrelid "
13068 "LEFT JOIN pg_catalog.pg_type at ON at.oid = a.atttypid "
13069 "WHERE ct.oid = $1 "
13070 "ORDER BY a.attnum");
13071
13072 ExecuteSqlStatement(fout, query->data);
13073
13075 }
13076
/* Run the prepared query for this particular composite type's OID. */
13077 printfPQExpBuffer(query,
13078 "EXECUTE dumpCompositeType('%u')",
13079 tyinfo->dobj.catId.oid);
13080
13081 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
13082
13083 ntups = PQntuples(res);
13084
13085 i_attname = PQfnumber(res, "attname");
13086 i_atttypdefn = PQfnumber(res, "atttypdefn");
13087 i_attlen = PQfnumber(res, "attlen");
13088 i_attalign = PQfnumber(res, "attalign");
13089 i_attisdropped = PQfnumber(res, "attisdropped");
13090 i_attcollation = PQfnumber(res, "attcollation");
13091
13092 if (dopt->binary_upgrade)
13093 {
13095 tyinfo->dobj.catId.oid,
13096 false, false);
13098 }
13099
13100 qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
13102
13103 appendPQExpBuffer(q, "CREATE TYPE %s AS (",
13104 qualtypname);
13105
/* Emit one attribute per row; dropped columns only kept in binary upgrade. */
13106 actual_atts = 0;
13107 for (i = 0; i < ntups; i++)
13108 {
13109 char *attname;
13110 char *atttypdefn;
13111 char *attlen;
13112 char *attalign;
13113 bool attisdropped;
13114 Oid attcollation;
13115
13116 attname = PQgetvalue(res, i, i_attname);
13118 attlen = PQgetvalue(res, i, i_attlen);
13120 attisdropped = (PQgetvalue(res, i, i_attisdropped)[0] == 't');
13121 attcollation = atooid(PQgetvalue(res, i, i_attcollation));
13122
13123 if (attisdropped && !dopt->binary_upgrade)
13124 continue;
13125
13126 /* Format properly if not first attr */
13127 if (actual_atts++ > 0)
13128 appendPQExpBufferChar(q, ',');
13129 appendPQExpBufferStr(q, "\n\t");
13130
13131 if (!attisdropped)
13132 {
13134
13135 /* Add collation if not default for the column type */
13136 if (OidIsValid(attcollation))
13137 {
13138 CollInfo *coll;
13139
13140 coll = findCollationByOid(attcollation);
13141 if (coll)
13142 appendPQExpBuffer(q, " COLLATE %s",
13144 }
13145 }
13146 else
13147 {
13148 /*
13149 * This is a dropped attribute and we're in binary_upgrade mode.
13150 * Insert a placeholder for it in the CREATE TYPE command, and set
13151 * length and alignment with direct UPDATE to the catalogs
13152 * afterwards. See similar code in dumpTableSchema().
13153 */
13154 appendPQExpBuffer(q, "%s INTEGER /* dummy */", fmtId(attname));
13155
13156 /* stash separately for insertion after the CREATE TYPE */
13157 appendPQExpBufferStr(dropped,
13158 "\n-- For binary upgrade, recreate dropped column.\n");
13159 appendPQExpBuffer(dropped, "UPDATE pg_catalog.pg_attribute\n"
13160 "SET attlen = %s, "
13161 "attalign = '%s', attbyval = false\n"
13162 "WHERE attname = ", attlen, attalign);
13164 appendPQExpBufferStr(dropped, "\n AND attrelid = ");
13166 appendPQExpBufferStr(dropped, "::pg_catalog.regclass;\n");
13167
13168 appendPQExpBuffer(dropped, "ALTER TYPE %s ",
13169 qualtypname);
13170 appendPQExpBuffer(dropped, "DROP ATTRIBUTE %s;\n",
13171 fmtId(attname));
13172 }
13173 }
13174 appendPQExpBufferStr(q, "\n);\n");
/* Append the deferred dropped-column fixups after the CREATE TYPE. */
13175 appendPQExpBufferStr(q, dropped->data);
13176
13177 appendPQExpBuffer(delq, "DROP TYPE %s;\n", qualtypname);
13178
13179 if (dopt->binary_upgrade)
13181 "TYPE", qtypname,
13182 tyinfo->dobj.namespace->dobj.name);
13183
13184 if (tyinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
13185 ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
13186 ARCHIVE_OPTS(.tag = tyinfo->dobj.name,
13187 .namespace = tyinfo->dobj.namespace->dobj.name,
13188 .owner = tyinfo->rolname,
13189 .description = "TYPE",
13190 .section = SECTION_PRE_DATA,
13191 .createStmt = q->data,
13192 .dropStmt = delq->data));
13193
13194
13195 /* Dump Type Comments and Security Labels */
13196 if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
13197 dumpComment(fout, "TYPE", qtypname,
13198 tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
13199 tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
13200
13201 if (tyinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
13202 dumpSecLabel(fout, "TYPE", qtypname,
13203 tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
13204 tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
13205
13206 if (tyinfo->dobj.dump & DUMP_COMPONENT_ACL)
13207 dumpACL(fout, tyinfo->dobj.dumpId, InvalidDumpId, "TYPE",
13208 qtypname, NULL,
13209 tyinfo->dobj.namespace->dobj.name,
13210 NULL, tyinfo->rolname, &tyinfo->dacl);
13211
13212 /* Dump any per-column comments */
13213 if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
13215
13216 PQclear(res);
13218 destroyPQExpBuffer(dropped);
13220 destroyPQExpBuffer(query);
13221 free(qtypname);
13223}
13224
13225/*
13226 * dumpCompositeTypeColComments
13227 * writes out to fout the queries to recreate comments on the columns of
13228 * a user-defined stand-alone composite type.
13229 *
13230 * The caller has already made a query to collect the names and attnums
13231 * of the type's columns, so we just pass that result into here rather
13232 * than reading them again.
13233 */
13234static void
13236 PGresult *res)
13237{
/*
 * NOTE(review): some lines are elided in this extract (e.g. 13238, 13253);
 * the comments array/findComments() call is not fully visible here.
 */
13239 int ncomments;
13240 PQExpBuffer query;
13241 PQExpBuffer target;
13242 int i;
13243 int ntups;
13244 int i_attname;
13245 int i_attnum;
13246 int i_attisdropped;
13247
13248 /* do nothing, if --no-comments is supplied */
13249 if (fout->dopt->no_comments)
13250 return;
13251
13252 /* Search for comments associated with type's pg_class OID */
13254 &comments);
13255
13256 /* If no comments exist, we're done */
13257 if (ncomments <= 0)
13258 return;
13259
13260 /* Build COMMENT ON statements */
13261 query = createPQExpBuffer();
13262 target = createPQExpBuffer();
13263
13264 ntups = PQntuples(res);
13265 i_attnum = PQfnumber(res, "attnum");
13266 i_attname = PQfnumber(res, "attname");
13267 i_attisdropped = PQfnumber(res, "attisdropped");
/*
 * For each stored comment, linearly scan the caller's column result for
 * the matching attnum (comment objsubid), skipping dropped columns.
 */
13268 while (ncomments > 0)
13269 {
13270 const char *attname;
13271
13272 attname = NULL;
13273 for (i = 0; i < ntups; i++)
13274 {
13275 if (atoi(PQgetvalue(res, i, i_attnum)) == comments->objsubid &&
13276 PQgetvalue(res, i, i_attisdropped)[0] != 't')
13277 {
13278 attname = PQgetvalue(res, i, i_attname);
13279 break;
13280 }
13281 }
13282 if (attname) /* just in case we don't find it */
13283 {
13284 const char *descr = comments->descr;
13285
13286 resetPQExpBuffer(target);
13287 appendPQExpBuffer(target, "COLUMN %s.",
13288 fmtId(tyinfo->dobj.name));
13290
13291 resetPQExpBuffer(query);
13292 appendPQExpBuffer(query, "COMMENT ON COLUMN %s.",
13294 appendPQExpBuffer(query, "%s IS ", fmtId(attname));
13295 appendStringLiteralAH(query, descr, fout);
13296 appendPQExpBufferStr(query, ";\n");
13297
/* Each column comment is its own TOC entry, dependent on the type. */
13299 ARCHIVE_OPTS(.tag = target->data,
13300 .namespace = tyinfo->dobj.namespace->dobj.name,
13301 .owner = tyinfo->rolname,
13302 .description = "COMMENT",
13303 .section = SECTION_NONE,
13304 .createStmt = query->data,
13305 .deps = &(tyinfo->dobj.dumpId),
13306 .nDeps = 1));
13307 }
13308
13309 comments++;
13310 ncomments--;
13311 }
13312
13313 destroyPQExpBuffer(query);
13314 destroyPQExpBuffer(target);
13315}
13316
13317/*
13318 * dumpShellType
13319 * writes out to fout the queries to create a shell type
13320 *
13321 * We dump a shell definition in advance of the I/O functions for the type.
13322 */
13323static void
13325{
13326 DumpOptions *dopt = fout->dopt;
13327 PQExpBuffer q;
13328
13329 /* Do nothing if not dumping schema */
13330 if (!dopt->dumpSchema)
13331 return;
13332
13333 q = createPQExpBuffer();
13334
13335 /*
13336 * Note the lack of a DROP command for the shell type; any required DROP
13337 * is driven off the base type entry, instead. This interacts with
13338 * _printTocEntry()'s use of the presence of a DROP command to decide
13339 * whether an entry needs an ALTER OWNER command. We don't want to alter
13340 * the shell type's owner immediately on creation; that should happen only
13341 * after it's filled in, otherwise the backend complains.
13342 */
13343
13344 if (dopt->binary_upgrade)
13346 stinfo->baseType->dobj.catId.oid,
13347 false, false);
13348
13349 appendPQExpBuffer(q, "CREATE TYPE %s;\n",
13351
/*
 * Per the comment above, the ARCHIVE_OPTS below intentionally supply
 * .createStmt only -- no .dropStmt.  Note the owner comes from the base
 * type's rolname, not the shell entry.
 */
13352 if (stinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
13353 ArchiveEntry(fout, stinfo->dobj.catId, stinfo->dobj.dumpId,
13354 ARCHIVE_OPTS(.tag = stinfo->dobj.name,
13355 .namespace = stinfo->dobj.namespace->dobj.name,
13356 .owner = stinfo->baseType->rolname,
13357 .description = "SHELL TYPE",
13358 .section = SECTION_PRE_DATA,
13359 .createStmt = q->data));
13360
13362}
13363
13364/*
13365 * dumpProcLang
13366 * writes out to fout the queries to recreate a user-defined
13367 * procedural language
13368 */
13369static void
13371{
/*
 * NOTE(review): several declaration lines (13372-13374, 13377-13379) are
 * elided in this extract; funcInfo/inlineInfo/validatorInfo and the
 * defqry/delqry buffers are declared there -- code left byte-identical.
 */
13372 DumpOptions *dopt = fout->dopt;
13375 bool useParams;
13376 char *qlanname;
13380
13381 /* Do nothing if not dumping schema */
13382 if (!dopt->dumpSchema)
13383 return;
13384
13385 /*
13386 * Try to find the support function(s). It is not an error if we don't
13387 * find them --- if the functions are in the pg_catalog schema, as is
13388 * standard in 8.1 and up, then we won't have loaded them. (In this case
13389 * we will emit a parameterless CREATE LANGUAGE command, which will
13390 * require PL template knowledge in the backend to reload.)
13391 */
13392
13393 funcInfo = findFuncByOid(plang->lanplcallfoid);
13394 if (funcInfo != NULL && !funcInfo->dobj.dump)
13395 funcInfo = NULL; /* treat not-dumped same as not-found */
13396
13397 if (OidIsValid(plang->laninline))
13398 {
13399 inlineInfo = findFuncByOid(plang->laninline);
13400 if (inlineInfo != NULL && !inlineInfo->dobj.dump)
13401 inlineInfo = NULL;
13402 }
13403
13404 if (OidIsValid(plang->lanvalidator))
13405 {
13406 validatorInfo = findFuncByOid(plang->lanvalidator);
13407 if (validatorInfo != NULL && !validatorInfo->dobj.dump)
13409 }
13410
13411 /*
13412 * If the functions are dumpable then emit a complete CREATE LANGUAGE with
13413 * parameters. Otherwise, we'll write a parameterless command, which will
13414 * be interpreted as CREATE EXTENSION.
13415 */
/* useParams: handler found, and inline/validator either found or absent. */
13416 useParams = (funcInfo != NULL &&
13417 (inlineInfo != NULL || !OidIsValid(plang->laninline)) &&
13418 (validatorInfo != NULL || !OidIsValid(plang->lanvalidator)));
13419
13422
13423 qlanname = pg_strdup(fmtId(plang->dobj.name));
13424
13425 appendPQExpBuffer(delqry, "DROP PROCEDURAL LANGUAGE %s;\n",
13426 qlanname);
13427
13428 if (useParams)
13429 {
13430 appendPQExpBuffer(defqry, "CREATE %sPROCEDURAL LANGUAGE %s",
13431 plang->lanpltrusted ? "TRUSTED " : "",
13432 qlanname);
13433 appendPQExpBuffer(defqry, " HANDLER %s",
13435 if (OidIsValid(plang->laninline))
13436 appendPQExpBuffer(defqry, " INLINE %s",
13438 if (OidIsValid(plang->lanvalidator))
13439 appendPQExpBuffer(defqry, " VALIDATOR %s",
13441 }
13442 else
13443 {
13444 /*
13445 * If not dumping parameters, then use CREATE OR REPLACE so that the
13446 * command will not fail if the language is preinstalled in the target
13447 * database.
13448 *
13449 * Modern servers will interpret this as CREATE EXTENSION IF NOT
13450 * EXISTS; perhaps we should emit that instead? But it might just add
13451 * confusion.
13452 */
13453 appendPQExpBuffer(defqry, "CREATE OR REPLACE PROCEDURAL LANGUAGE %s",
13454 qlanname);
13455 }
13457
13458 if (dopt->binary_upgrade)
13460 "LANGUAGE", qlanname, NULL);
13461
13462 if (plang->dobj.dump & DUMP_COMPONENT_DEFINITION)
13463 ArchiveEntry(fout, plang->dobj.catId, plang->dobj.dumpId,
13464 ARCHIVE_OPTS(.tag = plang->dobj.name,
13465 .owner = plang->lanowner,
13466 .description = "PROCEDURAL LANGUAGE",
13467 .section = SECTION_PRE_DATA,
13468 .createStmt = defqry->data,
13469 .dropStmt = delqry->data,
13470 ));
13471
13472 /* Dump Proc Lang Comments and Security Labels */
13473 if (plang->dobj.dump & DUMP_COMPONENT_COMMENT)
13474 dumpComment(fout, "LANGUAGE", qlanname,
13475 NULL, plang->lanowner,
13476 plang->dobj.catId, 0, plang->dobj.dumpId);
13477
13478 if (plang->dobj.dump & DUMP_COMPONENT_SECLABEL)
13479 dumpSecLabel(fout, "LANGUAGE", qlanname,
13480 NULL, plang->lanowner,
13481 plang->dobj.catId, 0, plang->dobj.dumpId);
13482
/* ACLs only apply to trusted languages (GRANT USAGE ON LANGUAGE). */
13483 if (plang->lanpltrusted && plang->dobj.dump & DUMP_COMPONENT_ACL)
13484 dumpACL(fout, plang->dobj.dumpId, InvalidDumpId, "LANGUAGE",
13485 qlanname, NULL, NULL,
13486 NULL, plang->lanowner, &plang->dacl);
13487
13488 free(qlanname);
13489
13492}
13493
13494/*
13495 * format_function_arguments: generate function name and argument list
13496 *
13497 * This is used when we can rely on pg_get_function_arguments to format
13498 * the argument list. Note, however, that pg_get_function_arguments
13499 * does not special-case zero-argument aggregates.
13500 *
13501 * Returns a palloc'd/malloc'd string (PQExpBuffer data) the caller owns.
13502 * NOTE(review): lines 13504-13507 (buffer init and name append) are
13503 * elided in this extract; code below is left byte-identical.
13500 */
13501static char *
13502format_function_arguments(const FuncInfo *finfo, const char *funcargs, bool is_agg)
13503{
13505
/* Zero-argument aggregates must be written as name(*), per header comment. */
13508 if (is_agg && finfo->nargs == 0)
13509 appendPQExpBufferStr(&fn, "(*)");
13510 else
13511 appendPQExpBuffer(&fn, "(%s)", funcargs);
13512 return fn.data;
13513}
13514
13515/*
13516 * format_function_signature: generate function name and argument list
13517 *
13518 * Only a minimal list of input argument types is generated; this is
13519 * sufficient to reference the function, but not to define it.
13520 *
13521 * If honor_quotes is false then the function name is never quoted.
13522 * This is appropriate for use in TOC tags, but not in SQL commands.
13523 */
13524static char *
13526{
/*
 * NOTE(review): the signature line (13525) and several body lines
 * (13527, 13530, 13540-13541, 13544) are elided in this extract; the
 * per-argument type-name append happens in the elided 13540-13542 span.
 */
13528 int j;
13529
13531 if (honor_quotes)
13532 appendPQExpBuffer(&fn, "%s(", fmtId(finfo->dobj.name));
13533 else
13534 appendPQExpBuffer(&fn, "%s(", finfo->dobj.name);
13535 for (j = 0; j < finfo->nargs; j++)
13536 {
13537 if (j > 0)
13538 appendPQExpBufferStr(&fn, ", ");
13539
13542 zeroIsError));
13543 }
13545 return fn.data;
13546}
13547
13548
13549/*
13550 * dumpFunc:
13551 * dump out one function
13552 */
13553static void
13555{
/*
 * NOTE(review): this extract has many elided lines (missing signature at
 * 13554, several buffer declarations, and parts of the AS-clause
 * construction).  Surviving code is left byte-identical.  Expected
 * parameters: (Archive *fout, const FuncInfo *finfo) -- TODO confirm.
 */
13556 DumpOptions *dopt = fout->dopt;
13557 PQExpBuffer query;
13558 PQExpBuffer q;
13561 PGresult *res;
13562 char *funcsig; /* identity signature */
13563 char *funcfullsig = NULL; /* full signature */
13564 char *funcsig_tag;
13565 char *qual_funcsig;
13566 char *proretset;
13567 char *prosrc;
13568 char *probin;
13569 char *prosqlbody;
13570 char *funcargs;
13571 char *funciargs;
13572 char *funcresult;
13573 char *protrftypes;
13574 char *prokind;
13575 char *provolatile;
13576 char *proisstrict;
13577 char *prosecdef;
13578 char *proleakproof;
13579 char *proconfig;
13580 char *procost;
13581 char *prorows;
13582 char *prosupport;
13583 char *proparallel;
13584 char *lanname;
13585 char **configitems = NULL;
13586 int nconfigitems = 0;
13587 const char *keyword;
13588
13589 /* Do nothing if not dumping schema */
13590 if (!dopt->dumpSchema)
13591 return;
13592
13593 query = createPQExpBuffer();
13594 q = createPQExpBuffer();
13597
/*
 * Prepare the per-function detail query once per session; version checks
 * below substitute constants for columns absent on older servers.
 */
13599 {
13600 /* Set up query for function-specific details */
13602 "PREPARE dumpFunc(pg_catalog.oid) AS\n");
13603
13605 "SELECT\n"
13606 "proretset,\n"
13607 "prosrc,\n"
13608 "probin,\n"
13609 "provolatile,\n"
13610 "proisstrict,\n"
13611 "prosecdef,\n"
13612 "lanname,\n"
13613 "proconfig,\n"
13614 "procost,\n"
13615 "prorows,\n"
13616 "pg_catalog.pg_get_function_arguments(p.oid) AS funcargs,\n"
13617 "pg_catalog.pg_get_function_identity_arguments(p.oid) AS funciargs,\n"
13618 "pg_catalog.pg_get_function_result(p.oid) AS funcresult,\n"
13619 "proleakproof,\n");
13620
13621 if (fout->remoteVersion >= 90500)
13623 "array_to_string(protrftypes, ' ') AS protrftypes,\n");
13624 else
13626 "NULL AS protrftypes,\n");
13627
13628 if (fout->remoteVersion >= 90600)
13630 "proparallel,\n");
13631 else
13633 "'u' AS proparallel,\n");
13634
13635 if (fout->remoteVersion >= 110000)
13637 "prokind,\n");
13638 else
13640 "CASE WHEN proiswindow THEN 'w' ELSE 'f' END AS prokind,\n");
13641
13642 if (fout->remoteVersion >= 120000)
13644 "prosupport,\n");
13645 else
13647 "'-' AS prosupport,\n");
13648
13649 if (fout->remoteVersion >= 140000)
13651 "pg_get_function_sqlbody(p.oid) AS prosqlbody\n");
13652 else
13654 "NULL AS prosqlbody\n");
13655
13657 "FROM pg_catalog.pg_proc p, pg_catalog.pg_language l\n"
13658 "WHERE p.oid = $1 "
13659 "AND l.oid = p.prolang");
13660
13661 ExecuteSqlStatement(fout, query->data);
13662
13664 }
13665
13666 printfPQExpBuffer(query,
13667 "EXECUTE dumpFunc('%u')",
13668 finfo->dobj.catId.oid);
13669
13670 res = ExecuteSqlQueryForSingleRow(fout, query->data);
13671
/*
 * A non-NULL prosqlbody (SQL-standard function body) supersedes
 * prosrc/probin; exactly one of the two representations is kept.
 */
13672 proretset = PQgetvalue(res, 0, PQfnumber(res, "proretset"));
13673 if (PQgetisnull(res, 0, PQfnumber(res, "prosqlbody")))
13674 {
13675 prosrc = PQgetvalue(res, 0, PQfnumber(res, "prosrc"));
13676 probin = PQgetvalue(res, 0, PQfnumber(res, "probin"));
13677 prosqlbody = NULL;
13678 }
13679 else
13680 {
13681 prosrc = NULL;
13682 probin = NULL;
13683 prosqlbody = PQgetvalue(res, 0, PQfnumber(res, "prosqlbody"));
13684 }
13685 funcargs = PQgetvalue(res, 0, PQfnumber(res, "funcargs"));
13686 funciargs = PQgetvalue(res, 0, PQfnumber(res, "funciargs"));
13687 funcresult = PQgetvalue(res, 0, PQfnumber(res, "funcresult"));
13688 protrftypes = PQgetvalue(res, 0, PQfnumber(res, "protrftypes"));
13689 prokind = PQgetvalue(res, 0, PQfnumber(res, "prokind"));
13690 provolatile = PQgetvalue(res, 0, PQfnumber(res, "provolatile"));
13691 proisstrict = PQgetvalue(res, 0, PQfnumber(res, "proisstrict"));
13692 prosecdef = PQgetvalue(res, 0, PQfnumber(res, "prosecdef"));
13693 proleakproof = PQgetvalue(res, 0, PQfnumber(res, "proleakproof"));
13694 proconfig = PQgetvalue(res, 0, PQfnumber(res, "proconfig"));
13695 procost = PQgetvalue(res, 0, PQfnumber(res, "procost"));
13696 prorows = PQgetvalue(res, 0, PQfnumber(res, "prorows"));
13697 prosupport = PQgetvalue(res, 0, PQfnumber(res, "prosupport"));
13698 proparallel = PQgetvalue(res, 0, PQfnumber(res, "proparallel"));
13699 lanname = PQgetvalue(res, 0, PQfnumber(res, "lanname"));
13700
13701 /*
13702 * See backend/commands/functioncmds.c for details of how the 'AS' clause
13703 * is used.
13704 */
13705 if (prosqlbody)
13706 {
13708 }
13709 else if (probin[0] != '\0')
13710 {
13713 if (prosrc[0] != '\0')
13714 {
13716
13717 /*
13718 * where we have bin, use dollar quoting if allowed and src
13719 * contains quote or backslash; else use regular quoting.
13720 */
13721 if (dopt->disable_dollar_quoting ||
13722 (strchr(prosrc, '\'') == NULL && strchr(prosrc, '\\') == NULL))
13724 else
13726 }
13727 }
13728 else
13729 {
13731 /* with no bin, dollar quote src unconditionally if allowed */
13732 if (dopt->disable_dollar_quoting)
13734 else
13736 }
13737
13738 if (*proconfig)
13739 {
13741 pg_fatal("could not parse %s array", "proconfig");
13742 }
13743 else
13744 {
13745 configitems = NULL;
13746 nconfigitems = 0;
13747 }
13748
13751
13753
13754 qual_funcsig = psprintf("%s.%s",
13755 fmtId(finfo->dobj.namespace->dobj.name),
13756 funcsig);
13757
13758 if (prokind[0] == PROKIND_PROCEDURE)
13759 keyword = "PROCEDURE";
13760 else
13761 keyword = "FUNCTION"; /* works for window functions too */
13762
13763 appendPQExpBuffer(delqry, "DROP %s %s;\n",
13764 keyword, qual_funcsig);
13765
13766 appendPQExpBuffer(q, "CREATE %s %s.%s",
13767 keyword,
13768 fmtId(finfo->dobj.namespace->dobj.name),
13770 funcsig);
13771
13772 if (prokind[0] == PROKIND_PROCEDURE)
13773 /* no result type to output */ ;
13774 else if (funcresult)
13775 appendPQExpBuffer(q, " RETURNS %s", funcresult);
13776 else
13777 appendPQExpBuffer(q, " RETURNS %s%s",
13778 (proretset[0] == 't') ? "SETOF " : "",
13780 zeroIsError));
13781
13782 appendPQExpBuffer(q, "\n LANGUAGE %s", fmtId(lanname));
13783
/* protrftypes is a space-separated OID list (see query above). */
13784 if (*protrftypes)
13785 {
13787 int i;
13788
13789 appendPQExpBufferStr(q, " TRANSFORM ");
13791 for (i = 0; typeids[i]; i++)
13792 {
13793 if (i != 0)
13794 appendPQExpBufferStr(q, ", ");
13795 appendPQExpBuffer(q, "FOR TYPE %s",
13797 }
13798
13799 free(typeids);
13800 }
13801
13802 if (prokind[0] == PROKIND_WINDOW)
13803 appendPQExpBufferStr(q, " WINDOW");
13804
13806 {
13808 appendPQExpBufferStr(q, " IMMUTABLE");
13809 else if (provolatile[0] == PROVOLATILE_STABLE)
13810 appendPQExpBufferStr(q, " STABLE");
13811 else if (provolatile[0] != PROVOLATILE_VOLATILE)
13812 pg_fatal("unrecognized provolatile value for function \"%s\"",
13813 finfo->dobj.name);
13814 }
13815
13816 if (proisstrict[0] == 't')
13817 appendPQExpBufferStr(q, " STRICT");
13818
13819 if (prosecdef[0] == 't')
13820 appendPQExpBufferStr(q, " SECURITY DEFINER");
13821
13822 if (proleakproof[0] == 't')
13823 appendPQExpBufferStr(q, " LEAKPROOF");
13824
13825 /*
13826 * COST and ROWS are emitted only if present and not default, so as not to
13827 * break backwards-compatibility of the dump without need. Keep this code
13828 * in sync with the defaults in functioncmds.c.
13829 */
13830 if (strcmp(procost, "0") != 0)
13831 {
13832 if (strcmp(lanname, "internal") == 0 || strcmp(lanname, "c") == 0)
13833 {
13834 /* default cost is 1 */
13835 if (strcmp(procost, "1") != 0)
13836 appendPQExpBuffer(q, " COST %s", procost);
13837 }
13838 else
13839 {
13840 /* default cost is 100 */
13841 if (strcmp(procost, "100") != 0)
13842 appendPQExpBuffer(q, " COST %s", procost);
13843 }
13844 }
13845 if (proretset[0] == 't' &&
13846 strcmp(prorows, "0") != 0 && strcmp(prorows, "1000") != 0)
13847 appendPQExpBuffer(q, " ROWS %s", prorows);
13848
13849 if (strcmp(prosupport, "-") != 0)
13850 {
13851 /* We rely on regprocout to provide quoting and qualification */
13852 appendPQExpBuffer(q, " SUPPORT %s", prosupport);
13853 }
13854
13856 {
13857 if (proparallel[0] == PROPARALLEL_SAFE)
13858 appendPQExpBufferStr(q, " PARALLEL SAFE");
13859 else if (proparallel[0] == PROPARALLEL_RESTRICTED)
13860 appendPQExpBufferStr(q, " PARALLEL RESTRICTED");
13861 else if (proparallel[0] != PROPARALLEL_UNSAFE)
13862 pg_fatal("unrecognized proparallel value for function \"%s\"",
13863 finfo->dobj.name);
13864 }
13865
13866 for (int i = 0; i < nconfigitems; i++)
13867 {
13868 /* we feel free to scribble on configitems[] here */
13869 char *configitem = configitems[i];
13870 char *pos;
13871
13872 pos = strchr(configitem, '=');
13873 if (pos == NULL)
13874 continue;
13875 *pos++ = '\0';
13876 appendPQExpBuffer(q, "\n SET %s TO ", fmtId(configitem));
13877
13878 /*
13879 * Variables that are marked GUC_LIST_QUOTE were already fully quoted
13880 * by flatten_set_variable_args() before they were put into the
13881 * proconfig array. However, because the quoting rules used there
13882 * aren't exactly like SQL's, we have to break the list value apart
13883 * and then quote the elements as string literals. (The elements may
13884 * be double-quoted as-is, but we can't just feed them to the SQL
13885 * parser; it would do the wrong thing with elements that are
13886 * zero-length or longer than NAMEDATALEN.) Also, we need a special
13887 * case for empty lists.
13888 *
13889 * Variables that are not so marked should just be emitted as simple
13890 * string literals. If the variable is not known to
13891 * variable_is_guc_list_quote(), we'll do that; this makes it unsafe
13892 * to use GUC_LIST_QUOTE for extension variables.
13893 */
13895 {
13896 char **namelist;
13897 char **nameptr;
13898
13899 /* Parse string into list of identifiers */
13900 /* this shouldn't fail really */
13901 if (SplitGUCList(pos, ',', &namelist))
13902 {
13903 /* Special case: represent an empty list as NULL */
13904 if (*namelist == NULL)
13905 appendPQExpBufferStr(q, "NULL");
13906 for (nameptr = namelist; *nameptr; nameptr++)
13907 {
13908 if (nameptr != namelist)
13909 appendPQExpBufferStr(q, ", ");
13911 }
13912 }
13914 }
13915 else
13916 appendStringLiteralAH(q, pos, fout);
13917 }
13918
13919 appendPQExpBuffer(q, "\n %s;\n", asPart->data);
13920
13922 "pg_catalog.pg_proc", keyword,
13923 qual_funcsig);
13924
13925 if (dopt->binary_upgrade)
13927 keyword, funcsig,
13928 finfo->dobj.namespace->dobj.name);
13929
13930 if (finfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
13931 ArchiveEntry(fout, finfo->dobj.catId, finfo->dobj.dumpId,
13933 .namespace = finfo->dobj.namespace->dobj.name,
13934 .owner = finfo->rolname,
13935 .description = keyword,
13936 .section = finfo->postponed_def ?
13938 .createStmt = q->data,
13939 .dropStmt = delqry->data));
13940
13941 /* Dump Function Comments and Security Labels */
13942 if (finfo->dobj.dump & DUMP_COMPONENT_COMMENT)
13943 dumpComment(fout, keyword, funcsig,
13944 finfo->dobj.namespace->dobj.name, finfo->rolname,
13945 finfo->dobj.catId, 0, finfo->dobj.dumpId);
13946
13947 if (finfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
13948 dumpSecLabel(fout, keyword, funcsig,
13949 finfo->dobj.namespace->dobj.name, finfo->rolname,
13950 finfo->dobj.catId, 0, finfo->dobj.dumpId);
13951
13952 if (finfo->dobj.dump & DUMP_COMPONENT_ACL)
13953 dumpACL(fout, finfo->dobj.dumpId, InvalidDumpId, keyword,
13954 funcsig, NULL,
13955 finfo->dobj.namespace->dobj.name,
13956 NULL, finfo->rolname, &finfo->dacl);
13957
13958 PQclear(res);
13959
13960 destroyPQExpBuffer(query);
13964 free(funcsig);
13969}
13970
13971
13972/*
13973 * Dump a user-defined cast
13974 */
13975static void
13977{
/*
 * NOTE(review): declaration lines 13979-13983 (defqry/delqry/labelq/
 * castargs buffers, funcInfo, and the cast parameter's type) are elided
 * in this extract; code left byte-identical.
 */
13978 DumpOptions *dopt = fout->dopt;
13984 const char *sourceType;
13985 const char *targetType;
13986
13987 /* Do nothing if not dumping schema */
13988 if (!dopt->dumpSchema)
13989 return;
13990
13991 /* Cannot dump if we don't have the cast function's info */
13992 if (OidIsValid(cast->castfunc))
13993 {
13994 funcInfo = findFuncByOid(cast->castfunc);
13995 if (funcInfo == NULL)
13996 pg_fatal("could not find function definition for function with OID %u",
13997 cast->castfunc);
13998 }
13999
14004
14007 appendPQExpBuffer(delqry, "DROP CAST (%s AS %s);\n",
14009
14010 appendPQExpBuffer(defqry, "CREATE CAST (%s AS %s) ",
14012
/*
 * Emit the method clause; bogus catalog values produce a warning rather
 * than a fatal error so the rest of the dump can proceed.
 */
14013 switch (cast->castmethod)
14014 {
14016 appendPQExpBufferStr(defqry, "WITHOUT FUNCTION");
14017 break;
14019 appendPQExpBufferStr(defqry, "WITH INOUT");
14020 break;
14022 if (funcInfo)
14023 {
14025
14026 /*
14027 * Always qualify the function name (format_function_signature
14028 * won't qualify it).
14029 */
14030 appendPQExpBuffer(defqry, "WITH FUNCTION %s.%s",
14031 fmtId(funcInfo->dobj.namespace->dobj.name), fsig);
14032 free(fsig);
14033 }
14034 else
14035 pg_log_warning("bogus value in pg_cast.castfunc or pg_cast.castmethod field");
14036 break;
14037 default:
14038 pg_log_warning("bogus value in pg_cast.castmethod field");
14039 }
14040
14041 if (cast->castcontext == 'a')
14042 appendPQExpBufferStr(defqry, " AS ASSIGNMENT");
14043 else if (cast->castcontext == 'i')
14044 appendPQExpBufferStr(defqry, " AS IMPLICIT");
14046
14047 appendPQExpBuffer(labelq, "CAST (%s AS %s)",
14049
14050 appendPQExpBuffer(castargs, "(%s AS %s)",
14052
14053 if (dopt->binary_upgrade)
14055 "CAST", castargs->data, NULL);
14056
14057 if (cast->dobj.dump & DUMP_COMPONENT_DEFINITION)
14058 ArchiveEntry(fout, cast->dobj.catId, cast->dobj.dumpId,
14059 ARCHIVE_OPTS(.tag = labelq->data,
14060 .description = "CAST",
14061 .section = SECTION_PRE_DATA,
14062 .createStmt = defqry->data,
14063 .dropStmt = delqry->data));
14064
14065 /* Dump Cast Comments */
14066 if (cast->dobj.dump & DUMP_COMPONENT_COMMENT)
14067 dumpComment(fout, "CAST", castargs->data,
14068 NULL, "",
14069 cast->dobj.catId, 0, cast->dobj.dumpId);
14070
14075}
14076
14077/*
14078 * Dump a transform
14079 */
14080static void
14082{
/*
 * NOTE(review): declaration lines 14084-14089 (defqry/delqry/labelq/
 * transformargs buffers, fromsqlFuncInfo/tosqlFuncInfo, and the transform
 * parameter's type) are elided in this extract; code left byte-identical.
 */
14083 DumpOptions *dopt = fout->dopt;
14090 char *lanname;
14091 const char *transformType;
14092
14093 /* Do nothing if not dumping schema */
14094 if (!dopt->dumpSchema)
14095 return;
14096
14097 /* Cannot dump if we don't have the transform functions' info */
14098 if (OidIsValid(transform->trffromsql))
14099 {
14101 if (fromsqlFuncInfo == NULL)
14102 pg_fatal("could not find function definition for function with OID %u",
14103 transform->trffromsql);
14104 }
14105 if (OidIsValid(transform->trftosql))
14106 {
14107 tosqlFuncInfo = findFuncByOid(transform->trftosql);
14108 if (tosqlFuncInfo == NULL)
14109 pg_fatal("could not find function definition for function with OID %u",
14110 transform->trftosql);
14111 }
14112
14117
14118 lanname = get_language_name(fout, transform->trflang);
14120
14121 appendPQExpBuffer(delqry, "DROP TRANSFORM FOR %s LANGUAGE %s;\n",
14123
14124 appendPQExpBuffer(defqry, "CREATE TRANSFORM FOR %s LANGUAGE %s (",
14126
/* At least one direction must exist; warn (not fatal) if neither does. */
14127 if (!transform->trffromsql && !transform->trftosql)
14128 pg_log_warning("bogus transform definition, at least one of trffromsql and trftosql should be nonzero");
14129
14130 if (transform->trffromsql)
14131 {
14132 if (fromsqlFuncInfo)
14133 {
14135
14136 /*
14137 * Always qualify the function name (format_function_signature
14138 * won't qualify it).
14139 */
14140 appendPQExpBuffer(defqry, "FROM SQL WITH FUNCTION %s.%s",
14141 fmtId(fromsqlFuncInfo->dobj.namespace->dobj.name), fsig);
14142 free(fsig);
14143 }
14144 else
14145 pg_log_warning("bogus value in pg_transform.trffromsql field");
14146 }
14147
14148 if (transform->trftosql)
14149 {
/* Separator between FROM SQL and TO SQL clauses (appended at 14151). */
14150 if (transform->trffromsql)
14152
14153 if (tosqlFuncInfo)
14154 {
14156
14157 /*
14158 * Always qualify the function name (format_function_signature
14159 * won't qualify it).
14160 */
14161 appendPQExpBuffer(defqry, "TO SQL WITH FUNCTION %s.%s",
14162 fmtId(tosqlFuncInfo->dobj.namespace->dobj.name), fsig);
14163 free(fsig);
14164 }
14165 else
14166 pg_log_warning("bogus value in pg_transform.trftosql field");
14167 }
14168
14170
14171 appendPQExpBuffer(labelq, "TRANSFORM FOR %s LANGUAGE %s",
14173
14174 appendPQExpBuffer(transformargs, "FOR %s LANGUAGE %s",
14176
14177 if (dopt->binary_upgrade)
14179 "TRANSFORM", transformargs->data, NULL);
14180
14181 if (transform->dobj.dump & DUMP_COMPONENT_DEFINITION)
14182 ArchiveEntry(fout, transform->dobj.catId, transform->dobj.dumpId,
14183 ARCHIVE_OPTS(.tag = labelq->data,
14184 .description = "TRANSFORM",
14185 .section = SECTION_PRE_DATA,
14186 .createStmt = defqry->data,
14187 .dropStmt = delqry->data,
14188 .deps = transform->dobj.dependencies,
14189 .nDeps = transform->dobj.nDeps));
14190
14191 /* Dump Transform Comments */
14192 if (transform->dobj.dump & DUMP_COMPONENT_COMMENT)
14193 dumpComment(fout, "TRANSFORM", transformargs->data,
14194 NULL, "",
14195 transform->dobj.catId, 0, transform->dobj.dumpId);
14196
14197 free(lanname);
14202}
14203
14204
14205/*
14206 * dumpOpr
14207 * write out a single operator definition
14208 */
/*
 * NOTE(review): numbering gaps (14210, 14215-14216, 14255-14256, 14259,
 * 14262, 14278, 14306-14307, 14314, 14318, 14347, 14354, 14367, 14371,
 * 14374, 14378, 14390, 14413-14415) show dropped lines in this extraction:
 * the signature, two PQExpBuffer declarations (oprid/delq per later use),
 * their createPQExpBuffer() calls, the one-time-PREPARE guard condition,
 * several convertRegProcReference()/getFormattedOperatorName() assignment
 * lines, and part of the destroyPQExpBuffer cleanup.  Verify against the
 * original before editing.
 */
14209static void
14211{
14212 DumpOptions *dopt = fout->dopt;
14213 PQExpBuffer query;
14214 PQExpBuffer q;
14217 PQExpBuffer details;
14218 PGresult *res;
14219 int i_oprkind;
14220 int i_oprcode;
14221 int i_oprleft;
14222 int i_oprright;
14223 int i_oprcom;
14224 int i_oprnegate;
14225 int i_oprrest;
14226 int i_oprjoin;
14227 int i_oprcanmerge;
14228 int i_oprcanhash;
14229 char *oprkind;
14230 char *oprcode;
14231 char *oprleft;
14232 char *oprright;
14233 char *oprcom;
14234 char *oprnegate;
14235 char *oprrest;
14236 char *oprjoin;
14237 char *oprcanmerge;
14238 char *oprcanhash;
14239 char *oprregproc;
14240 char *oprref;
14241
14242 /* Do nothing if not dumping schema */
14243 if (!dopt->dumpSchema)
14244 return;
14245
14246 /*
14247 * some operators are invalid because they were the result of user
14248 * defining operators before commutators exist
14249 */
14250 if (!OidIsValid(oprinfo->oprcode))
14251 return;
14252
14253 query = createPQExpBuffer();
14254 q = createPQExpBuffer();
14257 details = createPQExpBuffer();
14258
/* NOTE(review): guard condition (14259) lost; presumably "not yet prepared" check — confirm */
14260 {
14261 /* Set up query for operator-specific details */
14263 "PREPARE dumpOpr(pg_catalog.oid) AS\n"
14264 "SELECT oprkind, "
14265 "oprcode::pg_catalog.regprocedure, "
14266 "oprleft::pg_catalog.regtype, "
14267 "oprright::pg_catalog.regtype, "
14268 "oprcom, "
14269 "oprnegate, "
14270 "oprrest::pg_catalog.regprocedure, "
14271 "oprjoin::pg_catalog.regprocedure, "
14272 "oprcanmerge, oprcanhash "
14273 "FROM pg_catalog.pg_operator "
14274 "WHERE oid = $1");
14275
14276 ExecuteSqlStatement(fout, query->data);
14277
14279 }
14280
14281 printfPQExpBuffer(query,
14282 "EXECUTE dumpOpr('%u')",
14283 oprinfo->dobj.catId.oid);
14284
14285 res = ExecuteSqlQueryForSingleRow(fout, query->data);
14286
14287 i_oprkind = PQfnumber(res, "oprkind");
14288 i_oprcode = PQfnumber(res, "oprcode");
14289 i_oprleft = PQfnumber(res, "oprleft");
14290 i_oprright = PQfnumber(res, "oprright");
14291 i_oprcom = PQfnumber(res, "oprcom");
14292 i_oprnegate = PQfnumber(res, "oprnegate");
14293 i_oprrest = PQfnumber(res, "oprrest");
14294 i_oprjoin = PQfnumber(res, "oprjoin");
14295 i_oprcanmerge = PQfnumber(res, "oprcanmerge");
14296 i_oprcanhash = PQfnumber(res, "oprcanhash");
14297
14298 oprkind = PQgetvalue(res, 0, i_oprkind);
14299 oprcode = PQgetvalue(res, 0, i_oprcode);
14300 oprleft = PQgetvalue(res, 0, i_oprleft);
14301 oprright = PQgetvalue(res, 0, i_oprright);
14302 oprcom = PQgetvalue(res, 0, i_oprcom);
14303 oprnegate = PQgetvalue(res, 0, i_oprnegate);
14304 oprrest = PQgetvalue(res, 0, i_oprrest);
14305 oprjoin = PQgetvalue(res, 0, i_oprjoin);
14308
14309 /* In PG14 upwards postfix operator support does not exist anymore. */
14310 if (strcmp(oprkind, "r") == 0)
14311 pg_log_warning("postfix operators are not supported anymore (operator \"%s\")",
14312 oprcode);
14313
14315 if (oprregproc)
14316 {
14317 appendPQExpBuffer(details, " FUNCTION = %s", oprregproc);
14319 }
14320
14321 appendPQExpBuffer(oprid, "%s (",
14322 oprinfo->dobj.name);
14323
14324 /*
14325 * right unary means there's a left arg and left unary means there's a
14326 * right arg. (Although the "r" case is dead code for PG14 and later,
14327 * continue to support it in case we're dumping from an old server.)
14328 */
14329 if (strcmp(oprkind, "r") == 0 ||
14330 strcmp(oprkind, "b") == 0)
14331 {
14332 appendPQExpBuffer(details, ",\n LEFTARG = %s", oprleft);
14333 appendPQExpBufferStr(oprid, oprleft);
14334 }
14335 else
14336 appendPQExpBufferStr(oprid, "NONE");
14337
14338 if (strcmp(oprkind, "l") == 0 ||
14339 strcmp(oprkind, "b") == 0)
14340 {
14341 appendPQExpBuffer(details, ",\n RIGHTARG = %s", oprright);
14342 appendPQExpBuffer(oprid, ", %s)", oprright);
14343 }
14344 else
14345 appendPQExpBufferStr(oprid, ", NONE)");
14346
/* NOTE(review): oprref = getFormattedOperatorName(oprcom) assignment (14347) lost — confirm */
14348 if (oprref)
14349 {
14350 appendPQExpBuffer(details, ",\n COMMUTATOR = %s", oprref);
14351 free(oprref);
14352 }
14353
/* NOTE(review): oprref = getFormattedOperatorName(oprnegate) assignment (14354) lost — confirm */
14355 if (oprref)
14356 {
14357 appendPQExpBuffer(details, ",\n NEGATOR = %s", oprref);
14358 free(oprref);
14359 }
14360
14361 if (strcmp(oprcanmerge, "t") == 0)
14362 appendPQExpBufferStr(details, ",\n MERGES");
14363
14364 if (strcmp(oprcanhash, "t") == 0)
14365 appendPQExpBufferStr(details, ",\n HASHES");
14366
14368 if (oprregproc)
14369 {
14370 appendPQExpBuffer(details, ",\n RESTRICT = %s", oprregproc);
14372 }
14373
14375 if (oprregproc)
14376 {
14377 appendPQExpBuffer(details, ",\n JOIN = %s", oprregproc);
14379 }
14380
14381 appendPQExpBuffer(delq, "DROP OPERATOR %s.%s;\n",
14382 fmtId(oprinfo->dobj.namespace->dobj.name),
14383 oprid->data);
14384
14385 appendPQExpBuffer(q, "CREATE OPERATOR %s.%s (\n%s\n);\n",
14386 fmtId(oprinfo->dobj.namespace->dobj.name),
14387 oprinfo->dobj.name, details->data);
14388
14389 if (dopt->binary_upgrade)
14391 "OPERATOR", oprid->data,
14392 oprinfo->dobj.namespace->dobj.name);
14393
14394 if (oprinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
14395 ArchiveEntry(fout, oprinfo->dobj.catId, oprinfo->dobj.dumpId,
14396 ARCHIVE_OPTS(.tag = oprinfo->dobj.name,
14397 .namespace = oprinfo->dobj.namespace->dobj.name,
14398 .owner = oprinfo->rolname,
14399 .description = "OPERATOR",
14400 .section = SECTION_PRE_DATA,
14401 .createStmt = q->data,
14402 .dropStmt = delq->data));
14403
14404 /* Dump Operator Comments */
14405 if (oprinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
14406 dumpComment(fout, "OPERATOR", oprid->data,
14407 oprinfo->dobj.namespace->dobj.name, oprinfo->rolname,
14408 oprinfo->dobj.catId, 0, oprinfo->dobj.dumpId);
14409
14410 PQclear(res);
14411
14412 destroyPQExpBuffer(query);
14416 destroyPQExpBuffer(details);
14417}
14418
/*
 * convertRegProcReference
 *		Convert a function reference obtained from pg_operator into the
 *		form to print in a CREATE OPERATOR command.
 *
 * The input is a REGPROCEDURE display string; we must strip off the
 * parenthesized argument-type list, taking care not to be fooled by a
 * '(' appearing inside a double-quoted identifier.
 *
 * Returns a freshly allocated string (caller must free it), or NULL when
 * the input is "-", which the catalogs use to denote a null reference.
 */
static char *
convertRegProcReference(const char *proc)
{
	char	   *result;
	char	   *cp;
	bool		in_quotes = false;

	/* "-" always means no function */
	if (strcmp(proc, "-") == 0)
		return NULL;

	result = pg_strdup(proc);

	/* Truncate at the first left paren that is outside double quotes. */
	for (cp = result; *cp; cp++)
	{
		if (*cp == '"')
			in_quotes = !in_quotes;
		else if (*cp == '(' && !in_quotes)
		{
			*cp = '\0';
			break;
		}
	}

	return result;
}
14454
14455/*
14456 * getFormattedOperatorName - retrieve the operator name for the
14457 * given operator OID (presented in string form).
14458 *
14459 * Returns an allocated string, or NULL if the given OID is invalid.
14460 * Caller is responsible for free'ing result string.
14461 *
14462 * What we produce has the format "OPERATOR(schema.oprname)". This is only
14463 * useful in commands where the operator's argument types can be inferred from
14464 * context. We always schema-qualify the name, though. The predecessor to
14465 * this code tried to skip the schema qualification if possible, but that led
14466 * to wrong results in corner cases, such as if an operator and its negator
14467 * are in different schemas.
14468 */
/*
 * NOTE(review): numbering gaps (14470, 14472, 14478) show dropped lines in
 * this extraction: the signature, the OprInfo pointer declaration, and the
 * catalog lookup that assigns oprInfo (presumably findOprByOid on the parsed
 * OID) — confirm against the original pg_dump.c.
 */
14469static char *
14471{
14473
14474 /* In all cases "0" means a null reference */
14475 if (strcmp(oproid, "0") == 0)
14476 return NULL;
14477
14479 if (oprInfo == NULL)
14480 {
14481 pg_log_warning("could not find operator with OID %s",
14482 oproid);
14483 return NULL;
14484 }
14485
14486 return psprintf("OPERATOR(%s.%s)",
14487 fmtId(oprInfo->dobj.namespace->dobj.name),
14488 oprInfo->dobj.name);
14489}
14490
14491/*
14492 * Convert a function OID obtained from pg_ts_parser or pg_ts_template
14493 *
14494 * It is sufficient to use REGPROC rather than REGPROCEDURE, since the
14495 * argument lists of these functions are predetermined. Note that the
14496 * caller should ensure we are in the proper schema, because the results
14497 * are search path dependent!
14498 */
14499static char *
14501{
14502 char *result;
14503 char query[128];
14504 PGresult *res;
14505
14506 snprintf(query, sizeof(query),
14507 "SELECT '%u'::pg_catalog.regproc", funcOid);
14508 res = ExecuteSqlQueryForSingleRow(fout, query);
14509
14510 result = pg_strdup(PQgetvalue(res, 0, 0));
14511
14512 PQclear(res);
14513
14514 return result;
14515}
14516
14517/*
14518 * dumpAccessMethod
14519 * write out a single access method definition
14520 */
/*
 * NOTE(review): numbering gaps (14522, 14526, 14534, 14551-14552, 14563,
 * 14580-14581) show dropped lines in this extraction: the signature, the
 * delq PQExpBuffer declaration and its createPQExpBuffer() call, the
 * destroyPQExpBuffer() calls in the default-case bailout and at function
 * exit, and the middle line of the binary-upgrade call.  Verify against the
 * original before editing.
 */
14521static void
14523{
14524 DumpOptions *dopt = fout->dopt;
14525 PQExpBuffer q;
14527 char *qamname;
14528
14529 /* Do nothing if not dumping schema */
14530 if (!dopt->dumpSchema)
14531 return;
14532
14533 q = createPQExpBuffer();
14535
14536 qamname = pg_strdup(fmtId(aminfo->dobj.name));
14537
14538 appendPQExpBuffer(q, "CREATE ACCESS METHOD %s ", qamname);
14539
14540 switch (aminfo->amtype)
14541 {
14542 case AMTYPE_INDEX:
14543 appendPQExpBufferStr(q, "TYPE INDEX ");
14544 break;
14545 case AMTYPE_TABLE:
14546 appendPQExpBufferStr(q, "TYPE TABLE ");
14547 break;
14548 default:
14549 pg_log_warning("invalid type \"%c\" of access method \"%s\"",
14550 aminfo->amtype, qamname);
14553 free(qamname);
14554 return;
14555 }
14556
14557 appendPQExpBuffer(q, "HANDLER %s;\n", aminfo->amhandler);
14558
14559 appendPQExpBuffer(delq, "DROP ACCESS METHOD %s;\n",
14560 qamname);
14561
14562 if (dopt->binary_upgrade)
14564 "ACCESS METHOD", qamname, NULL);
14565
14566 if (aminfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
14567 ArchiveEntry(fout, aminfo->dobj.catId, aminfo->dobj.dumpId,
14568 ARCHIVE_OPTS(.tag = aminfo->dobj.name,
14569 .description = "ACCESS METHOD",
14570 .section = SECTION_PRE_DATA,
14571 .createStmt = q->data,
14572 .dropStmt = delq->data));
14573
14574 /* Dump Access Method Comments */
14575 if (aminfo->dobj.dump & DUMP_COMPONENT_COMMENT)
14576 dumpComment(fout, "ACCESS METHOD", qamname,
14577 NULL, "",
14578 aminfo->dobj.catId, 0, aminfo->dobj.dumpId);
14579
14582 free(qamname);
14583}
14584
14585/*
14586 * dumpOpclass
14587 * write out a single operator class definition
14588 */
/*
 * NOTE(review): numbering gaps (14590, 14595-14596, 14613, 14638-14639,
 * 14666-14667, 14670-14671, 14676, 14682, 14691-14692, 14740, 14743,
 * 14749, 14754, 14798, 14800-14801, 14808-14809, 14829, 14834,
 * 14858-14860) show dropped lines in this extraction: the signature, the
 * delq/nameusing PQExpBuffer declarations and creations, several
 * pg_strdup(PQgetvalue(...)) assignments, the fmtQualifiedDumpable-style
 * name emissions inside appendPQExpBuffer argument lists, loop-body
 * per-row assignments, and trailing destroyPQExpBuffer cleanup.  Do not
 * edit from this copy without consulting the original pg_dump.c.
 */
14589static void
14591{
14592 DumpOptions *dopt = fout->dopt;
14593 PQExpBuffer query;
14594 PQExpBuffer q;
14597 PGresult *res;
14598 int ntups;
14599 int i_opcintype;
14600 int i_opckeytype;
14601 int i_opcdefault;
14602 int i_opcfamily;
14603 int i_opcfamilyname;
14604 int i_opcfamilynsp;
14605 int i_amname;
14606 int i_amopstrategy;
14607 int i_amopopr;
14608 int i_sortfamily;
14609 int i_sortfamilynsp;
14610 int i_amprocnum;
14611 int i_amproc;
14612 int i_amproclefttype;
14614 char *opcintype;
14615 char *opckeytype;
14616 char *opcdefault;
14617 char *opcfamily;
14618 char *opcfamilyname;
14619 char *opcfamilynsp;
14620 char *amname;
14621 char *amopstrategy;
14622 char *amopopr;
14623 char *sortfamily;
14624 char *sortfamilynsp;
14625 char *amprocnum;
14626 char *amproc;
14627 char *amproclefttype;
14628 char *amprocrighttype;
14629 bool needComma;
14630 int i;
14631
14632 /* Do nothing if not dumping schema */
14633 if (!dopt->dumpSchema)
14634 return;
14635
14636 query = createPQExpBuffer();
14637 q = createPQExpBuffer();
14640
14641 /* Get additional fields from the pg_opclass row */
14642 appendPQExpBuffer(query, "SELECT opcintype::pg_catalog.regtype, "
14643 "opckeytype::pg_catalog.regtype, "
14644 "opcdefault, opcfamily, "
14645 "opfname AS opcfamilyname, "
14646 "nspname AS opcfamilynsp, "
14647 "(SELECT amname FROM pg_catalog.pg_am WHERE oid = opcmethod) AS amname "
14648 "FROM pg_catalog.pg_opclass c "
14649 "LEFT JOIN pg_catalog.pg_opfamily f ON f.oid = opcfamily "
14650 "LEFT JOIN pg_catalog.pg_namespace n ON n.oid = opfnamespace "
14651 "WHERE c.oid = '%u'::pg_catalog.oid",
14652 opcinfo->dobj.catId.oid);
14653
14654 res = ExecuteSqlQueryForSingleRow(fout, query->data);
14655
14656 i_opcintype = PQfnumber(res, "opcintype");
14657 i_opckeytype = PQfnumber(res, "opckeytype");
14658 i_opcdefault = PQfnumber(res, "opcdefault");
14659 i_opcfamily = PQfnumber(res, "opcfamily");
14660 i_opcfamilyname = PQfnumber(res, "opcfamilyname");
14661 i_opcfamilynsp = PQfnumber(res, "opcfamilynsp");
14662 i_amname = PQfnumber(res, "amname");
14663
14664 /* opcintype may still be needed after we PQclear res */
14665 opcintype = pg_strdup(PQgetvalue(res, 0, i_opcintype));
14668 /* opcfamily will still be needed after we PQclear res */
14669 opcfamily = pg_strdup(PQgetvalue(res, 0, i_opcfamily));
14672 /* amname will still be needed after we PQclear res */
14673 amname = pg_strdup(PQgetvalue(res, 0, i_amname));
14674
14675 appendPQExpBuffer(delq, "DROP OPERATOR CLASS %s",
14677 appendPQExpBuffer(delq, " USING %s;\n",
14678 fmtId(amname));
14679
14680 /* Build the fixed portion of the CREATE command */
14681 appendPQExpBuffer(q, "CREATE OPERATOR CLASS %s\n ",
14683 if (strcmp(opcdefault, "t") == 0)
14684 appendPQExpBufferStr(q, "DEFAULT ");
14685 appendPQExpBuffer(q, "FOR TYPE %s USING %s",
14686 opcintype,
14687 fmtId(amname));
14688 if (strlen(opcfamilyname) > 0)
14689 {
14690 appendPQExpBufferStr(q, " FAMILY ");
14693 }
14694 appendPQExpBufferStr(q, " AS\n ");
14695
14696 needComma = false;
14697
14698 if (strcmp(opckeytype, "-") != 0)
14699 {
14700 appendPQExpBuffer(q, "STORAGE %s",
14701 opckeytype);
14702 needComma = true;
14703 }
14704
14705 PQclear(res);
14706
14707 /*
14708 * Now fetch and print the OPERATOR entries (pg_amop rows).
14709 *
14710 * Print only those opfamily members that are tied to the opclass by
14711 * pg_depend entries.
14712 */
14713 resetPQExpBuffer(query);
14714 appendPQExpBuffer(query, "SELECT amopstrategy, "
14715 "amopopr::pg_catalog.regoperator, "
14716 "opfname AS sortfamily, "
14717 "nspname AS sortfamilynsp "
14718 "FROM pg_catalog.pg_amop ao JOIN pg_catalog.pg_depend ON "
14719 "(classid = 'pg_catalog.pg_amop'::pg_catalog.regclass AND objid = ao.oid) "
14720 "LEFT JOIN pg_catalog.pg_opfamily f ON f.oid = amopsortfamily "
14721 "LEFT JOIN pg_catalog.pg_namespace n ON n.oid = opfnamespace "
14722 "WHERE refclassid = 'pg_catalog.pg_opclass'::pg_catalog.regclass "
14723 "AND refobjid = '%u'::pg_catalog.oid "
14724 "AND amopfamily = '%s'::pg_catalog.oid "
14725 "ORDER BY amopstrategy",
14726 opcinfo->dobj.catId.oid,
14727 opcfamily);
14728
14729 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
14730
14731 ntups = PQntuples(res);
14732
14733 i_amopstrategy = PQfnumber(res, "amopstrategy");
14734 i_amopopr = PQfnumber(res, "amopopr");
14735 i_sortfamily = PQfnumber(res, "sortfamily");
14736 i_sortfamilynsp = PQfnumber(res, "sortfamilynsp");
14737
14738 for (i = 0; i < ntups; i++)
14739 {
14741 amopopr = PQgetvalue(res, i, i_amopopr);
14742 sortfamily = PQgetvalue(res, i, i_sortfamily);
14744
14745 if (needComma)
14746 appendPQExpBufferStr(q, " ,\n ");
14747
14748 appendPQExpBuffer(q, "OPERATOR %s %s",
14750
14751 if (strlen(sortfamily) > 0)
14752 {
14753 appendPQExpBufferStr(q, " FOR ORDER BY ");
14755 appendPQExpBufferStr(q, fmtId(sortfamily));
14756 }
14757
14758 needComma = true;
14759 }
14760
14761 PQclear(res);
14762
14763 /*
14764 * Now fetch and print the FUNCTION entries (pg_amproc rows).
14765 *
14766 * Print only those opfamily members that are tied to the opclass by
14767 * pg_depend entries.
14768 *
14769 * We print the amproclefttype/amprocrighttype even though in most cases
14770 * the backend could deduce the right values, because of the corner case
14771 * of a btree sort support function for a cross-type comparison.
14772 */
14773 resetPQExpBuffer(query);
14774
14775 appendPQExpBuffer(query, "SELECT amprocnum, "
14776 "amproc::pg_catalog.regprocedure, "
14777 "amproclefttype::pg_catalog.regtype, "
14778 "amprocrighttype::pg_catalog.regtype "
14779 "FROM pg_catalog.pg_amproc ap, pg_catalog.pg_depend "
14780 "WHERE refclassid = 'pg_catalog.pg_opclass'::pg_catalog.regclass "
14781 "AND refobjid = '%u'::pg_catalog.oid "
14782 "AND classid = 'pg_catalog.pg_amproc'::pg_catalog.regclass "
14783 "AND objid = ap.oid "
14784 "ORDER BY amprocnum",
14785 opcinfo->dobj.catId.oid);
14786
14787 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
14788
14789 ntups = PQntuples(res);
14790
14791 i_amprocnum = PQfnumber(res, "amprocnum");
14792 i_amproc = PQfnumber(res, "amproc");
14793 i_amproclefttype = PQfnumber(res, "amproclefttype");
14794 i_amprocrighttype = PQfnumber(res, "amprocrighttype");
14795
14796 for (i = 0; i < ntups; i++)
14797 {
14799 amproc = PQgetvalue(res, i, i_amproc);
14802
14803 if (needComma)
14804 appendPQExpBufferStr(q, " ,\n ");
14805
14806 appendPQExpBuffer(q, "FUNCTION %s", amprocnum);
14807
14810
14811 appendPQExpBuffer(q, " %s", amproc);
14812
14813 needComma = true;
14814 }
14815
14816 PQclear(res);
14817
14818 /*
14819 * If needComma is still false it means we haven't added anything after
14820 * the AS keyword. To avoid printing broken SQL, append a dummy STORAGE
14821 * clause with the same datatype. This isn't sanctioned by the
14822 * documentation, but actually DefineOpClass will treat it as a no-op.
14823 */
14824 if (!needComma)
14825 appendPQExpBuffer(q, "STORAGE %s", opcintype);
14826
14827 appendPQExpBufferStr(q, ";\n");
14828
14830 appendPQExpBuffer(nameusing, " USING %s",
14831 fmtId(amname));
14832
14833 if (dopt->binary_upgrade)
14835 "OPERATOR CLASS", nameusing->data,
14836 opcinfo->dobj.namespace->dobj.name);
14837
14838 if (opcinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
14839 ArchiveEntry(fout, opcinfo->dobj.catId, opcinfo->dobj.dumpId,
14840 ARCHIVE_OPTS(.tag = opcinfo->dobj.name,
14841 .namespace = opcinfo->dobj.namespace->dobj.name,
14842 .owner = opcinfo->rolname,
14843 .description = "OPERATOR CLASS",
14844 .section = SECTION_PRE_DATA,
14845 .createStmt = q->data,
14846 .dropStmt = delq->data));
14847
14848 /* Dump Operator Class Comments */
14849 if (opcinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
14850 dumpComment(fout, "OPERATOR CLASS", nameusing->data,
14851 opcinfo->dobj.namespace->dobj.name, opcinfo->rolname,
14852 opcinfo->dobj.catId, 0, opcinfo->dobj.dumpId);
14853
14854 free(opcintype);
14855 free(opcfamily);
14856 free(amname);
14857 destroyPQExpBuffer(query);
14861}
14862
14863/*
14864 * dumpOpfamily
14865 * write out a single operator family definition
14866 *
14867 * Note: this also dumps any "loose" operator members that aren't bound to a
14868 * specific opclass within the opfamily.
14869 */
/*
 * NOTE(review): numbering gaps (14871, 14876-14877, 14879-14880, 14890,
 * 14909-14910, 14931, 14947, 14966, 14972, 14982, 15000-15001, 15003,
 * 15009, 15014, 15033-15036, 15042, 15051, 15056, 15077-15078,
 * 15080-15082) show dropped lines: the signature, the delq/nameusing
 * declarations and creations, the res_ops/res_procs PGresult declarations
 * and the ExecuteSqlQuery() calls that populate them, the qualified
 * opfamily-name emissions, loop-body per-row assignments, and trailing
 * cleanup (PQclear of res_ops/res_procs, destroyPQExpBuffer calls).
 * Verify against the original pg_dump.c before editing.
 */
14870static void
14872{
14873 DumpOptions *dopt = fout->dopt;
14874 PQExpBuffer query;
14875 PQExpBuffer q;
14878 PGresult *res;
14881 int ntups;
14882 int i_amname;
14883 int i_amopstrategy;
14884 int i_amopopr;
14885 int i_sortfamily;
14886 int i_sortfamilynsp;
14887 int i_amprocnum;
14888 int i_amproc;
14889 int i_amproclefttype;
14891 char *amname;
14892 char *amopstrategy;
14893 char *amopopr;
14894 char *sortfamily;
14895 char *sortfamilynsp;
14896 char *amprocnum;
14897 char *amproc;
14898 char *amproclefttype;
14899 char *amprocrighttype;
14900 bool needComma;
14901 int i;
14902
14903 /* Do nothing if not dumping schema */
14904 if (!dopt->dumpSchema)
14905 return;
14906
14907 query = createPQExpBuffer();
14908 q = createPQExpBuffer();
14911
14912 /*
14913 * Fetch only those opfamily members that are tied directly to the
14914 * opfamily by pg_depend entries.
14915 */
14916 appendPQExpBuffer(query, "SELECT amopstrategy, "
14917 "amopopr::pg_catalog.regoperator, "
14918 "opfname AS sortfamily, "
14919 "nspname AS sortfamilynsp "
14920 "FROM pg_catalog.pg_amop ao JOIN pg_catalog.pg_depend ON "
14921 "(classid = 'pg_catalog.pg_amop'::pg_catalog.regclass AND objid = ao.oid) "
14922 "LEFT JOIN pg_catalog.pg_opfamily f ON f.oid = amopsortfamily "
14923 "LEFT JOIN pg_catalog.pg_namespace n ON n.oid = opfnamespace "
14924 "WHERE refclassid = 'pg_catalog.pg_opfamily'::pg_catalog.regclass "
14925 "AND refobjid = '%u'::pg_catalog.oid "
14926 "AND amopfamily = '%u'::pg_catalog.oid "
14927 "ORDER BY amopstrategy",
14928 opfinfo->dobj.catId.oid,
14929 opfinfo->dobj.catId.oid);
14930
14932
14933 resetPQExpBuffer(query);
14934
14935 appendPQExpBuffer(query, "SELECT amprocnum, "
14936 "amproc::pg_catalog.regprocedure, "
14937 "amproclefttype::pg_catalog.regtype, "
14938 "amprocrighttype::pg_catalog.regtype "
14939 "FROM pg_catalog.pg_amproc ap, pg_catalog.pg_depend "
14940 "WHERE refclassid = 'pg_catalog.pg_opfamily'::pg_catalog.regclass "
14941 "AND refobjid = '%u'::pg_catalog.oid "
14942 "AND classid = 'pg_catalog.pg_amproc'::pg_catalog.regclass "
14943 "AND objid = ap.oid "
14944 "ORDER BY amprocnum",
14945 opfinfo->dobj.catId.oid);
14946
14948
14949 /* Get additional fields from the pg_opfamily row */
14950 resetPQExpBuffer(query);
14951
14952 appendPQExpBuffer(query, "SELECT "
14953 "(SELECT amname FROM pg_catalog.pg_am WHERE oid = opfmethod) AS amname "
14954 "FROM pg_catalog.pg_opfamily "
14955 "WHERE oid = '%u'::pg_catalog.oid",
14956 opfinfo->dobj.catId.oid);
14957
14958 res = ExecuteSqlQueryForSingleRow(fout, query->data);
14959
14960 i_amname = PQfnumber(res, "amname");
14961
14962 /* amname will still be needed after we PQclear res */
14963 amname = pg_strdup(PQgetvalue(res, 0, i_amname));
14964
14965 appendPQExpBuffer(delq, "DROP OPERATOR FAMILY %s",
14967 appendPQExpBuffer(delq, " USING %s;\n",
14968 fmtId(amname));
14969
14970 /* Build the fixed portion of the CREATE command */
14971 appendPQExpBuffer(q, "CREATE OPERATOR FAMILY %s",
14973 appendPQExpBuffer(q, " USING %s;\n",
14974 fmtId(amname));
14975
14976 PQclear(res);
14977
14978 /* Do we need an ALTER to add loose members? */
14979 if (PQntuples(res_ops) > 0 || PQntuples(res_procs) > 0)
14980 {
14981 appendPQExpBuffer(q, "ALTER OPERATOR FAMILY %s",
14983 appendPQExpBuffer(q, " USING %s ADD\n ",
14984 fmtId(amname));
14985
14986 needComma = false;
14987
14988 /*
14989 * Now fetch and print the OPERATOR entries (pg_amop rows).
14990 */
14991 ntups = PQntuples(res_ops);
14992
14993 i_amopstrategy = PQfnumber(res_ops, "amopstrategy");
14994 i_amopopr = PQfnumber(res_ops, "amopopr");
14995 i_sortfamily = PQfnumber(res_ops, "sortfamily");
14996 i_sortfamilynsp = PQfnumber(res_ops, "sortfamilynsp");
14997
14998 for (i = 0; i < ntups; i++)
14999 {
15002 sortfamily = PQgetvalue(res_ops, i, i_sortfamily);
15004
15005 if (needComma)
15006 appendPQExpBufferStr(q, " ,\n ");
15007
15008 appendPQExpBuffer(q, "OPERATOR %s %s",
15010
15011 if (strlen(sortfamily) > 0)
15012 {
15013 appendPQExpBufferStr(q, " FOR ORDER BY ");
15015 appendPQExpBufferStr(q, fmtId(sortfamily));
15016 }
15017
15018 needComma = true;
15019 }
15020
15021 /*
15022 * Now fetch and print the FUNCTION entries (pg_amproc rows).
15023 */
15024 ntups = PQntuples(res_procs);
15025
15026 i_amprocnum = PQfnumber(res_procs, "amprocnum");
15027 i_amproc = PQfnumber(res_procs, "amproc");
15028 i_amproclefttype = PQfnumber(res_procs, "amproclefttype");
15029 i_amprocrighttype = PQfnumber(res_procs, "amprocrighttype");
15030
15031 for (i = 0; i < ntups; i++)
15032 {
15037
15038 if (needComma)
15039 appendPQExpBufferStr(q, " ,\n ");
15040
15041 appendPQExpBuffer(q, "FUNCTION %s (%s, %s) %s",
15043 amproc);
15044
15045 needComma = true;
15046 }
15047
15048 appendPQExpBufferStr(q, ";\n");
15049 }
15050
15052 appendPQExpBuffer(nameusing, " USING %s",
15053 fmtId(amname));
15054
15055 if (dopt->binary_upgrade)
15057 "OPERATOR FAMILY", nameusing->data,
15058 opfinfo->dobj.namespace->dobj.name);
15059
15060 if (opfinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
15061 ArchiveEntry(fout, opfinfo->dobj.catId, opfinfo->dobj.dumpId,
15062 ARCHIVE_OPTS(.tag = opfinfo->dobj.name,
15063 .namespace = opfinfo->dobj.namespace->dobj.name,
15064 .owner = opfinfo->rolname,
15065 .description = "OPERATOR FAMILY",
15066 .section = SECTION_PRE_DATA,
15067 .createStmt = q->data,
15068 .dropStmt = delq->data));
15069
15070 /* Dump Operator Family Comments */
15071 if (opfinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
15072 dumpComment(fout, "OPERATOR FAMILY", nameusing->data,
15073 opfinfo->dobj.namespace->dobj.name, opfinfo->rolname,
15074 opfinfo->dobj.catId, 0, opfinfo->dobj.dumpId);
15075
15076 free(amname);
15079 destroyPQExpBuffer(query);
15083}
15084
15085/*
15086 * dumpCollation
15087 * write out a single collation definition
15088 */
/*
 * NOTE(review): numbering gaps (15090, 15095, 15099, 15116, 15124, 15128,
 * 15133, 15136, 15140, 15143, 15146, 15150, 15153, 15172, 15175, 15197,
 * 15202, 15204, 15207, 15210, 15231, 15238, 15242, 15253, 15259, 15263,
 * 15269, 15274, 15277, 15280, 15285, 15287, 15305, 15314, 15337-15338)
 * show dropped lines: the signature, the delq declaration/creation, the
 * i_collisdeterministic declaration, the appendPQExpBufferStr() calls for
 * each version-dependent SELECT fragment, several PQgetvalue assignments
 * (collprovider, collcollate, colllocale, collicurules), the
 * appendStringLiteralAH-style locale emissions, the validity-check
 * conditions preceding each pg_log_warning, and trailing cleanup.  Do not
 * act on this copy without checking the original pg_dump.c.
 */
15089static void
15091{
15092 DumpOptions *dopt = fout->dopt;
15093 PQExpBuffer query;
15094 PQExpBuffer q;
15096 char *qcollname;
15097 PGresult *res;
15098 int i_collprovider;
15100 int i_collcollate;
15101 int i_collctype;
15102 int i_colllocale;
15103 int i_collicurules;
15104 const char *collprovider;
15105 const char *collcollate;
15106 const char *collctype;
15107 const char *colllocale;
15108 const char *collicurules;
15109
15110 /* Do nothing if not dumping schema */
15111 if (!dopt->dumpSchema)
15112 return;
15113
15114 query = createPQExpBuffer();
15115 q = createPQExpBuffer();
15117
15118 qcollname = pg_strdup(fmtId(collinfo->dobj.name));
15119
15120 /* Get collation-specific details */
15121 appendPQExpBufferStr(query, "SELECT ");
15122
15123 if (fout->remoteVersion >= 100000)
15125 "collprovider, "
15126 "collversion, ");
15127 else
15129 "'c' AS collprovider, "
15130 "NULL AS collversion, ");
15131
15132 if (fout->remoteVersion >= 120000)
15134 "collisdeterministic, ");
15135 else
15137 "true AS collisdeterministic, ");
15138
15139 if (fout->remoteVersion >= 170000)
15141 "colllocale, ");
15142 else if (fout->remoteVersion >= 150000)
15144 "colliculocale AS colllocale, ");
15145 else
15147 "NULL AS colllocale, ");
15148
15149 if (fout->remoteVersion >= 160000)
15151 "collicurules, ");
15152 else
15154 "NULL AS collicurules, ");
15155
15156 appendPQExpBuffer(query,
15157 "collcollate, "
15158 "collctype "
15159 "FROM pg_catalog.pg_collation c "
15160 "WHERE c.oid = '%u'::pg_catalog.oid",
15161 collinfo->dobj.catId.oid);
15162
15163 res = ExecuteSqlQueryForSingleRow(fout, query->data);
15164
15165 i_collprovider = PQfnumber(res, "collprovider");
15166 i_collisdeterministic = PQfnumber(res, "collisdeterministic");
15167 i_collcollate = PQfnumber(res, "collcollate");
15168 i_collctype = PQfnumber(res, "collctype");
15169 i_colllocale = PQfnumber(res, "colllocale");
15170 i_collicurules = PQfnumber(res, "collicurules");
15171
15173
15174 if (!PQgetisnull(res, 0, i_collcollate))
15176 else
15177 collcollate = NULL;
15178
15179 if (!PQgetisnull(res, 0, i_collctype))
15180 collctype = PQgetvalue(res, 0, i_collctype);
15181 else
15182 collctype = NULL;
15183
15184 /*
15185 * Before version 15, collcollate and collctype were of type NAME and
15186 * non-nullable. Treat empty strings as NULL for consistency.
15187 */
15188 if (fout->remoteVersion < 150000)
15189 {
15190 if (collcollate[0] == '\0')
15191 collcollate = NULL;
15192 if (collctype[0] == '\0')
15193 collctype = NULL;
15194 }
15195
15196 if (!PQgetisnull(res, 0, i_colllocale))
15198 else
15199 colllocale = NULL;
15200
15201 if (!PQgetisnull(res, 0, i_collicurules))
15203 else
15205
15206 appendPQExpBuffer(delq, "DROP COLLATION %s;\n",
15208
15209 appendPQExpBuffer(q, "CREATE COLLATION %s (",
15211
15212 appendPQExpBufferStr(q, "provider = ");
15213 if (collprovider[0] == 'b')
15214 appendPQExpBufferStr(q, "builtin");
15215 else if (collprovider[0] == 'c')
15216 appendPQExpBufferStr(q, "libc");
15217 else if (collprovider[0] == 'i')
15218 appendPQExpBufferStr(q, "icu");
15219 else if (collprovider[0] == 'd')
15220 /* to allow dumping pg_catalog; not accepted on input */
15221 appendPQExpBufferStr(q, "default");
15222 else
15223 pg_fatal("unrecognized collation provider: %s",
15224 collprovider);
15225
15226 if (strcmp(PQgetvalue(res, 0, i_collisdeterministic), "f") == 0)
15227 appendPQExpBufferStr(q, ", deterministic = false");
15228
15229 if (collprovider[0] == 'd')
15230 {
15232 pg_log_warning("invalid collation \"%s\"", qcollname);
15233
15234 /* no locale -- the default collation cannot be reloaded anyway */
15235 }
15236 else if (collprovider[0] == 'b')
15237 {
15239 pg_log_warning("invalid collation \"%s\"", qcollname);
15240
15241 appendPQExpBufferStr(q, ", locale = ");
15243 fout);
15244 }
15245 else if (collprovider[0] == 'i')
15246 {
15247 if (fout->remoteVersion >= 150000)
15248 {
15249 if (collcollate || collctype || !colllocale)
15250 pg_log_warning("invalid collation \"%s\"", qcollname);
15251
15252 appendPQExpBufferStr(q, ", locale = ");
15254 fout);
15255 }
15256 else
15257 {
15258 if (!collcollate || !collctype || colllocale ||
15260 pg_log_warning("invalid collation \"%s\"", qcollname);
15261
15262 appendPQExpBufferStr(q, ", locale = ");
15264 }
15265
15266 if (collicurules)
15267 {
15268 appendPQExpBufferStr(q, ", rules = ");
15270 }
15271 }
15272 else if (collprovider[0] == 'c')
15273 {
15275 pg_log_warning("invalid collation \"%s\"", qcollname);
15276
15278 {
15279 appendPQExpBufferStr(q, ", locale = ");
15281 }
15282 else
15283 {
15284 appendPQExpBufferStr(q, ", lc_collate = ");
15286 appendPQExpBufferStr(q, ", lc_ctype = ");
15288 }
15289 }
15290 else
15291 pg_fatal("unrecognized collation provider: %s", collprovider);
15292
15293 /*
15294 * For binary upgrade, carry over the collation version. For normal
15295 * dump/restore, omit the version, so that it is computed upon restore.
15296 */
15297 if (dopt->binary_upgrade)
15298 {
15299 int i_collversion;
15300
15301 i_collversion = PQfnumber(res, "collversion");
15302 if (!PQgetisnull(res, 0, i_collversion))
15303 {
15304 appendPQExpBufferStr(q, ", version = ");
15306 PQgetvalue(res, 0, i_collversion),
15307 fout);
15308 }
15309 }
15310
15311 appendPQExpBufferStr(q, ");\n");
15312
15313 if (dopt->binary_upgrade)
15315 "COLLATION", qcollname,
15316 collinfo->dobj.namespace->dobj.name);
15317
15318 if (collinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
15319 ArchiveEntry(fout, collinfo->dobj.catId, collinfo->dobj.dumpId,
15320 ARCHIVE_OPTS(.tag = collinfo->dobj.name,
15321 .namespace = collinfo->dobj.namespace->dobj.name,
15322 .owner = collinfo->rolname,
15323 .description = "COLLATION",
15324 .section = SECTION_PRE_DATA,
15325 .createStmt = q->data,
15326 .dropStmt = delq->data));
15327
15328 /* Dump Collation Comments */
15329 if (collinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
15330 dumpComment(fout, "COLLATION", qcollname,
15331 collinfo->dobj.namespace->dobj.name, collinfo->rolname,
15332 collinfo->dobj.catId, 0, collinfo->dobj.dumpId);
15333
15334 PQclear(res);
15335
15336 destroyPQExpBuffer(query);
15339 free(qcollname);
15340}
15341
15342/*
15343 * dumpConversion
15344 * write out a single conversion definition
15345 */
15346static void
15348{
15349 DumpOptions *dopt = fout->dopt;
15350 PQExpBuffer query;
15351 PQExpBuffer q;
15353 char *qconvname;
15354 PGresult *res;
15355 int i_conforencoding;
15356 int i_contoencoding;
15357 int i_conproc;
15358 int i_condefault;
15359 const char *conforencoding;
15360 const char *contoencoding;
15361 const char *conproc;
15362 bool condefault;
15363
15364 /* Do nothing if not dumping schema */
15365 if (!dopt->dumpSchema)
15366 return;
15367
15368 query = createPQExpBuffer();
15369 q = createPQExpBuffer();
15371
15372 qconvname = pg_strdup(fmtId(convinfo->dobj.name));
15373
15374 /* Get conversion-specific details */
15375 appendPQExpBuffer(query, "SELECT "
15376 "pg_catalog.pg_encoding_to_char(conforencoding) AS conforencoding, "
15377 "pg_catalog.pg_encoding_to_char(contoencoding) AS contoencoding, "
15378 "conproc, condefault "
15379 "FROM pg_catalog.pg_conversion c "
15380 "WHERE c.oid = '%u'::pg_catalog.oid",
15381 convinfo->dobj.catId.oid);
15382
15383 res = ExecuteSqlQueryForSingleRow(fout, query->data);
15384
15385 i_conforencoding = PQfnumber(res, "conforencoding");
15386 i_contoencoding = PQfnumber(res, "contoencoding");
15387 i_conproc = PQfnumber(res, "conproc");
15388 i_condefault = PQfnumber(res, "condefault");
15389
15392 conproc = PQgetvalue(res, 0, i_conproc);
15393 condefault = (PQgetvalue(res, 0, i_condefault)[0] == 't');
15394
15395 appendPQExpBuffer(delq, "DROP CONVERSION %s;\n",
15397
15398 appendPQExpBuffer(q, "CREATE %sCONVERSION %s FOR ",
15399 (condefault) ? "DEFAULT " : "",
15402 appendPQExpBufferStr(q, " TO ");
15404 /* regproc output is already sufficiently quoted */
15405 appendPQExpBuffer(q, " FROM %s;\n", conproc);
15406
15407 if (dopt->binary_upgrade)
15409 "CONVERSION", qconvname,
15410 convinfo->dobj.namespace->dobj.name);
15411
15412 if (convinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
15413 ArchiveEntry(fout, convinfo->dobj.catId, convinfo->dobj.dumpId,
15414 ARCHIVE_OPTS(.tag = convinfo->dobj.name,
15415 .namespace = convinfo->dobj.namespace->dobj.name,
15416 .owner = convinfo->rolname,
15417 .description = "CONVERSION",
15418 .section = SECTION_PRE_DATA,
15419 .createStmt = q->data,
15420 .dropStmt = delq->data));
15421
15422 /* Dump Conversion Comments */
15423 if (convinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
15424 dumpComment(fout, "CONVERSION", qconvname,
15425 convinfo->dobj.namespace->dobj.name, convinfo->rolname,
15426 convinfo->dobj.catId, 0, convinfo->dobj.dumpId);
15427
15428 PQclear(res);
15429
15430 destroyPQExpBuffer(query);
15433 free(qconvname);
15434}
15435
15436/*
15437 * format_aggregate_signature: generate aggregate name and argument list
15438 *
15439 * The argument type names are qualified if needed. The aggregate name
15440 * is never qualified.
15441 */
15442static char *
15444{
15446 int j;
15447
15449 if (honor_quotes)
15450 appendPQExpBufferStr(&buf, fmtId(agginfo->aggfn.dobj.name));
15451 else
15452 appendPQExpBufferStr(&buf, agginfo->aggfn.dobj.name);
15453
15454 if (agginfo->aggfn.nargs == 0)
15455 appendPQExpBufferStr(&buf, "(*)");
15456 else
15457 {
15459 for (j = 0; j < agginfo->aggfn.nargs; j++)
15460 appendPQExpBuffer(&buf, "%s%s",
15461 (j > 0) ? ", " : "",
15463 agginfo->aggfn.argtypes[j],
15464 zeroIsError));
15466 }
15467 return buf.data;
15468}
15469
15470/*
15471 * dumpAgg
15472 * write out a single aggregate definition
15473 */
15474static void
15476{
15477 DumpOptions *dopt = fout->dopt;
15478 PQExpBuffer query;
15479 PQExpBuffer q;
15481 PQExpBuffer details;
15482 char *aggsig; /* identity signature */
15483 char *aggfullsig = NULL; /* full signature */
15484 char *aggsig_tag;
15485 PGresult *res;
15486 int i_agginitval;
15487 int i_aggminitval;
15488 const char *aggtransfn;
15489 const char *aggfinalfn;
15490 const char *aggcombinefn;
15491 const char *aggserialfn;
15492 const char *aggdeserialfn;
15493 const char *aggmtransfn;
15494 const char *aggminvtransfn;
15495 const char *aggmfinalfn;
15496 bool aggfinalextra;
15497 bool aggmfinalextra;
15498 char aggfinalmodify;
15499 char aggmfinalmodify;
15500 const char *aggsortop;
15501 char *aggsortconvop;
15502 char aggkind;
15503 const char *aggtranstype;
15504 const char *aggtransspace;
15505 const char *aggmtranstype;
15506 const char *aggmtransspace;
15507 const char *agginitval;
15508 const char *aggminitval;
15509 const char *proparallel;
15510 char defaultfinalmodify;
15511
15512 /* Do nothing if not dumping schema */
15513 if (!dopt->dumpSchema)
15514 return;
15515
15516 query = createPQExpBuffer();
15517 q = createPQExpBuffer();
15519 details = createPQExpBuffer();
15520
15522 {
15523 /* Set up query for aggregate-specific details */
15525 "PREPARE dumpAgg(pg_catalog.oid) AS\n");
15526
15528 "SELECT "
15529 "aggtransfn,\n"
15530 "aggfinalfn,\n"
15531 "aggtranstype::pg_catalog.regtype,\n"
15532 "agginitval,\n"
15533 "aggsortop,\n"
15534 "pg_catalog.pg_get_function_arguments(p.oid) AS funcargs,\n"
15535 "pg_catalog.pg_get_function_identity_arguments(p.oid) AS funciargs,\n");
15536
15537 if (fout->remoteVersion >= 90400)
15539 "aggkind,\n"
15540 "aggmtransfn,\n"
15541 "aggminvtransfn,\n"
15542 "aggmfinalfn,\n"
15543 "aggmtranstype::pg_catalog.regtype,\n"
15544 "aggfinalextra,\n"
15545 "aggmfinalextra,\n"
15546 "aggtransspace,\n"
15547 "aggmtransspace,\n"
15548 "aggminitval,\n");
15549 else
15551 "'n' AS aggkind,\n"
15552 "'-' AS aggmtransfn,\n"
15553 "'-' AS aggminvtransfn,\n"
15554 "'-' AS aggmfinalfn,\n"
15555 "0 AS aggmtranstype,\n"
15556 "false AS aggfinalextra,\n"
15557 "false AS aggmfinalextra,\n"
15558 "0 AS aggtransspace,\n"
15559 "0 AS aggmtransspace,\n"
15560 "NULL AS aggminitval,\n");
15561
15562 if (fout->remoteVersion >= 90600)
15564 "aggcombinefn,\n"
15565 "aggserialfn,\n"
15566 "aggdeserialfn,\n"
15567 "proparallel,\n");
15568 else
15570 "'-' AS aggcombinefn,\n"
15571 "'-' AS aggserialfn,\n"
15572 "'-' AS aggdeserialfn,\n"
15573 "'u' AS proparallel,\n");
15574
15575 if (fout->remoteVersion >= 110000)
15577 "aggfinalmodify,\n"
15578 "aggmfinalmodify\n");
15579 else
15581 "'0' AS aggfinalmodify,\n"
15582 "'0' AS aggmfinalmodify\n");
15583
15585 "FROM pg_catalog.pg_aggregate a, pg_catalog.pg_proc p "
15586 "WHERE a.aggfnoid = p.oid "
15587 "AND p.oid = $1");
15588
15589 ExecuteSqlStatement(fout, query->data);
15590
15592 }
15593
15594 printfPQExpBuffer(query,
15595 "EXECUTE dumpAgg('%u')",
15596 agginfo->aggfn.dobj.catId.oid);
15597
15598 res = ExecuteSqlQueryForSingleRow(fout, query->data);
15599
15600 i_agginitval = PQfnumber(res, "agginitval");
15601 i_aggminitval = PQfnumber(res, "aggminitval");
15602
15603 aggtransfn = PQgetvalue(res, 0, PQfnumber(res, "aggtransfn"));
15604 aggfinalfn = PQgetvalue(res, 0, PQfnumber(res, "aggfinalfn"));
15605 aggcombinefn = PQgetvalue(res, 0, PQfnumber(res, "aggcombinefn"));
15606 aggserialfn = PQgetvalue(res, 0, PQfnumber(res, "aggserialfn"));
15607 aggdeserialfn = PQgetvalue(res, 0, PQfnumber(res, "aggdeserialfn"));
15608 aggmtransfn = PQgetvalue(res, 0, PQfnumber(res, "aggmtransfn"));
15609 aggminvtransfn = PQgetvalue(res, 0, PQfnumber(res, "aggminvtransfn"));
15610 aggmfinalfn = PQgetvalue(res, 0, PQfnumber(res, "aggmfinalfn"));
15611 aggfinalextra = (PQgetvalue(res, 0, PQfnumber(res, "aggfinalextra"))[0] == 't');
15612 aggmfinalextra = (PQgetvalue(res, 0, PQfnumber(res, "aggmfinalextra"))[0] == 't');
15613 aggfinalmodify = PQgetvalue(res, 0, PQfnumber(res, "aggfinalmodify"))[0];
15614 aggmfinalmodify = PQgetvalue(res, 0, PQfnumber(res, "aggmfinalmodify"))[0];
15615 aggsortop = PQgetvalue(res, 0, PQfnumber(res, "aggsortop"));
15616 aggkind = PQgetvalue(res, 0, PQfnumber(res, "aggkind"))[0];
15617 aggtranstype = PQgetvalue(res, 0, PQfnumber(res, "aggtranstype"));
15618 aggtransspace = PQgetvalue(res, 0, PQfnumber(res, "aggtransspace"));
15619 aggmtranstype = PQgetvalue(res, 0, PQfnumber(res, "aggmtranstype"));
15620 aggmtransspace = PQgetvalue(res, 0, PQfnumber(res, "aggmtransspace"));
15623 proparallel = PQgetvalue(res, 0, PQfnumber(res, "proparallel"));
15624
15625 {
15626 char *funcargs;
15627 char *funciargs;
15628
15629 funcargs = PQgetvalue(res, 0, PQfnumber(res, "funcargs"));
15630 funciargs = PQgetvalue(res, 0, PQfnumber(res, "funciargs"));
15633 }
15634
15636
15637 /* identify default modify flag for aggkind (must match DefineAggregate) */
15639 /* replace omitted flags for old versions */
15640 if (aggfinalmodify == '0')
15642 if (aggmfinalmodify == '0')
15644
15645 /* regproc and regtype output is already sufficiently quoted */
15646 appendPQExpBuffer(details, " SFUNC = %s,\n STYPE = %s",
15647 aggtransfn, aggtranstype);
15648
15649 if (strcmp(aggtransspace, "0") != 0)
15650 {
15651 appendPQExpBuffer(details, ",\n SSPACE = %s",
15652 aggtransspace);
15653 }
15654
15655 if (!PQgetisnull(res, 0, i_agginitval))
15656 {
15657 appendPQExpBufferStr(details, ",\n INITCOND = ");
15659 }
15660
15661 if (strcmp(aggfinalfn, "-") != 0)
15662 {
15663 appendPQExpBuffer(details, ",\n FINALFUNC = %s",
15664 aggfinalfn);
15665 if (aggfinalextra)
15666 appendPQExpBufferStr(details, ",\n FINALFUNC_EXTRA");
15668 {
15669 switch (aggfinalmodify)
15670 {
15672 appendPQExpBufferStr(details, ",\n FINALFUNC_MODIFY = READ_ONLY");
15673 break;
15675 appendPQExpBufferStr(details, ",\n FINALFUNC_MODIFY = SHAREABLE");
15676 break;
15678 appendPQExpBufferStr(details, ",\n FINALFUNC_MODIFY = READ_WRITE");
15679 break;
15680 default:
15681 pg_fatal("unrecognized aggfinalmodify value for aggregate \"%s\"",
15682 agginfo->aggfn.dobj.name);
15683 break;
15684 }
15685 }
15686 }
15687
15688 if (strcmp(aggcombinefn, "-") != 0)
15689 appendPQExpBuffer(details, ",\n COMBINEFUNC = %s", aggcombinefn);
15690
15691 if (strcmp(aggserialfn, "-") != 0)
15692 appendPQExpBuffer(details, ",\n SERIALFUNC = %s", aggserialfn);
15693
15694 if (strcmp(aggdeserialfn, "-") != 0)
15695 appendPQExpBuffer(details, ",\n DESERIALFUNC = %s", aggdeserialfn);
15696
15697 if (strcmp(aggmtransfn, "-") != 0)
15698 {
15699 appendPQExpBuffer(details, ",\n MSFUNC = %s,\n MINVFUNC = %s,\n MSTYPE = %s",
15703 }
15704
15705 if (strcmp(aggmtransspace, "0") != 0)
15706 {
15707 appendPQExpBuffer(details, ",\n MSSPACE = %s",
15709 }
15710
15711 if (!PQgetisnull(res, 0, i_aggminitval))
15712 {
15713 appendPQExpBufferStr(details, ",\n MINITCOND = ");
15715 }
15716
15717 if (strcmp(aggmfinalfn, "-") != 0)
15718 {
15719 appendPQExpBuffer(details, ",\n MFINALFUNC = %s",
15720 aggmfinalfn);
15721 if (aggmfinalextra)
15722 appendPQExpBufferStr(details, ",\n MFINALFUNC_EXTRA");
15724 {
15725 switch (aggmfinalmodify)
15726 {
15728 appendPQExpBufferStr(details, ",\n MFINALFUNC_MODIFY = READ_ONLY");
15729 break;
15731 appendPQExpBufferStr(details, ",\n MFINALFUNC_MODIFY = SHAREABLE");
15732 break;
15734 appendPQExpBufferStr(details, ",\n MFINALFUNC_MODIFY = READ_WRITE");
15735 break;
15736 default:
15737 pg_fatal("unrecognized aggmfinalmodify value for aggregate \"%s\"",
15738 agginfo->aggfn.dobj.name);
15739 break;
15740 }
15741 }
15742 }
15743
15745 if (aggsortconvop)
15746 {
15747 appendPQExpBuffer(details, ",\n SORTOP = %s",
15750 }
15751
15753 appendPQExpBufferStr(details, ",\n HYPOTHETICAL");
15754
15756 {
15757 if (proparallel[0] == PROPARALLEL_SAFE)
15758 appendPQExpBufferStr(details, ",\n PARALLEL = safe");
15759 else if (proparallel[0] == PROPARALLEL_RESTRICTED)
15760 appendPQExpBufferStr(details, ",\n PARALLEL = restricted");
15761 else if (proparallel[0] != PROPARALLEL_UNSAFE)
15762 pg_fatal("unrecognized proparallel value for function \"%s\"",
15763 agginfo->aggfn.dobj.name);
15764 }
15765
15766 appendPQExpBuffer(delq, "DROP AGGREGATE %s.%s;\n",
15767 fmtId(agginfo->aggfn.dobj.namespace->dobj.name),
15768 aggsig);
15769
15770 appendPQExpBuffer(q, "CREATE AGGREGATE %s.%s (\n%s\n);\n",
15771 fmtId(agginfo->aggfn.dobj.namespace->dobj.name),
15772 aggfullsig ? aggfullsig : aggsig, details->data);
15773
15774 if (dopt->binary_upgrade)
15776 "AGGREGATE", aggsig,
15777 agginfo->aggfn.dobj.namespace->dobj.name);
15778
15779 if (agginfo->aggfn.dobj.dump & DUMP_COMPONENT_DEFINITION)
15780 ArchiveEntry(fout, agginfo->aggfn.dobj.catId,
15781 agginfo->aggfn.dobj.dumpId,
15782 ARCHIVE_OPTS(.tag = aggsig_tag,
15783 .namespace = agginfo->aggfn.dobj.namespace->dobj.name,
15784 .owner = agginfo->aggfn.rolname,
15785 .description = "AGGREGATE",
15786 .section = SECTION_PRE_DATA,
15787 .createStmt = q->data,
15788 .dropStmt = delq->data));
15789
15790 /* Dump Aggregate Comments */
15791 if (agginfo->aggfn.dobj.dump & DUMP_COMPONENT_COMMENT)
15792 dumpComment(fout, "AGGREGATE", aggsig,
15793 agginfo->aggfn.dobj.namespace->dobj.name,
15794 agginfo->aggfn.rolname,
15795 agginfo->aggfn.dobj.catId, 0, agginfo->aggfn.dobj.dumpId);
15796
15797 if (agginfo->aggfn.dobj.dump & DUMP_COMPONENT_SECLABEL)
15798 dumpSecLabel(fout, "AGGREGATE", aggsig,
15799 agginfo->aggfn.dobj.namespace->dobj.name,
15800 agginfo->aggfn.rolname,
15801 agginfo->aggfn.dobj.catId, 0, agginfo->aggfn.dobj.dumpId);
15802
15803 /*
15804 * Since there is no GRANT ON AGGREGATE syntax, we have to make the ACL
15805 * command look like a function's GRANT; in particular this affects the
15806 * syntax for zero-argument aggregates and ordered-set aggregates.
15807 */
15808 free(aggsig);
15809
15810 aggsig = format_function_signature(fout, &agginfo->aggfn, true);
15811
15812 if (agginfo->aggfn.dobj.dump & DUMP_COMPONENT_ACL)
15813 dumpACL(fout, agginfo->aggfn.dobj.dumpId, InvalidDumpId,
15814 "FUNCTION", aggsig, NULL,
15815 agginfo->aggfn.dobj.namespace->dobj.name,
15816 NULL, agginfo->aggfn.rolname, &agginfo->aggfn.dacl);
15817
15818 free(aggsig);
15821
15822 PQclear(res);
15823
15824 destroyPQExpBuffer(query);
15827 destroyPQExpBuffer(details);
15828}
15829
15830/*
15831 * dumpTSParser
15832 * write out a single text search parser
15833 */
15834static void
15836{
15837 DumpOptions *dopt = fout->dopt;
15838 PQExpBuffer q;
15840 char *qprsname;
15841
15842 /* Do nothing if not dumping schema */
15843 if (!dopt->dumpSchema)
15844 return;
15845
15846 q = createPQExpBuffer();
15848
15849 qprsname = pg_strdup(fmtId(prsinfo->dobj.name));
15850
15851 appendPQExpBuffer(q, "CREATE TEXT SEARCH PARSER %s (\n",
15853
15854 appendPQExpBuffer(q, " START = %s,\n",
15855 convertTSFunction(fout, prsinfo->prsstart));
15856 appendPQExpBuffer(q, " GETTOKEN = %s,\n",
15857 convertTSFunction(fout, prsinfo->prstoken));
15858 appendPQExpBuffer(q, " END = %s,\n",
15859 convertTSFunction(fout, prsinfo->prsend));
15860 if (prsinfo->prsheadline != InvalidOid)
15861 appendPQExpBuffer(q, " HEADLINE = %s,\n",
15862 convertTSFunction(fout, prsinfo->prsheadline));
15863 appendPQExpBuffer(q, " LEXTYPES = %s );\n",
15864 convertTSFunction(fout, prsinfo->prslextype));
15865
15866 appendPQExpBuffer(delq, "DROP TEXT SEARCH PARSER %s;\n",
15868
15869 if (dopt->binary_upgrade)
15871 "TEXT SEARCH PARSER", qprsname,
15872 prsinfo->dobj.namespace->dobj.name);
15873
15874 if (prsinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
15875 ArchiveEntry(fout, prsinfo->dobj.catId, prsinfo->dobj.dumpId,
15876 ARCHIVE_OPTS(.tag = prsinfo->dobj.name,
15877 .namespace = prsinfo->dobj.namespace->dobj.name,
15878 .description = "TEXT SEARCH PARSER",
15879 .section = SECTION_PRE_DATA,
15880 .createStmt = q->data,
15881 .dropStmt = delq->data));
15882
15883 /* Dump Parser Comments */
15884 if (prsinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
15885 dumpComment(fout, "TEXT SEARCH PARSER", qprsname,
15886 prsinfo->dobj.namespace->dobj.name, "",
15887 prsinfo->dobj.catId, 0, prsinfo->dobj.dumpId);
15888
15891 free(qprsname);
15892}
15893
15894/*
15895 * dumpTSDictionary
15896 * write out a single text search dictionary
15897 */
15898static void
15900{
15901 DumpOptions *dopt = fout->dopt;
15902 PQExpBuffer q;
15904 PQExpBuffer query;
15905 char *qdictname;
15906 PGresult *res;
15907 char *nspname;
15908 char *tmplname;
15909
15910 /* Do nothing if not dumping schema */
15911 if (!dopt->dumpSchema)
15912 return;
15913
15914 q = createPQExpBuffer();
15916 query = createPQExpBuffer();
15917
15918 qdictname = pg_strdup(fmtId(dictinfo->dobj.name));
15919
15920 /* Fetch name and namespace of the dictionary's template */
15921 appendPQExpBuffer(query, "SELECT nspname, tmplname "
15922 "FROM pg_ts_template p, pg_namespace n "
15923 "WHERE p.oid = '%u' AND n.oid = tmplnamespace",
15924 dictinfo->dicttemplate);
15925 res = ExecuteSqlQueryForSingleRow(fout, query->data);
15926 nspname = PQgetvalue(res, 0, 0);
15927 tmplname = PQgetvalue(res, 0, 1);
15928
15929 appendPQExpBuffer(q, "CREATE TEXT SEARCH DICTIONARY %s (\n",
15931
15932 appendPQExpBufferStr(q, " TEMPLATE = ");
15933 appendPQExpBuffer(q, "%s.", fmtId(nspname));
15935
15936 PQclear(res);
15937
15938 /* the dictinitoption can be dumped straight into the command */
15939 if (dictinfo->dictinitoption)
15940 appendPQExpBuffer(q, ",\n %s", dictinfo->dictinitoption);
15941
15942 appendPQExpBufferStr(q, " );\n");
15943
15944 appendPQExpBuffer(delq, "DROP TEXT SEARCH DICTIONARY %s;\n",
15946
15947 if (dopt->binary_upgrade)
15949 "TEXT SEARCH DICTIONARY", qdictname,
15950 dictinfo->dobj.namespace->dobj.name);
15951
15952 if (dictinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
15953 ArchiveEntry(fout, dictinfo->dobj.catId, dictinfo->dobj.dumpId,
15954 ARCHIVE_OPTS(.tag = dictinfo->dobj.name,
15955 .namespace = dictinfo->dobj.namespace->dobj.name,
15956 .owner = dictinfo->rolname,
15957 .description = "TEXT SEARCH DICTIONARY",
15958 .section = SECTION_PRE_DATA,
15959 .createStmt = q->data,
15960 .dropStmt = delq->data));
15961
15962 /* Dump Dictionary Comments */
15963 if (dictinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
15964 dumpComment(fout, "TEXT SEARCH DICTIONARY", qdictname,
15965 dictinfo->dobj.namespace->dobj.name, dictinfo->rolname,
15966 dictinfo->dobj.catId, 0, dictinfo->dobj.dumpId);
15967
15970 destroyPQExpBuffer(query);
15971 free(qdictname);
15972}
15973
15974/*
15975 * dumpTSTemplate
15976 * write out a single text search template
15977 */
15978static void
15980{
15981 DumpOptions *dopt = fout->dopt;
15982 PQExpBuffer q;
15984 char *qtmplname;
15985
15986 /* Do nothing if not dumping schema */
15987 if (!dopt->dumpSchema)
15988 return;
15989
15990 q = createPQExpBuffer();
15992
15993 qtmplname = pg_strdup(fmtId(tmplinfo->dobj.name));
15994
15995 appendPQExpBuffer(q, "CREATE TEXT SEARCH TEMPLATE %s (\n",
15997
15998 if (tmplinfo->tmplinit != InvalidOid)
15999 appendPQExpBuffer(q, " INIT = %s,\n",
16000 convertTSFunction(fout, tmplinfo->tmplinit));
16001 appendPQExpBuffer(q, " LEXIZE = %s );\n",
16002 convertTSFunction(fout, tmplinfo->tmpllexize));
16003
16004 appendPQExpBuffer(delq, "DROP TEXT SEARCH TEMPLATE %s;\n",
16006
16007 if (dopt->binary_upgrade)
16009 "TEXT SEARCH TEMPLATE", qtmplname,
16010 tmplinfo->dobj.namespace->dobj.name);
16011
16012 if (tmplinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
16013 ArchiveEntry(fout, tmplinfo->dobj.catId, tmplinfo->dobj.dumpId,
16014 ARCHIVE_OPTS(.tag = tmplinfo->dobj.name,
16015 .namespace = tmplinfo->dobj.namespace->dobj.name,
16016 .description = "TEXT SEARCH TEMPLATE",
16017 .section = SECTION_PRE_DATA,
16018 .createStmt = q->data,
16019 .dropStmt = delq->data));
16020
16021 /* Dump Template Comments */
16022 if (tmplinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
16023 dumpComment(fout, "TEXT SEARCH TEMPLATE", qtmplname,
16024 tmplinfo->dobj.namespace->dobj.name, "",
16025 tmplinfo->dobj.catId, 0, tmplinfo->dobj.dumpId);
16026
16029 free(qtmplname);
16030}
16031
16032/*
16033 * dumpTSConfig
16034 * write out a single text search configuration
16035 */
16036static void
16038{
16039 DumpOptions *dopt = fout->dopt;
16040 PQExpBuffer q;
16042 PQExpBuffer query;
16043 char *qcfgname;
16044 PGresult *res;
16045 char *nspname;
16046 char *prsname;
16047 int ntups,
16048 i;
16049 int i_tokenname;
16050 int i_dictname;
16051
16052 /* Do nothing if not dumping schema */
16053 if (!dopt->dumpSchema)
16054 return;
16055
16056 q = createPQExpBuffer();
16058 query = createPQExpBuffer();
16059
16060 qcfgname = pg_strdup(fmtId(cfginfo->dobj.name));
16061
16062 /* Fetch name and namespace of the config's parser */
16063 appendPQExpBuffer(query, "SELECT nspname, prsname "
16064 "FROM pg_ts_parser p, pg_namespace n "
16065 "WHERE p.oid = '%u' AND n.oid = prsnamespace",
16066 cfginfo->cfgparser);
16067 res = ExecuteSqlQueryForSingleRow(fout, query->data);
16068 nspname = PQgetvalue(res, 0, 0);
16069 prsname = PQgetvalue(res, 0, 1);
16070
16071 appendPQExpBuffer(q, "CREATE TEXT SEARCH CONFIGURATION %s (\n",
16073
16074 appendPQExpBuffer(q, " PARSER = %s.", fmtId(nspname));
16075 appendPQExpBuffer(q, "%s );\n", fmtId(prsname));
16076
16077 PQclear(res);
16078
16079 resetPQExpBuffer(query);
16080 appendPQExpBuffer(query,
16081 "SELECT\n"
16082 " ( SELECT alias FROM pg_catalog.ts_token_type('%u'::pg_catalog.oid) AS t\n"
16083 " WHERE t.tokid = m.maptokentype ) AS tokenname,\n"
16084 " m.mapdict::pg_catalog.regdictionary AS dictname\n"
16085 "FROM pg_catalog.pg_ts_config_map AS m\n"
16086 "WHERE m.mapcfg = '%u'\n"
16087 "ORDER BY m.mapcfg, m.maptokentype, m.mapseqno",
16088 cfginfo->cfgparser, cfginfo->dobj.catId.oid);
16089
16090 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
16091 ntups = PQntuples(res);
16092
16093 i_tokenname = PQfnumber(res, "tokenname");
16094 i_dictname = PQfnumber(res, "dictname");
16095
16096 for (i = 0; i < ntups; i++)
16097 {
16098 char *tokenname = PQgetvalue(res, i, i_tokenname);
16099 char *dictname = PQgetvalue(res, i, i_dictname);
16100
16101 if (i == 0 ||
16102 strcmp(tokenname, PQgetvalue(res, i - 1, i_tokenname)) != 0)
16103 {
16104 /* starting a new token type, so start a new command */
16105 if (i > 0)
16106 appendPQExpBufferStr(q, ";\n");
16107 appendPQExpBuffer(q, "\nALTER TEXT SEARCH CONFIGURATION %s\n",
16109 /* tokenname needs quoting, dictname does NOT */
16110 appendPQExpBuffer(q, " ADD MAPPING FOR %s WITH %s",
16111 fmtId(tokenname), dictname);
16112 }
16113 else
16114 appendPQExpBuffer(q, ", %s", dictname);
16115 }
16116
16117 if (ntups > 0)
16118 appendPQExpBufferStr(q, ";\n");
16119
16120 PQclear(res);
16121
16122 appendPQExpBuffer(delq, "DROP TEXT SEARCH CONFIGURATION %s;\n",
16124
16125 if (dopt->binary_upgrade)
16127 "TEXT SEARCH CONFIGURATION", qcfgname,
16128 cfginfo->dobj.namespace->dobj.name);
16129
16130 if (cfginfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
16131 ArchiveEntry(fout, cfginfo->dobj.catId, cfginfo->dobj.dumpId,
16132 ARCHIVE_OPTS(.tag = cfginfo->dobj.name,
16133 .namespace = cfginfo->dobj.namespace->dobj.name,
16134 .owner = cfginfo->rolname,
16135 .description = "TEXT SEARCH CONFIGURATION",
16136 .section = SECTION_PRE_DATA,
16137 .createStmt = q->data,
16138 .dropStmt = delq->data));
16139
16140 /* Dump Configuration Comments */
16141 if (cfginfo->dobj.dump & DUMP_COMPONENT_COMMENT)
16142 dumpComment(fout, "TEXT SEARCH CONFIGURATION", qcfgname,
16143 cfginfo->dobj.namespace->dobj.name, cfginfo->rolname,
16144 cfginfo->dobj.catId, 0, cfginfo->dobj.dumpId);
16145
16148 destroyPQExpBuffer(query);
16149 free(qcfgname);
16150}
16151
16152/*
16153 * dumpForeignDataWrapper
16154 * write out a single foreign-data wrapper definition
16155 */
16156static void
16158{
16159 DumpOptions *dopt = fout->dopt;
16160 PQExpBuffer q;
16162 char *qfdwname;
16163
16164 /* Do nothing if not dumping schema */
16165 if (!dopt->dumpSchema)
16166 return;
16167
16168 q = createPQExpBuffer();
16170
16171 qfdwname = pg_strdup(fmtId(fdwinfo->dobj.name));
16172
16173 appendPQExpBuffer(q, "CREATE FOREIGN DATA WRAPPER %s",
16174 qfdwname);
16175
16176 if (strcmp(fdwinfo->fdwhandler, "-") != 0)
16177 appendPQExpBuffer(q, " HANDLER %s", fdwinfo->fdwhandler);
16178
16179 if (strcmp(fdwinfo->fdwvalidator, "-") != 0)
16180 appendPQExpBuffer(q, " VALIDATOR %s", fdwinfo->fdwvalidator);
16181
16182 if (strlen(fdwinfo->fdwoptions) > 0)
16183 appendPQExpBuffer(q, " OPTIONS (\n %s\n)", fdwinfo->fdwoptions);
16184
16185 appendPQExpBufferStr(q, ";\n");
16186
16187 appendPQExpBuffer(delq, "DROP FOREIGN DATA WRAPPER %s;\n",
16188 qfdwname);
16189
16190 if (dopt->binary_upgrade)
16192 "FOREIGN DATA WRAPPER", qfdwname,
16193 NULL);
16194
16195 if (fdwinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
16196 ArchiveEntry(fout, fdwinfo->dobj.catId, fdwinfo->dobj.dumpId,
16197 ARCHIVE_OPTS(.tag = fdwinfo->dobj.name,
16198 .owner = fdwinfo->rolname,
16199 .description = "FOREIGN DATA WRAPPER",
16200 .section = SECTION_PRE_DATA,
16201 .createStmt = q->data,
16202 .dropStmt = delq->data));
16203
16204 /* Dump Foreign Data Wrapper Comments */
16205 if (fdwinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
16206 dumpComment(fout, "FOREIGN DATA WRAPPER", qfdwname,
16207 NULL, fdwinfo->rolname,
16208 fdwinfo->dobj.catId, 0, fdwinfo->dobj.dumpId);
16209
16210 /* Handle the ACL */
16211 if (fdwinfo->dobj.dump & DUMP_COMPONENT_ACL)
16212 dumpACL(fout, fdwinfo->dobj.dumpId, InvalidDumpId,
16213 "FOREIGN DATA WRAPPER", qfdwname, NULL, NULL,
16214 NULL, fdwinfo->rolname, &fdwinfo->dacl);
16215
16216 free(qfdwname);
16217
16220}
16221
16222/*
16223 * dumpForeignServer
16224 * write out a foreign server definition
16225 */
16226static void
16228{
16229 DumpOptions *dopt = fout->dopt;
16230 PQExpBuffer q;
16232 PQExpBuffer query;
16233 PGresult *res;
16234 char *qsrvname;
16235 char *fdwname;
16236
16237 /* Do nothing if not dumping schema */
16238 if (!dopt->dumpSchema)
16239 return;
16240
16241 q = createPQExpBuffer();
16243 query = createPQExpBuffer();
16244
16245 qsrvname = pg_strdup(fmtId(srvinfo->dobj.name));
16246
16247 /* look up the foreign-data wrapper */
16248 appendPQExpBuffer(query, "SELECT fdwname "
16249 "FROM pg_foreign_data_wrapper w "
16250 "WHERE w.oid = '%u'",
16251 srvinfo->srvfdw);
16252 res = ExecuteSqlQueryForSingleRow(fout, query->data);
16253 fdwname = PQgetvalue(res, 0, 0);
16254
16255 appendPQExpBuffer(q, "CREATE SERVER %s", qsrvname);
16256 if (srvinfo->srvtype && strlen(srvinfo->srvtype) > 0)
16257 {
16258 appendPQExpBufferStr(q, " TYPE ");
16259 appendStringLiteralAH(q, srvinfo->srvtype, fout);
16260 }
16261 if (srvinfo->srvversion && strlen(srvinfo->srvversion) > 0)
16262 {
16263 appendPQExpBufferStr(q, " VERSION ");
16264 appendStringLiteralAH(q, srvinfo->srvversion, fout);
16265 }
16266
16267 appendPQExpBufferStr(q, " FOREIGN DATA WRAPPER ");
16268 appendPQExpBufferStr(q, fmtId(fdwname));
16269
16270 if (srvinfo->srvoptions && strlen(srvinfo->srvoptions) > 0)
16271 appendPQExpBuffer(q, " OPTIONS (\n %s\n)", srvinfo->srvoptions);
16272
16273 appendPQExpBufferStr(q, ";\n");
16274
16275 appendPQExpBuffer(delq, "DROP SERVER %s;\n",
16276 qsrvname);
16277
16278 if (dopt->binary_upgrade)
16280 "SERVER", qsrvname, NULL);
16281
16282 if (srvinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
16283 ArchiveEntry(fout, srvinfo->dobj.catId, srvinfo->dobj.dumpId,
16284 ARCHIVE_OPTS(.tag = srvinfo->dobj.name,
16285 .owner = srvinfo->rolname,
16286 .description = "SERVER",
16287 .section = SECTION_PRE_DATA,
16288 .createStmt = q->data,
16289 .dropStmt = delq->data));
16290
16291 /* Dump Foreign Server Comments */
16292 if (srvinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
16293 dumpComment(fout, "SERVER", qsrvname,
16294 NULL, srvinfo->rolname,
16295 srvinfo->dobj.catId, 0, srvinfo->dobj.dumpId);
16296
16297 /* Handle the ACL */
16298 if (srvinfo->dobj.dump & DUMP_COMPONENT_ACL)
16299 dumpACL(fout, srvinfo->dobj.dumpId, InvalidDumpId,
16300 "FOREIGN SERVER", qsrvname, NULL, NULL,
16301 NULL, srvinfo->rolname, &srvinfo->dacl);
16302
16303 /* Dump user mappings */
16304 if (srvinfo->dobj.dump & DUMP_COMPONENT_USERMAP)
16306 srvinfo->dobj.name, NULL,
16307 srvinfo->rolname,
16308 srvinfo->dobj.catId, srvinfo->dobj.dumpId);
16309
16310 PQclear(res);
16311
16312 free(qsrvname);
16313
16316 destroyPQExpBuffer(query);
16317}
16318
16319/*
16320 * dumpUserMappings
16321 *
16322 * This routine is used to dump any user mappings associated with the
16323 * server handed to this routine. Should be called after ArchiveEntry()
16324 * for the server.
16325 */
16326static void
16328 const char *servername, const char *namespace,
16329 const char *owner,
16330 CatalogId catalogId, DumpId dumpId)
16331{
16332 PQExpBuffer q;
16334 PQExpBuffer query;
16335 PQExpBuffer tag;
16336 PGresult *res;
16337 int ntups;
16338 int i_usename;
16339 int i_umoptions;
16340 int i;
16341
16342 q = createPQExpBuffer();
16343 tag = createPQExpBuffer();
16345 query = createPQExpBuffer();
16346
16347 /*
16348 * We read from the publicly accessible view pg_user_mappings, so as not
16349 * to fail if run by a non-superuser. Note that the view will show
16350 * umoptions as null if the user hasn't got privileges for the associated
16351 * server; this means that pg_dump will dump such a mapping, but with no
16352 * OPTIONS clause. A possible alternative is to skip such mappings
16353 * altogether, but it's not clear that that's an improvement.
16354 */
16355 appendPQExpBuffer(query,
16356 "SELECT usename, "
16357 "array_to_string(ARRAY("
16358 "SELECT quote_ident(option_name) || ' ' || "
16359 "quote_literal(option_value) "
16360 "FROM pg_options_to_table(umoptions) "
16361 "ORDER BY option_name"
16362 "), E',\n ') AS umoptions "
16363 "FROM pg_user_mappings "
16364 "WHERE srvid = '%u' "
16365 "ORDER BY usename",
16366 catalogId.oid);
16367
16368 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
16369
16370 ntups = PQntuples(res);
16371 i_usename = PQfnumber(res, "usename");
16372 i_umoptions = PQfnumber(res, "umoptions");
16373
16374 for (i = 0; i < ntups; i++)
16375 {
16376 char *usename;
16377 char *umoptions;
16378
16379 usename = PQgetvalue(res, i, i_usename);
16381
16383 appendPQExpBuffer(q, "CREATE USER MAPPING FOR %s", fmtId(usename));
16384 appendPQExpBuffer(q, " SERVER %s", fmtId(servername));
16385
16386 if (umoptions && strlen(umoptions) > 0)
16387 appendPQExpBuffer(q, " OPTIONS (\n %s\n)", umoptions);
16388
16389 appendPQExpBufferStr(q, ";\n");
16390
16392 appendPQExpBuffer(delq, "DROP USER MAPPING FOR %s", fmtId(usename));
16393 appendPQExpBuffer(delq, " SERVER %s;\n", fmtId(servername));
16394
16395 resetPQExpBuffer(tag);
16396 appendPQExpBuffer(tag, "USER MAPPING %s SERVER %s",
16397 usename, servername);
16398
16400 ARCHIVE_OPTS(.tag = tag->data,
16401 .namespace = namespace,
16402 .owner = owner,
16403 .description = "USER MAPPING",
16404 .section = SECTION_PRE_DATA,
16405 .createStmt = q->data,
16406 .dropStmt = delq->data));
16407 }
16408
16409 PQclear(res);
16410
16411 destroyPQExpBuffer(query);
16413 destroyPQExpBuffer(tag);
16415}
16416
16417/*
16418 * Write out default privileges information
16419 */
16420static void
16422{
16423 DumpOptions *dopt = fout->dopt;
16424 PQExpBuffer q;
16425 PQExpBuffer tag;
16426 const char *type;
16427
16428 /* Do nothing if not dumping schema, or if we're skipping ACLs */
16429 if (!dopt->dumpSchema || dopt->aclsSkip)
16430 return;
16431
16432 q = createPQExpBuffer();
16433 tag = createPQExpBuffer();
16434
16435 switch (daclinfo->defaclobjtype)
16436 {
16437 case DEFACLOBJ_RELATION:
16438 type = "TABLES";
16439 break;
16440 case DEFACLOBJ_SEQUENCE:
16441 type = "SEQUENCES";
16442 break;
16443 case DEFACLOBJ_FUNCTION:
16444 type = "FUNCTIONS";
16445 break;
16446 case DEFACLOBJ_TYPE:
16447 type = "TYPES";
16448 break;
16450 type = "SCHEMAS";
16451 break;
16453 type = "LARGE OBJECTS";
16454 break;
16455 default:
16456 /* shouldn't get here */
16457 pg_fatal("unrecognized object type in default privileges: %d",
16458 (int) daclinfo->defaclobjtype);
16459 type = ""; /* keep compiler quiet */
16460 }
16461
16462 appendPQExpBuffer(tag, "DEFAULT PRIVILEGES FOR %s", type);
16463
16464 /* build the actual command(s) for this tuple */
16466 daclinfo->dobj.namespace != NULL ?
16467 daclinfo->dobj.namespace->dobj.name : NULL,
16468 daclinfo->dacl.acl,
16469 daclinfo->dacl.acldefault,
16470 daclinfo->defaclrole,
16472 q))
16473 pg_fatal("could not parse default ACL list (%s)",
16474 daclinfo->dacl.acl);
16475
16476 if (daclinfo->dobj.dump & DUMP_COMPONENT_ACL)
16477 ArchiveEntry(fout, daclinfo->dobj.catId, daclinfo->dobj.dumpId,
16478 ARCHIVE_OPTS(.tag = tag->data,
16479 .namespace = daclinfo->dobj.namespace ?
16480 daclinfo->dobj.namespace->dobj.name : NULL,
16481 .owner = daclinfo->defaclrole,
16482 .description = "DEFAULT ACL",
16483 .section = SECTION_POST_DATA,
16484 .createStmt = q->data));
16485
16486 destroyPQExpBuffer(tag);
16488}
16489
16490/*----------
16491 * Write out grant/revoke information
16492 *
16493 * 'objDumpId' is the dump ID of the underlying object.
16494 * 'altDumpId' can be a second dumpId that the ACL entry must also depend on,
16495 * or InvalidDumpId if there is no need for a second dependency.
16496 * 'type' must be one of
16497 * TABLE, SEQUENCE, FUNCTION, LANGUAGE, SCHEMA, DATABASE, TABLESPACE,
16498 * FOREIGN DATA WRAPPER, SERVER, or LARGE OBJECT.
16499 * 'name' is the formatted name of the object. Must be quoted etc. already.
16500 * 'subname' is the formatted name of the sub-object, if any. Must be quoted.
16501 * (Currently we assume that subname is only provided for table columns.)
16502 * 'nspname' is the namespace the object is in (NULL if none).
16503 * 'tag' is the tag to use for the ACL TOC entry; typically, this is NULL
16504 * to use the default for the object type.
16505 * 'owner' is the owner, NULL if there is no owner (for languages).
16506 * 'dacl' is the DumpableAcl struct for the object.
16507 *
16508 * Returns the dump ID assigned to the ACL TocEntry, or InvalidDumpId if
16509 * no ACL entry was created.
16510 *----------
16511 */
16512static DumpId
16514 const char *type, const char *name, const char *subname,
16515 const char *nspname, const char *tag, const char *owner,
16516 const DumpableAcl *dacl)
16517{
16519 DumpOptions *dopt = fout->dopt;
16520 const char *acls = dacl->acl;
16521 const char *acldefault = dacl->acldefault;
16522 char privtype = dacl->privtype;
16523 const char *initprivs = dacl->initprivs;
16524 const char *baseacls;
16525 PQExpBuffer sql;
16526
16527 /* Do nothing if ACL dump is not enabled */
16528 if (dopt->aclsSkip)
16529 return InvalidDumpId;
16530
16531 /* --data-only skips ACLs *except* large object ACLs */
16532 if (!dopt->dumpSchema && strcmp(type, "LARGE OBJECT") != 0)
16533 return InvalidDumpId;
16534
16535 sql = createPQExpBuffer();
16536
16537 /*
16538 * In binary upgrade mode, we don't run an extension's script but instead
16539 * dump out the objects independently and then recreate them. To preserve
16540 * any initial privileges which were set on extension objects, we need to
16541 * compute the set of GRANT and REVOKE commands necessary to get from the
16542 * default privileges of an object to its initial privileges as recorded
16543 * in pg_init_privs.
16544 *
16545 * At restore time, we apply these commands after having called
16546 * binary_upgrade_set_record_init_privs(true). That tells the backend to
16547 * copy the results into pg_init_privs. This is how we preserve the
16548 * contents of that catalog across binary upgrades.
16549 */
16550 if (dopt->binary_upgrade && privtype == 'e' &&
16551 initprivs && *initprivs != '\0')
16552 {
16553 appendPQExpBufferStr(sql, "SELECT pg_catalog.binary_upgrade_set_record_init_privs(true);\n");
16554 if (!buildACLCommands(name, subname, nspname, type,
16555 initprivs, acldefault, owner,
16556 "", fout->remoteVersion, sql))
16557 pg_fatal("could not parse initial ACL list (%s) or default (%s) for object \"%s\" (%s)",
16558 initprivs, acldefault, name, type);
16559 appendPQExpBufferStr(sql, "SELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\n");
16560 }
16561
16562 /*
16563 * Now figure the GRANT and REVOKE commands needed to get to the object's
16564 * actual current ACL, starting from the initprivs if given, else from the
16565 * object-type-specific default. Also, while buildACLCommands will assume
16566 * that a NULL/empty acls string means it needn't do anything, what that
16567 * actually represents is the object-type-specific default; so we need to
16568 * substitute the acldefault string to get the right results in that case.
16569 */
16570 if (initprivs && *initprivs != '\0')
16571 {
16572 baseacls = initprivs;
16573 if (acls == NULL || *acls == '\0')
16574 acls = acldefault;
16575 }
16576 else
16578
16579 if (!buildACLCommands(name, subname, nspname, type,
16580 acls, baseacls, owner,
16581 "", fout->remoteVersion, sql))
16582 pg_fatal("could not parse ACL list (%s) or default (%s) for object \"%s\" (%s)",
16583 acls, baseacls, name, type);
16584
16585 if (sql->len > 0)
16586 {
16588 DumpId aclDeps[2];
16589 int nDeps = 0;
16590
16591 if (tag)
16593 else if (subname)
16594 appendPQExpBuffer(tagbuf, "COLUMN %s.%s", name, subname);
16595 else
16596 appendPQExpBuffer(tagbuf, "%s %s", type, name);
16597
16598 aclDeps[nDeps++] = objDumpId;
16599 if (altDumpId != InvalidDumpId)
16600 aclDeps[nDeps++] = altDumpId;
16601
16603
16605 ARCHIVE_OPTS(.tag = tagbuf->data,
16606 .namespace = nspname,
16607 .owner = owner,
16608 .description = "ACL",
16609 .section = SECTION_NONE,
16610 .createStmt = sql->data,
16611 .deps = aclDeps,
16612 .nDeps = nDeps));
16613
16615 }
16616
16617 destroyPQExpBuffer(sql);
16618
16619 return aclDumpId;
16620}
16621
16622/*
16623 * dumpSecLabel
16624 *
16625 * This routine is used to dump any security labels associated with the
16626 * object handed to this routine. The routine takes the object type
16627 * and object name (ready to print, except for schema decoration), plus
16628 * the namespace and owner of the object (for labeling the ArchiveEntry),
16629 * plus catalog ID and subid which are the lookup key for pg_seclabel,
16630 * plus the dump ID for the object (for setting a dependency).
16631 * If a matching pg_seclabel entry is found, it is dumped.
16632 *
16633 * Note: although this routine takes a dumpId for dependency purposes,
16634 * that purpose is just to mark the dependency in the emitted dump file
16635 * for possible future use by pg_restore. We do NOT use it for determining
16636 * ordering of the label in the dump file, because this routine is called
16637 * after dependency sorting occurs. This routine should be called just after
16638 * calling ArchiveEntry() for the specified object.
16639 */
16640static void
16641dumpSecLabel(Archive *fout, const char *type, const char *name,
16642 const char *namespace, const char *owner,
16643 CatalogId catalogId, int subid, DumpId dumpId)
16644{
16645 DumpOptions *dopt = fout->dopt;
16647 int nlabels;
16648 int i;
16649 PQExpBuffer query;
16650
16651 /* do nothing, if --no-security-labels is supplied */
16652 if (dopt->no_security_labels)
16653 return;
16654
16655 /*
16656 * Security labels are schema not data ... except large object labels are
16657 * data
16658 */
16659 if (strcmp(type, "LARGE OBJECT") != 0)
16660 {
16661 if (!dopt->dumpSchema)
16662 return;
16663 }
16664 else
16665 {
16666 /* We do dump large object security labels in binary-upgrade mode */
16667 if (!dopt->dumpData && !dopt->binary_upgrade)
16668 return;
16669 }
16670
16671 /* Search for security labels associated with catalogId, using table */
16672 nlabels = findSecLabels(catalogId.tableoid, catalogId.oid, &labels);
16673
16674 query = createPQExpBuffer();
16675
16676 for (i = 0; i < nlabels; i++)
16677 {
16678 /*
16679 * Ignore label entries for which the subid doesn't match.
16680 */
16681 if (labels[i].objsubid != subid)
16682 continue;
16683
16684 appendPQExpBuffer(query,
16685 "SECURITY LABEL FOR %s ON %s ",
16687 if (namespace && *namespace)
16688 appendPQExpBuffer(query, "%s.", fmtId(namespace));
16689 appendPQExpBuffer(query, "%s IS ", name);
16691 appendPQExpBufferStr(query, ";\n");
16692 }
16693
16694 if (query->len > 0)
16695 {
16697
16698 appendPQExpBuffer(tag, "%s %s", type, name);
16700 ARCHIVE_OPTS(.tag = tag->data,
16701 .namespace = namespace,
16702 .owner = owner,
16703 .description = "SECURITY LABEL",
16704 .section = SECTION_NONE,
16705 .createStmt = query->data,
16706 .deps = &dumpId,
16707 .nDeps = 1));
16708 destroyPQExpBuffer(tag);
16709 }
16710
16711 destroyPQExpBuffer(query);
16712}
16713
16714/*
16715 * dumpTableSecLabel
16716 *
16717 * As above, but dump security label for both the specified table (or view)
16718 * and its columns.
16719 */
16720static void
16722{
16723 DumpOptions *dopt = fout->dopt;
16725 int nlabels;
16726 int i;
16727 PQExpBuffer query;
16728 PQExpBuffer target;
16729
16730 /* do nothing, if --no-security-labels is supplied */
16731 if (dopt->no_security_labels)
16732 return;
16733
16734 /* SecLabel are SCHEMA not data */
16735 if (!dopt->dumpSchema)
16736 return;
16737
16738 /* Search for comments associated with relation, using table */
16739 nlabels = findSecLabels(tbinfo->dobj.catId.tableoid,
16740 tbinfo->dobj.catId.oid,
16741 &labels);
16742
16743 /* If security labels exist, build SECURITY LABEL statements */
16744 if (nlabels <= 0)
16745 return;
16746
16747 query = createPQExpBuffer();
16748 target = createPQExpBuffer();
16749
16750 for (i = 0; i < nlabels; i++)
16751 {
16752 const char *colname;
16753 const char *provider = labels[i].provider;
16754 const char *label = labels[i].label;
16755 int objsubid = labels[i].objsubid;
16756
16757 resetPQExpBuffer(target);
16758 if (objsubid == 0)
16759 {
16760 appendPQExpBuffer(target, "%s %s", reltypename,
16762 }
16763 else
16764 {
16765 colname = getAttrName(objsubid, tbinfo);
16766 /* first fmtXXX result must be consumed before calling again */
16767 appendPQExpBuffer(target, "COLUMN %s",
16769 appendPQExpBuffer(target, ".%s", fmtId(colname));
16770 }
16771 appendPQExpBuffer(query, "SECURITY LABEL FOR %s ON %s IS ",
16772 fmtId(provider), target->data);
16774 appendPQExpBufferStr(query, ";\n");
16775 }
16776 if (query->len > 0)
16777 {
16778 resetPQExpBuffer(target);
16779 appendPQExpBuffer(target, "%s %s", reltypename,
16780 fmtId(tbinfo->dobj.name));
16782 ARCHIVE_OPTS(.tag = target->data,
16783 .namespace = tbinfo->dobj.namespace->dobj.name,
16784 .owner = tbinfo->rolname,
16785 .description = "SECURITY LABEL",
16786 .section = SECTION_NONE,
16787 .createStmt = query->data,
16788 .deps = &(tbinfo->dobj.dumpId),
16789 .nDeps = 1));
16790 }
16791 destroyPQExpBuffer(query);
16792 destroyPQExpBuffer(target);
16793}
16794
16795/*
16796 * findSecLabels
16797 *
16798 * Find the security label(s), if any, associated with the given object.
16799 * All the objsubid values associated with the given classoid/objoid are
16800 * found with one search.
16801 */
16802static int
16804{
16806 SecLabelItem *low;
16807 SecLabelItem *high;
16808 int nmatch;
16809
16810 if (nseclabels <= 0) /* no labels, so no match is possible */
16811 {
16812 *items = NULL;
16813 return 0;
16814 }
16815
16816 /*
16817 * Do binary search to find some item matching the object.
16818 */
16819 low = &seclabels[0];
16820 high = &seclabels[nseclabels - 1];
16821 while (low <= high)
16822 {
16823 middle = low + (high - low) / 2;
16824
16825 if (classoid < middle->classoid)
16826 high = middle - 1;
16827 else if (classoid > middle->classoid)
16828 low = middle + 1;
16829 else if (objoid < middle->objoid)
16830 high = middle - 1;
16831 else if (objoid > middle->objoid)
16832 low = middle + 1;
16833 else
16834 break; /* found a match */
16835 }
16836
16837 if (low > high) /* no matches */
16838 {
16839 *items = NULL;
16840 return 0;
16841 }
16842
16843 /*
16844 * Now determine how many items match the object. The search loop
16845 * invariant still holds: only items between low and high inclusive could
16846 * match.
16847 */
16848 nmatch = 1;
16849 while (middle > low)
16850 {
16851 if (classoid != middle[-1].classoid ||
16852 objoid != middle[-1].objoid)
16853 break;
16854 middle--;
16855 nmatch++;
16856 }
16857
16858 *items = middle;
16859
16860 middle += nmatch;
16861 while (middle <= high)
16862 {
16863 if (classoid != middle->classoid ||
16864 objoid != middle->objoid)
16865 break;
16866 middle++;
16867 nmatch++;
16868 }
16869
16870 return nmatch;
16871}
16872
16873/*
16874 * collectSecLabels
16875 *
16876 * Construct a table of all security labels available for database objects;
16877 * also set the has-seclabel component flag for each relevant object.
16878 *
16879 * The table is sorted by classoid/objid/objsubid for speed in lookup.
16880 */
16881static void
16883{
16884 PGresult *res;
16885 PQExpBuffer query;
16886 int i_label;
16887 int i_provider;
16888 int i_classoid;
16889 int i_objoid;
16890 int i_objsubid;
16891 int ntups;
16892 int i;
16893 DumpableObject *dobj;
16894
16895 query = createPQExpBuffer();
16896
16898 "SELECT label, provider, classoid, objoid, objsubid "
16899 "FROM pg_catalog.pg_seclabels "
16900 "ORDER BY classoid, objoid, objsubid");
16901
16902 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
16903
16904 /* Construct lookup table containing OIDs in numeric form */
16905 i_label = PQfnumber(res, "label");
16906 i_provider = PQfnumber(res, "provider");
16907 i_classoid = PQfnumber(res, "classoid");
16908 i_objoid = PQfnumber(res, "objoid");
16909 i_objsubid = PQfnumber(res, "objsubid");
16910
16911 ntups = PQntuples(res);
16912
16914 nseclabels = 0;
16915 dobj = NULL;
16916
16917 for (i = 0; i < ntups; i++)
16918 {
16919 CatalogId objId;
16920 int subid;
16921
16922 objId.tableoid = atooid(PQgetvalue(res, i, i_classoid));
16923 objId.oid = atooid(PQgetvalue(res, i, i_objoid));
16924 subid = atoi(PQgetvalue(res, i, i_objsubid));
16925
16926 /* We needn't remember labels that don't match any dumpable object */
16927 if (dobj == NULL ||
16928 dobj->catId.tableoid != objId.tableoid ||
16929 dobj->catId.oid != objId.oid)
16930 dobj = findObjectByCatalogId(objId);
16931 if (dobj == NULL)
16932 continue;
16933
16934 /*
16935 * Labels on columns of composite types are linked to the type's
16936 * pg_class entry, but we need to set the DUMP_COMPONENT_SECLABEL flag
16937 * in the type's own DumpableObject.
16938 */
16939 if (subid != 0 && dobj->objType == DO_TABLE &&
16940 ((TableInfo *) dobj)->relkind == RELKIND_COMPOSITE_TYPE)
16941 {
16943
16944 cTypeInfo = findTypeByOid(((TableInfo *) dobj)->reltype);
16945 if (cTypeInfo)
16946 cTypeInfo->dobj.components |= DUMP_COMPONENT_SECLABEL;
16947 }
16948 else
16949 dobj->components |= DUMP_COMPONENT_SECLABEL;
16950
16954 seclabels[nseclabels].objoid = objId.oid;
16955 seclabels[nseclabels].objsubid = subid;
16956 nseclabels++;
16957 }
16958
16959 PQclear(res);
16960 destroyPQExpBuffer(query);
16961}
16962
16963/*
16964 * dumpTable
16965 * write out to fout the declarations (not data) of a user-defined table
16966 */
16967static void
16969{
16970 DumpOptions *dopt = fout->dopt;
16972 char *namecopy;
16973
16974 /* Do nothing if not dumping schema */
16975 if (!dopt->dumpSchema)
16976 return;
16977
16978 if (tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
16979 {
16980 if (tbinfo->relkind == RELKIND_SEQUENCE)
16982 else
16984 }
16985
16986 /* Handle the ACL here */
16987 namecopy = pg_strdup(fmtId(tbinfo->dobj.name));
16988 if (tbinfo->dobj.dump & DUMP_COMPONENT_ACL)
16989 {
16990 const char *objtype =
16991 (tbinfo->relkind == RELKIND_SEQUENCE) ? "SEQUENCE" : "TABLE";
16992
16994 dumpACL(fout, tbinfo->dobj.dumpId, InvalidDumpId,
16995 objtype, namecopy, NULL,
16996 tbinfo->dobj.namespace->dobj.name,
16997 NULL, tbinfo->rolname, &tbinfo->dacl);
16998 }
16999
17000 /*
17001 * Handle column ACLs, if any. Note: we pull these with a separate query
17002 * rather than trying to fetch them during getTableAttrs, so that we won't
17003 * miss ACLs on system columns. Doing it this way also allows us to dump
17004 * ACLs for catalogs that we didn't mark "interesting" back in getTables.
17005 */
17006 if ((tbinfo->dobj.dump & DUMP_COMPONENT_ACL) && tbinfo->hascolumnACLs)
17007 {
17009 PGresult *res;
17010 int i;
17011
17013 {
17014 /* Set up query for column ACLs */
17016 "PREPARE getColumnACLs(pg_catalog.oid) AS\n");
17017
17018 if (fout->remoteVersion >= 90600)
17019 {
17020 /*
17021 * In principle we should call acldefault('c', relowner) to
17022 * get the default ACL for a column. However, we don't
17023 * currently store the numeric OID of the relowner in
17024 * TableInfo. We could convert the owner name using regrole,
17025 * but that creates a risk of failure due to concurrent role
17026 * renames. Given that the default ACL for columns is empty
17027 * and is likely to stay that way, it's not worth extra cycles
17028 * and risk to avoid hard-wiring that knowledge here.
17029 */
17031 "SELECT at.attname, "
17032 "at.attacl, "
17033 "'{}' AS acldefault, "
17034 "pip.privtype, pip.initprivs "
17035 "FROM pg_catalog.pg_attribute at "
17036 "LEFT JOIN pg_catalog.pg_init_privs pip ON "
17037 "(at.attrelid = pip.objoid "
17038 "AND pip.classoid = 'pg_catalog.pg_class'::pg_catalog.regclass "
17039 "AND at.attnum = pip.objsubid) "
17040 "WHERE at.attrelid = $1 AND "
17041 "NOT at.attisdropped "
17042 "AND (at.attacl IS NOT NULL OR pip.initprivs IS NOT NULL) "
17043 "ORDER BY at.attnum");
17044 }
17045 else
17046 {
17048 "SELECT attname, attacl, '{}' AS acldefault, "
17049 "NULL AS privtype, NULL AS initprivs "
17050 "FROM pg_catalog.pg_attribute "
17051 "WHERE attrelid = $1 AND NOT attisdropped "
17052 "AND attacl IS NOT NULL "
17053 "ORDER BY attnum");
17054 }
17055
17056 ExecuteSqlStatement(fout, query->data);
17057
17059 }
17060
17061 printfPQExpBuffer(query,
17062 "EXECUTE getColumnACLs('%u')",
17063 tbinfo->dobj.catId.oid);
17064
17065 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
17066
17067 for (i = 0; i < PQntuples(res); i++)
17068 {
17069 char *attname = PQgetvalue(res, i, 0);
17070 char *attacl = PQgetvalue(res, i, 1);
17071 char *acldefault = PQgetvalue(res, i, 2);
17072 char privtype = *(PQgetvalue(res, i, 3));
17073 char *initprivs = PQgetvalue(res, i, 4);
17075 char *attnamecopy;
17076
17077 coldacl.acl = attacl;
17078 coldacl.acldefault = acldefault;
17079 coldacl.privtype = privtype;
17080 coldacl.initprivs = initprivs;
17082
17083 /*
17084 * Column's GRANT type is always TABLE. Each column ACL depends
17085 * on the table-level ACL, since we can restore column ACLs in
17086 * parallel but the table-level ACL has to be done first.
17087 */
17088 dumpACL(fout, tbinfo->dobj.dumpId, tableAclDumpId,
17089 "TABLE", namecopy, attnamecopy,
17090 tbinfo->dobj.namespace->dobj.name,
17091 NULL, tbinfo->rolname, &coldacl);
17093 }
17094 PQclear(res);
17095 destroyPQExpBuffer(query);
17096 }
17097
17098 free(namecopy);
17099}
17100
17101/*
17102 * Create the AS clause for a view or materialized view. The semicolon is
17103 * stripped because a materialized view must add a WITH NO DATA clause.
17104 *
17105 * This returns a new buffer which must be freed by the caller.
17106 */
17107static PQExpBuffer
17109{
17111 PQExpBuffer result = createPQExpBuffer();
17112 PGresult *res;
17113 int len;
17114
17115 /* Fetch the view definition */
17116 appendPQExpBuffer(query,
17117 "SELECT pg_catalog.pg_get_viewdef('%u'::pg_catalog.oid) AS viewdef",
17118 tbinfo->dobj.catId.oid);
17119
17120 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
17121
17122 if (PQntuples(res) != 1)
17123 {
17124 if (PQntuples(res) < 1)
17125 pg_fatal("query to obtain definition of view \"%s\" returned no data",
17126 tbinfo->dobj.name);
17127 else
17128 pg_fatal("query to obtain definition of view \"%s\" returned more than one definition",
17129 tbinfo->dobj.name);
17130 }
17131
17132 len = PQgetlength(res, 0, 0);
17133
17134 if (len == 0)
17135 pg_fatal("definition of view \"%s\" appears to be empty (length zero)",
17136 tbinfo->dobj.name);
17137
17138 /* Strip off the trailing semicolon so that other things may follow. */
17139 Assert(PQgetvalue(res, 0, 0)[len - 1] == ';');
17140 appendBinaryPQExpBuffer(result, PQgetvalue(res, 0, 0), len - 1);
17141
17142 PQclear(res);
17143 destroyPQExpBuffer(query);
17144
17145 return result;
17146}
17147
17148/*
17149 * Create a dummy AS clause for a view. This is used when the real view
17150 * definition has to be postponed because of circular dependencies.
17151 * We must duplicate the view's external properties -- column names and types
17152 * (including collation) -- so that it works for subsequent references.
17153 *
17154 * This returns a new buffer which must be freed by the caller.
17155 */
17156static PQExpBuffer
17158{
17159 PQExpBuffer result = createPQExpBuffer();
17160 int j;
17161
17162 appendPQExpBufferStr(result, "SELECT");
17163
17164 for (j = 0; j < tbinfo->numatts; j++)
17165 {
17166 if (j > 0)
17167 appendPQExpBufferChar(result, ',');
17168 appendPQExpBufferStr(result, "\n ");
17169
17170 appendPQExpBuffer(result, "NULL::%s", tbinfo->atttypnames[j]);
17171
17172 /*
17173 * Must add collation if not default for the type, because CREATE OR
17174 * REPLACE VIEW won't change it
17175 */
17176 if (OidIsValid(tbinfo->attcollation[j]))
17177 {
17178 CollInfo *coll;
17179
17180 coll = findCollationByOid(tbinfo->attcollation[j]);
17181 if (coll)
17182 appendPQExpBuffer(result, " COLLATE %s",
17184 }
17185
17186 appendPQExpBuffer(result, " AS %s", fmtId(tbinfo->attnames[j]));
17187 }
17188
17189 return result;
17190}
17191
17192/*
17193 * dumpTableSchema
17194 * write the declaration (not data) of one user-defined table or view
17195 */
17196static void
17198{
17199 DumpOptions *dopt = fout->dopt;
17203 char *qrelname;
17204 char *qualrelname;
17205 int numParents;
17206 TableInfo **parents;
17207 int actual_atts; /* number of attrs in this CREATE statement */
17208 const char *reltypename;
17209 char *storage;
17210 int j,
17211 k;
17212
17213 /* We had better have loaded per-column details about this table */
17214 Assert(tbinfo->interesting);
17215
17216 qrelname = pg_strdup(fmtId(tbinfo->dobj.name));
17218
17219 if (tbinfo->hasoids)
17220 pg_log_warning("WITH OIDS is not supported anymore (table \"%s\")",
17221 qrelname);
17222
17223 if (dopt->binary_upgrade)
17225
17226 /* Is it a table or a view? */
17227 if (tbinfo->relkind == RELKIND_VIEW)
17228 {
17229 PQExpBuffer result;
17230
17231 /*
17232 * Note: keep this code in sync with the is_view case in dumpRule()
17233 */
17234
17235 reltypename = "VIEW";
17236
17237 appendPQExpBuffer(delq, "DROP VIEW %s;\n", qualrelname);
17238
17239 if (dopt->binary_upgrade)
17241 tbinfo->dobj.catId.oid);
17242
17243 appendPQExpBuffer(q, "CREATE VIEW %s", qualrelname);
17244
17245 if (tbinfo->dummy_view)
17247 else
17248 {
17249 if (nonemptyReloptions(tbinfo->reloptions))
17250 {
17251 appendPQExpBufferStr(q, " WITH (");
17252 appendReloptionsArrayAH(q, tbinfo->reloptions, "", fout);
17253 appendPQExpBufferChar(q, ')');
17254 }
17255 result = createViewAsClause(fout, tbinfo);
17256 }
17257 appendPQExpBuffer(q, " AS\n%s", result->data);
17258 destroyPQExpBuffer(result);
17259
17260 if (tbinfo->checkoption != NULL && !tbinfo->dummy_view)
17261 appendPQExpBuffer(q, "\n WITH %s CHECK OPTION", tbinfo->checkoption);
17262 appendPQExpBufferStr(q, ";\n");
17263 }
17264 else
17265 {
17266 char *partkeydef = NULL;
17267 char *ftoptions = NULL;
17268 char *srvname = NULL;
17269 const char *foreign = "";
17270
17271 /*
17272 * Set reltypename, and collect any relkind-specific data that we
17273 * didn't fetch during getTables().
17274 */
17275 switch (tbinfo->relkind)
17276 {
17278 {
17280 PGresult *res;
17281
17282 reltypename = "TABLE";
17283
17284 /* retrieve partition key definition */
17285 appendPQExpBuffer(query,
17286 "SELECT pg_get_partkeydef('%u')",
17287 tbinfo->dobj.catId.oid);
17288 res = ExecuteSqlQueryForSingleRow(fout, query->data);
17289 partkeydef = pg_strdup(PQgetvalue(res, 0, 0));
17290 PQclear(res);
17291 destroyPQExpBuffer(query);
17292 break;
17293 }
17295 {
17297 PGresult *res;
17298 int i_srvname;
17299 int i_ftoptions;
17300
17301 reltypename = "FOREIGN TABLE";
17302
17303 /* retrieve name of foreign server and generic options */
17304 appendPQExpBuffer(query,
17305 "SELECT fs.srvname, "
17306 "pg_catalog.array_to_string(ARRAY("
17307 "SELECT pg_catalog.quote_ident(option_name) || "
17308 "' ' || pg_catalog.quote_literal(option_value) "
17309 "FROM pg_catalog.pg_options_to_table(ftoptions) "
17310 "ORDER BY option_name"
17311 "), E',\n ') AS ftoptions "
17312 "FROM pg_catalog.pg_foreign_table ft "
17313 "JOIN pg_catalog.pg_foreign_server fs "
17314 "ON (fs.oid = ft.ftserver) "
17315 "WHERE ft.ftrelid = '%u'",
17316 tbinfo->dobj.catId.oid);
17317 res = ExecuteSqlQueryForSingleRow(fout, query->data);
17318 i_srvname = PQfnumber(res, "srvname");
17319 i_ftoptions = PQfnumber(res, "ftoptions");
17322 PQclear(res);
17323 destroyPQExpBuffer(query);
17324
17325 foreign = "FOREIGN ";
17326 break;
17327 }
17328 case RELKIND_MATVIEW:
17329 reltypename = "MATERIALIZED VIEW";
17330 break;
17331 default:
17332 reltypename = "TABLE";
17333 break;
17334 }
17335
17336 numParents = tbinfo->numParents;
17337 parents = tbinfo->parents;
17338
17339 appendPQExpBuffer(delq, "DROP %s %s;\n", reltypename, qualrelname);
17340
17341 if (dopt->binary_upgrade)
17343 tbinfo->dobj.catId.oid);
17344
17345 /*
17346 * PostgreSQL 18 has disabled UNLOGGED for partitioned tables, so
17347 * ignore it when dumping if it was set in this case.
17348 */
17349 appendPQExpBuffer(q, "CREATE %s%s %s",
17350 (tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED &&
17351 tbinfo->relkind != RELKIND_PARTITIONED_TABLE) ?
17352 "UNLOGGED " : "",
17354 qualrelname);
17355
17356 /*
17357 * Attach to type, if reloftype; except in case of a binary upgrade,
17358 * we dump the table normally and attach it to the type afterward.
17359 */
17360 if (OidIsValid(tbinfo->reloftype) && !dopt->binary_upgrade)
17361 appendPQExpBuffer(q, " OF %s",
17362 getFormattedTypeName(fout, tbinfo->reloftype,
17363 zeroIsError));
17364
17365 if (tbinfo->relkind != RELKIND_MATVIEW)
17366 {
17367 /* Dump the attributes */
17368 actual_atts = 0;
17369 for (j = 0; j < tbinfo->numatts; j++)
17370 {
17371 /*
17372 * Normally, dump if it's locally defined in this table, and
17373 * not dropped. But for binary upgrade, we'll dump all the
17374 * columns, and then fix up the dropped and nonlocal cases
17375 * below.
17376 */
17377 if (shouldPrintColumn(dopt, tbinfo, j))
17378 {
17379 bool print_default;
17380 bool print_notnull;
17381
17382 /*
17383 * Default value --- suppress if to be printed separately
17384 * or not at all.
17385 */
17386 print_default = (tbinfo->attrdefs[j] != NULL &&
17387 tbinfo->attrdefs[j]->dobj.dump &&
17388 !tbinfo->attrdefs[j]->separate);
17389
17390 /*
17391 * Not Null constraint --- print it if it is locally
17392 * defined, or if binary upgrade. (In the latter case, we
17393 * reset conislocal below.)
17394 */
17395 print_notnull = (tbinfo->notnull_constrs[j] != NULL &&
17396 (tbinfo->notnull_islocal[j] ||
17397 dopt->binary_upgrade ||
17398 tbinfo->ispartition));
17399
17400 /*
17401 * Skip column if fully defined by reloftype, except in
17402 * binary upgrade
17403 */
17404 if (OidIsValid(tbinfo->reloftype) &&
17406 !dopt->binary_upgrade)
17407 continue;
17408
17409 /* Format properly if not first attr */
17410 if (actual_atts == 0)
17411 appendPQExpBufferStr(q, " (");
17412 else
17413 appendPQExpBufferChar(q, ',');
17414 appendPQExpBufferStr(q, "\n ");
17415 actual_atts++;
17416
17417 /* Attribute name */
17418 appendPQExpBufferStr(q, fmtId(tbinfo->attnames[j]));
17419
17420 if (tbinfo->attisdropped[j])
17421 {
17422 /*
17423 * ALTER TABLE DROP COLUMN clears
17424 * pg_attribute.atttypid, so we will not have gotten a
17425 * valid type name; insert INTEGER as a stopgap. We'll
17426 * clean things up later.
17427 */
17428 appendPQExpBufferStr(q, " INTEGER /* dummy */");
17429 /* and skip to the next column */
17430 continue;
17431 }
17432
17433 /*
17434 * Attribute type; print it except when creating a typed
17435 * table ('OF type_name'), but in binary-upgrade mode,
17436 * print it in that case too.
17437 */
17438 if (dopt->binary_upgrade || !OidIsValid(tbinfo->reloftype))
17439 {
17440 appendPQExpBuffer(q, " %s",
17441 tbinfo->atttypnames[j]);
17442 }
17443
17444 if (print_default)
17445 {
17446 if (tbinfo->attgenerated[j] == ATTRIBUTE_GENERATED_STORED)
17447 appendPQExpBuffer(q, " GENERATED ALWAYS AS (%s) STORED",
17448 tbinfo->attrdefs[j]->adef_expr);
17449 else if (tbinfo->attgenerated[j] == ATTRIBUTE_GENERATED_VIRTUAL)
17450 appendPQExpBuffer(q, " GENERATED ALWAYS AS (%s)",
17451 tbinfo->attrdefs[j]->adef_expr);
17452 else
17453 appendPQExpBuffer(q, " DEFAULT %s",
17454 tbinfo->attrdefs[j]->adef_expr);
17455 }
17456
17457 if (print_notnull)
17458 {
17459 if (tbinfo->notnull_constrs[j][0] == '\0')
17460 appendPQExpBufferStr(q, " NOT NULL");
17461 else
17462 appendPQExpBuffer(q, " CONSTRAINT %s NOT NULL",
17463 fmtId(tbinfo->notnull_constrs[j]));
17464
17465 if (tbinfo->notnull_noinh[j])
17466 appendPQExpBufferStr(q, " NO INHERIT");
17467 }
17468
17469 /* Add collation if not default for the type */
17470 if (OidIsValid(tbinfo->attcollation[j]))
17471 {
17472 CollInfo *coll;
17473
17474 coll = findCollationByOid(tbinfo->attcollation[j]);
17475 if (coll)
17476 appendPQExpBuffer(q, " COLLATE %s",
17478 }
17479 }
17480
17481 /*
17482 * On the other hand, if we choose not to print a column
17483 * (likely because it is created by inheritance), but the
17484 * column has a locally-defined not-null constraint, we need
17485 * to dump the constraint as a standalone object.
17486 *
17487 * This syntax isn't SQL-conforming, but if you wanted
17488 * standard output you wouldn't be creating non-standard
17489 * objects to begin with.
17490 */
17491 if (!shouldPrintColumn(dopt, tbinfo, j) &&
17492 !tbinfo->attisdropped[j] &&
17493 tbinfo->notnull_constrs[j] != NULL &&
17494 tbinfo->notnull_islocal[j])
17495 {
17496 /* Format properly if not first attr */
17497 if (actual_atts == 0)
17498 appendPQExpBufferStr(q, " (");
17499 else
17500 appendPQExpBufferChar(q, ',');
17501 appendPQExpBufferStr(q, "\n ");
17502 actual_atts++;
17503
17504 if (tbinfo->notnull_constrs[j][0] == '\0')
17505 appendPQExpBuffer(q, "NOT NULL %s",
17506 fmtId(tbinfo->attnames[j]));
17507 else
17508 appendPQExpBuffer(q, "CONSTRAINT %s NOT NULL %s",
17509 tbinfo->notnull_constrs[j],
17510 fmtId(tbinfo->attnames[j]));
17511
17512 if (tbinfo->notnull_noinh[j])
17513 appendPQExpBufferStr(q, " NO INHERIT");
17514 }
17515 }
17516
17517 /*
17518 * Add non-inherited CHECK constraints, if any.
17519 *
17520 * For partitions, we need to include check constraints even if
17521 * they're not defined locally, because the ALTER TABLE ATTACH
17522 * PARTITION that we'll emit later expects the constraint to be
17523 * there. (No need to fix conislocal: ATTACH PARTITION does that)
17524 */
17525 for (j = 0; j < tbinfo->ncheck; j++)
17526 {
17527 ConstraintInfo *constr = &(tbinfo->checkexprs[j]);
17528
17529 if (constr->separate ||
17530 (!constr->conislocal && !tbinfo->ispartition))
17531 continue;
17532
17533 if (actual_atts == 0)
17534 appendPQExpBufferStr(q, " (\n ");
17535 else
17536 appendPQExpBufferStr(q, ",\n ");
17537
17538 appendPQExpBuffer(q, "CONSTRAINT %s ",
17539 fmtId(constr->dobj.name));
17540 appendPQExpBufferStr(q, constr->condef);
17541
17542 actual_atts++;
17543 }
17544
17545 if (actual_atts)
17546 appendPQExpBufferStr(q, "\n)");
17547 else if (!(OidIsValid(tbinfo->reloftype) && !dopt->binary_upgrade))
17548 {
17549 /*
17550 * No attributes? we must have a parenthesized attribute list,
17551 * even though empty, when not using the OF TYPE syntax.
17552 */
17553 appendPQExpBufferStr(q, " (\n)");
17554 }
17555
17556 /*
17557 * Emit the INHERITS clause (not for partitions), except in
17558 * binary-upgrade mode.
17559 */
17560 if (numParents > 0 && !tbinfo->ispartition &&
17561 !dopt->binary_upgrade)
17562 {
17563 appendPQExpBufferStr(q, "\nINHERITS (");
17564 for (k = 0; k < numParents; k++)
17565 {
17566 TableInfo *parentRel = parents[k];
17567
17568 if (k > 0)
17569 appendPQExpBufferStr(q, ", ");
17571 }
17572 appendPQExpBufferChar(q, ')');
17573 }
17574
17575 if (tbinfo->relkind == RELKIND_PARTITIONED_TABLE)
17576 appendPQExpBuffer(q, "\nPARTITION BY %s", partkeydef);
17577
17578 if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
17579 appendPQExpBuffer(q, "\nSERVER %s", fmtId(srvname));
17580 }
17581
17582 if (nonemptyReloptions(tbinfo->reloptions) ||
17583 nonemptyReloptions(tbinfo->toast_reloptions))
17584 {
17585 bool addcomma = false;
17586
17587 appendPQExpBufferStr(q, "\nWITH (");
17588 if (nonemptyReloptions(tbinfo->reloptions))
17589 {
17590 addcomma = true;
17591 appendReloptionsArrayAH(q, tbinfo->reloptions, "", fout);
17592 }
17593 if (nonemptyReloptions(tbinfo->toast_reloptions))
17594 {
17595 if (addcomma)
17596 appendPQExpBufferStr(q, ", ");
17597 appendReloptionsArrayAH(q, tbinfo->toast_reloptions, "toast.",
17598 fout);
17599 }
17600 appendPQExpBufferChar(q, ')');
17601 }
17602
17603 /* Dump generic options if any */
17604 if (ftoptions && ftoptions[0])
17605 appendPQExpBuffer(q, "\nOPTIONS (\n %s\n)", ftoptions);
17606
17607 /*
17608 * For materialized views, create the AS clause just like a view. At
17609 * this point, we always mark the view as not populated.
17610 */
17611 if (tbinfo->relkind == RELKIND_MATVIEW)
17612 {
17613 PQExpBuffer result;
17614
17615 result = createViewAsClause(fout, tbinfo);
17616 appendPQExpBuffer(q, " AS\n%s\n WITH NO DATA;\n",
17617 result->data);
17618 destroyPQExpBuffer(result);
17619 }
17620 else
17621 appendPQExpBufferStr(q, ";\n");
17622
17623 /* Materialized views can depend on extensions */
17624 if (tbinfo->relkind == RELKIND_MATVIEW)
17626 "pg_catalog.pg_class",
17627 "MATERIALIZED VIEW",
17628 qualrelname);
17629
17630 /*
17631 * in binary upgrade mode, update the catalog with any missing values
17632 * that might be present.
17633 */
17634 if (dopt->binary_upgrade)
17635 {
17636 for (j = 0; j < tbinfo->numatts; j++)
17637 {
17638 if (tbinfo->attmissingval[j][0] != '\0')
17639 {
17640 appendPQExpBufferStr(q, "\n-- set missing value.\n");
17642 "SELECT pg_catalog.binary_upgrade_set_missing_value(");
17644 appendPQExpBufferStr(q, "::pg_catalog.regclass,");
17645 appendStringLiteralAH(q, tbinfo->attnames[j], fout);
17646 appendPQExpBufferChar(q, ',');
17647 appendStringLiteralAH(q, tbinfo->attmissingval[j], fout);
17648 appendPQExpBufferStr(q, ");\n\n");
17649 }
17650 }
17651 }
17652
17653 /*
17654 * To create binary-compatible heap files, we have to ensure the same
17655 * physical column order, including dropped columns, as in the
17656 * original. Therefore, we create dropped columns above and drop them
17657 * here, also updating their attlen/attalign values so that the
17658 * dropped column can be skipped properly. (We do not bother with
17659 * restoring the original attbyval setting.) Also, inheritance
17660 * relationships are set up by doing ALTER TABLE INHERIT rather than
17661 * using an INHERITS clause --- the latter would possibly mess up the
17662 * column order. That also means we have to take care about setting
17663 * attislocal correctly, plus fix up any inherited CHECK constraints.
17664 * Analogously, we set up typed tables using ALTER TABLE / OF here.
17665 *
17666 * We process foreign and partitioned tables here, even though they
17667 * lack heap storage, because they can participate in inheritance
17668 * relationships and we want this stuff to be consistent across the
17669 * inheritance tree. We can exclude indexes, toast tables, sequences
17670 * and matviews, even though they have storage, because we don't
17671 * support altering or dropping columns in them, nor can they be part
17672 * of inheritance trees.
17673 */
17674 if (dopt->binary_upgrade &&
17675 (tbinfo->relkind == RELKIND_RELATION ||
17676 tbinfo->relkind == RELKIND_FOREIGN_TABLE ||
17677 tbinfo->relkind == RELKIND_PARTITIONED_TABLE))
17678 {
17679 bool firstitem;
17680 bool firstitem_extra;
17681
17682 /*
17683 * Drop any dropped columns. Merge the pg_attribute manipulations
17684 * into a single SQL command, so that we don't cause repeated
17685 * relcache flushes on the target table. Otherwise we risk O(N^2)
17686 * relcache bloat while dropping N columns.
17687 */
17688 resetPQExpBuffer(extra);
17689 firstitem = true;
17690 for (j = 0; j < tbinfo->numatts; j++)
17691 {
17692 if (tbinfo->attisdropped[j])
17693 {
17694 if (firstitem)
17695 {
17696 appendPQExpBufferStr(q, "\n-- For binary upgrade, recreate dropped columns.\n"
17697 "UPDATE pg_catalog.pg_attribute\n"
17698 "SET attlen = v.dlen, "
17699 "attalign = v.dalign, "
17700 "attbyval = false\n"
17701 "FROM (VALUES ");
17702 firstitem = false;
17703 }
17704 else
17705 appendPQExpBufferStr(q, ",\n ");
17706 appendPQExpBufferChar(q, '(');
17707 appendStringLiteralAH(q, tbinfo->attnames[j], fout);
17708 appendPQExpBuffer(q, ", %d, '%c')",
17709 tbinfo->attlen[j],
17710 tbinfo->attalign[j]);
17711 /* The ALTER ... DROP COLUMN commands must come after */
17712 appendPQExpBuffer(extra, "ALTER %sTABLE ONLY %s ",
17714 appendPQExpBuffer(extra, "DROP COLUMN %s;\n",
17715 fmtId(tbinfo->attnames[j]));
17716 }
17717 }
17718 if (!firstitem)
17719 {
17720 appendPQExpBufferStr(q, ") v(dname, dlen, dalign)\n"
17721 "WHERE attrelid = ");
17723 appendPQExpBufferStr(q, "::pg_catalog.regclass\n"
17724 " AND attname = v.dname;\n");
17725 /* Now we can issue the actual DROP COLUMN commands */
17726 appendBinaryPQExpBuffer(q, extra->data, extra->len);
17727 }
17728
17729 /*
17730 * Fix up inherited columns. As above, do the pg_attribute
17731 * manipulations in a single SQL command.
17732 */
17733 firstitem = true;
17734 for (j = 0; j < tbinfo->numatts; j++)
17735 {
17736 if (!tbinfo->attisdropped[j] &&
17737 !tbinfo->attislocal[j])
17738 {
17739 if (firstitem)
17740 {
17741 appendPQExpBufferStr(q, "\n-- For binary upgrade, recreate inherited columns.\n");
17742 appendPQExpBufferStr(q, "UPDATE pg_catalog.pg_attribute\n"
17743 "SET attislocal = false\n"
17744 "WHERE attrelid = ");
17746 appendPQExpBufferStr(q, "::pg_catalog.regclass\n"
17747 " AND attname IN (");
17748 firstitem = false;
17749 }
17750 else
17751 appendPQExpBufferStr(q, ", ");
17752 appendStringLiteralAH(q, tbinfo->attnames[j], fout);
17753 }
17754 }
17755 if (!firstitem)
17756 appendPQExpBufferStr(q, ");\n");
17757
17758 /*
17759 * Fix up not-null constraints that come from inheritance. As
17760 * above, do the pg_constraint manipulations in a single SQL
17761 * command. (Actually, two in special cases, if we're doing an
17762 * upgrade from < 18).
17763 */
17764 firstitem = true;
17765 firstitem_extra = true;
17766 resetPQExpBuffer(extra);
17767 for (j = 0; j < tbinfo->numatts; j++)
17768 {
17769 /*
17770 * If a not-null constraint comes from inheritance, reset
17771 * conislocal. The inhcount is fixed by ALTER TABLE INHERIT,
17772 * below. Special hack: in versions < 18, columns with no
17773 * local definition need their constraint to be matched by
17774 * column number in conkeys instead of by constraint name,
17775 * because the latter is not available. (We distinguish the
17776 * case because the constraint name is the empty string.)
17777 */
17778 if (tbinfo->notnull_constrs[j] != NULL &&
17779 !tbinfo->notnull_islocal[j])
17780 {
17781 if (tbinfo->notnull_constrs[j][0] != '\0')
17782 {
17783 if (firstitem)
17784 {
17785 appendPQExpBufferStr(q, "UPDATE pg_catalog.pg_constraint\n"
17786 "SET conislocal = false\n"
17787 "WHERE contype = 'n' AND conrelid = ");
17789 appendPQExpBufferStr(q, "::pg_catalog.regclass AND\n"
17790 "conname IN (");
17791 firstitem = false;
17792 }
17793 else
17794 appendPQExpBufferStr(q, ", ");
17795 appendStringLiteralAH(q, tbinfo->notnull_constrs[j], fout);
17796 }
17797 else
17798 {
17799 if (firstitem_extra)
17800 {
17801 appendPQExpBufferStr(extra, "UPDATE pg_catalog.pg_constraint\n"
17802 "SET conislocal = false\n"
17803 "WHERE contype = 'n' AND conrelid = ");
17805 appendPQExpBufferStr(extra, "::pg_catalog.regclass AND\n"
17806 "conkey IN (");
17807 firstitem_extra = false;
17808 }
17809 else
17810 appendPQExpBufferStr(extra, ", ");
17811 appendPQExpBuffer(extra, "'{%d}'", j + 1);
17812 }
17813 }
17814 }
17815 if (!firstitem)
17816 appendPQExpBufferStr(q, ");\n");
17817 if (!firstitem_extra)
17818 appendPQExpBufferStr(extra, ");\n");
17819
17820 if (extra->len > 0)
17821 appendBinaryPQExpBuffer(q, extra->data, extra->len);
17822
17823 /*
17824 * Add inherited CHECK constraints, if any.
17825 *
17826 * For partitions, they were already dumped, and conislocal
17827 * doesn't need fixing.
17828 *
17829 * As above, issue only one direct manipulation of pg_constraint.
17830 * Although it is tempting to merge the ALTER ADD CONSTRAINT
17831 * commands into one as well, refrain for now due to concern about
17832 * possible backend memory bloat if there are many such
17833 * constraints.
17834 */
17835 resetPQExpBuffer(extra);
17836 firstitem = true;
17837 for (k = 0; k < tbinfo->ncheck; k++)
17838 {
17839 ConstraintInfo *constr = &(tbinfo->checkexprs[k]);
17840
17841 if (constr->separate || constr->conislocal || tbinfo->ispartition)
17842 continue;
17843
17844 if (firstitem)
17845 appendPQExpBufferStr(q, "\n-- For binary upgrade, set up inherited constraints.\n");
17846 appendPQExpBuffer(q, "ALTER %sTABLE ONLY %s ADD CONSTRAINT %s %s;\n",
17848 fmtId(constr->dobj.name),
17849 constr->condef);
17850 /* Update pg_constraint after all the ALTER TABLEs */
17851 if (firstitem)
17852 {
17853 appendPQExpBufferStr(extra, "UPDATE pg_catalog.pg_constraint\n"
17854 "SET conislocal = false\n"
17855 "WHERE contype = 'c' AND conrelid = ");
17857 appendPQExpBufferStr(extra, "::pg_catalog.regclass\n");
17858 appendPQExpBufferStr(extra, " AND conname IN (");
17859 firstitem = false;
17860 }
17861 else
17862 appendPQExpBufferStr(extra, ", ");
17863 appendStringLiteralAH(extra, constr->dobj.name, fout);
17864 }
17865 if (!firstitem)
17866 {
17867 appendPQExpBufferStr(extra, ");\n");
17868 appendBinaryPQExpBuffer(q, extra->data, extra->len);
17869 }
17870
17871 if (numParents > 0 && !tbinfo->ispartition)
17872 {
17873 appendPQExpBufferStr(q, "\n-- For binary upgrade, set up inheritance this way.\n");
17874 for (k = 0; k < numParents; k++)
17875 {
17876 TableInfo *parentRel = parents[k];
17877
17878 appendPQExpBuffer(q, "ALTER %sTABLE ONLY %s INHERIT %s;\n", foreign,
17881 }
17882 }
17883
17884 if (OidIsValid(tbinfo->reloftype))
17885 {
17886 appendPQExpBufferStr(q, "\n-- For binary upgrade, set up typed tables this way.\n");
17887 appendPQExpBuffer(q, "ALTER TABLE ONLY %s OF %s;\n",
17889 getFormattedTypeName(fout, tbinfo->reloftype,
17890 zeroIsError));
17891 }
17892 }
17893
17894 /*
17895 * In binary_upgrade mode, arrange to restore the old relfrozenxid and
17896 * relminmxid of all vacuumable relations. (While vacuum.c processes
17897 * TOAST tables semi-independently, here we see them only as children
17898 * of other relations; so this "if" lacks RELKIND_TOASTVALUE, and the
17899 * child toast table is handled below.)
17900 */
17901 if (dopt->binary_upgrade &&
17902 (tbinfo->relkind == RELKIND_RELATION ||
17903 tbinfo->relkind == RELKIND_MATVIEW))
17904 {
17905 appendPQExpBufferStr(q, "\n-- For binary upgrade, set heap's relfrozenxid and relminmxid\n");
17906 appendPQExpBuffer(q, "UPDATE pg_catalog.pg_class\n"
17907 "SET relfrozenxid = '%u', relminmxid = '%u'\n"
17908 "WHERE oid = ",
17909 tbinfo->frozenxid, tbinfo->minmxid);
17911 appendPQExpBufferStr(q, "::pg_catalog.regclass;\n");
17912
17913 if (tbinfo->toast_oid)
17914 {
17915 /*
17916 * The toast table will have the same OID at restore, so we
17917 * can safely target it by OID.
17918 */
17919 appendPQExpBufferStr(q, "\n-- For binary upgrade, set toast's relfrozenxid and relminmxid\n");
17920 appendPQExpBuffer(q, "UPDATE pg_catalog.pg_class\n"
17921 "SET relfrozenxid = '%u', relminmxid = '%u'\n"
17922 "WHERE oid = '%u';\n",
17923 tbinfo->toast_frozenxid,
17924 tbinfo->toast_minmxid, tbinfo->toast_oid);
17925 }
17926 }
17927
17928 /*
17929 * In binary_upgrade mode, restore matviews' populated status by
17930 * poking pg_class directly. This is pretty ugly, but we can't use
17931 * REFRESH MATERIALIZED VIEW since it's possible that some underlying
17932 * matview is not populated even though this matview is; in any case,
17933 * we want to transfer the matview's heap storage, not run REFRESH.
17934 */
17935 if (dopt->binary_upgrade && tbinfo->relkind == RELKIND_MATVIEW &&
17936 tbinfo->relispopulated)
17937 {
17938 appendPQExpBufferStr(q, "\n-- For binary upgrade, mark materialized view as populated\n");
17939 appendPQExpBufferStr(q, "UPDATE pg_catalog.pg_class\n"
17940 "SET relispopulated = 't'\n"
17941 "WHERE oid = ");
17943 appendPQExpBufferStr(q, "::pg_catalog.regclass;\n");
17944 }
17945
17946 /*
17947 * Dump additional per-column properties that we can't handle in the
17948 * main CREATE TABLE command.
17949 */
17950 for (j = 0; j < tbinfo->numatts; j++)
17951 {
17952 /* None of this applies to dropped columns */
17953 if (tbinfo->attisdropped[j])
17954 continue;
17955
17956 /*
17957 * Dump per-column statistics information. We only issue an ALTER
17958 * TABLE statement if the attstattarget entry for this column is
17959 * not the default value.
17960 */
17961 if (tbinfo->attstattarget[j] >= 0)
17962 appendPQExpBuffer(q, "ALTER %sTABLE ONLY %s ALTER COLUMN %s SET STATISTICS %d;\n",
17964 fmtId(tbinfo->attnames[j]),
17965 tbinfo->attstattarget[j]);
17966
17967 /*
17968 * Dump per-column storage information. The statement is only
17969 * dumped if the storage has been changed from the type's default.
17970 */
17971 if (tbinfo->attstorage[j] != tbinfo->typstorage[j])
17972 {
17973 switch (tbinfo->attstorage[j])
17974 {
17975 case TYPSTORAGE_PLAIN:
17976 storage = "PLAIN";
17977 break;
17979 storage = "EXTERNAL";
17980 break;
17982 storage = "EXTENDED";
17983 break;
17984 case TYPSTORAGE_MAIN:
17985 storage = "MAIN";
17986 break;
17987 default:
17988 storage = NULL;
17989 }
17990
17991 /*
17992 * Only dump the statement if it's a storage type we recognize
17993 */
17994 if (storage != NULL)
17995 appendPQExpBuffer(q, "ALTER %sTABLE ONLY %s ALTER COLUMN %s SET STORAGE %s;\n",
17997 fmtId(tbinfo->attnames[j]),
17998 storage);
17999 }
18000
18001 /*
18002 * Dump per-column compression, if it's been set.
18003 */
18004 if (!dopt->no_toast_compression)
18005 {
18006 const char *cmname;
18007
18008 switch (tbinfo->attcompression[j])
18009 {
18010 case 'p':
18011 cmname = "pglz";
18012 break;
18013 case 'l':
18014 cmname = "lz4";
18015 break;
18016 default:
18017 cmname = NULL;
18018 break;
18019 }
18020
18021 if (cmname != NULL)
18022 appendPQExpBuffer(q, "ALTER %sTABLE ONLY %s ALTER COLUMN %s SET COMPRESSION %s;\n",
18024 fmtId(tbinfo->attnames[j]),
18025 cmname);
18026 }
18027
18028 /*
18029 * Dump per-column attributes.
18030 */
18031 if (tbinfo->attoptions[j][0] != '\0')
18032 appendPQExpBuffer(q, "ALTER %sTABLE ONLY %s ALTER COLUMN %s SET (%s);\n",
18034 fmtId(tbinfo->attnames[j]),
18035 tbinfo->attoptions[j]);
18036
18037 /*
18038 * Dump per-column fdw options.
18039 */
18040 if (tbinfo->relkind == RELKIND_FOREIGN_TABLE &&
18041 tbinfo->attfdwoptions[j][0] != '\0')
18043 "ALTER FOREIGN TABLE ONLY %s ALTER COLUMN %s OPTIONS (\n"
18044 " %s\n"
18045 ");\n",
18047 fmtId(tbinfo->attnames[j]),
18048 tbinfo->attfdwoptions[j]);
18049 } /* end loop over columns */
18050
18052 free(ftoptions);
18053 free(srvname);
18054 }
18055
18056 /*
18057 * dump properties we only have ALTER TABLE syntax for
18058 */
18059 if ((tbinfo->relkind == RELKIND_RELATION ||
18060 tbinfo->relkind == RELKIND_PARTITIONED_TABLE ||
18061 tbinfo->relkind == RELKIND_MATVIEW) &&
18062 tbinfo->relreplident != REPLICA_IDENTITY_DEFAULT)
18063 {
18064 if (tbinfo->relreplident == REPLICA_IDENTITY_INDEX)
18065 {
18066 /* nothing to do, will be set when the index is dumped */
18067 }
18068 else if (tbinfo->relreplident == REPLICA_IDENTITY_NOTHING)
18069 {
18070 appendPQExpBuffer(q, "\nALTER TABLE ONLY %s REPLICA IDENTITY NOTHING;\n",
18071 qualrelname);
18072 }
18073 else if (tbinfo->relreplident == REPLICA_IDENTITY_FULL)
18074 {
18075 appendPQExpBuffer(q, "\nALTER TABLE ONLY %s REPLICA IDENTITY FULL;\n",
18076 qualrelname);
18077 }
18078 }
18079
18080 if (tbinfo->forcerowsec)
18081 appendPQExpBuffer(q, "\nALTER TABLE ONLY %s FORCE ROW LEVEL SECURITY;\n",
18082 qualrelname);
18083
18084 if (dopt->binary_upgrade)
18087 tbinfo->dobj.namespace->dobj.name);
18088
18089 if (tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
18090 {
18091 char *tablespace = NULL;
18092 char *tableam = NULL;
18093
18094 /*
18095 * _selectTablespace() relies on tablespace-enabled objects in the
18096 * default tablespace to have a tablespace of "" (empty string) versus
18097 * non-tablespace-enabled objects to have a tablespace of NULL.
18098 * getTables() sets tbinfo->reltablespace to "" for the default
18099 * tablespace (not NULL).
18100 */
18101 if (RELKIND_HAS_TABLESPACE(tbinfo->relkind))
18102 tablespace = tbinfo->reltablespace;
18103
18104 if (RELKIND_HAS_TABLE_AM(tbinfo->relkind) ||
18106 tableam = tbinfo->amname;
18107
18108 ArchiveEntry(fout, tbinfo->dobj.catId, tbinfo->dobj.dumpId,
18109 ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
18110 .namespace = tbinfo->dobj.namespace->dobj.name,
18111 .tablespace = tablespace,
18112 .tableam = tableam,
18113 .relkind = tbinfo->relkind,
18114 .owner = tbinfo->rolname,
18115 .description = reltypename,
18116 .section = tbinfo->postponed_def ?
18118 .createStmt = q->data,
18119 .dropStmt = delq->data));
18120 }
18121
18122 /* Dump Table Comments */
18123 if (tbinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
18125
18126 /* Dump Table Security Labels */
18127 if (tbinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
18129
18130 /*
18131 * Dump comments for not-null constraints that aren't to be dumped
18132 * separately (those are processed by collectComments/dumpComment).
18133 */
18134 if (!fout->dopt->no_comments && dopt->dumpSchema &&
18135 fout->remoteVersion >= 180000)
18136 {
18138 PQExpBuffer tag = NULL;
18139
18140 for (j = 0; j < tbinfo->numatts; j++)
18141 {
18142 if (tbinfo->notnull_constrs[j] != NULL &&
18143 tbinfo->notnull_comment[j] != NULL)
18144 {
18145 if (comment == NULL)
18146 {
18148 tag = createPQExpBuffer();
18149 }
18150 else
18151 {
18153 resetPQExpBuffer(tag);
18154 }
18155
18156 appendPQExpBuffer(comment, "COMMENT ON CONSTRAINT %s ON %s IS ",
18157 fmtId(tbinfo->notnull_constrs[j]), qualrelname);
18158 appendStringLiteralAH(comment, tbinfo->notnull_comment[j], fout);
18160
18161 appendPQExpBuffer(tag, "CONSTRAINT %s ON %s",
18162 fmtId(tbinfo->notnull_constrs[j]), qrelname);
18163
18165 ARCHIVE_OPTS(.tag = tag->data,
18166 .namespace = tbinfo->dobj.namespace->dobj.name,
18167 .owner = tbinfo->rolname,
18168 .description = "COMMENT",
18169 .section = SECTION_NONE,
18170 .createStmt = comment->data,
18171 .deps = &(tbinfo->dobj.dumpId),
18172 .nDeps = 1));
18173 }
18174 }
18175
18177 destroyPQExpBuffer(tag);
18178 }
18179
18180 /* Dump comments on inlined table constraints */
18181 for (j = 0; j < tbinfo->ncheck; j++)
18182 {
18183 ConstraintInfo *constr = &(tbinfo->checkexprs[j]);
18184
18185 if (constr->separate || !constr->conislocal)
18186 continue;
18187
18188 if (constr->dobj.dump & DUMP_COMPONENT_COMMENT)
18190 }
18191
18194 destroyPQExpBuffer(extra);
18195 free(qrelname);
18197}
18198
18199/*
18200 * dumpTableAttach
18201 * write to fout the commands to attach a child partition
18202 *
18203 * Child partitions are always made by creating them separately
18204 * and then using ATTACH PARTITION, rather than using
18205 * CREATE TABLE ... PARTITION OF. This is important for preserving
18206 * any possible discrepancy in column layout, to allow assigning the
18207 * correct tablespace if different, and so that it's possible to restore
18208 * a partition without restoring its parent. (You'll get an error from
18209 * the ATTACH PARTITION command, but that can be ignored, or skipped
18210 * using "pg_restore -L" if you prefer.) The last point motivates
18211 * treating ATTACH PARTITION as a completely separate ArchiveEntry
18212 * rather than emitting it within the child partition's ArchiveEntry.
18213 */
18214static void
/*
 * NOTE(review): this excerpt has gaps (see jumps in the embedded line
 * numbers). The function-name line is missing here; the body reads the
 * parameters `fout` (Archive) and `attachinfo` (partition attach info) —
 * verify the exact signature against upstream pg_dump.c.
 */
18216{
18217 DumpOptions *dopt = fout->dopt;
18218 PQExpBuffer q;
18219 PGresult *res;
18220 char *partbound;
18221
18222 /* Do nothing if not dumping schema */
18223 if (!dopt->dumpSchema)
18224 return;
18225
18226 q = createPQExpBuffer();
18227
/*
 * Prepare (once per session, presumably guarded by the condition on the
 * missing line above) a server-side statement that fetches the partition
 * bound expression via pg_get_expr(c.relpartbound, c.oid).
 */
18229 {
18230 /* Set up query for partbound details */
18232 "PREPARE dumpTableAttach(pg_catalog.oid) AS\n");
18233
18235 "SELECT pg_get_expr(c.relpartbound, c.oid) "
18236 "FROM pg_class c "
18237 "WHERE c.oid = $1");
18238
18240
18242 }
18243
/* Run the prepared statement for this partition's pg_class OID. */
18245 "EXECUTE dumpTableAttach('%u')",
18246 attachinfo->partitionTbl->dobj.catId.oid);
18247
18249 partbound = PQgetvalue(res, 0, 0);
18250
18251 /* Perform ALTER TABLE on the parent */
18253 "ALTER TABLE ONLY %s ",
18254 fmtQualifiedDumpable(attachinfo->parentTbl));
18256 "ATTACH PARTITION %s %s;\n",
18257 fmtQualifiedDumpable(attachinfo->partitionTbl),
18258 partbound);
18259
18260 /*
18261 * There is no point in creating a drop query as the drop is done by table
18262 * drop. (If you think to change this, see also _printTocEntry().)
18263 * Although this object doesn't really have ownership as such, set the
18264 * owner field anyway to ensure that the command is run by the correct
18265 * role at restore time.
18266 */
18267 ArchiveEntry(fout, attachinfo->dobj.catId, attachinfo->dobj.dumpId,
18268 ARCHIVE_OPTS(.tag = attachinfo->dobj.name,
18269 .namespace = attachinfo->dobj.namespace->dobj.name,
18270 .owner = attachinfo->partitionTbl->rolname,
18271 .description = "TABLE ATTACH",
18272 .section = SECTION_PRE_DATA,
18273 .createStmt = q->data))
18275 PQclear(res);
18277}
18278
18279/*
18280 * dumpAttrDef --- dump an attribute's default-value declaration
18281 *
18282 * Emits "ALTER TABLE ... SET DEFAULT" for a column default that must be
18283 * restored separately from the table definition, plus the matching
18284 * DROP DEFAULT as the drop statement.
18281 */
18282static void
/*
 * NOTE(review): function-name line missing from this excerpt; body uses
 * `fout` (Archive) and `adinfo` (attribute-default info) — confirm the
 * signature against upstream pg_dump.c.
 */
18284{
18285 DumpOptions *dopt = fout->dopt;
18286 TableInfo *tbinfo = adinfo->adtable;
18287 int adnum = adinfo->adnum;
18288 PQExpBuffer q;
/* NOTE(review): the declaration of `delq` (drop-statement buffer) appears
 * on a line missing from this copy; it is used below. */
18290 char *qualrelname;
18291 char *tag;
18292 char *foreign;
18293
18294 /* Do nothing if not dumping schema */
18295 if (!dopt->dumpSchema)
18296 return;
18297
18298 /* Skip if not "separate"; it was dumped in the table's definition */
18299 if (!adinfo->separate)
18300 return;
18301
18302 q = createPQExpBuffer();
18304
18306
/* Foreign tables need the FOREIGN keyword in ALTER TABLE. */
18307 foreign = tbinfo->relkind == RELKIND_FOREIGN_TABLE ? "FOREIGN " : "";
18308
18310 "ALTER %sTABLE ONLY %s ALTER COLUMN %s SET DEFAULT %s;\n",
18311 foreign, qualrelname, fmtId(tbinfo->attnames[adnum - 1]),
18312 adinfo->adef_expr);
18313
18314 appendPQExpBuffer(delq, "ALTER %sTABLE %s ALTER COLUMN %s DROP DEFAULT;\n",
18316 fmtId(tbinfo->attnames[adnum - 1]));
18317
/* Tag is "<table> <column>" so the TOC entry is identifiable. */
18318 tag = psprintf("%s %s", tbinfo->dobj.name, tbinfo->attnames[adnum - 1]);
18319
18320 if (adinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
18321 ArchiveEntry(fout, adinfo->dobj.catId, adinfo->dobj.dumpId,
18322 ARCHIVE_OPTS(.tag = tag,
18323 .namespace = tbinfo->dobj.namespace->dobj.name,
18324 .owner = tbinfo->rolname,
18325 .description = "DEFAULT",
18326 .section = SECTION_PRE_DATA,
18327 .createStmt = q->data,
18328 .dropStmt = delq->data));
18329
18330 free(tag);
18334}
18335
18336/*
18337 * getAttrName: extract the correct name for an attribute
18338 *
18339 * The array tblInfo->attnames[] only provides names of user attributes;
18340 * if a system attribute number is supplied, we have to fake it.
18341 * We also do a little bit of bounds checking for safety's sake.
18342 */
18343static const char *
18344getAttrName(int attrnum, const TableInfo *tblInfo)
18345{
18346 if (attrnum > 0 && attrnum <= tblInfo->numatts)
18347 return tblInfo->attnames[attrnum - 1];
18348 switch (attrnum)
18349 {
18351 return "ctid";
18353 return "xmin";
18355 return "cmin";
18357 return "xmax";
18359 return "cmax";
18361 return "tableoid";
18362 }
18363 pg_fatal("invalid column number %d for table \"%s\"",
18364 attrnum, tblInfo->dobj.name);
18365 return NULL; /* keep compiler quiet */
18366}
18367
18368/*
18369 * dumpIndex
18370 * write out to fout a user-defined index
18371 */
18372static void
/*
 * NOTE(review): function-name line missing from this excerpt; body uses
 * `fout` (Archive) and `indxinfo` (index info) — confirm the signature
 * against upstream pg_dump.c. Several statement-head lines are also
 * missing below (gaps in the embedded line numbers).
 */
18374{
18375 DumpOptions *dopt = fout->dopt;
18376 TableInfo *tbinfo = indxinfo->indextable;
18377 bool is_constraint = (indxinfo->indexconstraint != 0);
18378 PQExpBuffer q;
18380 char *qindxname;
18381 char *qqindxname;
18382
18383 /* Do nothing if not dumping schema */
18384 if (!dopt->dumpSchema)
18385 return;
18386
18387 q = createPQExpBuffer();
18389
/* qindxname: quoted but unqualified; qqindxname presumably the
 * schema-qualified form (its assignment line is missing here). */
18390 qindxname = pg_strdup(fmtId(indxinfo->dobj.name));
18392
18393 /*
18394 * If there's an associated constraint, don't dump the index per se, but
18395 * do dump any comment for it. (This is safe because dependency ordering
18396 * will have ensured the constraint is emitted first.) Note that the
18397 * emitted comment has to be shown as depending on the constraint, not the
18398 * index, in such cases.
18399 */
18400 if (!is_constraint)
18401 {
18402 char *indstatcols = indxinfo->indstatcols;
18403 char *indstatvals = indxinfo->indstatvals;
18404 char **indstatcolsarray = NULL;
18405 char **indstatvalsarray = NULL;
18406 int nstatcols = 0;
18407 int nstatvals = 0;
18408
18409 if (dopt->binary_upgrade)
18411 indxinfo->dobj.catId.oid);
18412
18413 /* Plain secondary index */
18414 appendPQExpBuffer(q, "%s;\n", indxinfo->indexdef);
18415
18416 /*
18417 * Append ALTER TABLE commands as needed to set properties that we
18418 * only have ALTER TABLE syntax for. Keep this in sync with the
18419 * similar code in dumpConstraint!
18420 */
18421
18422 /* If the index is clustered, we need to record that. */
18423 if (indxinfo->indisclustered)
18424 {
18425 appendPQExpBuffer(q, "\nALTER TABLE %s CLUSTER",
18427 /* index name is not qualified in this syntax */
18428 appendPQExpBuffer(q, " ON %s;\n",
18429 qindxname);
18430 }
18431
18432 /*
18433 * If the index has any statistics on some of its columns, generate
18434 * the associated ALTER INDEX queries.
18435 */
18436 if (strlen(indstatcols) != 0 || strlen(indstatvals) != 0)
18437 {
18438 int j;
18439
/* Both arrays must parse and be pairwise matched (column, target). */
18440 if (!parsePGArray(indstatcols, &indstatcolsarray, &nstatcols))
18441 pg_fatal("could not parse index statistic columns");
18442 if (!parsePGArray(indstatvals, &indstatvalsarray, &nstatvals))
18443 pg_fatal("could not parse index statistic values");
18444 if (nstatcols != nstatvals)
18445 pg_fatal("mismatched number of columns and values for index statistics");
18446
18447 for (j = 0; j < nstatcols; j++)
18448 {
18449 appendPQExpBuffer(q, "ALTER INDEX %s ", qqindxname);
18450
18451 /*
18452 * Note that this is a column number, so no quotes should be
18453 * used.
18454 */
18455 appendPQExpBuffer(q, "ALTER COLUMN %s ",
18457 appendPQExpBuffer(q, "SET STATISTICS %s;\n",
18459 }
18460 }
18461
18462 /* Indexes can depend on extensions */
18464 "pg_catalog.pg_class",
18465 "INDEX", qqindxname);
18466
18467 /* If the index defines identity, we need to record that. */
18468 if (indxinfo->indisreplident)
18469 {
18470 appendPQExpBuffer(q, "\nALTER TABLE ONLY %s REPLICA IDENTITY USING",
18472 /* index name is not qualified in this syntax */
18473 appendPQExpBuffer(q, " INDEX %s;\n",
18474 qindxname);
18475 }
18476
18477 /*
18478 * If this index is a member of a partitioned index, the backend will
18479 * not allow us to drop it separately, so don't try. It will go away
18480 * automatically when we drop either the index's table or the
18481 * partitioned index. (If, in a selective restore with --clean, we
18482 * drop neither of those, then this index will not be dropped either.
18483 * But that's fine, and even if you think it's not, the backend won't
18484 * let us do differently.)
18485 */
18486 if (indxinfo->parentidx == 0)
18487 appendPQExpBuffer(delq, "DROP INDEX %s;\n", qqindxname);
18488
18489 if (indxinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
18490 ArchiveEntry(fout, indxinfo->dobj.catId, indxinfo->dobj.dumpId,
18491 ARCHIVE_OPTS(.tag = indxinfo->dobj.name,
18492 .namespace = tbinfo->dobj.namespace->dobj.name,
18493 .tablespace = indxinfo->tablespace,
18494 .owner = tbinfo->rolname,
18495 .description = "INDEX",
18496 .section = SECTION_POST_DATA,
18497 .createStmt = q->data,
18498 .dropStmt = delq->data));
18499
18502 }
18503
18504 /* Dump Index Comments */
18505 if (indxinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
18506 dumpComment(fout, "INDEX", qindxname,
18507 tbinfo->dobj.namespace->dobj.name,
18508 tbinfo->rolname,
18509 indxinfo->dobj.catId, 0,
/* Comment depends on the constraint, not the index, when one exists. */
18510 is_constraint ? indxinfo->indexconstraint :
18511 indxinfo->dobj.dumpId);
18512
18515 free(qindxname);
18517}
18518
18519/*
18520 * dumpIndexAttach
18521 * write out to fout a partitioned-index attachment clause
18522 */
18523static void
/*
 * NOTE(review): function-name line missing from this excerpt; body uses
 * `fout` (Archive) and `attachinfo` (index-attach info). The declaration
 * and creation of buffer `q` also fall on missing lines — confirm
 * against upstream pg_dump.c.
 */
18525{
18526 /* Do nothing if not dumping schema */
18527 if (!fout->dopt->dumpSchema)
18528 return;
18529
18530 if (attachinfo->partitionIdx->dobj.dump & DUMP_COMPONENT_DEFINITION)
18531 {
18533
/* Emit: ALTER INDEX <parent> ATTACH PARTITION <partition>; */
18534 appendPQExpBuffer(q, "ALTER INDEX %s ",
18535 fmtQualifiedDumpable(attachinfo->parentIdx));
18536 appendPQExpBuffer(q, "ATTACH PARTITION %s;\n",
18537 fmtQualifiedDumpable(attachinfo->partitionIdx));
18538
18539 /*
18540 * There is no need for a dropStmt since the drop is done implicitly
18541 * when we drop either the index's table or the partitioned index.
18542 * Moreover, since there's no ALTER INDEX DETACH PARTITION command,
18543 * there's no way to do it anyway. (If you think to change this,
18544 * consider also what to do with --if-exists.)
18545 *
18546 * Although this object doesn't really have ownership as such, set the
18547 * owner field anyway to ensure that the command is run by the correct
18548 * role at restore time.
18549 */
18550 ArchiveEntry(fout, attachinfo->dobj.catId, attachinfo->dobj.dumpId,
18551 ARCHIVE_OPTS(.tag = attachinfo->dobj.name,
18552 .namespace = attachinfo->dobj.namespace->dobj.name,
18553 .owner = attachinfo->parentIdx->indextable->rolname,
18554 .description = "INDEX ATTACH",
18555 .section = SECTION_POST_DATA,
18556 .createStmt = q->data));
18557
18559 }
18560}
18561
18562/*
18563 * dumpStatisticsExt
18564 * write out to fout an extended statistics object
18565 */
18566static void
/*
 * NOTE(review): function-name line missing from this excerpt; body uses
 * `fout` (Archive) and `statsextinfo` (extended-statistics info). The
 * declaration of `delq` also falls on a missing line — confirm against
 * upstream pg_dump.c.
 */
18568{
18569 DumpOptions *dopt = fout->dopt;
18570 PQExpBuffer q;
18572 PQExpBuffer query;
18573 char *qstatsextname;
18574 PGresult *res;
18575 char *stxdef;
18576
18577 /* Do nothing if not dumping schema */
18578 if (!dopt->dumpSchema)
18579 return;
18580
18581 q = createPQExpBuffer();
18583 query = createPQExpBuffer();
18584
18586
/* Let the server reconstruct the CREATE STATISTICS command text. */
18587 appendPQExpBuffer(query, "SELECT "
18588 "pg_catalog.pg_get_statisticsobjdef('%u'::pg_catalog.oid)",
18589 statsextinfo->dobj.catId.oid);
18590
18591 res = ExecuteSqlQueryForSingleRow(fout, query->data);
18592
18593 stxdef = PQgetvalue(res, 0, 0);
18594
18595 /* Result of pg_get_statisticsobjdef is complete except for semicolon */
18596 appendPQExpBuffer(q, "%s;\n", stxdef);
18597
18598 /*
18599 * We only issue an ALTER STATISTICS statement if the stxstattarget entry
18600 * for this statistics object is not the default value.
18601 */
18602 if (statsextinfo->stattarget >= 0)
18603 {
18604 appendPQExpBuffer(q, "ALTER STATISTICS %s ",
18606 appendPQExpBuffer(q, "SET STATISTICS %d;\n",
18607 statsextinfo->stattarget);
18608 }
18609
18610 appendPQExpBuffer(delq, "DROP STATISTICS %s;\n",
18612
18613 if (statsextinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
18614 ArchiveEntry(fout, statsextinfo->dobj.catId,
18615 statsextinfo->dobj.dumpId,
18616 ARCHIVE_OPTS(.tag = statsextinfo->dobj.name,
18617 .namespace = statsextinfo->dobj.namespace->dobj.name,
18618 .owner = statsextinfo->rolname,
18619 .description = "STATISTICS",
18620 .section = SECTION_POST_DATA,
18621 .createStmt = q->data,
18622 .dropStmt = delq->data));
18623
18624 /* Dump Statistics Comments */
18625 if (statsextinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
18626 dumpComment(fout, "STATISTICS", qstatsextname,
18627 statsextinfo->dobj.namespace->dobj.name,
18628 statsextinfo->rolname,
18629 statsextinfo->dobj.catId, 0,
18630 statsextinfo->dobj.dumpId);
18631
18632 PQclear(res);
18635 destroyPQExpBuffer(query);
18637}
18638
18639/*
18640 * dumpStatisticsExtStats
18641 * write out to fout the stats for an extended statistics object
18642 */
18643static void
18645{
18646 DumpOptions *dopt = fout->dopt;
18647 PQExpBuffer query;
18648 PGresult *res;
18649 int nstats;
18650
18651 /* Do nothing if not dumping statistics */
18652 if (!dopt->dumpStatistics)
18653 return;
18654
18656 {
18658
18659 /*---------
18660 * Set up query for details about extended statistics objects.
18661 *
18662 * The query depends on the backend version:
18663 * - In v19 and newer versions, query directly the pg_stats_ext*
18664 * catalogs.
18665 * - In v18 and older versions, ndistinct and dependencies have a
18666 * different format that needs translation.
18667 * - In v14 and older versions, inherited does not exist.
18668 * - In v11 and older versions, there is no pg_stats_ext, hence
18669 * the logic joins pg_statistic_ext and pg_namespace.
18670 *---------
18671 */
18672
18674 "PREPARE getExtStatsStats(pg_catalog.name, pg_catalog.name) AS\n"
18675 "SELECT ");
18676
18677 /*
18678 * Versions 15 and newer have inherited stats.
18679 *
18680 * Create this column in all versions because we need to order by it
18681 * later.
18682 */
18683 if (fout->remoteVersion >= 150000)
18684 appendPQExpBufferStr(pq, "e.inherited, ");
18685 else
18686 appendPQExpBufferStr(pq, "false AS inherited, ");
18687
18688 /*--------
18689 * The ndistinct and dependencies formats changed in v19, so
18690 * everything before that needs to be translated.
18691 *
18692 * The ndistinct translation converts this kind of data:
18693 * {"3, 4": 11, "3, 6": 11, "4, 6": 11, "3, 4, 6": 11}
18694 *
18695 * to this:
18696 * [ {"attributes": [3,4], "ndistinct": 11},
18697 * {"attributes": [3,6], "ndistinct": 11},
18698 * {"attributes": [4,6], "ndistinct": 11},
18699 * {"attributes": [3,4,6], "ndistinct": 11} ]
18700 *
18701 * The dependencies translation converts this kind of data:
18702 * {"3 => 4": 1.000000, "3 => 6": 1.000000,
18703 * "4 => 6": 1.000000, "3, 4 => 6": 1.000000,
18704 * "3, 6 => 4": 1.000000}
18705 *
18706 * to this:
18707 * [ {"attributes": [3], "dependency": 4, "degree": 1.000000},
18708 * {"attributes": [3], "dependency": 6, "degree": 1.000000},
18709 * {"attributes": [4], "dependency": 6, "degree": 1.000000},
18710 * {"attributes": [3,4], "dependency": 6, "degree": 1.000000},
18711 * {"attributes": [3,6], "dependency": 4, "degree": 1.000000} ]
18712 *--------
18713 */
18714 if (fout->remoteVersion >= 190000)
18715 appendPQExpBufferStr(pq, "e.n_distinct, e.dependencies, ");
18716 else
18718 "( "
18719 "SELECT json_agg( "
18720 " json_build_object( "
18722 " string_to_array(kv.key, ', ')::integer[], "
18724 " kv.value::bigint )) "
18725 "FROM json_each_text(e.n_distinct::text::json) AS kv"
18726 ") AS n_distinct, "
18727 "( "
18728 "SELECT json_agg( "
18729 " json_build_object( "
18731 " string_to_array( "
18732 " split_part(kv.key, ' => ', 1), "
18733 " ', ')::integer[], "
18735 " split_part(kv.key, ' => ', 2)::integer, "
18737 " kv.value::double precision )) "
18738 "FROM json_each_text(e.dependencies::text::json) AS kv "
18739 ") AS dependencies, ");
18740
18741 /* MCV was introduced v13 */
18742 if (fout->remoteVersion >= 130000)
18744 "e.most_common_vals, e.most_common_freqs, "
18745 "e.most_common_base_freqs, ");
18746 else
18748 "NULL AS most_common_vals, NULL AS most_common_freqs, "
18749 "NULL AS most_common_base_freqs, ");
18750
18751 /* Expressions were introduced in v14 */
18752 if (fout->remoteVersion >= 140000)
18753 {
18754 /*
18755 * There is no ordering column in pg_stats_ext_exprs. However, we
18756 * can rely on the unnesting of pg_statistic.ext_data.stxdexpr to
18757 * maintain the desired order of expression elements.
18758 */
18760 "( "
18761 "SELECT jsonb_pretty(jsonb_agg("
18762 "nullif(j.obj, '{}'::jsonb))) "
18763 "FROM pg_stats_ext_exprs AS ee "
18764 "CROSS JOIN LATERAL jsonb_strip_nulls("
18765 " jsonb_build_object( "
18766 " 'null_frac', ee.null_frac::text, "
18767 " 'avg_width', ee.avg_width::text, "
18768 " 'n_distinct', ee.n_distinct::text, "
18769 " 'most_common_vals', ee.most_common_vals::text, "
18770 " 'most_common_freqs', ee.most_common_freqs::text, "
18771 " 'histogram_bounds', ee.histogram_bounds::text, "
18772 " 'correlation', ee.correlation::text, "
18773 " 'most_common_elems', ee.most_common_elems::text, "
18774 " 'most_common_elem_freqs', ee.most_common_elem_freqs::text, "
18775 " 'elem_count_histogram', ee.elem_count_histogram::text");
18776
18777 /* These three have been added to pg_stats_ext_exprs in v19. */
18778 if (fout->remoteVersion >= 190000)
18780 ", "
18781 " 'range_length_histogram', ee.range_length_histogram::text, "
18782 " 'range_empty_frac', ee.range_empty_frac::text, "
18783 " 'range_bounds_histogram', ee.range_bounds_histogram::text");
18784
18786 " )) AS j(obj)"
18787 "WHERE ee.statistics_schemaname = $1 "
18788 "AND ee.statistics_name = $2 ");
18789 /* Inherited expressions introduced in v15 */
18790 if (fout->remoteVersion >= 150000)
18791 appendPQExpBufferStr(pq, "AND ee.inherited = e.inherited");
18792
18793 appendPQExpBufferStr(pq, ") AS exprs ");
18794 }
18795 else
18796 appendPQExpBufferStr(pq, "NULL AS exprs ");
18797
18798 /* pg_stats_ext introduced in v12 */
18799 if (fout->remoteVersion >= 120000)
18801 "FROM pg_catalog.pg_stats_ext AS e "
18802 "WHERE e.statistics_schemaname = $1 "
18803 "AND e.statistics_name = $2 ");
18804 else
18806 "FROM ( "
18807 "SELECT s.stxndistinct AS n_distinct, "
18808 " s.stxdependencies AS dependencies "
18809 "FROM pg_catalog.pg_statistic_ext AS s "
18810 "JOIN pg_catalog.pg_namespace AS n "
18811 "ON n.oid = s.stxnamespace "
18812 "WHERE n.nspname = $1 "
18813 "AND s.stxname = $2 "
18814 ") AS e ");
18815
18816 /* we always have an inherited column, but it may be a constant */
18817 appendPQExpBufferStr(pq, "ORDER BY inherited");
18818
18819 ExecuteSqlStatement(fout, pq->data);
18820
18822
18824 }
18825
18826 query = createPQExpBuffer();
18827
18828 appendPQExpBufferStr(query, "EXECUTE getExtStatsStats(");
18829 appendStringLiteralAH(query, statsextinfo->dobj.namespace->dobj.name, fout);
18830 appendPQExpBufferStr(query, "::pg_catalog.name, ");
18831 appendStringLiteralAH(query, statsextinfo->dobj.name, fout);
18832 appendPQExpBufferStr(query, "::pg_catalog.name)");
18833
18834 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
18835
18836 destroyPQExpBuffer(query);
18837
18838 nstats = PQntuples(res);
18839
18840 if (nstats > 0)
18841 {
18843
18844 int i_inherited = PQfnumber(res, "inherited");
18845 int i_ndistinct = PQfnumber(res, "n_distinct");
18846 int i_dependencies = PQfnumber(res, "dependencies");
18847 int i_mcv = PQfnumber(res, "most_common_vals");
18848 int i_mcf = PQfnumber(res, "most_common_freqs");
18849 int i_mcbf = PQfnumber(res, "most_common_base_freqs");
18850 int i_exprs = PQfnumber(res, "exprs");
18851
18852 for (int i = 0; i < nstats; i++)
18853 {
18854 TableInfo *tbinfo = statsextinfo->stattable;
18855
18856 if (PQgetisnull(res, i, i_inherited))
18857 pg_fatal("inherited cannot be NULL");
18858
18860 "SELECT * FROM pg_catalog.pg_restore_extended_stats(\n");
18861 appendPQExpBuffer(out, "\t'version', '%d'::integer,\n",
18863
18864 /* Relation information */
18865 appendPQExpBufferStr(out, "\t'schemaname', ");
18866 appendStringLiteralAH(out, tbinfo->dobj.namespace->dobj.name, fout);
18867 appendPQExpBufferStr(out, ",\n\t'relname', ");
18868 appendStringLiteralAH(out, tbinfo->dobj.name, fout);
18869
18870 /* Extended statistics information */
18871 appendPQExpBufferStr(out, ",\n\t'statistics_schemaname', ");
18872 appendStringLiteralAH(out, statsextinfo->dobj.namespace->dobj.name, fout);
18873 appendPQExpBufferStr(out, ",\n\t'statistics_name', ");
18874 appendStringLiteralAH(out, statsextinfo->dobj.name, fout);
18875 appendNamedArgument(out, fout, "inherited", "boolean",
18876 PQgetvalue(res, i, i_inherited));
18877
18878 if (!PQgetisnull(res, i, i_ndistinct))
18879 appendNamedArgument(out, fout, "n_distinct", "pg_ndistinct",
18880 PQgetvalue(res, i, i_ndistinct));
18881
18882 if (!PQgetisnull(res, i, i_dependencies))
18883 appendNamedArgument(out, fout, "dependencies", "pg_dependencies",
18884 PQgetvalue(res, i, i_dependencies));
18885
18886 if (!PQgetisnull(res, i, i_mcv))
18887 appendNamedArgument(out, fout, "most_common_vals", "text[]",
18888 PQgetvalue(res, i, i_mcv));
18889
18890 if (!PQgetisnull(res, i, i_mcf))
18891 appendNamedArgument(out, fout, "most_common_freqs", "double precision[]",
18892 PQgetvalue(res, i, i_mcf));
18893
18894 if (!PQgetisnull(res, i, i_mcbf))
18895 appendNamedArgument(out, fout, "most_common_base_freqs", "double precision[]",
18896 PQgetvalue(res, i, i_mcbf));
18897
18898 if (!PQgetisnull(res, i, i_exprs))
18899 appendNamedArgument(out, fout, "exprs", "jsonb",
18900 PQgetvalue(res, i, i_exprs));
18901
18902 appendPQExpBufferStr(out, "\n);\n");
18903 }
18904
18906 ARCHIVE_OPTS(.tag = statsextinfo->dobj.name,
18907 .namespace = statsextinfo->dobj.namespace->dobj.name,
18908 .owner = statsextinfo->rolname,
18909 .description = "EXTENDED STATISTICS DATA",
18910 .section = SECTION_POST_DATA,
18911 .createStmt = out->data,
18912 .deps = &statsextinfo->dobj.dumpId,
18913 .nDeps = 1));
18914 destroyPQExpBuffer(out);
18915 }
18916 PQclear(res);
18917}
18918
18919/*
18920 * dumpConstraint
18921 * write out to fout a user-defined constraint
18922 */
18923static void
18925{
18926 DumpOptions *dopt = fout->dopt;
18927 TableInfo *tbinfo = coninfo->contable;
18928 PQExpBuffer q;
18930 char *tag = NULL;
18931 char *foreign;
18932
18933 /* Do nothing if not dumping schema */
18934 if (!dopt->dumpSchema)
18935 return;
18936
18937 q = createPQExpBuffer();
18939
18940 foreign = tbinfo &&
18941 tbinfo->relkind == RELKIND_FOREIGN_TABLE ? "FOREIGN " : "";
18942
18943 if (coninfo->contype == 'p' ||
18944 coninfo->contype == 'u' ||
18945 coninfo->contype == 'x')
18946 {
18947 /* Index-related constraint */
18949 int k;
18950
18952
18953 if (indxinfo == NULL)
18954 pg_fatal("missing index for constraint \"%s\"",
18955 coninfo->dobj.name);
18956
18957 if (dopt->binary_upgrade)
18959 indxinfo->dobj.catId.oid);
18960
18961 appendPQExpBuffer(q, "ALTER %sTABLE ONLY %s\n", foreign,
18963 appendPQExpBuffer(q, " ADD CONSTRAINT %s ",
18964 fmtId(coninfo->dobj.name));
18965
18966 if (coninfo->condef)
18967 {
18968 /* pg_get_constraintdef should have provided everything */
18969 appendPQExpBuffer(q, "%s;\n", coninfo->condef);
18970 }
18971 else
18972 {
18974 coninfo->contype == 'p' ? "PRIMARY KEY" : "UNIQUE");
18975
18976 /*
18977 * PRIMARY KEY constraints should not be using NULLS NOT DISTINCT
18978 * indexes. Being able to create this was fixed, but we need to
18979 * make the index distinct in order to be able to restore the
18980 * dump.
18981 */
18982 if (indxinfo->indnullsnotdistinct && coninfo->contype != 'p')
18983 appendPQExpBufferStr(q, " NULLS NOT DISTINCT");
18984 appendPQExpBufferStr(q, " (");
18985 for (k = 0; k < indxinfo->indnkeyattrs; k++)
18986 {
18987 int indkey = (int) indxinfo->indkeys[k];
18988 const char *attname;
18989
18991 break;
18993
18994 appendPQExpBuffer(q, "%s%s",
18995 (k == 0) ? "" : ", ",
18996 fmtId(attname));
18997 }
18998 if (coninfo->conperiod)
18999 appendPQExpBufferStr(q, " WITHOUT OVERLAPS");
19000
19001 if (indxinfo->indnkeyattrs < indxinfo->indnattrs)
19002 appendPQExpBufferStr(q, ") INCLUDE (");
19003
19004 for (k = indxinfo->indnkeyattrs; k < indxinfo->indnattrs; k++)
19005 {
19006 int indkey = (int) indxinfo->indkeys[k];
19007 const char *attname;
19008
19010 break;
19012
19013 appendPQExpBuffer(q, "%s%s",
19014 (k == indxinfo->indnkeyattrs) ? "" : ", ",
19015 fmtId(attname));
19016 }
19017
19018 appendPQExpBufferChar(q, ')');
19019
19020 if (nonemptyReloptions(indxinfo->indreloptions))
19021 {
19022 appendPQExpBufferStr(q, " WITH (");
19023 appendReloptionsArrayAH(q, indxinfo->indreloptions, "", fout);
19024 appendPQExpBufferChar(q, ')');
19025 }
19026
19027 if (coninfo->condeferrable)
19028 {
19029 appendPQExpBufferStr(q, " DEFERRABLE");
19030 if (coninfo->condeferred)
19031 appendPQExpBufferStr(q, " INITIALLY DEFERRED");
19032 }
19033
19034 appendPQExpBufferStr(q, ";\n");
19035 }
19036
19037 /*
19038 * Append ALTER TABLE commands as needed to set properties that we
19039 * only have ALTER TABLE syntax for. Keep this in sync with the
19040 * similar code in dumpIndex!
19041 */
19042
19043 /* If the index is clustered, we need to record that. */
19044 if (indxinfo->indisclustered)
19045 {
19046 appendPQExpBuffer(q, "\nALTER TABLE %s CLUSTER",
19048 /* index name is not qualified in this syntax */
19049 appendPQExpBuffer(q, " ON %s;\n",
19050 fmtId(indxinfo->dobj.name));
19051 }
19052
19053 /* If the index defines identity, we need to record that. */
19054 if (indxinfo->indisreplident)
19055 {
19056 appendPQExpBuffer(q, "\nALTER TABLE ONLY %s REPLICA IDENTITY USING",
19058 /* index name is not qualified in this syntax */
19059 appendPQExpBuffer(q, " INDEX %s;\n",
19060 fmtId(indxinfo->dobj.name));
19061 }
19062
19063 /* Indexes can depend on extensions */
19065 "pg_catalog.pg_class", "INDEX",
19067
19068 appendPQExpBuffer(delq, "ALTER %sTABLE ONLY %s ", foreign,
19070 appendPQExpBuffer(delq, "DROP CONSTRAINT %s;\n",
19071 fmtId(coninfo->dobj.name));
19072
19073 tag = psprintf("%s %s", tbinfo->dobj.name, coninfo->dobj.name);
19074
19075 if (coninfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
19076 ArchiveEntry(fout, coninfo->dobj.catId, coninfo->dobj.dumpId,
19077 ARCHIVE_OPTS(.tag = tag,
19078 .namespace = tbinfo->dobj.namespace->dobj.name,
19079 .tablespace = indxinfo->tablespace,
19080 .owner = tbinfo->rolname,
19081 .description = "CONSTRAINT",
19082 .section = SECTION_POST_DATA,
19083 .createStmt = q->data,
19084 .dropStmt = delq->data));
19085 }
19086 else if (coninfo->contype == 'f')
19087 {
19088 char *only;
19089
19090 /*
19091 * Foreign keys on partitioned tables are always declared as
19092 * inheriting to partitions; for all other cases, emit them as
19093 * applying ONLY directly to the named table, because that's how they
19094 * work for regular inherited tables.
19095 */
19096 only = tbinfo->relkind == RELKIND_PARTITIONED_TABLE ? "" : "ONLY ";
19097
19098 /*
19099 * XXX Potentially wrap in a 'SET CONSTRAINTS OFF' block so that the
19100 * current table data is not processed
19101 */
19102 appendPQExpBuffer(q, "ALTER %sTABLE %s%s\n", foreign,
19104 appendPQExpBuffer(q, " ADD CONSTRAINT %s %s;\n",
19105 fmtId(coninfo->dobj.name),
19106 coninfo->condef);
19107
19108 appendPQExpBuffer(delq, "ALTER %sTABLE %s%s ", foreign,
19110 appendPQExpBuffer(delq, "DROP CONSTRAINT %s;\n",
19111 fmtId(coninfo->dobj.name));
19112
19113 tag = psprintf("%s %s", tbinfo->dobj.name, coninfo->dobj.name);
19114
19115 if (coninfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
19116 ArchiveEntry(fout, coninfo->dobj.catId, coninfo->dobj.dumpId,
19117 ARCHIVE_OPTS(.tag = tag,
19118 .namespace = tbinfo->dobj.namespace->dobj.name,
19119 .owner = tbinfo->rolname,
19120 .description = "FK CONSTRAINT",
19121 .section = SECTION_POST_DATA,
19122 .createStmt = q->data,
19123 .dropStmt = delq->data));
19124 }
19125 else if ((coninfo->contype == 'c' || coninfo->contype == 'n') && tbinfo)
19126 {
19127 /* CHECK or invalid not-null constraint on a table */
19128
19129 /* Ignore if not to be dumped separately, or if it was inherited */
19130 if (coninfo->separate && coninfo->conislocal)
19131 {
19132 const char *keyword;
19133
19134 if (coninfo->contype == 'c')
19135 keyword = "CHECK CONSTRAINT";
19136 else
19137 keyword = "CONSTRAINT";
19138
19139 /* not ONLY since we want it to propagate to children */
19140 appendPQExpBuffer(q, "ALTER %sTABLE %s\n", foreign,
19142 appendPQExpBuffer(q, " ADD CONSTRAINT %s %s;\n",
19143 fmtId(coninfo->dobj.name),
19144 coninfo->condef);
19145
19146 appendPQExpBuffer(delq, "ALTER %sTABLE %s ", foreign,
19148 appendPQExpBuffer(delq, "DROP CONSTRAINT %s;\n",
19149 fmtId(coninfo->dobj.name));
19150
19151 tag = psprintf("%s %s", tbinfo->dobj.name, coninfo->dobj.name);
19152
19153 if (coninfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
19154 ArchiveEntry(fout, coninfo->dobj.catId, coninfo->dobj.dumpId,
19155 ARCHIVE_OPTS(.tag = tag,
19156 .namespace = tbinfo->dobj.namespace->dobj.name,
19157 .owner = tbinfo->rolname,
19158 .description = keyword,
19159 .section = SECTION_POST_DATA,
19160 .createStmt = q->data,
19161 .dropStmt = delq->data));
19162 }
19163 }
19164 else if (tbinfo == NULL)
19165 {
19166 /* CHECK, NOT NULL constraint on a domain */
19167 TypeInfo *tyinfo = coninfo->condomain;
19168
19169 Assert(coninfo->contype == 'c' || coninfo->contype == 'n');
19170
19171 /* Ignore if not to be dumped separately */
19172 if (coninfo->separate)
19173 {
19174 const char *keyword;
19175
19176 if (coninfo->contype == 'c')
19177 keyword = "CHECK CONSTRAINT";
19178 else
19179 keyword = "CONSTRAINT";
19180
19181 appendPQExpBuffer(q, "ALTER DOMAIN %s\n",
19183 appendPQExpBuffer(q, " ADD CONSTRAINT %s %s;\n",
19184 fmtId(coninfo->dobj.name),
19185 coninfo->condef);
19186
19187 appendPQExpBuffer(delq, "ALTER DOMAIN %s ",
19189 appendPQExpBuffer(delq, "DROP CONSTRAINT %s;\n",
19190 fmtId(coninfo->dobj.name));
19191
19192 tag = psprintf("%s %s", tyinfo->dobj.name, coninfo->dobj.name);
19193
19194 if (coninfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
19195 ArchiveEntry(fout, coninfo->dobj.catId, coninfo->dobj.dumpId,
19196 ARCHIVE_OPTS(.tag = tag,
19197 .namespace = tyinfo->dobj.namespace->dobj.name,
19198 .owner = tyinfo->rolname,
19199 .description = keyword,
19200 .section = SECTION_POST_DATA,
19201 .createStmt = q->data,
19202 .dropStmt = delq->data));
19203
19204 if (coninfo->dobj.dump & DUMP_COMPONENT_COMMENT)
19205 {
19207 char *qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
19208
19209 appendPQExpBuffer(conprefix, "CONSTRAINT %s ON DOMAIN",
19210 fmtId(coninfo->dobj.name));
19211
19213 tyinfo->dobj.namespace->dobj.name,
19214 tyinfo->rolname,
19215 coninfo->dobj.catId, 0, coninfo->dobj.dumpId);
19217 free(qtypname);
19218 }
19219 }
19220 }
19221 else
19222 {
19223 pg_fatal("unrecognized constraint type: %c",
19224 coninfo->contype);
19225 }
19226
19227 /* Dump Constraint Comments --- only works for table constraints */
19228 if (tbinfo && coninfo->separate &&
19229 coninfo->dobj.dump & DUMP_COMPONENT_COMMENT)
19231
19232 free(tag);
19235}
19236
19237/*
19238 * dumpTableConstraintComment --- dump a constraint's comment if any
19239 *
19240 * This is split out because we need the function in two different places
19241 * depending on whether the constraint is dumped as part of CREATE TABLE
19242 * or as a separate ALTER command.
19243 */
19244static void
19246{
19247 TableInfo *tbinfo = coninfo->contable;
19249 char *qtabname;
19250
19251 qtabname = pg_strdup(fmtId(tbinfo->dobj.name));
19252
19253 appendPQExpBuffer(conprefix, "CONSTRAINT %s ON",
19254 fmtId(coninfo->dobj.name));
19255
19256 if (coninfo->dobj.dump & DUMP_COMPONENT_COMMENT)
19258 tbinfo->dobj.namespace->dobj.name,
19259 tbinfo->rolname,
19260 coninfo->dobj.catId, 0,
19261 coninfo->separate ? coninfo->dobj.dumpId : tbinfo->dobj.dumpId);
19262
19264 free(qtabname);
19265}
19266
/*
 * Translate a sequence data type name (e.g. "bigint") to its SeqType enum
 * value by scanning the SeqTypeNames table; an unrecognized name is a
 * fatal error.
 */
static inline SeqType
{
	for (int i = 0; i < lengthof(SeqTypeNames); i++)
	{
		if (strcmp(SeqTypeNames[i], name) == 0)
			return (SeqType) i;
	}

	pg_fatal("unrecognized sequence type: %s", name);
	return (SeqType) 0;			/* keep compiler quiet */
}
19279
19280/*
19281 * bsearch() comparator for SequenceItem
19282 */
19283static int
19284SequenceItemCmp(const void *p1, const void *p2)
19285{
19286 SequenceItem v1 = *((const SequenceItem *) p1);
19287 SequenceItem v2 = *((const SequenceItem *) p2);
19288
19289 return pg_cmp_u32(v1.oid, v2.oid);
19290}
19291
19292/*
19293 * collectSequences
19294 *
19295 * Construct a table of sequence information. This table is sorted by OID for
19296 * speed in lookup.
19297 */
19298static void
19300{
19301 PGresult *res;
19302 const char *query;
19303
19304 /*
19305 * Before Postgres 10, sequence metadata is in the sequence itself. With
19306 * some extra effort, we might be able to use the sorted table for those
19307 * versions, but for now it seems unlikely to be worth it.
19308 *
19309 * Since version 18, we can gather the sequence data in this query with
19310 * pg_get_sequence_data(), but we only do so for non-schema-only dumps.
19311 */
19312 if (fout->remoteVersion < 100000)
19313 return;
19314 else if (fout->remoteVersion < 180000 ||
19316 query = "SELECT seqrelid, format_type(seqtypid, NULL), "
19317 "seqstart, seqincrement, "
19318 "seqmax, seqmin, "
19319 "seqcache, seqcycle, "
19320 "NULL, 'f' "
19321 "FROM pg_catalog.pg_sequence "
19322 "ORDER BY seqrelid";
19323 else
19324 query = "SELECT seqrelid, format_type(seqtypid, NULL), "
19325 "seqstart, seqincrement, "
19326 "seqmax, seqmin, "
19327 "seqcache, seqcycle, "
19328 "last_value, is_called "
19329 "FROM pg_catalog.pg_sequence, "
19330 "pg_get_sequence_data(seqrelid) "
19331 "ORDER BY seqrelid;";
19332
19333 res = ExecuteSqlQuery(fout, query, PGRES_TUPLES_OK);
19334
19335 nsequences = PQntuples(res);
19337
19338 for (int i = 0; i < nsequences; i++)
19339 {
19340 sequences[i].oid = atooid(PQgetvalue(res, i, 0));
19342 sequences[i].startv = strtoi64(PQgetvalue(res, i, 2), NULL, 10);
19343 sequences[i].incby = strtoi64(PQgetvalue(res, i, 3), NULL, 10);
19344 sequences[i].maxv = strtoi64(PQgetvalue(res, i, 4), NULL, 10);
19345 sequences[i].minv = strtoi64(PQgetvalue(res, i, 5), NULL, 10);
19346 sequences[i].cache = strtoi64(PQgetvalue(res, i, 6), NULL, 10);
19347 sequences[i].cycled = (strcmp(PQgetvalue(res, i, 7), "t") == 0);
19348 sequences[i].last_value = strtoi64(PQgetvalue(res, i, 8), NULL, 10);
19349 sequences[i].is_called = (strcmp(PQgetvalue(res, i, 9), "t") == 0);
19350 sequences[i].null_seqtuple = (PQgetisnull(res, i, 8) || PQgetisnull(res, i, 9));
19351 }
19352
19353 PQclear(res);
19354}
19355
19356/*
19357 * dumpSequence
19358 * write the declaration (not data) of one user-defined sequence
19359 */
19360static void
19362{
19363 DumpOptions *dopt = fout->dopt;
19365 bool is_ascending;
19370 char *qseqname;
19371 TableInfo *owning_tab = NULL;
19372
19373 qseqname = pg_strdup(fmtId(tbinfo->dobj.name));
19374
19375 /*
19376 * For versions >= 10, the sequence information is gathered in a sorted
19377 * table before any calls to dumpSequence(). See collectSequences() for
19378 * more information.
19379 */
19380 if (fout->remoteVersion >= 100000)
19381 {
19382 SequenceItem key = {0};
19383
19385
19386 key.oid = tbinfo->dobj.catId.oid;
19388 sizeof(SequenceItem), SequenceItemCmp);
19389 }
19390 else
19391 {
19392 PGresult *res;
19393
19394 /*
19395 * Before PostgreSQL 10, sequence metadata is in the sequence itself.
19396 *
19397 * Note: it might seem that 'bigint' potentially needs to be
19398 * schema-qualified, but actually that's a keyword.
19399 */
19400 appendPQExpBuffer(query,
19401 "SELECT 'bigint' AS sequence_type, "
19402 "start_value, increment_by, max_value, min_value, "
19403 "cache_value, is_cycled FROM %s",
19405
19406 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
19407
19408 if (PQntuples(res) != 1)
19409 pg_fatal(ngettext("query to get data of sequence \"%s\" returned %d row (expected 1)",
19410 "query to get data of sequence \"%s\" returned %d rows (expected 1)",
19411 PQntuples(res)),
19412 tbinfo->dobj.name, PQntuples(res));
19413
19415 seq->seqtype = parse_sequence_type(PQgetvalue(res, 0, 0));
19416 seq->startv = strtoi64(PQgetvalue(res, 0, 1), NULL, 10);
19417 seq->incby = strtoi64(PQgetvalue(res, 0, 2), NULL, 10);
19418 seq->maxv = strtoi64(PQgetvalue(res, 0, 3), NULL, 10);
19419 seq->minv = strtoi64(PQgetvalue(res, 0, 4), NULL, 10);
19420 seq->cache = strtoi64(PQgetvalue(res, 0, 5), NULL, 10);
19421 seq->cycled = (strcmp(PQgetvalue(res, 0, 6), "t") == 0);
19422
19423 PQclear(res);
19424 }
19425
19426 /* Calculate default limits for a sequence of this type */
19427 is_ascending = (seq->incby >= 0);
19428 if (seq->seqtype == SEQTYPE_SMALLINT)
19429 {
19432 }
19433 else if (seq->seqtype == SEQTYPE_INTEGER)
19434 {
19437 }
19438 else if (seq->seqtype == SEQTYPE_BIGINT)
19439 {
19442 }
19443 else
19444 {
19445 pg_fatal("unrecognized sequence type: %d", seq->seqtype);
19446 default_minv = default_maxv = 0; /* keep compiler quiet */
19447 }
19448
19449 /*
19450 * Identity sequences are not to be dropped separately.
19451 */
19452 if (!tbinfo->is_identity_sequence)
19453 {
19454 appendPQExpBuffer(delqry, "DROP SEQUENCE %s;\n",
19456 }
19457
19458 resetPQExpBuffer(query);
19459
19460 if (dopt->binary_upgrade)
19461 {
19463 tbinfo->dobj.catId.oid);
19464
19465 /*
19466 * In older PG versions a sequence will have a pg_type entry, but v14
19467 * and up don't use that, so don't attempt to preserve the type OID.
19468 */
19469 }
19470
19471 if (tbinfo->is_identity_sequence)
19472 {
19473 owning_tab = findTableByOid(tbinfo->owning_tab);
19474
19475 appendPQExpBuffer(query,
19476 "ALTER TABLE %s ",
19477 fmtQualifiedDumpable(owning_tab));
19478 appendPQExpBuffer(query,
19479 "ALTER COLUMN %s ADD GENERATED ",
19480 fmtId(owning_tab->attnames[tbinfo->owning_col - 1]));
19481 if (owning_tab->attidentity[tbinfo->owning_col - 1] == ATTRIBUTE_IDENTITY_ALWAYS)
19482 appendPQExpBufferStr(query, "ALWAYS");
19483 else if (owning_tab->attidentity[tbinfo->owning_col - 1] == ATTRIBUTE_IDENTITY_BY_DEFAULT)
19484 appendPQExpBufferStr(query, "BY DEFAULT");
19485 appendPQExpBuffer(query, " AS IDENTITY (\n SEQUENCE NAME %s\n",
19487
19488 /*
19489 * Emit persistence option only if it's different from the owning
19490 * table's. This avoids using this new syntax unnecessarily.
19491 */
19492 if (tbinfo->relpersistence != owning_tab->relpersistence)
19493 appendPQExpBuffer(query, " %s\n",
19494 tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED ?
19495 "UNLOGGED" : "LOGGED");
19496 }
19497 else
19498 {
19499 appendPQExpBuffer(query,
19500 "CREATE %sSEQUENCE %s\n",
19501 tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED ?
19502 "UNLOGGED " : "",
19504
19505 if (seq->seqtype != SEQTYPE_BIGINT)
19506 appendPQExpBuffer(query, " AS %s\n", SeqTypeNames[seq->seqtype]);
19507 }
19508
19509 appendPQExpBuffer(query, " START WITH " INT64_FORMAT "\n", seq->startv);
19510
19511 appendPQExpBuffer(query, " INCREMENT BY " INT64_FORMAT "\n", seq->incby);
19512
19513 if (seq->minv != default_minv)
19514 appendPQExpBuffer(query, " MINVALUE " INT64_FORMAT "\n", seq->minv);
19515 else
19516 appendPQExpBufferStr(query, " NO MINVALUE\n");
19517
19518 if (seq->maxv != default_maxv)
19519 appendPQExpBuffer(query, " MAXVALUE " INT64_FORMAT "\n", seq->maxv);
19520 else
19521 appendPQExpBufferStr(query, " NO MAXVALUE\n");
19522
19523 appendPQExpBuffer(query,
19524 " CACHE " INT64_FORMAT "%s",
19525 seq->cache, (seq->cycled ? "\n CYCLE" : ""));
19526
19527 if (tbinfo->is_identity_sequence)
19528 appendPQExpBufferStr(query, "\n);\n");
19529 else
19530 appendPQExpBufferStr(query, ";\n");
19531
19532 /* binary_upgrade: no need to clear TOAST table oid */
19533
19534 if (dopt->binary_upgrade)
19536 "SEQUENCE", qseqname,
19537 tbinfo->dobj.namespace->dobj.name);
19538
19539 if (tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
19540 ArchiveEntry(fout, tbinfo->dobj.catId, tbinfo->dobj.dumpId,
19541 ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
19542 .namespace = tbinfo->dobj.namespace->dobj.name,
19543 .owner = tbinfo->rolname,
19544 .description = "SEQUENCE",
19545 .section = SECTION_PRE_DATA,
19546 .createStmt = query->data,
19547 .dropStmt = delqry->data));
19548
19549 /*
19550 * If the sequence is owned by a table column, emit the ALTER for it as a
19551 * separate TOC entry immediately following the sequence's own entry. It's
19552 * OK to do this rather than using full sorting logic, because the
19553 * dependency that tells us it's owned will have forced the table to be
19554 * created first. We can't just include the ALTER in the TOC entry
19555 * because it will fail if we haven't reassigned the sequence owner to
19556 * match the table's owner.
19557 *
19558 * We need not schema-qualify the table reference because both sequence
19559 * and table must be in the same schema.
19560 */
19561 if (OidIsValid(tbinfo->owning_tab) && !tbinfo->is_identity_sequence)
19562 {
19563 owning_tab = findTableByOid(tbinfo->owning_tab);
19564
19565 if (owning_tab == NULL)
19566 pg_fatal("failed sanity check, parent table with OID %u of sequence with OID %u not found",
19567 tbinfo->owning_tab, tbinfo->dobj.catId.oid);
19568
19569 if (owning_tab->dobj.dump & DUMP_COMPONENT_DEFINITION)
19570 {
19571 resetPQExpBuffer(query);
19572 appendPQExpBuffer(query, "ALTER SEQUENCE %s",
19574 appendPQExpBuffer(query, " OWNED BY %s",
19575 fmtQualifiedDumpable(owning_tab));
19576 appendPQExpBuffer(query, ".%s;\n",
19577 fmtId(owning_tab->attnames[tbinfo->owning_col - 1]));
19578
19579 if (tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
19581 ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
19582 .namespace = tbinfo->dobj.namespace->dobj.name,
19583 .owner = tbinfo->rolname,
19584 .description = "SEQUENCE OWNED BY",
19585 .section = SECTION_PRE_DATA,
19586 .createStmt = query->data,
19587 .deps = &(tbinfo->dobj.dumpId),
19588 .nDeps = 1));
19589 }
19590 }
19591
19592 /* Dump Sequence Comments and Security Labels */
19593 if (tbinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
19594 dumpComment(fout, "SEQUENCE", qseqname,
19595 tbinfo->dobj.namespace->dobj.name, tbinfo->rolname,
19596 tbinfo->dobj.catId, 0, tbinfo->dobj.dumpId);
19597
19598 if (tbinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
19599 dumpSecLabel(fout, "SEQUENCE", qseqname,
19600 tbinfo->dobj.namespace->dobj.name, tbinfo->rolname,
19601 tbinfo->dobj.catId, 0, tbinfo->dobj.dumpId);
19602
19603 if (fout->remoteVersion < 100000)
19604 pg_free(seq);
19605 destroyPQExpBuffer(query);
19607 free(qseqname);
19608}
19609
19610/*
19611 * dumpSequenceData
19612 * write the data of one user-defined sequence
19613 */
19614static void
19616{
19617 TableInfo *tbinfo = tdinfo->tdtable;
19618 int64 last;
19619 bool called;
19620 PQExpBuffer query;
19621
19622 /* needn't bother if not dumping sequence data */
19623 if (!fout->dopt->dumpData && !fout->dopt->sequence_data)
19624 return;
19625
19626 query = createPQExpBuffer();
19627
19628 /*
19629 * For versions >= 18, the sequence information is gathered in the sorted
19630 * array before any calls to dumpSequenceData(). See collectSequences()
19631 * for more information.
19632 *
19633 * For older versions, we have to query the sequence relations
19634 * individually.
19635 */
19636 if (fout->remoteVersion < 180000)
19637 {
19638 PGresult *res;
19639
19640 appendPQExpBuffer(query,
19641 "SELECT last_value, is_called FROM %s",
19643
19644 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
19645
19646 if (PQntuples(res) != 1)
19647 pg_fatal(ngettext("query to get data of sequence \"%s\" returned %d row (expected 1)",
19648 "query to get data of sequence \"%s\" returned %d rows (expected 1)",
19649 PQntuples(res)),
19650 tbinfo->dobj.name, PQntuples(res));
19651
19652 last = strtoi64(PQgetvalue(res, 0, 0), NULL, 10);
19653 called = (strcmp(PQgetvalue(res, 0, 1), "t") == 0);
19654
19655 PQclear(res);
19656 }
19657 else
19658 {
19659 SequenceItem key = {0};
19660 SequenceItem *entry;
19661
19663 Assert(tbinfo->dobj.catId.oid);
19664
19665 key.oid = tbinfo->dobj.catId.oid;
19666 entry = bsearch(&key, sequences, nsequences,
19667 sizeof(SequenceItem), SequenceItemCmp);
19668
19669 if (entry->null_seqtuple)
19670 pg_fatal("failed to get data for sequence \"%s\"; user may lack "
19671 "SELECT privilege on the sequence or the sequence may "
19672 "have been concurrently dropped",
19673 tbinfo->dobj.name);
19674
19675 last = entry->last_value;
19676 called = entry->is_called;
19677 }
19678
19679 resetPQExpBuffer(query);
19680 appendPQExpBufferStr(query, "SELECT pg_catalog.setval(");
19682 appendPQExpBuffer(query, ", " INT64_FORMAT ", %s);\n",
19683 last, (called ? "true" : "false"));
19684
19685 if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
19687 ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
19688 .namespace = tbinfo->dobj.namespace->dobj.name,
19689 .owner = tbinfo->rolname,
19690 .description = "SEQUENCE SET",
19691 .section = SECTION_DATA,
19692 .createStmt = query->data,
19693 .deps = &(tbinfo->dobj.dumpId),
19694 .nDeps = 1));
19695
19696 destroyPQExpBuffer(query);
19697}
19698
19699/*
19700 * dumpTrigger
19701 * write the declaration of one user-defined table trigger
19702 */
19703static void
19705{
19706 DumpOptions *dopt = fout->dopt;
19707 TableInfo *tbinfo = tginfo->tgtable;
19708 PQExpBuffer query;
19712 char *qtabname;
19713 char *tag;
19714
19715 /* Do nothing if not dumping schema */
19716 if (!dopt->dumpSchema)
19717 return;
19718
19719 query = createPQExpBuffer();
19723
19724 qtabname = pg_strdup(fmtId(tbinfo->dobj.name));
19725
19726 appendPQExpBuffer(trigidentity, "%s ", fmtId(tginfo->dobj.name));
19728
19729 appendPQExpBuffer(query, "%s;\n", tginfo->tgdef);
19730 appendPQExpBuffer(delqry, "DROP TRIGGER %s;\n", trigidentity->data);
19731
19732 /* Triggers can depend on extensions */
19734 "pg_catalog.pg_trigger", "TRIGGER",
19736
19737 if (tginfo->tgispartition)
19738 {
19739 Assert(tbinfo->ispartition);
19740
19741 /*
19742 * Partition triggers only appear here because their 'tgenabled' flag
19743 * differs from its parent's. The trigger is created already, so
19744 * remove the CREATE and replace it with an ALTER. (Clear out the
19745 * DROP query too, so that pg_dump --create does not cause errors.)
19746 */
19747 resetPQExpBuffer(query);
19749 appendPQExpBuffer(query, "\nALTER %sTABLE %s ",
19750 tbinfo->relkind == RELKIND_FOREIGN_TABLE ? "FOREIGN " : "",
19752 switch (tginfo->tgenabled)
19753 {
19754 case 'f':
19755 case 'D':
19756 appendPQExpBufferStr(query, "DISABLE");
19757 break;
19758 case 't':
19759 case 'O':
19760 appendPQExpBufferStr(query, "ENABLE");
19761 break;
19762 case 'R':
19763 appendPQExpBufferStr(query, "ENABLE REPLICA");
19764 break;
19765 case 'A':
19766 appendPQExpBufferStr(query, "ENABLE ALWAYS");
19767 break;
19768 }
19769 appendPQExpBuffer(query, " TRIGGER %s;\n",
19770 fmtId(tginfo->dobj.name));
19771 }
19772 else if (tginfo->tgenabled != 't' && tginfo->tgenabled != 'O')
19773 {
19774 appendPQExpBuffer(query, "\nALTER %sTABLE %s ",
19775 tbinfo->relkind == RELKIND_FOREIGN_TABLE ? "FOREIGN " : "",
19777 switch (tginfo->tgenabled)
19778 {
19779 case 'D':
19780 case 'f':
19781 appendPQExpBufferStr(query, "DISABLE");
19782 break;
19783 case 'A':
19784 appendPQExpBufferStr(query, "ENABLE ALWAYS");
19785 break;
19786 case 'R':
19787 appendPQExpBufferStr(query, "ENABLE REPLICA");
19788 break;
19789 default:
19790 appendPQExpBufferStr(query, "ENABLE");
19791 break;
19792 }
19793 appendPQExpBuffer(query, " TRIGGER %s;\n",
19794 fmtId(tginfo->dobj.name));
19795 }
19796
19797 appendPQExpBuffer(trigprefix, "TRIGGER %s ON",
19798 fmtId(tginfo->dobj.name));
19799
19800 tag = psprintf("%s %s", tbinfo->dobj.name, tginfo->dobj.name);
19801
19802 if (tginfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
19803 ArchiveEntry(fout, tginfo->dobj.catId, tginfo->dobj.dumpId,
19804 ARCHIVE_OPTS(.tag = tag,
19805 .namespace = tbinfo->dobj.namespace->dobj.name,
19806 .owner = tbinfo->rolname,
19807 .description = "TRIGGER",
19808 .section = SECTION_POST_DATA,
19809 .createStmt = query->data,
19810 .dropStmt = delqry->data));
19811
19812 if (tginfo->dobj.dump & DUMP_COMPONENT_COMMENT)
19814 tbinfo->dobj.namespace->dobj.name, tbinfo->rolname,
19815 tginfo->dobj.catId, 0, tginfo->dobj.dumpId);
19816
19817 free(tag);
19818 destroyPQExpBuffer(query);
19822 free(qtabname);
19823}
19824
19825/*
19826 * dumpEventTrigger
19827 * write the declaration of one user-defined event trigger
19828 */
19829static void
19831{
19832 DumpOptions *dopt = fout->dopt;
19833 PQExpBuffer query;
19835 char *qevtname;
19836
19837 /* Do nothing if not dumping schema */
19838 if (!dopt->dumpSchema)
19839 return;
19840
19841 query = createPQExpBuffer();
19843
19844 qevtname = pg_strdup(fmtId(evtinfo->dobj.name));
19845
19846 appendPQExpBufferStr(query, "CREATE EVENT TRIGGER ");
19848 appendPQExpBufferStr(query, " ON ");
19849 appendPQExpBufferStr(query, fmtId(evtinfo->evtevent));
19850
19851 if (strcmp("", evtinfo->evttags) != 0)
19852 {
19853 appendPQExpBufferStr(query, "\n WHEN TAG IN (");
19854 appendPQExpBufferStr(query, evtinfo->evttags);
19855 appendPQExpBufferChar(query, ')');
19856 }
19857
19858 appendPQExpBufferStr(query, "\n EXECUTE FUNCTION ");
19859 appendPQExpBufferStr(query, evtinfo->evtfname);
19860 appendPQExpBufferStr(query, "();\n");
19861
19862 if (evtinfo->evtenabled != 'O')
19863 {
19864 appendPQExpBuffer(query, "\nALTER EVENT TRIGGER %s ",
19865 qevtname);
19866 switch (evtinfo->evtenabled)
19867 {
19868 case 'D':
19869 appendPQExpBufferStr(query, "DISABLE");
19870 break;
19871 case 'A':
19872 appendPQExpBufferStr(query, "ENABLE ALWAYS");
19873 break;
19874 case 'R':
19875 appendPQExpBufferStr(query, "ENABLE REPLICA");
19876 break;
19877 default:
19878 appendPQExpBufferStr(query, "ENABLE");
19879 break;
19880 }
19881 appendPQExpBufferStr(query, ";\n");
19882 }
19883
19884 appendPQExpBuffer(delqry, "DROP EVENT TRIGGER %s;\n",
19885 qevtname);
19886
19887 if (dopt->binary_upgrade)
19889 "EVENT TRIGGER", qevtname, NULL);
19890
19891 if (evtinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
19892 ArchiveEntry(fout, evtinfo->dobj.catId, evtinfo->dobj.dumpId,
19893 ARCHIVE_OPTS(.tag = evtinfo->dobj.name,
19894 .owner = evtinfo->evtowner,
19895 .description = "EVENT TRIGGER",
19896 .section = SECTION_POST_DATA,
19897 .createStmt = query->data,
19898 .dropStmt = delqry->data));
19899
19900 if (evtinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
19901 dumpComment(fout, "EVENT TRIGGER", qevtname,
19902 NULL, evtinfo->evtowner,
19903 evtinfo->dobj.catId, 0, evtinfo->dobj.dumpId);
19904
19905 if (evtinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
19906 dumpSecLabel(fout, "EVENT TRIGGER", qevtname,
19907 NULL, evtinfo->evtowner,
19908 evtinfo->dobj.catId, 0, evtinfo->dobj.dumpId);
19909
19910 destroyPQExpBuffer(query);
19912 free(qevtname);
19913}
19914
19915/*
19916 * dumpRule
19917 * Dump a rule
19918 */
19919static void
19921{
19922 DumpOptions *dopt = fout->dopt;
19923 TableInfo *tbinfo = rinfo->ruletable;
19924 bool is_view;
19925 PQExpBuffer query;
19926 PQExpBuffer cmd;
19929 char *qtabname;
19930 PGresult *res;
19931 char *tag;
19932
19933 /* Do nothing if not dumping schema */
19934 if (!dopt->dumpSchema)
19935 return;
19936
19937 /*
19938 * If it is an ON SELECT rule that is created implicitly by CREATE VIEW,
19939 * we do not want to dump it as a separate object.
19940 */
19941 if (!rinfo->separate)
19942 return;
19943
19944 /*
19945 * If it's an ON SELECT rule, we want to print it as a view definition,
19946 * instead of a rule.
19947 */
19948 is_view = (rinfo->ev_type == '1' && rinfo->is_instead);
19949
19950 query = createPQExpBuffer();
19951 cmd = createPQExpBuffer();
19954
19955 qtabname = pg_strdup(fmtId(tbinfo->dobj.name));
19956
19957 if (is_view)
19958 {
19959 PQExpBuffer result;
19960
19961 /*
19962 * We need OR REPLACE here because we'll be replacing a dummy view.
19963 * Otherwise this should look largely like the regular view dump code.
19964 */
19965 appendPQExpBuffer(cmd, "CREATE OR REPLACE VIEW %s",
19967 if (nonemptyReloptions(tbinfo->reloptions))
19968 {
19969 appendPQExpBufferStr(cmd, " WITH (");
19970 appendReloptionsArrayAH(cmd, tbinfo->reloptions, "", fout);
19971 appendPQExpBufferChar(cmd, ')');
19972 }
19973 result = createViewAsClause(fout, tbinfo);
19974 appendPQExpBuffer(cmd, " AS\n%s", result->data);
19975 destroyPQExpBuffer(result);
19976 if (tbinfo->checkoption != NULL)
19977 appendPQExpBuffer(cmd, "\n WITH %s CHECK OPTION",
19978 tbinfo->checkoption);
19979 appendPQExpBufferStr(cmd, ";\n");
19980 }
19981 else
19982 {
19983 /* In the rule case, just print pg_get_ruledef's result verbatim */
19984 appendPQExpBuffer(query,
19985 "SELECT pg_catalog.pg_get_ruledef('%u'::pg_catalog.oid)",
19986 rinfo->dobj.catId.oid);
19987
19988 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
19989
19990 if (PQntuples(res) != 1)
19991 pg_fatal("query to get rule \"%s\" for table \"%s\" failed: wrong number of rows returned",
19992 rinfo->dobj.name, tbinfo->dobj.name);
19993
19994 printfPQExpBuffer(cmd, "%s\n", PQgetvalue(res, 0, 0));
19995
19996 PQclear(res);
19997 }
19998
19999 /*
20000 * Add the command to alter the rules replication firing semantics if it
20001 * differs from the default.
20002 */
20003 if (rinfo->ev_enabled != 'O')
20004 {
20005 appendPQExpBuffer(cmd, "ALTER TABLE %s ", fmtQualifiedDumpable(tbinfo));
20006 switch (rinfo->ev_enabled)
20007 {
20008 case 'A':
20009 appendPQExpBuffer(cmd, "ENABLE ALWAYS RULE %s;\n",
20010 fmtId(rinfo->dobj.name));
20011 break;
20012 case 'R':
20013 appendPQExpBuffer(cmd, "ENABLE REPLICA RULE %s;\n",
20014 fmtId(rinfo->dobj.name));
20015 break;
20016 case 'D':
20017 appendPQExpBuffer(cmd, "DISABLE RULE %s;\n",
20018 fmtId(rinfo->dobj.name));
20019 break;
20020 }
20021 }
20022
20023 if (is_view)
20024 {
20025 /*
20026 * We can't DROP a view's ON SELECT rule. Instead, use CREATE OR
20027 * REPLACE VIEW to replace the rule with something with minimal
20028 * dependencies.
20029 */
20030 PQExpBuffer result;
20031
20032 appendPQExpBuffer(delcmd, "CREATE OR REPLACE VIEW %s",
20035 appendPQExpBuffer(delcmd, " AS\n%s;\n", result->data);
20036 destroyPQExpBuffer(result);
20037 }
20038 else
20039 {
20040 appendPQExpBuffer(delcmd, "DROP RULE %s ",
20041 fmtId(rinfo->dobj.name));
20042 appendPQExpBuffer(delcmd, "ON %s;\n",
20044 }
20045
20046 appendPQExpBuffer(ruleprefix, "RULE %s ON",
20047 fmtId(rinfo->dobj.name));
20048
20049 tag = psprintf("%s %s", tbinfo->dobj.name, rinfo->dobj.name);
20050
20051 if (rinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
20052 ArchiveEntry(fout, rinfo->dobj.catId, rinfo->dobj.dumpId,
20053 ARCHIVE_OPTS(.tag = tag,
20054 .namespace = tbinfo->dobj.namespace->dobj.name,
20055 .owner = tbinfo->rolname,
20056 .description = "RULE",
20057 .section = SECTION_POST_DATA,
20058 .createStmt = cmd->data,
20059 .dropStmt = delcmd->data));
20060
20061 /* Dump rule comments */
20062 if (rinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
20064 tbinfo->dobj.namespace->dobj.name,
20065 tbinfo->rolname,
20066 rinfo->dobj.catId, 0, rinfo->dobj.dumpId);
20067
20068 free(tag);
20069 destroyPQExpBuffer(query);
20070 destroyPQExpBuffer(cmd);
20073 free(qtabname);
20074}
20075
20076/*
20077 * getExtensionMembership --- obtain extension membership data
20078 *
20079 * We need to identify objects that are extension members as soon as they're
20080 * loaded, so that we can correctly determine whether they need to be dumped.
20081 * Generally speaking, extension member objects will get marked as *not* to
20082 * be dumped, as they will be recreated by the single CREATE EXTENSION
20083 * command. However, in binary upgrade mode we still need to dump the members
20084 * individually.
20085 */
20086void
20088 int numExtensions)
20089{
20090 PQExpBuffer query;
20091 PGresult *res;
20092 int ntups,
20093 i;
20094 int i_classid,
20095 i_objid,
20096 i_refobjid;
20097 ExtensionInfo *ext;
20098
20099 /* Nothing to do if no extensions */
20100 if (numExtensions == 0)
20101 return;
20102
20103 query = createPQExpBuffer();
20104
20105 /* refclassid constraint is redundant but may speed the search */
20106 appendPQExpBufferStr(query, "SELECT "
20107 "classid, objid, refobjid "
20108 "FROM pg_depend "
20109 "WHERE refclassid = 'pg_extension'::regclass "
20110 "AND deptype = 'e' "
20111 "ORDER BY 3");
20112
20113 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
20114
20115 ntups = PQntuples(res);
20116
20117 i_classid = PQfnumber(res, "classid");
20118 i_objid = PQfnumber(res, "objid");
20119 i_refobjid = PQfnumber(res, "refobjid");
20120
20121 /*
20122 * Since we ordered the SELECT by referenced ID, we can expect that
20123 * multiple entries for the same extension will appear together; this
20124 * saves on searches.
20125 */
20126 ext = NULL;
20127
20128 for (i = 0; i < ntups; i++)
20129 {
20130 CatalogId objId;
20131 Oid extId;
20132
20133 objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
20134 objId.oid = atooid(PQgetvalue(res, i, i_objid));
20136
20137 if (ext == NULL ||
20138 ext->dobj.catId.oid != extId)
20140
20141 if (ext == NULL)
20142 {
20143 /* shouldn't happen */
20144 pg_log_warning("could not find referenced extension %u", extId);
20145 continue;
20146 }
20147
20148 recordExtensionMembership(objId, ext);
20149 }
20150
20151 PQclear(res);
20152
20153 destroyPQExpBuffer(query);
20154}
20155
20156/*
20157 * processExtensionTables --- deal with extension configuration tables
20158 *
20159 * There are two parts to this process:
20160 *
20161 * 1. Identify and create dump records for extension configuration tables.
20162 *
20163 * Extensions can mark tables as "configuration", which means that the user
20164 * is able and expected to modify those tables after the extension has been
20165 * loaded. For these tables, we dump out only the data- the structure is
20166 * expected to be handled at CREATE EXTENSION time, including any indexes or
20167 * foreign keys, which brings us to-
20168 *
20169 * 2. Record FK dependencies between configuration tables.
20170 *
20171 * Due to the FKs being created at CREATE EXTENSION time and therefore before
20172 * the data is loaded, we have to work out what the best order for reloading
20173 * the data is, to avoid FK violations when the tables are restored. This is
20174 * not perfect- we can't handle circular dependencies and if any exist they
20175 * will cause an invalid dump to be produced (though at least all of the data
20176 * is included for a user to manually restore). This is currently documented
20177 * but perhaps we can provide a better solution in the future.
20178 */
20179void
20181 int numExtensions)
20182{
20183 DumpOptions *dopt = fout->dopt;
20184 PQExpBuffer query;
20185 PGresult *res;
20186 int ntups,
20187 i;
20188 int i_conrelid,
20190
20191 /* Nothing to do if no extensions */
20192 if (numExtensions == 0)
20193 return;
20194
20195 /*
20196 * Identify extension configuration tables and create TableDataInfo
20197 * objects for them, ensuring their data will be dumped even though the
20198 * tables themselves won't be.
20199 *
20200 * Note that we create TableDataInfo objects even in schema-only mode, ie,
20201 * user data in a configuration table is treated like schema data. This
20202 * seems appropriate since system data in a config table would get
20203 * reloaded by CREATE EXTENSION. If the extension is not listed in the
20204 * list of extensions to be included, none of its data is dumped.
20205 */
20206 for (i = 0; i < numExtensions; i++)
20207 {
20209 char *extconfig = curext->extconfig;
20210 char *extcondition = curext->extcondition;
20211 char **extconfigarray = NULL;
20212 char **extconditionarray = NULL;
20213 int nconfigitems = 0;
20214 int nconditionitems = 0;
20215
20216 /*
20217 * Check if this extension is listed as to include in the dump. If
20218 * not, any table data associated with it is discarded.
20219 */
20222 curext->dobj.catId.oid))
20223 continue;
20224
20225 /*
20226 * Check if this extension is listed as to exclude in the dump. If
20227 * yes, any table data associated with it is discarded.
20228 */
20231 curext->dobj.catId.oid))
20232 continue;
20233
20234 if (strlen(extconfig) != 0 || strlen(extcondition) != 0)
20235 {
20236 int j;
20237
20238 if (!parsePGArray(extconfig, &extconfigarray, &nconfigitems))
20239 pg_fatal("could not parse %s array", "extconfig");
20240 if (!parsePGArray(extcondition, &extconditionarray, &nconditionitems))
20241 pg_fatal("could not parse %s array", "extcondition");
20243 pg_fatal("mismatched number of configurations and conditions for extension");
20244
20245 for (j = 0; j < nconfigitems; j++)
20246 {
20249 bool dumpobj =
20250 curext->dobj.dump & DUMP_COMPONENT_DEFINITION;
20251
20253 if (configtbl == NULL)
20254 continue;
20255
20256 /*
20257 * Tables of not-to-be-dumped extensions shouldn't be dumped
20258 * unless the table or its schema is explicitly included
20259 */
20260 if (!(curext->dobj.dump & DUMP_COMPONENT_DEFINITION))
20261 {
20262 /* check table explicitly requested */
20263 if (table_include_oids.head != NULL &&
20265 configtbloid))
20266 dumpobj = true;
20267
20268 /* check table's schema explicitly requested */
20269 if (configtbl->dobj.namespace->dobj.dump &
20271 dumpobj = true;
20272 }
20273
20274 /* check table excluded by an exclusion switch */
20275 if (table_exclude_oids.head != NULL &&
20277 configtbloid))
20278 dumpobj = false;
20279
20280 /* check schema excluded by an exclusion switch */
20282 configtbl->dobj.namespace->dobj.catId.oid))
20283 dumpobj = false;
20284
20285 if (dumpobj)
20286 {
20288 if (configtbl->dataObj != NULL)
20289 {
20290 if (strlen(extconditionarray[j]) > 0)
20291 configtbl->dataObj->filtercond = pg_strdup(extconditionarray[j]);
20292 }
20293 }
20294 }
20295 }
20296 if (extconfigarray)
20300 }
20301
20302 /*
20303 * Now that all the TableDataInfo objects have been created for all the
20304 * extensions, check their FK dependencies and register them to try and
20305 * dump the data out in an order that they can be restored in.
20306 *
20307 * Note that this is not a problem for user tables as their FKs are
20308 * recreated after the data has been loaded.
20309 */
20310
20311 query = createPQExpBuffer();
20312
20313 printfPQExpBuffer(query,
20314 "SELECT conrelid, confrelid "
20315 "FROM pg_constraint "
20316 "JOIN pg_depend ON (objid = confrelid) "
20317 "WHERE contype = 'f' "
20318 "AND refclassid = 'pg_extension'::regclass "
20319 "AND classid = 'pg_class'::regclass;");
20320
20321 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
20322 ntups = PQntuples(res);
20323
20324 i_conrelid = PQfnumber(res, "conrelid");
20325 i_confrelid = PQfnumber(res, "confrelid");
20326
20327 /* Now get the dependencies and register them */
20328 for (i = 0; i < ntups; i++)
20329 {
20330 Oid conrelid,
20331 confrelid;
20333 *contable;
20334
20335 conrelid = atooid(PQgetvalue(res, i, i_conrelid));
20336 confrelid = atooid(PQgetvalue(res, i, i_confrelid));
20337 contable = findTableByOid(conrelid);
20338 reftable = findTableByOid(confrelid);
20339
20340 if (reftable == NULL ||
20341 reftable->dataObj == NULL ||
20342 contable == NULL ||
20343 contable->dataObj == NULL)
20344 continue;
20345
20346 /*
20347 * Make referencing TABLE_DATA object depend on the referenced table's
20348 * TABLE_DATA object.
20349 */
20350 addObjectDependency(&contable->dataObj->dobj,
20351 reftable->dataObj->dobj.dumpId);
20352 }
20353 PQclear(res);
20354 destroyPQExpBuffer(query);
20355}
20356
20357/*
20358 * getDependencies --- obtain available dependency data
20359 */
20360static void
20362{
20363 PQExpBuffer query;
20364 PGresult *res;
20365 int ntups,
20366 i;
20367 int i_classid,
20368 i_objid,
20370 i_refobjid,
20371 i_deptype;
20372 DumpableObject *dobj,
20373 *refdobj;
20374
20375 pg_log_info("reading dependency data");
20376
20377 query = createPQExpBuffer();
20378
20379 /*
20380 * Messy query to collect the dependency data we need. Note that we
20381 * ignore the sub-object column, so that dependencies of or on a column
20382 * look the same as dependencies of or on a whole table.
20383 *
20384 * PIN dependencies aren't interesting, and EXTENSION dependencies were
20385 * already processed by getExtensionMembership.
20386 */
20387 appendPQExpBufferStr(query, "SELECT "
20388 "classid, objid, refclassid, refobjid, deptype "
20389 "FROM pg_depend "
20390 "WHERE deptype != 'p' AND deptype != 'e'\n");
20391
20392 /*
20393 * Since we don't treat pg_amop entries as separate DumpableObjects, we
20394 * have to translate their dependencies into dependencies of their parent
20395 * opfamily. Ignore internal dependencies though, as those will point to
20396 * their parent opclass, which we needn't consider here (and if we did,
20397 * it'd just result in circular dependencies). Also, "loose" opfamily
20398 * entries will have dependencies on their parent opfamily, which we
20399 * should drop since they'd likewise become useless self-dependencies.
20400 * (But be sure to keep deps on *other* opfamilies; see amopsortfamily.)
20401 */
20402 appendPQExpBufferStr(query, "UNION ALL\n"
20403 "SELECT 'pg_opfamily'::regclass AS classid, amopfamily AS objid, refclassid, refobjid, deptype "
20404 "FROM pg_depend d, pg_amop o "
20405 "WHERE deptype NOT IN ('p', 'e', 'i') AND "
20406 "classid = 'pg_amop'::regclass AND objid = o.oid "
20407 "AND NOT (refclassid = 'pg_opfamily'::regclass AND amopfamily = refobjid)\n");
20408
20409 /* Likewise for pg_amproc entries */
20410 appendPQExpBufferStr(query, "UNION ALL\n"
20411 "SELECT 'pg_opfamily'::regclass AS classid, amprocfamily AS objid, refclassid, refobjid, deptype "
20412 "FROM pg_depend d, pg_amproc p "
20413 "WHERE deptype NOT IN ('p', 'e', 'i') AND "
20414 "classid = 'pg_amproc'::regclass AND objid = p.oid "
20415 "AND NOT (refclassid = 'pg_opfamily'::regclass AND amprocfamily = refobjid)\n");
20416
20417 /* Sort the output for efficiency below */
20418 appendPQExpBufferStr(query, "ORDER BY 1,2");
20419
20420 res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
20421
20422 ntups = PQntuples(res);
20423
20424 i_classid = PQfnumber(res, "classid");
20425 i_objid = PQfnumber(res, "objid");
20426 i_refclassid = PQfnumber(res, "refclassid");
20427 i_refobjid = PQfnumber(res, "refobjid");
20428 i_deptype = PQfnumber(res, "deptype");
20429
20430 /*
20431 * Since we ordered the SELECT by referencing ID, we can expect that
20432 * multiple entries for the same object will appear together; this saves
20433 * on searches.
20434 */
20435 dobj = NULL;
20436
20437 for (i = 0; i < ntups; i++)
20438 {
20439 CatalogId objId;
20441 char deptype;
20442
20443 objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
20444 objId.oid = atooid(PQgetvalue(res, i, i_objid));
20445 refobjId.tableoid = atooid(PQgetvalue(res, i, i_refclassid));
20446 refobjId.oid = atooid(PQgetvalue(res, i, i_refobjid));
20447 deptype = *(PQgetvalue(res, i, i_deptype));
20448
20449 if (dobj == NULL ||
20450 dobj->catId.tableoid != objId.tableoid ||
20451 dobj->catId.oid != objId.oid)
20452 dobj = findObjectByCatalogId(objId);
20453
20454 /*
20455 * Failure to find objects mentioned in pg_depend is not unexpected,
20456 * since for example we don't collect info about TOAST tables.
20457 */
20458 if (dobj == NULL)
20459 {
20460#ifdef NOT_USED
20461 pg_log_warning("no referencing object %u %u",
20462 objId.tableoid, objId.oid);
20463#endif
20464 continue;
20465 }
20466
20468
20469 if (refdobj == NULL)
20470 {
20471#ifdef NOT_USED
20472 pg_log_warning("no referenced object %u %u",
20473 refobjId.tableoid, refobjId.oid);
20474#endif
20475 continue;
20476 }
20477
20478 /*
20479 * For 'x' dependencies, mark the object for later; we still add the
20480 * normal dependency, for possible ordering purposes. Currently
20481 * pg_dump_sort.c knows to put extensions ahead of all object types
20482 * that could possibly depend on them, but this is safer.
20483 */
20484 if (deptype == 'x')
20485 dobj->depends_on_ext = true;
20486
20487 /*
20488 * Ordinarily, table rowtypes have implicit dependencies on their
20489 * tables. However, for a composite type the implicit dependency goes
20490 * the other way in pg_depend; which is the right thing for DROP but
20491 * it doesn't produce the dependency ordering we need. So in that one
20492 * case, we reverse the direction of the dependency.
20493 */
20494 if (deptype == 'i' &&
20495 dobj->objType == DO_TABLE &&
20496 refdobj->objType == DO_TYPE)
20498 else
20499 /* normal case */
20501 }
20502
20503 PQclear(res);
20504
20505 destroyPQExpBuffer(query);
20506}
20507
20508
20509/*
20510 * createBoundaryObjects - create dummy DumpableObjects to represent
20511 * dump section boundaries.
20512 */
20513static DumpableObject *
20515{
20517
20519
20520 dobjs[0].objType = DO_PRE_DATA_BOUNDARY;
20521 dobjs[0].catId = nilCatalogId;
20522 AssignDumpId(dobjs + 0);
20523 dobjs[0].name = pg_strdup("PRE-DATA BOUNDARY");
20524
20525 dobjs[1].objType = DO_POST_DATA_BOUNDARY;
20526 dobjs[1].catId = nilCatalogId;
20527 AssignDumpId(dobjs + 1);
20528 dobjs[1].name = pg_strdup("POST-DATA BOUNDARY");
20529
20530 return dobjs;
20531}
20532
20533/*
20534 * addBoundaryDependencies - add dependencies as needed to enforce the dump
20535 * section boundaries.
20536 */
20537static void
20540{
20543 int i;
20544
20545 for (i = 0; i < numObjs; i++)
20546 {
20547 DumpableObject *dobj = dobjs[i];
20548
20549 /*
20550 * The classification of object types here must match the SECTION_xxx
20551 * values assigned during subsequent ArchiveEntry calls!
20552 */
20553 switch (dobj->objType)
20554 {
20555 case DO_NAMESPACE:
20556 case DO_EXTENSION:
20557 case DO_TYPE:
20558 case DO_SHELL_TYPE:
20559 case DO_FUNC:
20560 case DO_AGG:
20561 case DO_OPERATOR:
20562 case DO_ACCESS_METHOD:
20563 case DO_OPCLASS:
20564 case DO_OPFAMILY:
20565 case DO_COLLATION:
20566 case DO_CONVERSION:
20567 case DO_TABLE:
20568 case DO_TABLE_ATTACH:
20569 case DO_ATTRDEF:
20570 case DO_PROCLANG:
20571 case DO_CAST:
20572 case DO_DUMMY_TYPE:
20573 case DO_TSPARSER:
20574 case DO_TSDICT:
20575 case DO_TSTEMPLATE:
20576 case DO_TSCONFIG:
20577 case DO_FDW:
20578 case DO_FOREIGN_SERVER:
20579 case DO_TRANSFORM:
20580 /* Pre-data objects: must come before the pre-data boundary */
20582 break;
20583 case DO_TABLE_DATA:
20584 case DO_SEQUENCE_SET:
20585 case DO_LARGE_OBJECT:
20587 /* Data objects: must come between the boundaries */
20590 break;
20591 case DO_INDEX:
20592 case DO_INDEX_ATTACH:
20593 case DO_STATSEXT:
20594 case DO_REFRESH_MATVIEW:
20595 case DO_TRIGGER:
20596 case DO_EVENT_TRIGGER:
20597 case DO_DEFAULT_ACL:
20598 case DO_POLICY:
20599 case DO_PUBLICATION:
20600 case DO_PUBLICATION_REL:
20602 case DO_SUBSCRIPTION:
20604 /* Post-data objects: must come after the post-data boundary */
20606 break;
20607 case DO_RULE:
20608 /* Rules are post-data, but only if dumped separately */
20609 if (((RuleInfo *) dobj)->separate)
20610 addObjectDependency(dobj, postDataBound->dumpId);
20611 break;
20612 case DO_CONSTRAINT:
20613 case DO_FK_CONSTRAINT:
20614 /* Constraints are post-data, but only if dumped separately */
20615 if (((ConstraintInfo *) dobj)->separate)
20616 addObjectDependency(dobj, postDataBound->dumpId);
20617 break;
20619 /* nothing to do */
20620 break;
20622 /* must come after the pre-data boundary */
20623 addObjectDependency(dobj, preDataBound->dumpId);
20624 break;
20625 case DO_REL_STATS:
20626 /* stats section varies by parent object type, DATA or POST */
20627 if (((RelStatsInfo *) dobj)->section == SECTION_DATA)
20628 {
20629 addObjectDependency(dobj, preDataBound->dumpId);
20630 addObjectDependency(postDataBound, dobj->dumpId);
20631 }
20632 else
20633 addObjectDependency(dobj, postDataBound->dumpId);
20634 break;
20635 }
20636 }
20637}
20638
20639
20640/*
20641 * BuildArchiveDependencies - create dependency data for archive TOC entries
20642 *
20643 * The raw dependency data obtained by getDependencies() is not terribly
20644 * useful in an archive dump, because in many cases there are dependency
20645 * chains linking through objects that don't appear explicitly in the dump.
20646 * For example, a view will depend on its _RETURN rule while the _RETURN rule
20647 * will depend on other objects --- but the rule will not appear as a separate
20648 * object in the dump. We need to adjust the view's dependencies to include
20649 * whatever the rule depends on that is included in the dump.
20650 *
20651 * Just to make things more complicated, there are also "special" dependencies
20652 * such as the dependency of a TABLE DATA item on its TABLE, which we must
20653 * not rearrange because pg_restore knows that TABLE DATA only depends on
20654 * its table. In these cases we must leave the dependencies strictly as-is
20655 * even if they refer to not-to-be-dumped objects.
20656 *
20657 * To handle this, the convention is that "special" dependencies are created
20658 * during ArchiveEntry calls, and an archive TOC item that has any such
20659 * entries will not be touched here. Otherwise, we recursively search the
20660 * DumpableObject data structures to build the correct dependencies for each
20661 * archive TOC item.
20662 */
20663static void
20665{
20667 TocEntry *te;
20668
20669 /* Scan all TOC entries in the archive */
20670 for (te = AH->toc->next; te != AH->toc; te = te->next)
20671 {
20672 DumpableObject *dobj;
20673 DumpId *dependencies;
20674 int nDeps;
20675 int allocDeps;
20676
20677 /* No need to process entries that will not be dumped */
20678 if (te->reqs == 0)
20679 continue;
20680 /* Ignore entries that already have "special" dependencies */
20681 if (te->nDeps > 0)
20682 continue;
20683 /* Otherwise, look up the item's original DumpableObject, if any */
20684 dobj = findObjectByDumpId(te->dumpId);
20685 if (dobj == NULL)
20686 continue;
20687 /* No work if it has no dependencies */
20688 if (dobj->nDeps <= 0)
20689 continue;
20690 /* Set up work array */
20691 allocDeps = 64;
20692 dependencies = pg_malloc_array(DumpId, allocDeps);
20693 nDeps = 0;
20694 /* Recursively find all dumpable dependencies */
20695 findDumpableDependencies(AH, dobj,
20696 &dependencies, &nDeps, &allocDeps);
20697 /* And save 'em ... */
20698 if (nDeps > 0)
20699 {
20700 dependencies = pg_realloc_array(dependencies, DumpId, nDeps);
20701 te->dependencies = dependencies;
20702 te->nDeps = nDeps;
20703 }
20704 else
20705 free(dependencies);
20706 }
20707}
20708
20709/* Recursive search subroutine for BuildArchiveDependencies */
20710static void
20712 DumpId **dependencies, int *nDeps, int *allocDeps)
20713{
20714 int i;
20715
20716 /*
20717 * Ignore section boundary objects: if we search through them, we'll
20718 * report lots of bogus dependencies.
20719 */
20720 if (dobj->objType == DO_PRE_DATA_BOUNDARY ||
20722 return;
20723
20724 for (i = 0; i < dobj->nDeps; i++)
20725 {
20726 DumpId depid = dobj->dependencies[i];
20727
20728 if (TocIDRequired(AH, depid) != 0)
20729 {
20730 /* Object will be dumped, so just reference it as a dependency */
20731 if (*nDeps >= *allocDeps)
20732 {
20733 *allocDeps *= 2;
20734 *dependencies = pg_realloc_array(*dependencies, DumpId, *allocDeps);
20735 }
20736 (*dependencies)[*nDeps] = depid;
20737 (*nDeps)++;
20738 }
20739 else
20740 {
20741 /*
20742 * Object will not be dumped, so recursively consider its deps. We
20743 * rely on the assumption that sortDumpableObjects already broke
20744 * any dependency loops, else we might recurse infinitely.
20745 */
20747
20748 if (otherdobj)
20750 dependencies, nDeps, allocDeps);
20751 }
20752 }
20753}
20754
20755
20756/*
20757 * getFormattedTypeName - retrieve a nicely-formatted type name for the
20758 * given type OID.
20759 *
20760 * This does not guarantee to schema-qualify the output, so it should not
20761 * be used to create the target object name for CREATE or ALTER commands.
20762 *
20763 * Note that the result is cached and must not be freed by the caller.
20764 */
20765static const char *
20767{
20769 char *result;
20770 PQExpBuffer query;
20771 PGresult *res;
20772
20773 if (oid == 0)
20774 {
20775 if ((opts & zeroAsStar) != 0)
20776 return "*";
20777 else if ((opts & zeroAsNone) != 0)
20778 return "NONE";
20779 }
20780
20781 /* see if we have the result cached in the type's TypeInfo record */
20782 typeInfo = findTypeByOid(oid);
20783 if (typeInfo && typeInfo->ftypname)
20784 return typeInfo->ftypname;
20785
20786 query = createPQExpBuffer();
20787 appendPQExpBuffer(query, "SELECT pg_catalog.format_type('%u'::pg_catalog.oid, NULL)",
20788 oid);
20789
20790 res = ExecuteSqlQueryForSingleRow(fout, query->data);
20791
20792 /* result of format_type is already quoted */
20793 result = pg_strdup(PQgetvalue(res, 0, 0));
20794
20795 PQclear(res);
20796 destroyPQExpBuffer(query);
20797
20798 /*
20799 * Cache the result for re-use in later requests, if possible. If we
20800 * don't have a TypeInfo for the type, the string will be leaked once the
20801 * caller is done with it ... but that case really should not happen, so
20802 * leaking if it does seems acceptable.
20803 */
20804 if (typeInfo)
20805 typeInfo->ftypname = result;
20806
20807 return result;
20808}
20809
20810/*
20811 * Return a column list clause for the given relation.
20812 *
20813 * Special case: if there are no undropped columns in the relation, return
20814 * "", not an invalid "()" column list.
20815 */
20816static const char *
20818{
20819 int numatts = ti->numatts;
20820 char **attnames = ti->attnames;
20821 bool *attisdropped = ti->attisdropped;
20822 char *attgenerated = ti->attgenerated;
20823 bool needComma;
20824 int i;
20825
20826 appendPQExpBufferChar(buffer, '(');
20827 needComma = false;
20828 for (i = 0; i < numatts; i++)
20829 {
20830 if (attisdropped[i])
20831 continue;
20832 if (attgenerated[i])
20833 continue;
20834 if (needComma)
20835 appendPQExpBufferStr(buffer, ", ");
20836 appendPQExpBufferStr(buffer, fmtId(attnames[i]));
20837 needComma = true;
20838 }
20839
20840 if (!needComma)
20841 return ""; /* no undropped columns */
20842
20843 appendPQExpBufferChar(buffer, ')');
20844 return buffer->data;
20845}
20846
/*
 * Check if a reloptions array is nonempty.
 *
 * The input is the text form of a reloptions array, e.g. "{fillfactor=70}";
 * NULL and the empty-array form "{}" both count as empty.
 */
static bool
nonemptyReloptions(const char *reloptions)
{
	/* Don't want to print it if it's just "{}" */
	return (reloptions != NULL && strlen(reloptions) > 2);
}
20856
20857/*
20858 * Format a reloptions array and append it to the given buffer.
20859 *
20860 * "prefix" is prepended to the option names; typically it's "" or "toast.".
20861 */
20862static void
20863appendReloptionsArrayAH(PQExpBuffer buffer, const char *reloptions,
20864 const char *prefix, Archive *fout)
20865{
20866 bool res;
20867
20868 res = appendReloptionsArray(buffer, reloptions, prefix, fout->encoding,
20869 fout->std_strings);
20870 if (!res)
20871 pg_log_warning("could not parse %s array", "reloptions");
20872}
20873
20874/*
20875 * read_dump_filters - retrieve object identifier patterns from file
20876 *
20877 * Parse the specified filter file for include and exclude patterns, and add
20878 * them to the relevant lists. If the filename is "-" then filters will be
20879 * read from STDIN rather than a file.
20880 */
20881static void
20883{
20885 char *objname;
20887 FilterObjectType objtype;
20888
20890
20891 while (filter_read_item(&fstate, &objname, &comtype, &objtype))
20892 {
20894 {
20895 switch (objtype)
20896 {
20898 break;
20905 pg_log_filter_error(&fstate, _("%s filter for \"%s\" is not allowed"),
20906 "include",
20907 filter_object_type_name(objtype));
20908 exit_nicely(1);
20909 break; /* unreachable */
20910
20913 break;
20916 break;
20919 dopt->include_everything = false;
20920 break;
20923 dopt->include_everything = false;
20924 break;
20927 objname);
20928 dopt->include_everything = false;
20929 break;
20930 }
20931 }
20933 {
20934 switch (objtype)
20935 {
20937 break;
20943 pg_log_filter_error(&fstate, _("%s filter for \"%s\" is not allowed"),
20944 "exclude",
20945 filter_object_type_name(objtype));
20946 exit_nicely(1);
20947 break;
20948
20951 break;
20954 objname);
20955 break;
20958 objname);
20959 break;
20962 break;
20965 break;
20968 objname);
20969 break;
20970 }
20971 }
20972 else
20973 {
20975 Assert(objtype == FILTER_OBJECT_TYPE_NONE);
20976 }
20977
20978 if (objname)
20979 free(objname);
20980 }
20981
20983}
Acl * acldefault(ObjectType objtype, Oid ownerId)
Definition acl.c:816
#define InvalidAttrNumber
Definition attnum.h:23
int lo_read(int fd, char *buf, int len)
Definition be-fsstubs.c:154
static void help(void)
Definition pg_config.c:71
void recordAdditionalCatalogID(CatalogId catId, DumpableObject *dobj)
Definition common.c:719
void recordExtensionMembership(CatalogId catId, ExtensionInfo *ext)
Definition common.c:1063
FuncInfo * findFuncByOid(Oid oid)
Definition common.c:918
TableInfo * findTableByOid(Oid oid)
Definition common.c:863
ExtensionInfo * findExtensionByOid(Oid oid)
Definition common.c:1008
CollInfo * findCollationByOid(Oid oid)
Definition common.c:972
SubscriptionInfo * findSubscriptionByOid(Oid oid)
Definition common.c:1044
OprInfo * findOprByOid(Oid oid)
Definition common.c:936
NamespaceInfo * findNamespaceByOid(Oid oid)
Definition common.c:990
DumpId createDumpId(void)
Definition common.c:745
void addObjectDependency(DumpableObject *dobj, DumpId refId)
Definition common.c:818
DumpableObject * findObjectByDumpId(DumpId dumpId)
Definition common.c:765
void parseOidArray(const char *str, Oid *array, int arraysize)
Definition common.c:1111
ExtensionInfo * findOwningExtension(CatalogId catalogId)
Definition common.c:1087
TableInfo * getSchemaData(Archive *fout, int *numTablesPtr)
Definition common.c:98
TypeInfo * findTypeByOid(Oid oid)
Definition common.c:899
DumpableObject * findObjectByCatalogId(CatalogId catalogId)
Definition common.c:778
void AssignDumpId(DumpableObject *dobj)
Definition common.c:657
void getDumpableObjects(DumpableObject ***objs, int *numObjs)
Definition common.c:797
PublicationInfo * findPublicationByOid(Oid oid)
Definition common.c:1026
void on_exit_close_archive(Archive *AHX)
Definition parallel.c:330
void init_parallel_dump_utils(void)
Definition parallel.c:238
#define PG_MAX_JOBS
Definition parallel.h:48
bool is_superuser(void)
Definition common.c:2480
uint32 BlockNumber
Definition block.h:31
static void cleanup(void)
Definition bootstrap.c:878
static const gbtree_vinfo tinfo
Definition btree_bit.c:109
#define PG_INT32_MAX
Definition c.h:636
#define ngettext(s, p, n)
Definition c.h:1233
#define INT64_FORMAT
Definition c.h:597
#define Assert(condition)
Definition c.h:906
#define PG_TEXTDOMAIN(domain)
Definition c.h:1266
int64_t int64
Definition c.h:576
#define PG_INT16_MIN
Definition c.h:632
#define CppAsString2(x)
Definition c.h:461
int32_t int32
Definition c.h:575
#define PG_INT64_MAX
Definition c.h:639
#define PG_INT64_MIN
Definition c.h:638
uint32_t uint32
Definition c.h:579
#define lengthof(array)
Definition c.h:836
#define PG_INT32_MIN
Definition c.h:635
#define StaticAssertDecl(condition, errmessage)
Definition c.h:971
#define PG_INT16_MAX
Definition c.h:633
#define OidIsValid(objectId)
Definition c.h:821
int nspid
void set_pglocale_pgservice(const char *argv0, const char *app)
Definition exec.c:430
int main(void)
char * supports_compression(const pg_compress_specification compression_spec)
Definition compress_io.c:87
char * validate_compress_specification(pg_compress_specification *spec)
bool parse_compress_algorithm(char *name, pg_compress_algorithm *algorithm)
Definition compression.c:49
void parse_compress_specification(pg_compress_algorithm algorithm, char *specification, pg_compress_specification *result)
#define PG_COMPRESSION_OPTION_WORKERS
Definition compression.h:29
pg_compress_algorithm
Definition compression.h:22
@ PG_COMPRESSION_NONE
Definition compression.h:23
void parse_compress_options(const char *option, char **algorithm, char **detail)
#define ALWAYS_SECURE_SEARCH_PATH_SQL
Definition connect.h:25
char * generate_restrict_key(void)
Definition dumputils.c:973
bool buildACLCommands(const char *name, const char *subname, const char *nspname, const char *type, const char *acls, const char *baseacls, const char *owner, const char *prefix, int remoteVersion, PQExpBuffer sql)
Definition dumputils.c:104
bool valid_restrict_key(const char *restrict_key)
Definition dumputils.c:997
void buildShSecLabelQuery(const char *catalog_name, Oid objectId, PQExpBuffer sql)
Definition dumputils.c:678
void makeAlterConfigCommand(PGconn *conn, const char *configitem, const char *type, const char *name, const char *type2, const char *name2, PQExpBuffer buf)
Definition dumputils.c:865
bool buildDefaultACLCommands(const char *type, const char *nspname, const char *acls, const char *acldefault, const char *owner, int remoteVersion, PQExpBuffer sql)
Definition dumputils.c:366
char * sanitize_line(const char *str, bool want_hyphen)
Definition dumputils.c:52
bool variable_is_guc_list_quote(const char *name)
Definition dumputils.c:730
void quoteAclUserName(PQExpBuffer output, const char *input)
Definition dumputils.c:585
void emitShSecLabels(PGconn *conn, PGresult *res, PQExpBuffer buffer, const char *objtype, const char *objname)
Definition dumputils.c:696
Datum arg
Definition elog.c:1322
#define _(x)
Definition elog.c:95
char * PQdb(const PGconn *conn)
const char * PQparameterStatus(const PGconn *conn, const char *paramName)
int PQclientEncoding(const PGconn *conn)
char * PQerrorMessage(const PGconn *conn)
int PQsetClientEncoding(PGconn *conn, const char *encoding)
void PQfreemem(void *ptr)
Definition fe-exec.c:4049
Oid PQftype(const PGresult *res, int field_num)
Definition fe-exec.c:3736
int PQfnumber(const PGresult *res, const char *field_name)
Definition fe-exec.c:3606
int PQgetCopyData(PGconn *conn, char **buffer, int async)
Definition fe-exec.c:2833
int lo_close(PGconn *conn, int fd)
Definition fe-lobj.c:96
int lo_open(PGconn *conn, Oid lobjId, int mode)
Definition fe-lobj.c:57
void * pg_malloc(size_t size)
Definition fe_memutils.c:47
char * pg_strdup(const char *in)
Definition fe_memutils.c:85
void pg_free(void *ptr)
#define pg_realloc_array(pointer, type, count)
Definition fe_memutils.h:63
#define pg_malloc_array(type, count)
Definition fe_memutils.h:56
#define pg_malloc0_object(type)
Definition fe_memutils.h:51
#define pg_malloc_object(type)
Definition fe_memutils.h:50
#define pg_malloc0_array(type, count)
Definition fe_memutils.h:57
DataDirSyncMethod
Definition file_utils.h:28
@ DATA_DIR_SYNC_METHOD_FSYNC
Definition file_utils.h:29
void filter_init(FilterStateData *fstate, const char *filename, exit_function f_exit)
Definition filter.c:36
void filter_free(FilterStateData *fstate)
Definition filter.c:60
const char * filter_object_type_name(FilterObjectType fot)
Definition filter.c:82
bool filter_read_item(FilterStateData *fstate, char **objname, FilterCommandType *comtype, FilterObjectType *objtype)
Definition filter.c:392
void pg_log_filter_error(FilterStateData *fstate, const char *fmt,...)
Definition filter.c:154
FilterObjectType
Definition filter.h:48
@ FILTER_OBJECT_TYPE_TABLE_DATA_AND_CHILDREN
Definition filter.h:51
@ FILTER_OBJECT_TYPE_SCHEMA
Definition filter.h:57
@ FILTER_OBJECT_TYPE_INDEX
Definition filter.h:56
@ FILTER_OBJECT_TYPE_TRIGGER
Definition filter.h:60
@ FILTER_OBJECT_TYPE_FOREIGN_DATA
Definition filter.h:54
@ FILTER_OBJECT_TYPE_DATABASE
Definition filter.h:52
@ FILTER_OBJECT_TYPE_FUNCTION
Definition filter.h:55
@ FILTER_OBJECT_TYPE_TABLE_DATA
Definition filter.h:50
@ FILTER_OBJECT_TYPE_NONE
Definition filter.h:49
@ FILTER_OBJECT_TYPE_TABLE_AND_CHILDREN
Definition filter.h:59
@ FILTER_OBJECT_TYPE_EXTENSION
Definition filter.h:53
@ FILTER_OBJECT_TYPE_TABLE
Definition filter.h:58
FilterCommandType
Definition filter.h:38
@ FILTER_COMMAND_TYPE_NONE
Definition filter.h:39
@ FILTER_COMMAND_TYPE_EXCLUDE
Definition filter.h:41
@ FILTER_COMMAND_TYPE_INCLUDE
Definition filter.h:40
int getopt_long(int argc, char *const argv[], const char *optstring, const struct option *longopts, int *longindex)
Definition getopt_long.c:60
#define no_argument
Definition getopt_long.h:25
#define required_argument
Definition getopt_long.h:26
#define comment
#define storage
long val
Definition informix.c:689
static struct @174 value
static char * encoding
Definition initdb.c:139
static DataDirSyncMethod sync_method
Definition initdb.c:170
static int pg_cmp_u32(uint32 a, uint32 b)
Definition int.h:719
int j
Definition isn.c:78
int i
Definition isn.c:77
#define PQgetvalue
#define PQgetResult
#define PQgetlength
#define PQclear
#define PQnfields
#define PQresultStatus
#define PQgetisnull
#define PQfname
#define PQntuples
@ PGRES_COMMAND_OK
Definition libpq-fe.h:131
@ PGRES_COPY_OUT
Definition libpq-fe.h:137
@ PGRES_TUPLES_OK
Definition libpq-fe.h:134
#define INV_READ
Definition libpq-fs.h:22
void pg_logging_increase_verbosity(void)
Definition logging.c:185
void pg_logging_init(const char *argv0)
Definition logging.c:83
void pg_logging_set_level(enum pg_log_level new_level)
Definition logging.c:176
#define pg_log_error(...)
Definition logging.h:106
#define pg_log_error_hint(...)
Definition logging.h:112
#define pg_log_info(...)
Definition logging.h:124
@ PG_LOG_WARNING
Definition logging.h:38
#define pg_log_error_detail(...)
Definition logging.h:109
const char * progname
Definition main.c:44
char * pstrdup(const char *in)
Definition mcxt.c:1781
bool option_parse_int(const char *optarg, const char *optname, int min_range, int max_range, int *result)
bool parse_sync_method(const char *optarg, DataDirSyncMethod *sync_method)
#define check_mut_excl_opts(set, opt,...)
Oid oprid(Operator op)
Definition parse_oper.c:240
static AmcheckOptions opts
Definition pg_amcheck.c:112
NameData attname
char attalign
int16 attlen
NameData rolname
Definition pg_authid.h:36
@ SECTION_NONE
Definition pg_backup.h:57
@ SECTION_POST_DATA
Definition pg_backup.h:60
@ SECTION_PRE_DATA
Definition pg_backup.h:58
@ SECTION_DATA
Definition pg_backup.h:59
int DumpId
Definition pg_backup.h:285
int EndLO(Archive *AHX, Oid oid)
void ProcessArchiveRestoreOptions(Archive *AHX)
RestoreOptions * NewRestoreOptions(void)
#define InvalidDumpId
Definition pg_backup.h:287
#define appendStringLiteralAH(buf, str, AH)
Definition pg_backup.h:344
int StartLO(Archive *AHX, Oid oid)
enum _archiveFormat ArchiveFormat
void RestoreArchive(Archive *AHX, bool append_data)
void ConnectDatabaseAhx(Archive *AHX, const ConnParams *cparams, bool isReconnect)
void CloseArchive(Archive *AHX)
Archive * CreateArchive(const char *FileSpec, const ArchiveFormat fmt, const pg_compress_specification compression_spec, bool dosync, ArchiveMode mode, SetupWorkerPtrType setupDumpWorker, DataDirSyncMethod sync_method)
@ archModeWrite
Definition pg_backup.h:51
@ archModeAppend
Definition pg_backup.h:50
@ PREPQUERY_DUMPFUNC
Definition pg_backup.h:72
@ PREPQUERY_DUMPTABLEATTACH
Definition pg_backup.h:75
@ PREPQUERY_DUMPBASETYPE
Definition pg_backup.h:67
@ PREPQUERY_DUMPRANGETYPE
Definition pg_backup.h:74
@ PREPQUERY_DUMPOPR
Definition pg_backup.h:73
@ PREPQUERY_DUMPEXTSTATSOBJSTATS
Definition pg_backup.h:71
@ PREPQUERY_GETATTRIBUTESTATS
Definition pg_backup.h:76
@ PREPQUERY_DUMPDOMAIN
Definition pg_backup.h:69
@ PREPQUERY_DUMPCOMPOSITETYPE
Definition pg_backup.h:68
@ PREPQUERY_DUMPAGG
Definition pg_backup.h:66
@ PREPQUERY_GETCOLUMNACLS
Definition pg_backup.h:77
@ PREPQUERY_GETDOMAINCONSTRAINTS
Definition pg_backup.h:78
@ PREPQUERY_DUMPENUMTYPE
Definition pg_backup.h:70
int archprintf(Archive *AH, const char *fmt,...) pg_attribute_printf(2
void SetArchiveOptions(Archive *AH, DumpOptions *dopt, RestoreOptions *ropt)
#define NUM_PREP_QUERIES
Definition pg_backup.h:81
void archputs(const char *s, Archive *AH)
@ archUnknown
Definition pg_backup.h:41
@ archTar
Definition pg_backup.h:43
@ archCustom
Definition pg_backup.h:42
@ archDirectory
Definition pg_backup.h:45
@ archNull
Definition pg_backup.h:44
void InitDumpOptions(DumpOptions *opts)
void WriteData(Archive *AHX, const void *data, size_t dLen)
int TocIDRequired(ArchiveHandle *AH, DumpId id)
TocEntry * ArchiveEntry(Archive *AHX, CatalogId catalogId, DumpId dumpId, ArchiveOpts *opts)
#define ARCHIVE_OPTS(...)
#define LOBBUFSIZE
#define REQ_STATS
int(* DataDumperPtr)(Archive *AH, const void *userArg)
void ExecuteSqlStatement(Archive *AHX, const char *query)
PGresult * ExecuteSqlQuery(Archive *AHX, const char *query, ExecStatusType status)
PGresult * ExecuteSqlQueryForSingleRow(Archive *fout, const char *query)
void exit_nicely(int code)
void set_dump_section(const char *arg, int *dumpSections)
#define pg_fatal(...)
static char format
static char * label
static PgChecksumMode mode
#define FUNC_MAX_ARGS
const void size_t len
char datlocprovider
Definition pg_database.h:46
NameData datname
Definition pg_database.h:37
bool datistemplate
Definition pg_database.h:49
int32 datconnlimit
Definition pg_database.h:61
static void expand_schema_name_patterns(Archive *fout, SimpleStringList *patterns, SimpleOidList *oids, bool strict_names)
Definition pg_dump.c:1647
static const CatalogId nilCatalogId
Definition pg_dump.c:191
static void dumpEncoding(Archive *AH)
Definition pg_dump.c:3822
void getConstraints(Archive *fout, TableInfo tblinfo[], int numTables)
Definition pg_dump.c:8384
static DumpId dumpACL(Archive *fout, DumpId objDumpId, DumpId altDumpId, const char *type, const char *name, const char *subname, const char *nspname, const char *tag, const char *owner, const DumpableAcl *dacl)
Definition pg_dump.c:16513
static SimpleStringList schema_include_patterns
Definition pg_dump.c:167
static void dumpAttrDef(Archive *fout, const AttrDefInfo *adinfo)
Definition pg_dump.c:18283
ExtensionInfo * getExtensions(Archive *fout, int *numExtensions)
Definition pg_dump.c:6213
static void selectDumpableProcLang(ProcLangInfo *plang, Archive *fout)
Definition pg_dump.c:2199
static void collectBinaryUpgradeClassOids(Archive *fout)
Definition pg_dump.c:5913
static PQExpBuffer createDummyViewAsClause(Archive *fout, const TableInfo *tbinfo)
Definition pg_dump.c:17157
static void dumpUserMappings(Archive *fout, const char *servername, const char *namespace, const char *owner, CatalogId catalogId, DumpId dumpId)
Definition pg_dump.c:16327
static void dumpPublicationNamespace(Archive *fout, const PublicationSchemaInfo *pubsinfo)
Definition pg_dump.c:5012
static void addBoundaryDependencies(DumpableObject **dobjs, int numObjs, DumpableObject *boundaryObjs)
Definition pg_dump.c:20538
void getPublicationNamespaces(Archive *fout)
Definition pg_dump.c:4801
static void dumpSearchPath(Archive *AH)
Definition pg_dump.c:3871
static int ncomments
Definition pg_dump.c:203
static void selectDumpableTable(TableInfo *tbinfo, Archive *fout)
Definition pg_dump.c:2068
static DumpableObject * createBoundaryObjects(void)
Definition pg_dump.c:20514
static char * convertTSFunction(Archive *fout, Oid funcOid)
Definition pg_dump.c:14500
static void dumpDatabase(Archive *fout)
Definition pg_dump.c:3269
static SimpleStringList table_include_patterns
Definition pg_dump.c:172
static void append_depends_on_extension(Archive *fout, PQExpBuffer create, const DumpableObject *dobj, const char *catalog, const char *keyword, const char *objname)
Definition pg_dump.c:5726
static Oid get_next_possible_free_pg_type_oid(Archive *fout, PQExpBuffer upgrade_query)
Definition pg_dump.c:5771
static void dumpNamespace(Archive *fout, const NamespaceInfo *nspinfo)
Definition pg_dump.c:11962
static bool forcePartitionRootLoad(const TableInfo *tbinfo)
Definition pg_dump.c:2829
static void dumpCast(Archive *fout, const CastInfo *cast)
Definition pg_dump.c:13976
static SimpleOidList schema_exclude_oids
Definition pg_dump.c:170
static bool have_extra_float_digits
Definition pg_dump.c:194
static void dumpIndex(Archive *fout, const IndxInfo *indxinfo)
Definition pg_dump.c:18373
void getPartitioningInfo(Archive *fout)
Definition pg_dump.c:7873
static int nbinaryUpgradeClassOids
Definition pg_dump.c:211
static void dumpBaseType(Archive *fout, const TypeInfo *tyinfo)
Definition pg_dump.c:12555
OidOptions
Definition pg_dump.c:145
@ zeroIsError
Definition pg_dump.c:146
@ zeroAsStar
Definition pg_dump.c:147
@ zeroAsNone
Definition pg_dump.c:148
static char * dumpRelationStats_dumper(Archive *fout, const void *userArg, const TocEntry *te)
Definition pg_dump.c:11206
static SimpleOidList extension_include_oids
Definition pg_dump.c:186
static void dumpTSDictionary(Archive *fout, const TSDictInfo *dictinfo)
Definition pg_dump.c:15899
static void dumpAgg(Archive *fout, const AggInfo *agginfo)
Definition pg_dump.c:15475
static int extra_float_digits
Definition pg_dump.c:195
static int SequenceItemCmp(const void *p1, const void *p2)
Definition pg_dump.c:19284
static void dumpRelationStats(Archive *fout, const RelStatsInfo *rsinfo)
Definition pg_dump.c:11479
static void dumpTableComment(Archive *fout, const TableInfo *tbinfo, const char *reltypename)
Definition pg_dump.c:11505
static SimpleStringList extension_include_patterns
Definition pg_dump.c:185
static void selectDumpableNamespace(NamespaceInfo *nsinfo, Archive *fout)
Definition pg_dump.c:1982
InhInfo * getInherits(Archive *fout, int *numInherits)
Definition pg_dump.c:7817
void getForeignDataWrappers(Archive *fout)
Definition pg_dump.c:10515
static void dumpTrigger(Archive *fout, const TriggerInfo *tginfo)
Definition pg_dump.c:19704
static void binary_upgrade_set_type_oids_by_rel(Archive *fout, PQExpBuffer upgrade_buffer, const TableInfo *tbinfo)
Definition pg_dump.c:5882
static void dumpTable(Archive *fout, const TableInfo *tbinfo)
Definition pg_dump.c:16968
static SimpleOidList extension_exclude_oids
Definition pg_dump.c:189
static SimpleStringList table_exclude_patterns
Definition pg_dump.c:175
static PQExpBuffer createViewAsClause(Archive *fout, const TableInfo *tbinfo)
Definition pg_dump.c:17108
static void dumpStatisticsExt(Archive *fout, const StatsExtInfo *statsextinfo)
Definition pg_dump.c:18567
void getPolicies(Archive *fout, TableInfo tblinfo[], int numTables)
Definition pg_dump.c:4223
static void dumpRangeType(Archive *fout, const TypeInfo *tyinfo)
Definition pg_dump.c:12333
void getExtensionMembership(Archive *fout, ExtensionInfo extinfo[], int numExtensions)
Definition pg_dump.c:20087
static void dumpComment(Archive *fout, const char *type, const char *name, const char *namespace, const char *owner, CatalogId catalogId, int subid, DumpId dumpId)
Definition pg_dump.c:11084
static char * getFormattedOperatorName(const char *oproid)
Definition pg_dump.c:14470
static char * format_function_signature(Archive *fout, const FuncInfo *finfo, bool honor_quotes)
Definition pg_dump.c:13525
static int nseclabels
Definition pg_dump.c:207
static pg_compress_algorithm compression_algorithm
Definition pg_dump.c:159
static void dumpStdStrings(Archive *AH)
Definition pg_dump.c:3847
static void dumpConstraint(Archive *fout, const ConstraintInfo *coninfo)
Definition pg_dump.c:18924
static void dumpType(Archive *fout, const TypeInfo *tyinfo)
Definition pg_dump.c:12162
static void dumpTableAttach(Archive *fout, const TableAttachInfo *attachinfo)
Definition pg_dump.c:18215
void getTypes(Archive *fout)
Definition pg_dump.c:6288
static void dumpAccessMethod(Archive *fout, const AccessMethodInfo *aminfo)
Definition pg_dump.c:14522
static void dumpOpr(Archive *fout, const OprInfo *oprinfo)
Definition pg_dump.c:14210
static void selectDumpableStatisticsObject(StatsExtInfo *sobj, Archive *fout)
Definition pg_dump.c:2324
static void selectDumpablePublicationObject(DumpableObject *dobj, Archive *fout)
Definition pg_dump.c:2306
static void dumpSequenceData(Archive *fout, const TableDataInfo *tdinfo)
Definition pg_dump.c:19615
static void dumpFunc(Archive *fout, const FuncInfo *finfo)
Definition pg_dump.c:13554
static void selectDumpableDefaultACL(DefaultACLInfo *dinfo, DumpOptions *dopt)
Definition pg_dump.c:2152
static void BuildArchiveDependencies(Archive *fout)
Definition pg_dump.c:20664
static RelStatsInfo * getRelationStatistics(Archive *fout, DumpableObject *rel, int32 relpages, char *reltuples, int32 relallvisible, int32 relallfrozen, char relkind, char **indAttNames, int nindAttNames)
Definition pg_dump.c:7198
static const char *const SeqTypeNames[]
Definition pg_dump.c:119
void getOwnedSeqs(Archive *fout, TableInfo tblinfo[], int numTables)
Definition pg_dump.c:7752
static void makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo)
Definition pg_dump.c:3026
static int nsequences
Definition pg_dump.c:215
static const char * getAttrName(int attrnum, const TableInfo *tblInfo)
Definition pg_dump.c:18344
static void dumpForeignServer(Archive *fout, const ForeignServerInfo *srvinfo)
Definition pg_dump.c:16227
static RoleNameItem * rolenames
Definition pg_dump.c:198
static void collectRoleNames(Archive *fout)
Definition pg_dump.c:10820
static PGresult * fetchAttributeStats(Archive *fout)
Definition pg_dump.c:11118
static void appendReloptionsArrayAH(PQExpBuffer buffer, const char *reloptions, const char *prefix, Archive *fout)
Definition pg_dump.c:20863
void getOpclasses(Archive *fout)
Definition pg_dump.c:6734
void getForeignServers(Archive *fout)
Definition pg_dump.c:10599
void getFuncs(Archive *fout)
Definition pg_dump.c:7003
static void dumpTableData(Archive *fout, const TableDataInfo *tdinfo)
Definition pg_dump.c:2857
static void prohibit_crossdb_refs(PGconn *conn, const char *dbname, const char *pattern)
Definition pg_dump.c:1907