PostgreSQL Source Code git master
pg_backup_archiver.c
Go to the documentation of this file.
1/*-------------------------------------------------------------------------
2 *
3 * pg_backup_archiver.c
4 *
5 * Private implementation of the archiver routines.
6 *
7 * See the headers to pg_restore for more details.
8 *
9 * Copyright (c) 2000, Philip Warner
10 * Rights are granted to use this software in any way so long
11 * as this notice is not removed.
12 *
13 * The author is not responsible for loss or damages that may
14 * result from its use.
15 *
16 *
17 * IDENTIFICATION
18 * src/bin/pg_dump/pg_backup_archiver.c
19 *
20 *-------------------------------------------------------------------------
21 */
22#include "postgres_fe.h"
23
24#include <ctype.h>
25#include <fcntl.h>
26#include <unistd.h>
27#include <sys/stat.h>
28#include <sys/wait.h>
29#ifdef WIN32
30#include <io.h>
31#endif
32
33#include "catalog/pg_class_d.h"
34#include "catalog/pg_largeobject_metadata_d.h"
35#include "catalog/pg_shdepend_d.h"
36#include "common/string.h"
37#include "compress_io.h"
38#include "dumputils.h"
40#include "lib/binaryheap.h"
41#include "lib/stringinfo.h"
42#include "libpq/libpq-fs.h"
43#include "parallel.h"
44#include "pg_backup_archiver.h"
45#include "pg_backup_db.h"
46#include "pg_backup_utils.h"
47
48#define TEXT_DUMP_HEADER "--\n-- PostgreSQL database dump\n--\n\n"
49#define TEXT_DUMPALL_HEADER "--\n-- PostgreSQL database cluster dump\n--\n\n"
50
51#define TOC_PREFIX_NONE ""
52#define TOC_PREFIX_DATA "Data for "
53#define TOC_PREFIX_STATS "Statistics for "
54
55static ArchiveHandle *_allocAH(const char *FileSpec, const ArchiveFormat fmt,
56 const pg_compress_specification compression_spec,
58 SetupWorkerPtrType setupWorkerPtr,
60static void _getObjectDescription(PQExpBuffer buf, const TocEntry *te);
61static void _printTocEntry(ArchiveHandle *AH, TocEntry *te, const char *pfx);
63static void _doSetSessionAuth(ArchiveHandle *AH, const char *user);
64static void _reconnectToDB(ArchiveHandle *AH, const char *dbname);
65static void _becomeUser(ArchiveHandle *AH, const char *user);
66static void _becomeOwner(ArchiveHandle *AH, TocEntry *te);
67static void _selectOutputSchema(ArchiveHandle *AH, const char *schemaName);
68static void _selectTablespace(ArchiveHandle *AH, const char *tablespace);
69static void _selectTableAccessMethod(ArchiveHandle *AH, const char *tableam);
71 TocEntry *te);
72static void processEncodingEntry(ArchiveHandle *AH, TocEntry *te);
75static int _tocEntryRequired(TocEntry *te, teSection curSection, ArchiveHandle *AH);
77static bool _tocEntryIsACL(TocEntry *te);
81static void buildTocEntryArrays(ArchiveHandle *AH);
82static void _moveBefore(TocEntry *pos, TocEntry *te);
84
85static int RestoringToDB(ArchiveHandle *AH);
86static void dump_lo_buf(ArchiveHandle *AH);
87static void dumpTimestamp(ArchiveHandle *AH, const char *msg, time_t tim);
88static void SetOutput(ArchiveHandle *AH, const char *filename,
89 const pg_compress_specification compression_spec);
91static void RestoreOutput(ArchiveHandle *AH, CompressFileHandle *savedOutput);
92
93static int restore_toc_entry(ArchiveHandle *AH, TocEntry *te, bool is_parallel);
95 TocEntry *pending_list);
97 ParallelState *pstate,
98 TocEntry *pending_list);
100 TocEntry *pending_list);
101static void pending_list_header_init(TocEntry *l);
102static void pending_list_append(TocEntry *l, TocEntry *te);
103static void pending_list_remove(TocEntry *te);
104static int TocEntrySizeCompareQsort(const void *p1, const void *p2);
105static int TocEntrySizeCompareBinaryheap(void *p1, void *p2, void *arg);
106static void move_to_ready_heap(TocEntry *pending_list,
107 binaryheap *ready_heap,
108 RestorePass pass);
109static TocEntry *pop_next_work_item(binaryheap *ready_heap,
110 ParallelState *pstate);
111static void mark_dump_job_done(ArchiveHandle *AH,
112 TocEntry *te,
113 int status,
114 void *callback_data);
116 TocEntry *te,
117 int status,
118 void *callback_data);
119static void fix_dependencies(ArchiveHandle *AH);
120static bool has_lock_conflicts(TocEntry *te1, TocEntry *te2);
123static void reduce_dependencies(ArchiveHandle *AH, TocEntry *te,
124 binaryheap *ready_heap);
125static void mark_create_done(ArchiveHandle *AH, TocEntry *te);
127
128static void StrictNamesCheck(RestoreOptions *ropt);
129
130
131/*
132 * Allocate a new DumpOptions block containing all default values.
133 */
136{
138
140 return opts;
141}
142
143/*
144 * Initialize a DumpOptions struct to all default values
145 */
146void
148{
149 memset(opts, 0, sizeof(DumpOptions));
150 /* set any fields that shouldn't default to zeroes */
151 opts->include_everything = true;
152 opts->cparams.promptPassword = TRI_DEFAULT;
153 opts->dumpSections = DUMP_UNSECTIONED;
154 opts->dumpSchema = true;
155 opts->dumpData = true;
156 opts->dumpStatistics = false;
157}
158
159/*
160 * Create a freshly allocated DumpOptions with options equivalent to those
161 * found in the given RestoreOptions.
162 */
165{
166 DumpOptions *dopt = NewDumpOptions();
167
168 /* this is the inverse of what's at the end of pg_dump.c's main() */
169 dopt->cparams.dbname = ropt->cparams.dbname ? pg_strdup(ropt->cparams.dbname) : NULL;
170 dopt->cparams.pgport = ropt->cparams.pgport ? pg_strdup(ropt->cparams.pgport) : NULL;
171 dopt->cparams.pghost = ropt->cparams.pghost ? pg_strdup(ropt->cparams.pghost) : NULL;
172 dopt->cparams.username = ropt->cparams.username ? pg_strdup(ropt->cparams.username) : NULL;
174 dopt->outputClean = ropt->dropSchema;
175 dopt->dumpData = ropt->dumpData;
176 dopt->dumpSchema = ropt->dumpSchema;
177 dopt->dumpSections = ropt->dumpSections;
178 dopt->dumpStatistics = ropt->dumpStatistics;
179 dopt->if_exists = ropt->if_exists;
180 dopt->column_inserts = ropt->column_inserts;
181 dopt->aclsSkip = ropt->aclsSkip;
182 dopt->outputSuperuser = ropt->superuser;
183 dopt->outputCreateDB = ropt->createDB;
184 dopt->outputNoOwner = ropt->noOwner;
185 dopt->outputNoTableAm = ropt->noTableAm;
186 dopt->outputNoTablespaces = ropt->noTablespace;
188 dopt->use_setsessauth = ropt->use_setsessauth;
190 dopt->dump_inserts = ropt->dump_inserts;
191 dopt->no_comments = ropt->no_comments;
192 dopt->no_policies = ropt->no_policies;
193 dopt->no_publications = ropt->no_publications;
196 dopt->lockWaitTimeout = ropt->lockWaitTimeout;
199 dopt->sequence_data = ropt->sequence_data;
200 dopt->restrict_key = ropt->restrict_key ? pg_strdup(ropt->restrict_key) : NULL;
201
202 return dopt;
203}
204
205
206/*
207 * Wrapper functions.
208 *
209 * The objective is to make writing new formats and dumpers as simple
210 * as possible, if necessary at the expense of extra function calls etc.
211 *
212 */
213
214/*
215 * The dump worker setup needs lots of knowledge of the internals of pg_dump,
216 * so it's defined in pg_dump.c and passed into OpenArchive. The restore worker
217 * setup doesn't need to know anything much, so it's defined here.
218 */
219static void
221{
222 ArchiveHandle *AH = (ArchiveHandle *) AHX;
223
224 AH->ReopenPtr(AH);
225}
226
227
228/* Create a new archive */
229/* Public */
230Archive *
231CreateArchive(const char *FileSpec, const ArchiveFormat fmt,
232 const pg_compress_specification compression_spec,
233 bool dosync, ArchiveMode mode,
236
237{
238 ArchiveHandle *AH = _allocAH(FileSpec, fmt, compression_spec,
240
241 return (Archive *) AH;
242}
243
244/* Open an existing archive */
245/* Public */
246Archive *
247OpenArchive(const char *FileSpec, const ArchiveFormat fmt)
248{
249 ArchiveHandle *AH;
250 pg_compress_specification compression_spec = {0};
251
252 compression_spec.algorithm = PG_COMPRESSION_NONE;
253 AH = _allocAH(FileSpec, fmt, compression_spec, true,
256
257 return (Archive *) AH;
258}
259
260/* Public */
261void
263{
264 ArchiveHandle *AH = (ArchiveHandle *) AHX;
265
266 AH->ClosePtr(AH);
267
268 /* Close the output */
269 errno = 0;
270 if (!EndCompressFileHandle(AH->OF))
271 pg_fatal("could not close output file: %m");
272}
273
274/* Public */
275void
277{
278 /* Caller can omit dump options, in which case we synthesize them */
279 if (dopt == NULL && ropt != NULL)
281
282 /* Save options for later access */
283 AH->dopt = dopt;
284 AH->ropt = ropt;
285}
286
287/* Public */
288void
290{
291 ArchiveHandle *AH = (ArchiveHandle *) AHX;
292 RestoreOptions *ropt = AH->public.ropt;
293 TocEntry *te;
294 teSection curSection;
295
296 /* Decide which TOC entries will be dumped/restored, and mark them */
297 curSection = SECTION_PRE_DATA;
298 for (te = AH->toc->next; te != AH->toc; te = te->next)
299 {
300 /*
301 * When writing an archive, we also take this opportunity to check
302 * that we have generated the entries in a sane order that respects
303 * the section divisions. When reading, don't complain, since buggy
304 * old versions of pg_dump might generate out-of-order archives.
305 */
306 if (AH->mode != archModeRead)
307 {
308 switch (te->section)
309 {
310 case SECTION_NONE:
311 /* ok to be anywhere */
312 break;
313 case SECTION_PRE_DATA:
314 if (curSection != SECTION_PRE_DATA)
315 pg_log_warning("archive items not in correct section order");
316 break;
317 case SECTION_DATA:
318 if (curSection == SECTION_POST_DATA)
319 pg_log_warning("archive items not in correct section order");
320 break;
322 /* ok no matter which section we were in */
323 break;
324 default:
325 pg_fatal("unexpected section code %d",
326 (int) te->section);
327 break;
328 }
329 }
330
331 if (te->section != SECTION_NONE)
332 curSection = te->section;
333
334 te->reqs = _tocEntryRequired(te, curSection, AH);
335 }
336
337 /* Enforce strict names checking */
338 if (ropt->strict_names)
339 StrictNamesCheck(ropt);
340}
341
342/* Public */
343void
345{
346 ArchiveHandle *AH = (ArchiveHandle *) AHX;
347 RestoreOptions *ropt = AH->public.ropt;
348 bool parallel_mode;
349 TocEntry *te;
351
353
354 /*
355 * If we're going to do parallel restore, there are some restrictions.
356 */
357 parallel_mode = (AH->public.numWorkers > 1 && ropt->useDB);
358 if (parallel_mode)
359 {
360 /* We haven't got round to making this work for all archive formats */
361 if (AH->ClonePtr == NULL || AH->ReopenPtr == NULL)
362 pg_fatal("parallel restore is not supported with this archive file format");
363
364 /* Doesn't work if the archive represents dependencies as OIDs */
365 if (AH->version < K_VERS_1_8)
366 pg_fatal("parallel restore is not supported with archives made by pre-8.0 pg_dump");
367
368 /*
369 * It's also not gonna work if we can't reopen the input file, so
370 * let's try that immediately.
371 */
372 AH->ReopenPtr(AH);
373 }
374
375 /*
376 * Make sure we won't need (de)compression we haven't got
377 */
378 if (AH->PrintTocDataPtr != NULL)
379 {
380 for (te = AH->toc->next; te != AH->toc; te = te->next)
381 {
382 if (te->hadDumper && (te->reqs & REQ_DATA) != 0)
383 {
385
386 if (errmsg)
387 pg_fatal("cannot restore from compressed archive (%s)",
388 errmsg);
389 else
390 break;
391 }
392 }
393 }
394
395 /*
396 * Prepare index arrays, so we can assume we have them throughout restore.
397 * It's possible we already did this, though.
398 */
399 if (AH->tocsByDumpId == NULL)
401
402 /*
403 * If we're using a DB connection, then connect it.
404 */
405 if (ropt->useDB)
406 {
407 pg_log_info("connecting to database for restore");
408 if (AH->version < K_VERS_1_3)
409 pg_fatal("direct database connections are not supported in pre-1.3 archives");
410
411 /*
412 * We don't want to guess at whether the dump will successfully
413 * restore; allow the attempt regardless of the version of the restore
414 * target.
415 */
416 AHX->minRemoteVersion = 0;
417 AHX->maxRemoteVersion = 9999999;
418
419 ConnectDatabaseAhx(AHX, &ropt->cparams, false);
420
421 /*
422 * If we're talking to the DB directly, don't send comments since they
423 * obscure SQL when displaying errors
424 */
425 AH->noTocComments = 1;
426 }
427
428 /*
429 * Work out if we have an implied schema-less restore. This can happen if
430 * the dump excluded the schema or the user has used a toc list to exclude
431 * all of the schema data. All we do is look for schema entries - if none
432 * are found then we unset the dumpSchema flag.
433 *
434 * We could scan for wanted TABLE entries, but that is not the same as
435 * data-only. At this stage, it seems unnecessary (6-Mar-2001).
436 */
437 if (ropt->dumpSchema)
438 {
439 bool no_schema_found = true;
440
441 for (te = AH->toc->next; te != AH->toc; te = te->next)
442 {
443 if ((te->reqs & REQ_SCHEMA) != 0)
444 {
445 no_schema_found = false;
446 break;
447 }
448 }
449 if (no_schema_found)
450 {
451 ropt->dumpSchema = false;
452 pg_log_info("implied no-schema restore");
453 }
454 }
455
456 /*
457 * Setup the output file if necessary.
458 */
459 sav = SaveOutput(AH);
461 SetOutput(AH, ropt->filename, ropt->compression_spec);
462
463 ahprintf(AH, "--\n-- PostgreSQL database dump\n--\n\n");
464
465 /*
466 * If generating plain-text output, enter restricted mode to block any
467 * unexpected psql meta-commands. A malicious source might try to inject
468 * a variety of things via bogus responses to queries. While we cannot
469 * prevent such sources from affecting the destination at restore time, we
470 * can block psql meta-commands so that the client machine that runs psql
471 * with the dump output remains unaffected.
472 */
473 if (ropt->restrict_key)
474 ahprintf(AH, "\\restrict %s\n\n", ropt->restrict_key);
475
476 if (AH->archiveRemoteVersion)
477 ahprintf(AH, "-- Dumped from database version %s\n",
479 if (AH->archiveDumpVersion)
480 ahprintf(AH, "-- Dumped by pg_dump version %s\n",
482
483 ahprintf(AH, "\n");
484
485 if (AH->public.verbose)
486 dumpTimestamp(AH, "Started on", AH->createDate);
487
488 if (ropt->single_txn)
489 {
490 if (AH->connection)
491 StartTransaction(AHX);
492 else
493 ahprintf(AH, "BEGIN;\n\n");
494 }
495
496 /*
497 * Establish important parameter values right away.
498 */
500
502
503 /*
504 * Drop the items at the start, in reverse order
505 */
506 if (ropt->dropSchema)
507 {
508 for (te = AH->toc->prev; te != AH->toc; te = te->prev)
509 {
510 AH->currentTE = te;
511
512 /*
513 * In createDB mode, issue a DROP *only* for the database as a
514 * whole. Issuing drops against anything else would be wrong,
515 * because at this point we're connected to the wrong database.
516 * (The DATABASE PROPERTIES entry, if any, should be treated like
517 * the DATABASE entry.)
518 */
519 if (ropt->createDB)
520 {
521 if (strcmp(te->desc, "DATABASE") != 0 &&
522 strcmp(te->desc, "DATABASE PROPERTIES") != 0)
523 continue;
524 }
525
526 /* Otherwise, drop anything that's selected and has a dropStmt */
527 if (((te->reqs & (REQ_SCHEMA | REQ_DATA)) != 0) && te->dropStmt)
528 {
529 bool not_allowed_in_txn = false;
530
531 pg_log_info("dropping %s %s", te->desc, te->tag);
532
533 /*
534 * In --transaction-size mode, we have to temporarily exit our
535 * transaction block to drop objects that can't be dropped
536 * within a transaction.
537 */
538 if (ropt->txn_size > 0)
539 {
540 if (strcmp(te->desc, "DATABASE") == 0 ||
541 strcmp(te->desc, "DATABASE PROPERTIES") == 0)
542 {
543 not_allowed_in_txn = true;
544 if (AH->connection)
546 else
547 ahprintf(AH, "COMMIT;\n");
548 }
549 }
550
551 /* Select owner and schema as necessary */
552 _becomeOwner(AH, te);
553 _selectOutputSchema(AH, te->namespace);
554
555 /*
556 * Now emit the DROP command, if the object has one. Note we
557 * don't necessarily emit it verbatim; at this point we add an
558 * appropriate IF EXISTS clause, if the user requested it.
559 */
560 if (strcmp(te->desc, "BLOB METADATA") == 0)
561 {
562 /* We must generate the per-blob commands */
563 if (ropt->if_exists)
564 IssueCommandPerBlob(AH, te,
565 "SELECT pg_catalog.lo_unlink(oid) "
566 "FROM pg_catalog.pg_largeobject_metadata "
567 "WHERE oid = '", "'");
568 else
569 IssueCommandPerBlob(AH, te,
570 "SELECT pg_catalog.lo_unlink('",
571 "')");
572 }
573 else if (*te->dropStmt != '\0')
574 {
575 if (!ropt->if_exists ||
576 strncmp(te->dropStmt, "--", 2) == 0)
577 {
578 /*
579 * Without --if-exists, or if it's just a comment (as
580 * happens for the public schema), print the dropStmt
581 * as-is.
582 */
583 ahprintf(AH, "%s", te->dropStmt);
584 }
585 else
586 {
587 /*
588 * Inject an appropriate spelling of "if exists". For
589 * old-style large objects, we have a routine that
590 * knows how to do it, without depending on
591 * te->dropStmt; use that. For other objects we need
592 * to parse the command.
593 */
594 if (strcmp(te->desc, "BLOB") == 0)
595 {
597 }
598 else
599 {
600 char *dropStmt = pg_strdup(te->dropStmt);
601 char *dropStmtOrig = dropStmt;
603
604 /*
605 * Need to inject IF EXISTS clause after ALTER
606 * TABLE part in ALTER TABLE .. DROP statement
607 */
608 if (strncmp(dropStmt, "ALTER TABLE", 11) == 0)
609 {
611 "ALTER TABLE IF EXISTS");
612 dropStmt = dropStmt + 11;
613 }
614
615 /*
616 * ALTER TABLE..ALTER COLUMN..DROP DEFAULT does
617 * not support the IF EXISTS clause, and therefore
618 * we simply emit the original command for DEFAULT
619 * objects (modulo the adjustment made above).
620 *
621 * Likewise, don't mess with DATABASE PROPERTIES.
622 *
623 * If we used CREATE OR REPLACE VIEW as a means of
624 * quasi-dropping an ON SELECT rule, that should
625 * be emitted unchanged as well.
626 *
627 * For other object types, we need to extract the
628 * first part of the DROP which includes the
629 * object type. Most of the time this matches
630 * te->desc, so search for that; however for the
631 * different kinds of CONSTRAINTs, we know to
632 * search for hardcoded "DROP CONSTRAINT" instead.
633 */
634 if (strcmp(te->desc, "DEFAULT") == 0 ||
635 strcmp(te->desc, "DATABASE PROPERTIES") == 0 ||
636 strncmp(dropStmt, "CREATE OR REPLACE VIEW", 22) == 0)
637 appendPQExpBufferStr(ftStmt, dropStmt);
638 else
639 {
640 char buffer[40];
641 char *mark;
642
643 if (strcmp(te->desc, "CONSTRAINT") == 0 ||
644 strcmp(te->desc, "CHECK CONSTRAINT") == 0 ||
645 strcmp(te->desc, "FK CONSTRAINT") == 0)
646 strcpy(buffer, "DROP CONSTRAINT");
647 else
648 snprintf(buffer, sizeof(buffer), "DROP %s",
649 te->desc);
650
651 mark = strstr(dropStmt, buffer);
652
653 if (mark)
654 {
655 *mark = '\0';
656 appendPQExpBuffer(ftStmt, "%s%s IF EXISTS%s",
657 dropStmt, buffer,
658 mark + strlen(buffer));
659 }
660 else
661 {
662 /* complain and emit unmodified command */
663 pg_log_warning("could not find where to insert IF EXISTS in statement \"%s\"",
664 dropStmtOrig);
665 appendPQExpBufferStr(ftStmt, dropStmt);
666 }
667 }
668
669 ahprintf(AH, "%s", ftStmt->data);
670
671 destroyPQExpBuffer(ftStmt);
672 pg_free(dropStmtOrig);
673 }
674 }
675 }
676
677 /*
678 * In --transaction-size mode, re-establish the transaction
679 * block if needed; otherwise, commit after every N drops.
680 */
681 if (ropt->txn_size > 0)
682 {
683 if (not_allowed_in_txn)
684 {
685 if (AH->connection)
686 StartTransaction(AHX);
687 else
688 ahprintf(AH, "BEGIN;\n");
689 AH->txnCount = 0;
690 }
691 else if (++AH->txnCount >= ropt->txn_size)
692 {
693 if (AH->connection)
694 {
696 StartTransaction(AHX);
697 }
698 else
699 ahprintf(AH, "COMMIT;\nBEGIN;\n");
700 AH->txnCount = 0;
701 }
702 }
703 }
704 }
705
706 /*
707 * _selectOutputSchema may have set currSchema to reflect the effect
708 * of a "SET search_path" command it emitted. However, by now we may
709 * have dropped that schema; or it might not have existed in the first
710 * place. In either case the effective value of search_path will not
711 * be what we think. Forcibly reset currSchema so that we will
712 * re-establish the search_path setting when needed (after creating
713 * the schema).
714 *
715 * If we treated users as pg_dump'able objects then we'd need to reset
716 * currUser here too.
717 */
718 free(AH->currSchema);
719 AH->currSchema = NULL;
720 }
721
722 if (parallel_mode)
723 {
724 /*
725 * In parallel mode, turn control over to the parallel-restore logic.
726 */
727 ParallelState *pstate;
728 TocEntry pending_list;
729
730 /* The archive format module may need some setup for this */
733
734 pending_list_header_init(&pending_list);
735
736 /* This runs PRE_DATA items and then disconnects from the database */
737 restore_toc_entries_prefork(AH, &pending_list);
738 Assert(AH->connection == NULL);
739
740 /* ParallelBackupStart() will actually fork the processes */
741 pstate = ParallelBackupStart(AH);
742 restore_toc_entries_parallel(AH, pstate, &pending_list);
743 ParallelBackupEnd(AH, pstate);
744
745 /* reconnect the leader and see if we missed something */
746 restore_toc_entries_postfork(AH, &pending_list);
747 Assert(AH->connection != NULL);
748 }
749 else
750 {
751 /*
752 * In serial mode, process everything in three phases: normal items,
753 * then ACLs, then post-ACL items. We might be able to skip one or
754 * both extra phases in some cases, eg data-only restores.
755 */
756 bool haveACL = false;
757 bool havePostACL = false;
758
759 for (te = AH->toc->next; te != AH->toc; te = te->next)
760 {
761 if ((te->reqs & (REQ_SCHEMA | REQ_DATA | REQ_STATS)) == 0)
762 continue; /* ignore if not to be dumped at all */
763
764 switch (_tocEntryRestorePass(te))
765 {
767 (void) restore_toc_entry(AH, te, false);
768 break;
769 case RESTORE_PASS_ACL:
770 haveACL = true;
771 break;
773 havePostACL = true;
774 break;
775 }
776 }
777
778 if (haveACL)
779 {
780 for (te = AH->toc->next; te != AH->toc; te = te->next)
781 {
782 if ((te->reqs & (REQ_SCHEMA | REQ_DATA | REQ_STATS)) != 0 &&
784 (void) restore_toc_entry(AH, te, false);
785 }
786 }
787
788 if (havePostACL)
789 {
790 for (te = AH->toc->next; te != AH->toc; te = te->next)
791 {
792 if ((te->reqs & (REQ_SCHEMA | REQ_DATA | REQ_STATS)) != 0 &&
794 (void) restore_toc_entry(AH, te, false);
795 }
796 }
797 }
798
799 /*
800 * Close out any persistent transaction we may have. While these two
801 * cases are started in different places, we can end both cases here.
802 */
803 if (ropt->single_txn || ropt->txn_size > 0)
804 {
805 if (AH->connection)
807 else
808 ahprintf(AH, "COMMIT;\n\n");
809 }
810
811 if (AH->public.verbose)
812 dumpTimestamp(AH, "Completed on", time(NULL));
813
814 ahprintf(AH, "--\n-- PostgreSQL database dump complete\n--\n\n");
815
816 /*
817 * If generating plain-text output, exit restricted mode at the very end
818 * of the script. This is not pro forma; in particular, pg_dumpall
819 * requires this when transitioning from one database to another.
820 */
821 if (ropt->restrict_key)
822 ahprintf(AH, "\\unrestrict %s\n\n", ropt->restrict_key);
823
824 /*
825 * Clean up & we're done.
826 */
828
830 RestoreOutput(AH, sav);
831
832 if (ropt->useDB)
834}
835
836/*
837 * Restore a single TOC item. Used in both parallel and non-parallel restore;
838 * is_parallel is true if we are in a worker child process.
839 *
840 * Returns 0 normally, but WORKER_CREATE_DONE or WORKER_INHIBIT_DATA if
841 * the parallel parent has to make the corresponding status update.
842 */
843static int
844restore_toc_entry(ArchiveHandle *AH, TocEntry *te, bool is_parallel)
845{
846 RestoreOptions *ropt = AH->public.ropt;
847 int status = WORKER_OK;
848 int reqs;
849 bool defnDumped;
850
851 AH->currentTE = te;
852
853 /* Dump any relevant dump warnings to stderr */
854 if (!ropt->suppressDumpWarnings && strcmp(te->desc, "WARNING") == 0)
855 {
856 if (ropt->dumpSchema && te->defn != NULL && strlen(te->defn) != 0)
857 pg_log_warning("warning from original dump file: %s", te->defn);
858 else if (te->copyStmt != NULL && strlen(te->copyStmt) != 0)
859 pg_log_warning("warning from original dump file: %s", te->copyStmt);
860 }
861
862 /* Work out what, if anything, we want from this entry */
863 reqs = te->reqs;
864
865 defnDumped = false;
866
867 /*
868 * If it has a schema component that we want, then process that
869 */
870 if ((reqs & REQ_SCHEMA) != 0)
871 {
872 bool object_is_db = false;
873
874 /*
875 * In --transaction-size mode, must exit our transaction block to
876 * create a database or set its properties.
877 */
878 if (strcmp(te->desc, "DATABASE") == 0 ||
879 strcmp(te->desc, "DATABASE PROPERTIES") == 0)
880 {
881 object_is_db = true;
882 if (ropt->txn_size > 0)
883 {
884 if (AH->connection)
886 else
887 ahprintf(AH, "COMMIT;\n\n");
888 }
889 }
890
891 /* Show namespace in log message if available */
892 if (te->namespace)
893 pg_log_info("creating %s \"%s.%s\"",
894 te->desc, te->namespace, te->tag);
895 else
896 pg_log_info("creating %s \"%s\"",
897 te->desc, te->tag);
898
900 defnDumped = true;
901
902 if (strcmp(te->desc, "TABLE") == 0)
903 {
904 if (AH->lastErrorTE == te)
905 {
906 /*
907 * We failed to create the table. If
908 * --no-data-for-failed-tables was given, mark the
909 * corresponding TABLE DATA to be ignored.
910 *
911 * In the parallel case this must be done in the parent, so we
912 * just set the return value.
913 */
914 if (ropt->noDataForFailedTables)
915 {
916 if (is_parallel)
917 status = WORKER_INHIBIT_DATA;
918 else
920 }
921 }
922 else
923 {
924 /*
925 * We created the table successfully. Mark the corresponding
926 * TABLE DATA for possible truncation.
927 *
928 * In the parallel case this must be done in the parent, so we
929 * just set the return value.
930 */
931 if (is_parallel)
932 status = WORKER_CREATE_DONE;
933 else
934 mark_create_done(AH, te);
935 }
936 }
937
938 /*
939 * If we created a DB, connect to it. Also, if we changed DB
940 * properties, reconnect to ensure that relevant GUC settings are
941 * applied to our session. (That also restarts the transaction block
942 * in --transaction-size mode.)
943 */
944 if (object_is_db)
945 {
946 pg_log_info("connecting to new database \"%s\"", te->tag);
947 _reconnectToDB(AH, te->tag);
948 }
949 }
950
951 /*
952 * If it has a data component that we want, then process that
953 */
954 if ((reqs & REQ_DATA) != 0)
955 {
956 /*
957 * hadDumper will be set if there is genuine data component for this
958 * node. Otherwise, we need to check the defn field for statements
959 * that need to be executed in data-only restores.
960 */
961 if (te->hadDumper)
962 {
963 /*
964 * If we can output the data, then restore it.
965 */
966 if (AH->PrintTocDataPtr != NULL)
967 {
969
970 if (strcmp(te->desc, "BLOBS") == 0 ||
971 strcmp(te->desc, "BLOB COMMENTS") == 0)
972 {
973 pg_log_info("processing %s", te->desc);
974
975 _selectOutputSchema(AH, "pg_catalog");
976
977 /* Send BLOB COMMENTS data to ExecuteSimpleCommands() */
978 if (strcmp(te->desc, "BLOB COMMENTS") == 0)
980
981 AH->PrintTocDataPtr(AH, te);
982
984 }
985 else
986 {
987 bool use_truncate;
988
990
991 /* Select owner and schema as necessary */
992 _becomeOwner(AH, te);
993 _selectOutputSchema(AH, te->namespace);
994
995 pg_log_info("processing data for table \"%s.%s\"",
996 te->namespace, te->tag);
997
998 /*
999 * In parallel restore, if we created the table earlier in
1000 * this run (so that we know it is empty) and we are not
1001 * restoring a load-via-partition-root data item then we
1002 * wrap the COPY in a transaction and precede it with a
1003 * TRUNCATE. If wal_level is set to minimal this prevents
1004 * WAL-logging the COPY. This obtains a speedup similar
1005 * to that from using single_txn mode in non-parallel
1006 * restores.
1007 *
1008 * We mustn't do this for load-via-partition-root cases
1009 * because some data might get moved across partition
1010 * boundaries, risking deadlock and/or loss of previously
1011 * loaded data. (We assume that all partitions of a
1012 * partitioned table will be treated the same way.)
1013 */
1014 use_truncate = is_parallel && te->created &&
1016
1017 if (use_truncate)
1018 {
1019 /*
1020 * Parallel restore is always talking directly to a
1021 * server, so no need to see if we should issue BEGIN.
1022 */
1024
1025 /*
1026 * Issue TRUNCATE with ONLY so that child tables are
1027 * not wiped.
1028 */
1029 ahprintf(AH, "TRUNCATE TABLE ONLY %s;\n\n",
1030 fmtQualifiedId(te->namespace, te->tag));
1031 }
1032
1033 /*
1034 * If we have a copy statement, use it.
1035 */
1036 if (te->copyStmt && strlen(te->copyStmt) > 0)
1037 {
1038 ahprintf(AH, "%s", te->copyStmt);
1040 }
1041 else
1043
1044 AH->PrintTocDataPtr(AH, te);
1045
1046 /*
1047 * Terminate COPY if needed.
1048 */
1049 if (AH->outputKind == OUTPUT_COPYDATA &&
1050 RestoringToDB(AH))
1051 EndDBCopyMode(&AH->public, te->tag);
1053
1054 /* close out the transaction started above */
1055 if (use_truncate)
1057
1059 }
1060 }
1061 }
1062 else if (!defnDumped)
1063 {
1064 /* If we haven't already dumped the defn part, do so now */
1065 pg_log_info("executing %s %s", te->desc, te->tag);
1067 }
1068 }
1069
1070 /*
1071 * If it has a statistics component that we want, then process that
1072 */
1073 if ((reqs & REQ_STATS) != 0)
1075
1076 /*
1077 * If we emitted anything for this TOC entry, that counts as one action
1078 * against the transaction-size limit. Commit if it's time to.
1079 */
1080 if ((reqs & (REQ_SCHEMA | REQ_DATA | REQ_STATS)) != 0 && ropt->txn_size > 0)
1081 {
1082 if (++AH->txnCount >= ropt->txn_size)
1083 {
1084 if (AH->connection)
1085 {
1088 }
1089 else
1090 ahprintf(AH, "COMMIT;\nBEGIN;\n\n");
1091 AH->txnCount = 0;
1092 }
1093 }
1094
1095 if (AH->public.n_errors > 0 && status == WORKER_OK)
1096 status = WORKER_IGNORED_ERRORS;
1097
1098 return status;
1099}
1100
1101/*
1102 * Allocate a new RestoreOptions block.
1103 * This is mainly so we can initialize it, but also for future expansion,
1104 */
1107{
1109
1111
1112 /* set any fields that shouldn't default to zeroes */
1113 opts->format = archUnknown;
1114 opts->cparams.promptPassword = TRI_DEFAULT;
1115 opts->dumpSections = DUMP_UNSECTIONED;
1116 opts->compression_spec.algorithm = PG_COMPRESSION_NONE;
1117 opts->compression_spec.level = 0;
1118 opts->dumpSchema = true;
1119 opts->dumpData = true;
1120 opts->dumpStatistics = true;
1121
1122 return opts;
1123}
1124
1125static void
1127{
1128 RestoreOptions *ropt = AH->public.ropt;
1129
1130 /* This hack is only needed in a data-only restore */
1131 if (ropt->dumpSchema || !ropt->disable_triggers)
1132 return;
1133
1134 pg_log_info("disabling triggers for %s", te->tag);
1135
1136 /*
1137 * Become superuser if possible, since they are the only ones who can
1138 * disable constraint triggers. If -S was not given, assume the initial
1139 * user identity is a superuser. (XXX would it be better to become the
1140 * table owner?)
1141 */
1142 _becomeUser(AH, ropt->superuser);
1143
1144 /*
1145 * Disable them.
1146 */
1147 ahprintf(AH, "ALTER TABLE %s DISABLE TRIGGER ALL;\n\n",
1148 fmtQualifiedId(te->namespace, te->tag));
1149}
1150
1151static void
1153{
1154 RestoreOptions *ropt = AH->public.ropt;
1155
1156 /* This hack is only needed in a data-only restore */
1157 if (ropt->dumpSchema || !ropt->disable_triggers)
1158 return;
1159
1160 pg_log_info("enabling triggers for %s", te->tag);
1161
1162 /*
1163 * Become superuser if possible, since they are the only ones who can
1164 * disable constraint triggers. If -S was not given, assume the initial
1165 * user identity is a superuser. (XXX would it be better to become the
1166 * table owner?)
1167 */
1168 _becomeUser(AH, ropt->superuser);
1169
1170 /*
1171 * Enable them.
1172 */
1173 ahprintf(AH, "ALTER TABLE %s ENABLE TRIGGER ALL;\n\n",
1174 fmtQualifiedId(te->namespace, te->tag));
1175}
1176
1177/*
1178 * Detect whether a TABLE DATA TOC item is performing "load via partition
1179 * root", that is the target table is an ancestor partition rather than the
1180 * table the TOC item is nominally for.
1181 *
1182 * In newer archive files this can be detected by checking for a special
1183 * comment placed in te->defn. In older files we have to fall back to seeing
1184 * if the COPY statement targets the named table or some other one. This
1185 * will not work for data dumped as INSERT commands, so we could give a false
1186 * negative in that case; fortunately, that's a rarely-used option.
1187 */
1188static bool
1190{
1191 if (te->defn &&
1192 strncmp(te->defn, "-- load via partition root ", 27) == 0)
1193 return true;
1194 if (te->copyStmt && *te->copyStmt)
1195 {
1196 PQExpBuffer copyStmt = createPQExpBuffer();
1197 bool result;
1198
1199 /*
1200 * Build the initial part of the COPY as it would appear if the
1201 * nominal target table is the actual target. If we see anything
1202 * else, it must be a load-via-partition-root case.
1203 */
1204 appendPQExpBuffer(copyStmt, "COPY %s ",
1205 fmtQualifiedId(te->namespace, te->tag));
1206 result = strncmp(te->copyStmt, copyStmt->data, copyStmt->len) != 0;
1207 destroyPQExpBuffer(copyStmt);
1208 return result;
1209 }
1210 /* Assume it's not load-via-partition-root */
1211 return false;
1212}
1213
1214/*
1215 * This is a routine that is part of the dumper interface, hence the 'Archive*' parameter.
1216 */
1217
1218/* Public */
1219void
1220WriteData(Archive *AHX, const void *data, size_t dLen)
1221{
1222 ArchiveHandle *AH = (ArchiveHandle *) AHX;
1223
1224 if (!AH->currToc)
1225 pg_fatal("internal error -- WriteData cannot be called outside the context of a DataDumper routine");
1226
1227 AH->WriteDataPtr(AH, data, dLen);
1228}
1229
1230/*
1231 * Create a new TOC entry. The TOC was designed as a TOC, but is now the
1232 * repository for all metadata. But the name has stuck.
1233 *
1234 * The new entry is added to the Archive's TOC list. Most callers can ignore
1235 * the result value because nothing else need be done, but a few want to
1236 * manipulate the TOC entry further.
1237 */
1238
1239/* Public */
1240TocEntry *
1241ArchiveEntry(Archive *AHX, CatalogId catalogId, DumpId dumpId,
1243{
1244 ArchiveHandle *AH = (ArchiveHandle *) AHX;
1245 TocEntry *newToc;
1246
1247 newToc = (TocEntry *) pg_malloc0(sizeof(TocEntry));
1248
1249 AH->tocCount++;
1250 if (dumpId > AH->maxDumpId)
1251 AH->maxDumpId = dumpId;
1252
1253 newToc->prev = AH->toc->prev;
1254 newToc->next = AH->toc;
1255 AH->toc->prev->next = newToc;
1256 AH->toc->prev = newToc;
1257
1258 newToc->catalogId = catalogId;
1259 newToc->dumpId = dumpId;
1260 newToc->section = opts->section;
1261
1262 newToc->tag = pg_strdup(opts->tag);
1263 newToc->namespace = opts->namespace ? pg_strdup(opts->namespace) : NULL;
1264 newToc->tablespace = opts->tablespace ? pg_strdup(opts->tablespace) : NULL;
1265 newToc->tableam = opts->tableam ? pg_strdup(opts->tableam) : NULL;
1266 newToc->relkind = opts->relkind;
1267 newToc->owner = opts->owner ? pg_strdup(opts->owner) : NULL;
1268 newToc->desc = pg_strdup(opts->description);
1269 newToc->defn = opts->createStmt ? pg_strdup(opts->createStmt) : NULL;
1270 newToc->dropStmt = opts->dropStmt ? pg_strdup(opts->dropStmt) : NULL;
1271 newToc->copyStmt = opts->copyStmt ? pg_strdup(opts->copyStmt) : NULL;
1272
1273 if (opts->nDeps > 0)
1274 {
1275 newToc->dependencies = (DumpId *) pg_malloc(opts->nDeps * sizeof(DumpId));
1276 memcpy(newToc->dependencies, opts->deps, opts->nDeps * sizeof(DumpId));
1277 newToc->nDeps = opts->nDeps;
1278 }
1279 else
1280 {
1281 newToc->dependencies = NULL;
1282 newToc->nDeps = 0;
1283 }
1284
1285 newToc->dataDumper = opts->dumpFn;
1286 newToc->dataDumperArg = opts->dumpArg;
1287 newToc->hadDumper = opts->dumpFn ? true : false;
1288
1289 newToc->defnDumper = opts->defnFn;
1290 newToc->defnDumperArg = opts->defnArg;
1291
1292 newToc->formatData = NULL;
1293 newToc->dataLength = 0;
1294
1295 if (AH->ArchiveEntryPtr != NULL)
1296 AH->ArchiveEntryPtr(AH, newToc);
1297
1298 return newToc;
1299}
1300
1301/* Public */
1302void
1304{
1305 ArchiveHandle *AH = (ArchiveHandle *) AHX;
1306 RestoreOptions *ropt = AH->public.ropt;
1307 TocEntry *te;
1308 pg_compress_specification out_compression_spec = {0};
1309 teSection curSection;
1310 CompressFileHandle *sav;
1311 const char *fmtName;
1312 char stamp_str[64];
1313
1314 /* TOC is always uncompressed */
1315 out_compression_spec.algorithm = PG_COMPRESSION_NONE;
1316
1317 sav = SaveOutput(AH);
1318 if (ropt->filename)
1319 SetOutput(AH, ropt->filename, out_compression_spec);
1320
1321 if (strftime(stamp_str, sizeof(stamp_str), PGDUMP_STRFTIME_FMT,
1322 localtime(&AH->createDate)) == 0)
1323 strcpy(stamp_str, "[unknown]");
1324
1325 ahprintf(AH, ";\n; Archive created at %s\n", stamp_str);
1326 ahprintf(AH, "; dbname: %s\n; TOC Entries: %d\n; Compression: %s\n",
1327 sanitize_line(AH->archdbname, false),
1328 AH->tocCount,
1330
1331 switch (AH->format)
1332 {
1333 case archCustom:
1334 fmtName = "CUSTOM";
1335 break;
1336 case archDirectory:
1337 fmtName = "DIRECTORY";
1338 break;
1339 case archTar:
1340 fmtName = "TAR";
1341 break;
1342 default:
1343 fmtName = "UNKNOWN";
1344 }
1345
1346 ahprintf(AH, "; Dump Version: %d.%d-%d\n",
1348 ahprintf(AH, "; Format: %s\n", fmtName);
1349 ahprintf(AH, "; Integer: %zu bytes\n", AH->intSize);
1350 ahprintf(AH, "; Offset: %zu bytes\n", AH->offSize);
1351 if (AH->archiveRemoteVersion)
1352 ahprintf(AH, "; Dumped from database version: %s\n",
1354 if (AH->archiveDumpVersion)
1355 ahprintf(AH, "; Dumped by pg_dump version: %s\n",
1356 AH->archiveDumpVersion);
1357
1358 ahprintf(AH, ";\n;\n; Selected TOC Entries:\n;\n");
1359
1360 curSection = SECTION_PRE_DATA;
1361 for (te = AH->toc->next; te != AH->toc; te = te->next)
1362 {
1363 /* This bit must match ProcessArchiveRestoreOptions' marking logic */
1364 if (te->section != SECTION_NONE)
1365 curSection = te->section;
1366 te->reqs = _tocEntryRequired(te, curSection, AH);
1367 /* Now, should we print it? */
1368 if (ropt->verbose ||
1369 (te->reqs & (REQ_SCHEMA | REQ_DATA | REQ_STATS)) != 0)
1370 {
1371 char *sanitized_name;
1372 char *sanitized_schema;
1373 char *sanitized_owner;
1374
1375 /*
1376 */
1377 sanitized_name = sanitize_line(te->tag, false);
1378 sanitized_schema = sanitize_line(te->namespace, true);
1379 sanitized_owner = sanitize_line(te->owner, false);
1380
1381 ahprintf(AH, "%d; %u %u %s %s %s %s\n", te->dumpId,
1383 te->desc, sanitized_schema, sanitized_name,
1384 sanitized_owner);
1385
1386 free(sanitized_name);
1387 free(sanitized_schema);
1388 free(sanitized_owner);
1389 }
1390 if (ropt->verbose && te->nDeps > 0)
1391 {
1392 int i;
1393
1394 ahprintf(AH, ";\tdepends on:");
1395 for (i = 0; i < te->nDeps; i++)
1396 ahprintf(AH, " %d", te->dependencies[i]);
1397 ahprintf(AH, "\n");
1398 }
1399 }
1400
1401 /* Enforce strict names checking */
1402 if (ropt->strict_names)
1403 StrictNamesCheck(ropt);
1404
1405 if (ropt->filename)
1406 RestoreOutput(AH, sav);
1407}
1408
1409/***********
1410 * Large Object Archival
1411 ***********/
1412
1413/* Called by a dumper to signal start of a LO */
1414int
1416{
1417 ArchiveHandle *AH = (ArchiveHandle *) AHX;
1418
1419 if (!AH->StartLOPtr)
1420 pg_fatal("large-object output not supported in chosen format");
1421
1422 AH->StartLOPtr(AH, AH->currToc, oid);
1423
1424 return 1;
1425}
1426
1427/* Called by a dumper to signal end of a LO */
1428int
1430{
1431 ArchiveHandle *AH = (ArchiveHandle *) AHX;
1432
1433 if (AH->EndLOPtr)
1434 AH->EndLOPtr(AH, AH->currToc, oid);
1435
1436 return 1;
1437}
1438
1439/**********
1440 * Large Object Restoration
1441 **********/
1442
1443/*
1444 * Called by a format handler before a group of LOs is restored
1445 */
1446void
1448{
1449 RestoreOptions *ropt = AH->public.ropt;
1450
1451 /*
1452 * LOs must be restored within a transaction block, since we need the LO
1453 * handle to stay open while we write it. Establish a transaction unless
1454 * there's one being used globally.
1455 */
1456 if (!(ropt->single_txn || ropt->txn_size > 0))
1457 {
1458 if (AH->connection)
1460 else
1461 ahprintf(AH, "BEGIN;\n\n");
1462 }
1463
1464 AH->loCount = 0;
1465}
1466
1467/*
1468 * Called by a format handler after a group of LOs is restored
1469 */
1470void
1472{
1473 RestoreOptions *ropt = AH->public.ropt;
1474
1475 if (!(ropt->single_txn || ropt->txn_size > 0))
1476 {
1477 if (AH->connection)
1479 else
1480 ahprintf(AH, "COMMIT;\n\n");
1481 }
1482
1483 pg_log_info(ngettext("restored %d large object",
1484 "restored %d large objects",
1485 AH->loCount),
1486 AH->loCount);
1487}
1488
1489
1490/*
1491 * Called by a format handler to initiate restoration of a LO
1492 */
1493void
1495{
1496 bool old_lo_style = (AH->version < K_VERS_1_12);
1497 Oid loOid;
1498
1499 AH->loCount++;
1500
1501 /* Initialize the LO Buffer */
1502 if (AH->lo_buf == NULL)
1503 {
1504 /* First time through (in this process) so allocate the buffer */
1505 AH->lo_buf_size = LOBBUFSIZE;
1507 }
1508 AH->lo_buf_used = 0;
1509
1510 pg_log_info("restoring large object with OID %u", oid);
1511
1512 /* With an old archive we must do drop and create logic here */
1513 if (old_lo_style && drop)
1514 DropLOIfExists(AH, oid);
1515
1516 if (AH->connection)
1517 {
1518 if (old_lo_style)
1519 {
1520 loOid = lo_create(AH->connection, oid);
1521 if (loOid == 0 || loOid != oid)
1522 pg_fatal("could not create large object %u: %s",
1523 oid, PQerrorMessage(AH->connection));
1524 }
1525 AH->loFd = lo_open(AH->connection, oid, INV_WRITE);
1526 if (AH->loFd == -1)
1527 pg_fatal("could not open large object %u: %s",
1528 oid, PQerrorMessage(AH->connection));
1529 }
1530 else
1531 {
1532 if (old_lo_style)
1533 ahprintf(AH, "SELECT pg_catalog.lo_open(pg_catalog.lo_create('%u'), %d);\n",
1534 oid, INV_WRITE);
1535 else
1536 ahprintf(AH, "SELECT pg_catalog.lo_open('%u', %d);\n",
1537 oid, INV_WRITE);
1538 }
1539
1540 AH->writingLO = true;
1541}
1542
1543void
1545{
1546 if (AH->lo_buf_used > 0)
1547 {
1548 /* Write remaining bytes from the LO buffer */
1549 dump_lo_buf(AH);
1550 }
1551
1552 AH->writingLO = false;
1553
1554 if (AH->connection)
1555 {
1556 lo_close(AH->connection, AH->loFd);
1557 AH->loFd = -1;
1558 }
1559 else
1560 {
1561 ahprintf(AH, "SELECT pg_catalog.lo_close(0);\n\n");
1562 }
1563}
1564
1565/***********
1566 * Sorting and Reordering
1567 ***********/
1568
1569void
1571{
1572 ArchiveHandle *AH = (ArchiveHandle *) AHX;
1573 RestoreOptions *ropt = AH->public.ropt;
1574 FILE *fh;
1575 StringInfoData linebuf;
1576
1577 /* Allocate space for the 'wanted' array, and init it */
1578 ropt->idWanted = (bool *) pg_malloc0(sizeof(bool) * AH->maxDumpId);
1579
1580 /* Setup the file */
1581 fh = fopen(ropt->tocFile, PG_BINARY_R);
1582 if (!fh)
1583 pg_fatal("could not open TOC file \"%s\": %m", ropt->tocFile);
1584
1585 initStringInfo(&linebuf);
1586
1587 while (pg_get_line_buf(fh, &linebuf))
1588 {
1589 char *cmnt;
1590 char *endptr;
1591 DumpId id;
1592 TocEntry *te;
1593
1594 /* Truncate line at comment, if any */
1595 cmnt = strchr(linebuf.data, ';');
1596 if (cmnt != NULL)
1597 {
1598 cmnt[0] = '\0';
1599 linebuf.len = cmnt - linebuf.data;
1600 }
1601
1602 /* Ignore if all blank */
1603 if (strspn(linebuf.data, " \t\r\n") == linebuf.len)
1604 continue;
1605
1606 /* Get an ID, check it's valid and not already seen */
1607 id = strtol(linebuf.data, &endptr, 10);
1608 if (endptr == linebuf.data || id <= 0 || id > AH->maxDumpId ||
1609 ropt->idWanted[id - 1])
1610 {
1611 pg_log_warning("line ignored: %s", linebuf.data);
1612 continue;
1613 }
1614
1615 /* Find TOC entry */
1616 te = getTocEntryByDumpId(AH, id);
1617 if (!te)
1618 pg_fatal("could not find entry for ID %d",
1619 id);
1620
1621 /* Mark it wanted */
1622 ropt->idWanted[id - 1] = true;
1623
1624 /*
1625 * Move each item to the end of the list as it is selected, so that
1626 * they are placed in the desired order. Any unwanted items will end
1627 * up at the front of the list, which may seem unintuitive but it's
1628 * what we need. In an ordinary serial restore that makes no
1629 * difference, but in a parallel restore we need to mark unrestored
1630 * items' dependencies as satisfied before we start examining
1631 * restorable items. Otherwise they could have surprising
1632 * side-effects on the order in which restorable items actually get
1633 * restored.
1634 */
1635 _moveBefore(AH->toc, te);
1636 }
1637
1638 pg_free(linebuf.data);
1639
1640 if (fclose(fh) != 0)
1641 pg_fatal("could not close TOC file: %m");
1642}
1643
1644/**********************
1645 * Convenience functions that look like standard IO functions
1646 * for writing data when in dump mode.
1647 **********************/
1648
1649/* Public */
1650void
1651archputs(const char *s, Archive *AH)
1652{
1653 WriteData(AH, s, strlen(s));
1654}
1655
1656/* Public */
1657int
1658archprintf(Archive *AH, const char *fmt,...)
1659{
1660 int save_errno = errno;
1661 char *p;
1662 size_t len = 128; /* initial assumption about buffer size */
1663 size_t cnt;
1664
1665 for (;;)
1666 {
1667 va_list args;
1668
1669 /* Allocate work buffer. */
1670 p = (char *) pg_malloc(len);
1671
1672 /* Try to format the data. */
1673 errno = save_errno;
1674 va_start(args, fmt);
1675 cnt = pvsnprintf(p, len, fmt, args);
1676 va_end(args);
1677
1678 if (cnt < len)
1679 break; /* success */
1680
1681 /* Release buffer and loop around to try again with larger len. */
1682 free(p);
1683 len = cnt;
1684 }
1685
1686 WriteData(AH, p, cnt);
1687 free(p);
1688 return (int) cnt;
1689}
1690
1691
1692/*******************************
1693 * Stuff below here should be 'private' to the archiver routines
1694 *******************************/
1695
1696static void
1698 const pg_compress_specification compression_spec)
1699{
1700 CompressFileHandle *CFH;
1701 const char *mode;
1702 int fn = -1;
1703
1704 if (filename)
1705 {
1706 if (strcmp(filename, "-") == 0)
1707 fn = fileno(stdout);
1708 }
1709 else if (AH->FH)
1710 fn = fileno(AH->FH);
1711 else if (AH->fSpec)
1712 {
1713 filename = AH->fSpec;
1714 }
1715 else
1716 fn = fileno(stdout);
1717
1718 if (AH->mode == archModeAppend)
1719 mode = PG_BINARY_A;
1720 else
1721 mode = PG_BINARY_W;
1722
1723 CFH = InitCompressFileHandle(compression_spec);
1724
1725 if (!CFH->open_func(filename, fn, mode, CFH))
1726 {
1727 if (filename)
1728 pg_fatal("could not open output file \"%s\": %m", filename);
1729 else
1730 pg_fatal("could not open output file: %m");
1731 }
1732
1733 AH->OF = CFH;
1734}
1735
1736static CompressFileHandle *
1738{
1739 return (CompressFileHandle *) AH->OF;
1740}
1741
1742static void
1744{
1745 errno = 0;
1746 if (!EndCompressFileHandle(AH->OF))
1747 pg_fatal("could not close output file: %m");
1748
1749 AH->OF = savedOutput;
1750}
1751
1752
1753
1754/*
1755 * Print formatted text to the output file (usually stdout).
1756 */
1757int
1758ahprintf(ArchiveHandle *AH, const char *fmt,...)
1759{
1760 int save_errno = errno;
1761 char *p;
1762 size_t len = 128; /* initial assumption about buffer size */
1763 size_t cnt;
1764
1765 for (;;)
1766 {
1767 va_list args;
1768
1769 /* Allocate work buffer. */
1770 p = (char *) pg_malloc(len);
1771
1772 /* Try to format the data. */
1773 errno = save_errno;
1774 va_start(args, fmt);
1775 cnt = pvsnprintf(p, len, fmt, args);
1776 va_end(args);
1777
1778 if (cnt < len)
1779 break; /* success */
1780
1781 /* Release buffer and loop around to try again with larger len. */
1782 free(p);
1783 len = cnt;
1784 }
1785
1786 ahwrite(p, 1, cnt, AH);
1787 free(p);
1788 return (int) cnt;
1789}
1790
1791/*
1792 * Single place for logic which says 'We are restoring to a direct DB connection'.
1793 */
1794static int
1796{
1797 RestoreOptions *ropt = AH->public.ropt;
1798
1799 return (ropt && ropt->useDB && AH->connection);
1800}
1801
1802/*
1803 * Dump the current contents of the LO data buffer while writing a LO
1804 */
1805static void
1807{
1808 if (AH->connection)
1809 {
1810 int res;
1811
1812 res = lo_write(AH->connection, AH->loFd, AH->lo_buf, AH->lo_buf_used);
1813 pg_log_debug(ngettext("wrote %zu byte of large object data (result = %d)",
1814 "wrote %zu bytes of large object data (result = %d)",
1815 AH->lo_buf_used),
1816 AH->lo_buf_used, res);
1817 /* We assume there are no short writes, only errors */
1818 if (res != AH->lo_buf_used)
1819 warn_or_exit_horribly(AH, "could not write to large object: %s",
1821 }
1822 else
1823 {
1825
1827 (const unsigned char *) AH->lo_buf,
1828 AH->lo_buf_used,
1829 AH);
1830
1831 /* Hack: turn off writingLO so ahwrite doesn't recurse to here */
1832 AH->writingLO = false;
1833 ahprintf(AH, "SELECT pg_catalog.lowrite(0, %s);\n", buf->data);
1834 AH->writingLO = true;
1835
1837 }
1838 AH->lo_buf_used = 0;
1839}
1840
1841
1842/*
1843 * Write buffer to the output file (usually stdout). This is used for
1844 * outputting 'restore' scripts etc. It is even possible for an archive
1845 * format to create a custom output routine to 'fake' a restore if it
1846 * wants to generate a script (see TAR output).
1847 */
1848void
1849ahwrite(const void *ptr, size_t size, size_t nmemb, ArchiveHandle *AH)
1850{
1851 int bytes_written = 0;
1852
1853 if (AH->writingLO)
1854 {
1855 size_t remaining = size * nmemb;
1856
1857 while (AH->lo_buf_used + remaining > AH->lo_buf_size)
1858 {
1859 size_t avail = AH->lo_buf_size - AH->lo_buf_used;
1860
1861 memcpy((char *) AH->lo_buf + AH->lo_buf_used, ptr, avail);
1862 ptr = (const char *) ptr + avail;
1863 remaining -= avail;
1864 AH->lo_buf_used += avail;
1865 dump_lo_buf(AH);
1866 }
1867
1868 memcpy((char *) AH->lo_buf + AH->lo_buf_used, ptr, remaining);
1869 AH->lo_buf_used += remaining;
1870
1871 bytes_written = size * nmemb;
1872 }
1873 else if (AH->CustomOutPtr)
1874 bytes_written = AH->CustomOutPtr(AH, ptr, size * nmemb);
1875
1876 /*
1877 * If we're doing a restore, and it's direct to DB, and we're connected
1878 * then send it to the DB.
1879 */
1880 else if (RestoringToDB(AH))
1881 bytes_written = ExecuteSqlCommandBuf(&AH->public, (const char *) ptr, size * nmemb);
1882 else
1883 {
1885
1886 CFH->write_func(ptr, size * nmemb, CFH);
1887 bytes_written = size * nmemb;
1888 }
1889
1890 if (bytes_written != size * nmemb)
1892}
1893
1894/* on some error, we may decide to go on... */
1895void
1896warn_or_exit_horribly(ArchiveHandle *AH, const char *fmt,...)
1897{
1898 va_list ap;
1899
1900 switch (AH->stage)
1901 {
1902
1903 case STAGE_NONE:
1904 /* Do nothing special */
1905 break;
1906
1907 case STAGE_INITIALIZING:
1908 if (AH->stage != AH->lastErrorStage)
1909 pg_log_info("while INITIALIZING:");
1910 break;
1911
1912 case STAGE_PROCESSING:
1913 if (AH->stage != AH->lastErrorStage)
1914 pg_log_info("while PROCESSING TOC:");
1915 break;
1916
1917 case STAGE_FINALIZING:
1918 if (AH->stage != AH->lastErrorStage)
1919 pg_log_info("while FINALIZING:");
1920 break;
1921 }
1922 if (AH->currentTE != NULL && AH->currentTE != AH->lastErrorTE)
1923 {
1924 pg_log_info("from TOC entry %d; %u %u %s %s %s",
1925 AH->currentTE->dumpId,
1927 AH->currentTE->catalogId.oid,
1928 AH->currentTE->desc ? AH->currentTE->desc : "(no desc)",
1929 AH->currentTE->tag ? AH->currentTE->tag : "(no tag)",
1930 AH->currentTE->owner ? AH->currentTE->owner : "(no owner)");
1931 }
1932 AH->lastErrorStage = AH->stage;
1933 AH->lastErrorTE = AH->currentTE;
1934
1935 va_start(ap, fmt);
1937 va_end(ap);
1938
1939 if (AH->public.exit_on_error)
1940 exit_nicely(1);
1941 else
1942 AH->public.n_errors++;
1943}
1944
1945#ifdef NOT_USED
1946
1947static void
1948_moveAfter(ArchiveHandle *AH, TocEntry *pos, TocEntry *te)
1949{
1950 /* Unlink te from list */
1951 te->prev->next = te->next;
1952 te->next->prev = te->prev;
1953
1954 /* and insert it after "pos" */
1955 te->prev = pos;
1956 te->next = pos->next;
1957 pos->next->prev = te;
1958 pos->next = te;
1959}
1960#endif
1961
1962static void
1964{
1965 /* Unlink te from list */
1966 te->prev->next = te->next;
1967 te->next->prev = te->prev;
1968
1969 /* and insert it before "pos" */
1970 te->prev = pos->prev;
1971 te->next = pos;
1972 pos->prev->next = te;
1973 pos->prev = te;
1974}
1975
1976/*
1977 * Build index arrays for the TOC list
1978 *
1979 * This should be invoked only after we have created or read in all the TOC
1980 * items.
1981 *
1982 * The arrays are indexed by dump ID (so entry zero is unused). Note that the
1983 * array entries run only up to maxDumpId. We might see dependency dump IDs
1984 * beyond that (if the dump was partial); so always check the array bound
1985 * before trying to touch an array entry.
1986 */
1987static void
1989{
1990 DumpId maxDumpId = AH->maxDumpId;
1991 TocEntry *te;
1992
1993 AH->tocsByDumpId = (TocEntry **) pg_malloc0((maxDumpId + 1) * sizeof(TocEntry *));
1994 AH->tableDataId = (DumpId *) pg_malloc0((maxDumpId + 1) * sizeof(DumpId));
1995
1996 for (te = AH->toc->next; te != AH->toc; te = te->next)
1997 {
1998 /* this check is purely paranoia, maxDumpId should be correct */
1999 if (te->dumpId <= 0 || te->dumpId > maxDumpId)
2000 pg_fatal("bad dumpId");
2001
2002 /* tocsByDumpId indexes all TOCs by their dump ID */
2003 AH->tocsByDumpId[te->dumpId] = te;
2004
2005 /*
2006 * tableDataId provides the TABLE DATA item's dump ID for each TABLE
2007 * TOC entry that has a DATA item. We compute this by reversing the
2008 * TABLE DATA item's dependency, knowing that a TABLE DATA item has
2009 * just one dependency and it is the TABLE item.
2010 */
2011 if (strcmp(te->desc, "TABLE DATA") == 0 && te->nDeps > 0)
2012 {
2013 DumpId tableId = te->dependencies[0];
2014
2015 /*
2016 * The TABLE item might not have been in the archive, if this was
2017 * a data-only dump; but its dump ID should be less than its data
2018 * item's dump ID, so there should be a place for it in the array.
2019 */
2020 if (tableId <= 0 || tableId > maxDumpId)
2021 pg_fatal("bad table dumpId for TABLE DATA item");
2022
2023 AH->tableDataId[tableId] = te->dumpId;
2024 }
2025 }
2026}
2027
2028TocEntry *
2030{
2031 /* build index arrays if we didn't already */
2032 if (AH->tocsByDumpId == NULL)
2034
2035 if (id > 0 && id <= AH->maxDumpId)
2036 return AH->tocsByDumpId[id];
2037
2038 return NULL;
2039}
2040
2041int
2043{
2044 TocEntry *te = getTocEntryByDumpId(AH, id);
2045
2046 if (!te)
2047 return 0;
2048
2049 return te->reqs;
2050}
2051
2052size_t
2054{
2055 int off;
2056
2057 /* Save the flag */
2058 AH->WriteBytePtr(AH, wasSet);
2059
2060 /* Write out pgoff_t smallest byte first, prevents endian mismatch */
2061 for (off = 0; off < sizeof(pgoff_t); off++)
2062 {
2063 AH->WriteBytePtr(AH, o & 0xFF);
2064 o >>= 8;
2065 }
2066 return sizeof(pgoff_t) + 1;
2067}
2068
2069int
2071{
2072 int i;
2073 int off;
2074 int offsetFlg;
2075
2076 /* Initialize to zero */
2077 *o = 0;
2078
2079 /* Check for old version */
2080 if (AH->version < K_VERS_1_7)
2081 {
2082 /* Prior versions wrote offsets using WriteInt */
2083 i = ReadInt(AH);
2084 /* -1 means not set */
2085 if (i < 0)
2086 return K_OFFSET_POS_NOT_SET;
2087 else if (i == 0)
2088 return K_OFFSET_NO_DATA;
2089
2090 /* Cast to pgoff_t because it was written as an int. */
2091 *o = (pgoff_t) i;
2092 return K_OFFSET_POS_SET;
2093 }
2094
2095 /*
2096 * Read the flag indicating the state of the data pointer. Check if valid
2097 * and die if not.
2098 *
2099 * This used to be handled by a negative or zero pointer, now we use an
2100 * extra byte specifically for the state.
2101 */
2102 offsetFlg = AH->ReadBytePtr(AH) & 0xFF;
2103
2104 switch (offsetFlg)
2105 {
2107 case K_OFFSET_NO_DATA:
2108 case K_OFFSET_POS_SET:
2109
2110 break;
2111
2112 default:
2113 pg_fatal("unexpected data offset flag %d", offsetFlg);
2114 }
2115
2116 /*
2117 * Read the bytes
2118 */
2119 for (off = 0; off < AH->offSize; off++)
2120 {
2121 if (off < sizeof(pgoff_t))
2122 *o |= ((pgoff_t) (AH->ReadBytePtr(AH))) << (off * 8);
2123 else
2124 {
2125 if (AH->ReadBytePtr(AH) != 0)
2126 pg_fatal("file offset in dump file is too large");
2127 }
2128 }
2129
2130 return offsetFlg;
2131}
2132
2133size_t
2135{
2136 int b;
2137
2138 /*
2139 * This is a bit yucky, but I don't want to make the binary format very
2140 * dependent on representation, and not knowing much about it, I write out
2141 * a sign byte. If you change this, don't forget to change the file
2142 * version #, and modify ReadInt to read the new format AS WELL AS the old
2143 * formats.
2144 */
2145
2146 /* SIGN byte */
2147 if (i < 0)
2148 {
2149 AH->WriteBytePtr(AH, 1);
2150 i = -i;
2151 }
2152 else
2153 AH->WriteBytePtr(AH, 0);
2154
2155 for (b = 0; b < AH->intSize; b++)
2156 {
2157 AH->WriteBytePtr(AH, i & 0xFF);
2158 i >>= 8;
2159 }
2160
2161 return AH->intSize + 1;
2162}
2163
2164int
2166{
2167 int res = 0;
2168 int bv,
2169 b;
2170 int sign = 0; /* Default positive */
2171 int bitShift = 0;
2172
2173 if (AH->version > K_VERS_1_0)
2174 /* Read a sign byte */
2175 sign = AH->ReadBytePtr(AH);
2176
2177 for (b = 0; b < AH->intSize; b++)
2178 {
2179 bv = AH->ReadBytePtr(AH) & 0xFF;
2180 if (bv != 0)
2181 res = res + (bv << bitShift);
2182 bitShift += 8;
2183 }
2184
2185 if (sign)
2186 res = -res;
2187
2188 return res;
2189}
2190
2191size_t
2192WriteStr(ArchiveHandle *AH, const char *c)
2193{
2194 size_t res;
2195
2196 if (c)
2197 {
2198 int len = strlen(c);
2199
2200 res = WriteInt(AH, len);
2201 AH->WriteBufPtr(AH, c, len);
2202 res += len;
2203 }
2204 else
2205 res = WriteInt(AH, -1);
2206
2207 return res;
2208}
2209
2210char *
2212{
2213 char *buf;
2214 int l;
2215
2216 l = ReadInt(AH);
2217 if (l < 0)
2218 buf = NULL;
2219 else
2220 {
2221 buf = (char *) pg_malloc(l + 1);
2222 AH->ReadBufPtr(AH, buf, l);
2223
2224 buf[l] = '\0';
2225 }
2226
2227 return buf;
2228}
2229
2230static bool
2231_fileExistsInDirectory(const char *dir, const char *filename)
2232{
2233 struct stat st;
2234 char buf[MAXPGPATH];
2235
2236 if (snprintf(buf, MAXPGPATH, "%s/%s", dir, filename) >= MAXPGPATH)
2237 pg_fatal("directory name too long: \"%s\"", dir);
2238
2239 return (stat(buf, &st) == 0 && S_ISREG(st.st_mode));
2240}
2241
2242static int
2244{
2245 FILE *fh;
2246 char sig[6]; /* More than enough */
2247 size_t cnt;
2248 int wantClose = 0;
2249
2250 pg_log_debug("attempting to ascertain archive format");
2251
2252 free(AH->lookahead);
2253
2254 AH->readHeader = 0;
2255 AH->lookaheadSize = 512;
2256 AH->lookahead = pg_malloc0(512);
2257 AH->lookaheadLen = 0;
2258 AH->lookaheadPos = 0;
2259
2260 if (AH->fSpec)
2261 {
2262 struct stat st;
2263
2264 wantClose = 1;
2265
2266 /*
2267 * Check if the specified archive is a directory. If so, check if
2268 * there's a "toc.dat" (or "toc.dat.{gz,lz4,zst}") file in it.
2269 */
2270 if (stat(AH->fSpec, &st) == 0 && S_ISDIR(st.st_mode))
2271 {
2272 AH->format = archDirectory;
2273 if (_fileExistsInDirectory(AH->fSpec, "toc.dat"))
2274 return AH->format;
2275#ifdef HAVE_LIBZ
2276 if (_fileExistsInDirectory(AH->fSpec, "toc.dat.gz"))
2277 return AH->format;
2278#endif
2279#ifdef USE_LZ4
2280 if (_fileExistsInDirectory(AH->fSpec, "toc.dat.lz4"))
2281 return AH->format;
2282#endif
2283#ifdef USE_ZSTD
2284 if (_fileExistsInDirectory(AH->fSpec, "toc.dat.zst"))
2285 return AH->format;
2286#endif
2287 pg_fatal("directory \"%s\" does not appear to be a valid archive (\"toc.dat\" does not exist)",
2288 AH->fSpec);
2289 fh = NULL; /* keep compiler quiet */
2290 }
2291 else
2292 {
2293 fh = fopen(AH->fSpec, PG_BINARY_R);
2294 if (!fh)
2295 pg_fatal("could not open input file \"%s\": %m", AH->fSpec);
2296 }
2297 }
2298 else
2299 {
2300 fh = stdin;
2301 if (!fh)
2302 pg_fatal("could not open input file: %m");
2303 }
2304
2305 if ((cnt = fread(sig, 1, 5, fh)) != 5)
2306 {
2307 if (ferror(fh))
2308 pg_fatal("could not read input file: %m");
2309 else
2310 pg_fatal("input file is too short (read %zu, expected 5)", cnt);
2311 }
2312
2313 /* Save it, just in case we need it later */
2314 memcpy(&AH->lookahead[0], sig, 5);
2315 AH->lookaheadLen = 5;
2316
2317 if (strncmp(sig, "PGDMP", 5) == 0)
2318 {
2319 /* It's custom format, stop here */
2320 AH->format = archCustom;
2321 AH->readHeader = 1;
2322 }
2323 else
2324 {
2325 /*
2326 * *Maybe* we have a tar archive format file or a text dump ... So,
2327 * read first 512 byte header...
2328 */
2329 cnt = fread(&AH->lookahead[AH->lookaheadLen], 1, 512 - AH->lookaheadLen, fh);
2330 /* read failure is checked below */
2331 AH->lookaheadLen += cnt;
2332
2333 if (AH->lookaheadLen >= strlen(TEXT_DUMPALL_HEADER) &&
2334 (strncmp(AH->lookahead, TEXT_DUMP_HEADER, strlen(TEXT_DUMP_HEADER)) == 0 ||
2335 strncmp(AH->lookahead, TEXT_DUMPALL_HEADER, strlen(TEXT_DUMPALL_HEADER)) == 0))
2336 {
2337 /*
2338 * looks like it's probably a text format dump. so suggest they
2339 * try psql
2340 */
2341 pg_fatal("input file appears to be a text format dump. Please use psql.");
2342 }
2343
2344 if (AH->lookaheadLen != 512)
2345 {
2346 if (feof(fh))
2347 pg_fatal("input file does not appear to be a valid archive (too short?)");
2348 else
2349 READ_ERROR_EXIT(fh);
2350 }
2351
2352 if (!isValidTarHeader(AH->lookahead))
2353 pg_fatal("input file does not appear to be a valid archive");
2354
2355 AH->format = archTar;
2356 }
2357
2358 /* Close the file if we opened it */
2359 if (wantClose)
2360 {
2361 if (fclose(fh) != 0)
2362 pg_fatal("could not close input file: %m");
2363 /* Forget lookahead, since we'll re-read header after re-opening */
2364 AH->readHeader = 0;
2365 AH->lookaheadLen = 0;
2366 }
2367
2368 return AH->format;
2369}
2370
2371
2372/*
2373 * Allocate an archive handle
2374 */
2375static ArchiveHandle *
2376_allocAH(const char *FileSpec, const ArchiveFormat fmt,
2377 const pg_compress_specification compression_spec,
2378 bool dosync, ArchiveMode mode,
2380{
2381 ArchiveHandle *AH;
2382 CompressFileHandle *CFH;
2383 pg_compress_specification out_compress_spec = {0};
2384
2385 pg_log_debug("allocating AH for %s, format %d",
2386 FileSpec ? FileSpec : "(stdio)", fmt);
2387
2388 AH = (ArchiveHandle *) pg_malloc0(sizeof(ArchiveHandle));
2389
2390 AH->version = K_VERS_SELF;
2391
2392 /* initialize for backwards compatible string processing */
2393 AH->public.encoding = 0; /* PG_SQL_ASCII */
2394 AH->public.std_strings = false;
2395
2396 /* sql error handling */
2397 AH->public.exit_on_error = true;
2398 AH->public.n_errors = 0;
2399
2400 AH->archiveDumpVersion = PG_VERSION;
2401
2402 AH->createDate = time(NULL);
2403
2404 AH->intSize = sizeof(int);
2405 AH->offSize = sizeof(pgoff_t);
2406 if (FileSpec)
2407 {
2408 AH->fSpec = pg_strdup(FileSpec);
2409
2410 /*
2411 * Not used; maybe later....
2412 *
2413 * AH->workDir = pg_strdup(FileSpec); for(i=strlen(FileSpec) ; i > 0 ;
2414 * i--) if (AH->workDir[i-1] == '/')
2415 */
2416 }
2417 else
2418 AH->fSpec = NULL;
2419
2420 AH->currUser = NULL; /* unknown */
2421 AH->currSchema = NULL; /* ditto */
2422 AH->currTablespace = NULL; /* ditto */
2423 AH->currTableAm = NULL; /* ditto */
2424
2425 AH->toc = (TocEntry *) pg_malloc0(sizeof(TocEntry));
2426
2427 AH->toc->next = AH->toc;
2428 AH->toc->prev = AH->toc;
2429
2430 AH->mode = mode;
2431 AH->compression_spec = compression_spec;
2432 AH->dosync = dosync;
2434
2435 memset(&(AH->sqlparse), 0, sizeof(AH->sqlparse));
2436
2437 /* Open stdout with no compression for AH output handle */
2438 out_compress_spec.algorithm = PG_COMPRESSION_NONE;
2439 CFH = InitCompressFileHandle(out_compress_spec);
2440 if (!CFH->open_func(NULL, fileno(stdout), PG_BINARY_A, CFH))
2441 pg_fatal("could not open stdout for appending: %m");
2442 AH->OF = CFH;
2443
2444 /*
2445 * On Windows, we need to use binary mode to read/write non-text files,
2446 * which include all archive formats as well as compressed plain text.
2447 * Force stdin/stdout into binary mode if that is what we are using.
2448 */
2449#ifdef WIN32
2450 if ((fmt != archNull || compression_spec.algorithm != PG_COMPRESSION_NONE) &&
2451 (AH->fSpec == NULL || strcmp(AH->fSpec, "") == 0))
2452 {
2453 if (mode == archModeWrite)
2454 _setmode(fileno(stdout), O_BINARY);
2455 else
2456 _setmode(fileno(stdin), O_BINARY);
2457 }
2458#endif
2459
2460 AH->SetupWorkerPtr = setupWorkerPtr;
2461
2462 if (fmt == archUnknown)
2464 else
2465 AH->format = fmt;
2466
2467 switch (AH->format)
2468 {
2469 case archCustom:
2471 break;
2472
2473 case archNull:
2475 break;
2476
2477 case archDirectory:
2479 break;
2480
2481 case archTar:
2483 break;
2484
2485 default:
2486 pg_fatal("unrecognized file format \"%d\"", AH->format);
2487 }
2488
2489 return AH;
2490}
2491
2492/*
2493 * Write out all data (tables & LOs)
2494 */
2495void
2497{
2498 TocEntry *te;
2499
2500 if (pstate && pstate->numWorkers > 1)
2501 {
2502 /*
2503 * In parallel mode, this code runs in the leader process. We
2504 * construct an array of candidate TEs, then sort it into decreasing
2505 * size order, then dispatch each TE to a data-transfer worker. By
2506 * dumping larger tables first, we avoid getting into a situation
2507 * where we're down to one job and it's big, losing parallelism.
2508 */
2509 TocEntry **tes;
2510 int ntes;
2511
2512 tes = (TocEntry **) pg_malloc(AH->tocCount * sizeof(TocEntry *));
2513 ntes = 0;
2514 for (te = AH->toc->next; te != AH->toc; te = te->next)
2515 {
2516 /* Consider only TEs with dataDumper functions ... */
2517 if (!te->dataDumper)
2518 continue;
2519 /* ... and ignore ones not enabled for dump */
2520 if ((te->reqs & REQ_DATA) == 0)
2521 continue;
2522
2523 tes[ntes++] = te;
2524 }
2525
2526 if (ntes > 1)
2527 qsort(tes, ntes, sizeof(TocEntry *), TocEntrySizeCompareQsort);
2528
2529 for (int i = 0; i < ntes; i++)
2530 DispatchJobForTocEntry(AH, pstate, tes[i], ACT_DUMP,
2531 mark_dump_job_done, NULL);
2532
2533 pg_free(tes);
2534
2535 /* Now wait for workers to finish. */
2536 WaitForWorkers(AH, pstate, WFW_ALL_IDLE);
2537 }
2538 else
2539 {
2540 /* Non-parallel mode: just dump all candidate TEs sequentially. */
2541 for (te = AH->toc->next; te != AH->toc; te = te->next)
2542 {
2543 /* Must have same filter conditions as above */
2544 if (!te->dataDumper)
2545 continue;
2546 if ((te->reqs & REQ_DATA) == 0)
2547 continue;
2548
2550 }
2551 }
2552}
2553
2554
2555/*
2556 * Callback function that's invoked in the leader process after a step has
2557 * been parallel dumped.
2558 *
2559 * We don't need to do anything except check for worker failure.
2560 */
2561static void
2563 TocEntry *te,
2564 int status,
2565 void *callback_data)
2566{
2567 pg_log_info("finished item %d %s %s",
2568 te->dumpId, te->desc, te->tag);
2569
2570 if (status != 0)
2571 pg_fatal("worker process failed: exit code %d",
2572 status);
2573}
2574
2575
2576void
2578{
2579 StartDataPtrType startPtr;
2580 EndDataPtrType endPtr;
2581
2582 AH->currToc = te;
2583
2584 if (strcmp(te->desc, "BLOBS") == 0)
2585 {
2586 startPtr = AH->StartLOsPtr;
2587 endPtr = AH->EndLOsPtr;
2588 }
2589 else
2590 {
2591 startPtr = AH->StartDataPtr;
2592 endPtr = AH->EndDataPtr;
2593 }
2594
2595 if (startPtr != NULL)
2596 (*startPtr) (AH, te);
2597
2598 /*
2599 * The user-provided DataDumper routine needs to call AH->WriteData
2600 */
2601 te->dataDumper((Archive *) AH, te->dataDumperArg);
2602
2603 if (endPtr != NULL)
2604 (*endPtr) (AH, te);
2605
2606 AH->currToc = NULL;
2607}
2608
2609void
2611{
2612 TocEntry *te;
2613 char workbuf[32];
2614 int tocCount;
2615 int i;
2616
2617 /* count entries that will actually be dumped */
2618 tocCount = 0;
2619 for (te = AH->toc->next; te != AH->toc; te = te->next)
2620 {
2621 if ((te->reqs & (REQ_SCHEMA | REQ_DATA | REQ_STATS | REQ_SPECIAL)) != 0)
2622 tocCount++;
2623 }
2624
2625 /* printf("%d TOC Entries to save\n", tocCount); */
2626
2627 WriteInt(AH, tocCount);
2628
2629 for (te = AH->toc->next; te != AH->toc; te = te->next)
2630 {
2631 if ((te->reqs & (REQ_SCHEMA | REQ_DATA | REQ_STATS | REQ_SPECIAL)) == 0)
2632 continue;
2633
2634 WriteInt(AH, te->dumpId);
2635 WriteInt(AH, te->dataDumper ? 1 : 0);
2636
2637 /* OID is recorded as a string for historical reasons */
2638 sprintf(workbuf, "%u", te->catalogId.tableoid);
2639 WriteStr(AH, workbuf);
2640 sprintf(workbuf, "%u", te->catalogId.oid);
2641 WriteStr(AH, workbuf);
2642
2643 WriteStr(AH, te->tag);
2644 WriteStr(AH, te->desc);
2645 WriteInt(AH, te->section);
2646
2647 if (te->defnLen)
2648 {
2649 /*
2650 * defnLen should only be set for custom format's second call to
2651 * WriteToc(), which rewrites the TOC in place to update data
2652 * offsets. Instead of calling the defnDumper a second time
2653 * (which could involve re-executing queries), just skip writing
2654 * the entry. While regenerating the definition should
2655 * theoretically produce the same result as before, it's expensive
2656 * and feels risky.
2657 *
2658 * The custom format only calls WriteToc() a second time if
2659 * fseeko() is usable (see _CloseArchive() in pg_backup_custom.c),
2660 * so we can safely use it without checking. For other formats,
2661 * we fail because one of our assumptions must no longer hold
2662 * true.
2663 *
2664 * XXX This is a layering violation, but the alternative is an
2665 * awkward and complicated callback infrastructure for this
2666 * special case. This might be worth revisiting in the future.
2667 */
2668 if (AH->format != archCustom)
2669 pg_fatal("unexpected TOC entry in WriteToc(): %d %s %s",
2670 te->dumpId, te->desc, te->tag);
2671
2672 if (fseeko(AH->FH, te->defnLen, SEEK_CUR) != 0)
2673 pg_fatal("error during file seek: %m");
2674 }
2675 else if (te->defnDumper)
2676 {
2677 char *defn = te->defnDumper((Archive *) AH, te->defnDumperArg, te);
2678
2679 te->defnLen = WriteStr(AH, defn);
2680 pg_free(defn);
2681 }
2682 else
2683 WriteStr(AH, te->defn);
2684
2685 WriteStr(AH, te->dropStmt);
2686 WriteStr(AH, te->copyStmt);
2687 WriteStr(AH, te->namespace);
2688 WriteStr(AH, te->tablespace);
2689 WriteStr(AH, te->tableam);
2690 WriteInt(AH, te->relkind);
2691 WriteStr(AH, te->owner);
2692 WriteStr(AH, "false");
2693
2694 /* Dump list of dependencies */
2695 for (i = 0; i < te->nDeps; i++)
2696 {
2697 sprintf(workbuf, "%d", te->dependencies[i]);
2698 WriteStr(AH, workbuf);
2699 }
2700 WriteStr(AH, NULL); /* Terminate List */
2701
2702 if (AH->WriteExtraTocPtr)
2703 AH->WriteExtraTocPtr(AH, te);
2704 }
2705}
2706
2707void
2709{
2710 int i;
2711 char *tmp;
2712 DumpId *deps;
2713 int depIdx;
2714 int depSize;
2715 TocEntry *te;
2716 bool is_supported;
2717
2718 AH->tocCount = ReadInt(AH);
2719 AH->maxDumpId = 0;
2720
2721 for (i = 0; i < AH->tocCount; i++)
2722 {
2723 te = (TocEntry *) pg_malloc0(sizeof(TocEntry));
2724 te->dumpId = ReadInt(AH);
2725
2726 if (te->dumpId > AH->maxDumpId)
2727 AH->maxDumpId = te->dumpId;
2728
2729 /* Sanity check */
2730 if (te->dumpId <= 0)
2731 pg_fatal("entry ID %d out of range -- perhaps a corrupt TOC",
2732 te->dumpId);
2733
2734 te->hadDumper = ReadInt(AH);
2735
2736 if (AH->version >= K_VERS_1_8)
2737 {
2738 tmp = ReadStr(AH);
2739 sscanf(tmp, "%u", &te->catalogId.tableoid);
2740 free(tmp);
2741 }
2742 else
2744 tmp = ReadStr(AH);
2745 sscanf(tmp, "%u", &te->catalogId.oid);
2746 free(tmp);
2747
2748 te->tag = ReadStr(AH);
2749 te->desc = ReadStr(AH);
2750
2751 if (AH->version >= K_VERS_1_11)
2752 {
2753 te->section = ReadInt(AH);
2754 }
2755 else
2756 {
2757 /*
2758 * Rules for pre-8.4 archives wherein pg_dump hasn't classified
2759 * the entries into sections. This list need not cover entry
2760 * types added later than 8.4.
2761 */
2762 if (strcmp(te->desc, "COMMENT") == 0 ||
2763 strcmp(te->desc, "ACL") == 0 ||
2764 strcmp(te->desc, "ACL LANGUAGE") == 0)
2765 te->section = SECTION_NONE;
2766 else if (strcmp(te->desc, "TABLE DATA") == 0 ||
2767 strcmp(te->desc, "BLOBS") == 0 ||
2768 strcmp(te->desc, "BLOB COMMENTS") == 0)
2769 te->section = SECTION_DATA;
2770 else if (strcmp(te->desc, "CONSTRAINT") == 0 ||
2771 strcmp(te->desc, "CHECK CONSTRAINT") == 0 ||
2772 strcmp(te->desc, "FK CONSTRAINT") == 0 ||
2773 strcmp(te->desc, "INDEX") == 0 ||
2774 strcmp(te->desc, "RULE") == 0 ||
2775 strcmp(te->desc, "TRIGGER") == 0)
2777 else
2779 }
2780
2781 te->defn = ReadStr(AH);
2782 te->dropStmt = ReadStr(AH);
2783
2784 if (AH->version >= K_VERS_1_3)
2785 te->copyStmt = ReadStr(AH);
2786
2787 if (AH->version >= K_VERS_1_6)
2788 te->namespace = ReadStr(AH);
2789
2790 if (AH->version >= K_VERS_1_10)
2791 te->tablespace = ReadStr(AH);
2792
2793 if (AH->version >= K_VERS_1_14)
2794 te->tableam = ReadStr(AH);
2795
2796 if (AH->version >= K_VERS_1_16)
2797 te->relkind = ReadInt(AH);
2798
2799 te->owner = ReadStr(AH);
2800 is_supported = true;
2801 if (AH->version < K_VERS_1_9)
2802 is_supported = false;
2803 else
2804 {
2805 tmp = ReadStr(AH);
2806
2807 if (strcmp(tmp, "true") == 0)
2808 is_supported = false;
2809
2810 free(tmp);
2811 }
2812
2813 if (!is_supported)
2814 pg_log_warning("restoring tables WITH OIDS is not supported anymore");
2815
2816 /* Read TOC entry dependencies */
2817 if (AH->version >= K_VERS_1_5)
2818 {
2819 depSize = 100;
2820 deps = (DumpId *) pg_malloc(sizeof(DumpId) * depSize);
2821 depIdx = 0;
2822 for (;;)
2823 {
2824 tmp = ReadStr(AH);
2825 if (!tmp)
2826 break; /* end of list */
2827 if (depIdx >= depSize)
2828 {
2829 depSize *= 2;
2830 deps = (DumpId *) pg_realloc(deps, sizeof(DumpId) * depSize);
2831 }
2832 sscanf(tmp, "%d", &deps[depIdx]);
2833 free(tmp);
2834 depIdx++;
2835 }
2836
2837 if (depIdx > 0) /* We have a non-null entry */
2838 {
2839 deps = (DumpId *) pg_realloc(deps, sizeof(DumpId) * depIdx);
2840 te->dependencies = deps;
2841 te->nDeps = depIdx;
2842 }
2843 else
2844 {
2845 free(deps);
2846 te->dependencies = NULL;
2847 te->nDeps = 0;
2848 }
2849 }
2850 else
2851 {
2852 te->dependencies = NULL;
2853 te->nDeps = 0;
2854 }
2855 te->dataLength = 0;
2856
2857 if (AH->ReadExtraTocPtr)
2858 AH->ReadExtraTocPtr(AH, te);
2859
2860 pg_log_debug("read TOC entry %d (ID %d) for %s %s",
2861 i, te->dumpId, te->desc, te->tag);
2862
2863 /* link completed entry into TOC circular list */
2864 te->prev = AH->toc->prev;
2865 AH->toc->prev->next = te;
2866 AH->toc->prev = te;
2867 te->next = AH->toc;
2868
2869 /* special processing immediately upon read for some items */
2870 if (strcmp(te->desc, "ENCODING") == 0)
2871 processEncodingEntry(AH, te);
2872 else if (strcmp(te->desc, "STDSTRINGS") == 0)
2873 processStdStringsEntry(AH, te);
2874 else if (strcmp(te->desc, "SEARCHPATH") == 0)
2875 processSearchPathEntry(AH, te);
2876 }
2877}
2878
2879static void
2881{
2882 /* te->defn should have the form SET client_encoding = 'foo'; */
2883 char *defn = pg_strdup(te->defn);
2884 char *ptr1;
2885 char *ptr2 = NULL;
2886 int encoding;
2887
2888 ptr1 = strchr(defn, '\'');
2889 if (ptr1)
2890 ptr2 = strchr(++ptr1, '\'');
2891 if (ptr2)
2892 {
2893 *ptr2 = '\0';
2895 if (encoding < 0)
2896 pg_fatal("unrecognized encoding \"%s\"",
2897 ptr1);
2898 AH->public.encoding = encoding;
2900 }
2901 else
2902 pg_fatal("invalid ENCODING item: %s",
2903 te->defn);
2904
2905 free(defn);
2906}
2907
2908static void
2910{
2911 /* te->defn should have the form SET standard_conforming_strings = 'x'; */
2912 char *ptr1;
2913
2914 ptr1 = strchr(te->defn, '\'');
2915 if (ptr1 && strncmp(ptr1, "'on'", 4) == 0)
2916 AH->public.std_strings = true;
2917 else if (ptr1 && strncmp(ptr1, "'off'", 5) == 0)
2918 AH->public.std_strings = false;
2919 else
2920 pg_fatal("invalid STDSTRINGS item: %s",
2921 te->defn);
2922}
2923
2924static void
2926{
2927 /*
2928 * te->defn should contain a command to set search_path. We just copy it
2929 * verbatim for use later.
2930 */
2931 AH->public.searchpath = pg_strdup(te->defn);
2932}
2933
2934static void
2936{
2937 const char *missing_name;
2938
2939 Assert(ropt->strict_names);
2940
2941 if (ropt->schemaNames.head != NULL)
2942 {
2943 missing_name = simple_string_list_not_touched(&ropt->schemaNames);
2944 if (missing_name != NULL)
2945 pg_fatal("schema \"%s\" not found", missing_name);
2946 }
2947
2948 if (ropt->tableNames.head != NULL)
2949 {
2950 missing_name = simple_string_list_not_touched(&ropt->tableNames);
2951 if (missing_name != NULL)
2952 pg_fatal("table \"%s\" not found", missing_name);
2953 }
2954
2955 if (ropt->indexNames.head != NULL)
2956 {
2957 missing_name = simple_string_list_not_touched(&ropt->indexNames);
2958 if (missing_name != NULL)
2959 pg_fatal("index \"%s\" not found", missing_name);
2960 }
2961
2962 if (ropt->functionNames.head != NULL)
2963 {
2964 missing_name = simple_string_list_not_touched(&ropt->functionNames);
2965 if (missing_name != NULL)
2966 pg_fatal("function \"%s\" not found", missing_name);
2967 }
2968
2969 if (ropt->triggerNames.head != NULL)
2970 {
2971 missing_name = simple_string_list_not_touched(&ropt->triggerNames);
2972 if (missing_name != NULL)
2973 pg_fatal("trigger \"%s\" not found", missing_name);
2974 }
2975}
2976
2977/*
2978 * Determine whether we want to restore this TOC entry.
2979 *
2980 * Returns 0 if entry should be skipped, or some combination of the
2981 * REQ_SCHEMA, REQ_DATA, and REQ_STATS bits if we want to restore schema, data
2982 * and/or statistics portions of this TOC entry, or REQ_SPECIAL if it's a
2983 * special entry.
2984 */
2985static int
2987{
2988 int res = REQ_SCHEMA | REQ_DATA;
2989 RestoreOptions *ropt = AH->public.ropt;
2990
2991 /*
2992 * For binary upgrade mode, dump pg_largeobject_metadata and the
2993 * associated pg_shdepend rows. This is faster to restore than the
2994 * equivalent set of large object commands. We can only do this for
2995 * upgrades from v12 and newer; in older versions, pg_largeobject_metadata
2996 * was created WITH OIDS, so the OID column is hidden and won't be dumped.
2997 */
2998 if (ropt->binary_upgrade && AH->public.remoteVersion >= 120000 &&
2999 strcmp(te->desc, "TABLE DATA") == 0 &&
3000 (te->catalogId.oid == LargeObjectMetadataRelationId ||
3001 te->catalogId.oid == SharedDependRelationId))
3002 return REQ_DATA;
3003
3004 /* These items are treated specially */
3005 if (strcmp(te->desc, "ENCODING") == 0 ||
3006 strcmp(te->desc, "STDSTRINGS") == 0 ||
3007 strcmp(te->desc, "SEARCHPATH") == 0)
3008 return REQ_SPECIAL;
3009
3010 if (strcmp(te->desc, "STATISTICS DATA") == 0)
3011 {
3012 if (!ropt->dumpStatistics)
3013 return 0;
3014
3015 res = REQ_STATS;
3016 }
3017
3018 /*
3019 * DATABASE and DATABASE PROPERTIES also have a special rule: they are
3020 * restored in createDB mode, and not restored otherwise, independently of
3021 * all else.
3022 */
3023 if (strcmp(te->desc, "DATABASE") == 0 ||
3024 strcmp(te->desc, "DATABASE PROPERTIES") == 0)
3025 {
3026 if (ropt->createDB)
3027 return REQ_SCHEMA;
3028 else
3029 return 0;
3030 }
3031
3032 /*
3033 * Process exclusions that affect certain classes of TOC entries.
3034 */
3035
3036 /* If it's an ACL, maybe ignore it */
3037 if (ropt->aclsSkip && _tocEntryIsACL(te))
3038 return 0;
3039
3040 /* If it's a comment, maybe ignore it */
3041 if (ropt->no_comments && strcmp(te->desc, "COMMENT") == 0)
3042 return 0;
3043
3044 /* If it's a policy, maybe ignore it */
3045 if (ropt->no_policies &&
3046 (strcmp(te->desc, "POLICY") == 0 ||
3047 strcmp(te->desc, "ROW SECURITY") == 0))
3048 return 0;
3049
3050 /*
3051 * If it's a comment on a policy, a publication, or a subscription, maybe
3052 * ignore it.
3053 */
3054 if (strcmp(te->desc, "COMMENT") == 0)
3055 {
3056 if (ropt->no_policies &&
3057 strncmp(te->tag, "POLICY", strlen("POLICY")) == 0)
3058 return 0;
3059
3060 if (ropt->no_publications &&
3061 strncmp(te->tag, "PUBLICATION", strlen("PUBLICATION")) == 0)
3062 return 0;
3063
3064 if (ropt->no_subscriptions &&
3065 strncmp(te->tag, "SUBSCRIPTION", strlen("SUBSCRIPTION")) == 0)
3066 return 0;
3067 }
3068
3069 /*
3070 * If it's a publication or a table part of a publication, maybe ignore
3071 * it.
3072 */
3073 if (ropt->no_publications &&
3074 (strcmp(te->desc, "PUBLICATION") == 0 ||
3075 strcmp(te->desc, "PUBLICATION TABLE") == 0 ||
3076 strcmp(te->desc, "PUBLICATION TABLES IN SCHEMA") == 0))
3077 return 0;
3078
3079 /* If it's a security label, maybe ignore it */
3080 if (ropt->no_security_labels && strcmp(te->desc, "SECURITY LABEL") == 0)
3081 return 0;
3082
3083 /*
3084 * If it's a security label on a publication or a subscription, maybe
3085 * ignore it.
3086 */
3087 if (strcmp(te->desc, "SECURITY LABEL") == 0)
3088 {
3089 if (ropt->no_publications &&
3090 strncmp(te->tag, "PUBLICATION", strlen("PUBLICATION")) == 0)
3091 return 0;
3092
3093 if (ropt->no_subscriptions &&
3094 strncmp(te->tag, "SUBSCRIPTION", strlen("SUBSCRIPTION")) == 0)
3095 return 0;
3096 }
3097
3098 /* If it's a subscription, maybe ignore it */
3099 if (ropt->no_subscriptions && strcmp(te->desc, "SUBSCRIPTION") == 0)
3100 return 0;
3101
3102 /* Ignore it if section is not to be dumped/restored */
3103 switch (curSection)
3104 {
3105 case SECTION_PRE_DATA:
3106 if (!(ropt->dumpSections & DUMP_PRE_DATA))
3107 return 0;
3108 break;
3109 case SECTION_DATA:
3110 if (!(ropt->dumpSections & DUMP_DATA))
3111 return 0;
3112 break;
3113 case SECTION_POST_DATA:
3114 if (!(ropt->dumpSections & DUMP_POST_DATA))
3115 return 0;
3116 break;
3117 default:
3118 /* shouldn't get here, really, but ignore it */
3119 return 0;
3120 }
3121
3122 /* Ignore it if rejected by idWanted[] (cf. SortTocFromFile) */
3123 if (ropt->idWanted && !ropt->idWanted[te->dumpId - 1])
3124 return 0;
3125
3126 /*
3127 * Check options for selective dump/restore.
3128 */
3129 if (strcmp(te->desc, "ACL") == 0 ||
3130 strcmp(te->desc, "COMMENT") == 0 ||
3131 strcmp(te->desc, "STATISTICS DATA") == 0 ||
3132 strcmp(te->desc, "SECURITY LABEL") == 0)
3133 {
3134 /* Database properties react to createDB, not selectivity options. */
3135 if (strncmp(te->tag, "DATABASE ", 9) == 0)
3136 {
3137 if (!ropt->createDB)
3138 return 0;
3139 }
3140 else if (ropt->schemaNames.head != NULL ||
3141 ropt->schemaExcludeNames.head != NULL ||
3142 ropt->selTypes)
3143 {
3144 /*
3145 * In a selective dump/restore, we want to restore these dependent
3146 * TOC entry types only if their parent object is being restored.
3147 * Without selectivity options, we let through everything in the
3148 * archive. Note there may be such entries with no parent, eg
3149 * non-default ACLs for built-in objects. Also, we make
3150 * per-column ACLs additionally depend on the table's ACL if any
3151 * to ensure correct restore order, so those dependencies should
3152 * be ignored in this check.
3153 *
3154 * This code depends on the parent having been marked already,
3155 * which should be the case; if it isn't, perhaps due to
3156 * SortTocFromFile rearrangement, skipping the dependent entry
3157 * seems prudent anyway.
3158 *
3159 * Ideally we'd handle, eg, table CHECK constraints this way too.
3160 * But it's hard to tell which of their dependencies is the one to
3161 * consult.
3162 */
3163 bool dumpthis = false;
3164
3165 for (int i = 0; i < te->nDeps; i++)
3166 {
3167 TocEntry *pte = getTocEntryByDumpId(AH, te->dependencies[i]);
3168
3169 if (!pte)
3170 continue; /* probably shouldn't happen */
3171 if (strcmp(pte->desc, "ACL") == 0)
3172 continue; /* ignore dependency on another ACL */
3173 if (pte->reqs == 0)
3174 continue; /* this object isn't marked, so ignore it */
3175 /* Found a parent to be dumped, so we want to dump this too */
3176 dumpthis = true;
3177 break;
3178 }
3179 if (!dumpthis)
3180 return 0;
3181 }
3182 }
3183 else
3184 {
3185 /* Apply selective-restore rules for standalone TOC entries. */
3186 if (ropt->schemaNames.head != NULL)
3187 {
3188 /* If no namespace is specified, it means all. */
3189 if (!te->namespace)
3190 return 0;
3191 if (!simple_string_list_member(&ropt->schemaNames, te->namespace))
3192 return 0;
3193 }
3194
3195 if (ropt->schemaExcludeNames.head != NULL &&
3196 te->namespace &&
3197 simple_string_list_member(&ropt->schemaExcludeNames, te->namespace))
3198 return 0;
3199
3200 if (ropt->selTypes)
3201 {
3202 if (strcmp(te->desc, "TABLE") == 0 ||
3203 strcmp(te->desc, "TABLE DATA") == 0 ||
3204 strcmp(te->desc, "VIEW") == 0 ||
3205 strcmp(te->desc, "FOREIGN TABLE") == 0 ||
3206 strcmp(te->desc, "MATERIALIZED VIEW") == 0 ||
3207 strcmp(te->desc, "MATERIALIZED VIEW DATA") == 0 ||
3208 strcmp(te->desc, "SEQUENCE") == 0 ||
3209 strcmp(te->desc, "SEQUENCE SET") == 0)
3210 {
3211 if (!ropt->selTable)
3212 return 0;
3213 if (ropt->tableNames.head != NULL &&
3215 return 0;
3216 }
3217 else if (strcmp(te->desc, "INDEX") == 0)
3218 {
3219 if (!ropt->selIndex)
3220 return 0;
3221 if (ropt->indexNames.head != NULL &&
3223 return 0;
3224 }
3225 else if (strcmp(te->desc, "FUNCTION") == 0 ||
3226 strcmp(te->desc, "AGGREGATE") == 0 ||
3227 strcmp(te->desc, "PROCEDURE") == 0)
3228 {
3229 if (!ropt->selFunction)
3230 return 0;
3231 if (ropt->functionNames.head != NULL &&
3233 return 0;
3234 }
3235 else if (strcmp(te->desc, "TRIGGER") == 0)
3236 {
3237 if (!ropt->selTrigger)
3238 return 0;
3239 if (ropt->triggerNames.head != NULL &&
3241 return 0;
3242 }
3243 else
3244 return 0;
3245 }
3246 }
3247
3248
3249 /*
3250 * Determine whether the TOC entry contains schema and/or data components,
3251 * and mask off inapplicable REQ bits. If it had a dataDumper, assume
3252 * it's both schema and data. Otherwise it's probably schema-only, but
3253 * there are exceptions.
3254 */
3255 if (!te->hadDumper)
3256 {
3257 /*
3258 * Special Case: If 'SEQUENCE SET' or anything to do with LOs, then it
3259 * is considered a data entry. We don't need to check for BLOBS or
3260 * old-style BLOB COMMENTS entries, because they will have hadDumper =
3261 * true ... but we do need to check new-style BLOB ACLs, comments,
3262 * etc.
3263 */
3264 if (strcmp(te->desc, "SEQUENCE SET") == 0 ||
3265 strcmp(te->desc, "BLOB") == 0 ||
3266 strcmp(te->desc, "BLOB METADATA") == 0 ||
3267 (strcmp(te->desc, "ACL") == 0 &&
3268 strncmp(te->tag, "LARGE OBJECT", 12) == 0) ||
3269 (strcmp(te->desc, "COMMENT") == 0 &&
3270 strncmp(te->tag, "LARGE OBJECT", 12) == 0) ||
3271 (strcmp(te->desc, "SECURITY LABEL") == 0 &&
3272 strncmp(te->tag, "LARGE OBJECT", 12) == 0))
3273 res = res & REQ_DATA;
3274 else
3275 res = res & ~REQ_DATA;
3276 }
3277
3278 /*
3279 * If there's no definition command, there's no schema component. Treat
3280 * "load via partition root" comments as not schema.
3281 */
3282 if (!te->defn || !te->defn[0] ||
3283 strncmp(te->defn, "-- load via partition root ", 27) == 0)
3284 res = res & ~REQ_SCHEMA;
3285
3286 /*
3287 * Special case: <Init> type with <Max OID> tag; this is obsolete and we
3288 * always ignore it.
3289 */
3290 if ((strcmp(te->desc, "<Init>") == 0) && (strcmp(te->tag, "Max OID") == 0))
3291 return 0;
3292
3293 /* Mask it if we don't want data */
3294 if (!ropt->dumpData)
3295 {
3296 /*
3297 * The sequence_data option overrides dumpData for SEQUENCE SET.
3298 *
3299 * In binary-upgrade mode, even with dumpData unset, we do not mask
3300 * out large objects. (Only large object definitions, comments and
3301 * other metadata should be generated in binary-upgrade mode, not the
3302 * actual data, but that need not concern us here.)
3303 */
3304 if (!(ropt->sequence_data && strcmp(te->desc, "SEQUENCE SET") == 0) &&
3305 !(ropt->binary_upgrade &&
3306 (strcmp(te->desc, "BLOB") == 0 ||
3307 strcmp(te->desc, "BLOB METADATA") == 0 ||
3308 (strcmp(te->desc, "ACL") == 0 &&
3309 strncmp(te->tag, "LARGE OBJECT", 12) == 0) ||
3310 (strcmp(te->desc, "COMMENT") == 0 &&
3311 strncmp(te->tag, "LARGE OBJECT", 12) == 0) ||
3312 (strcmp(te->desc, "SECURITY LABEL") == 0 &&
3313 strncmp(te->tag, "LARGE OBJECT", 12) == 0))))
3314 res = res & (REQ_SCHEMA | REQ_STATS);
3315 }
3316
3317 /* Mask it if we don't want schema */
3318 if (!ropt->dumpSchema)
3319 res = res & (REQ_DATA | REQ_STATS);
3320
3321 return res;
3322}
3323
3324/*
3325 * Identify which pass we should restore this TOC entry in.
3326 *
3327 * See notes with the RestorePass typedef in pg_backup_archiver.h.
3328 */
3329static RestorePass
3331{
3332 /* "ACL LANGUAGE" was a crock emitted only in PG 7.4 */
3333 if (strcmp(te->desc, "ACL") == 0 ||
3334 strcmp(te->desc, "ACL LANGUAGE") == 0 ||
3335 strcmp(te->desc, "DEFAULT ACL") == 0)
3336 return RESTORE_PASS_ACL;
3337 if (strcmp(te->desc, "EVENT TRIGGER") == 0 ||
3338 strcmp(te->desc, "MATERIALIZED VIEW DATA") == 0)
3339 return RESTORE_PASS_POST_ACL;
3340
3341 /*
3342 * Comments and security labels need to be emitted in the same pass as
3343 * their parent objects. ACLs haven't got comments and security labels,
3344 * and neither do matview data objects, but event triggers do.
3345 * (Fortunately, event triggers haven't got ACLs, or we'd need yet another
3346 * weird special case.)
3347 */
3348 if ((strcmp(te->desc, "COMMENT") == 0 ||
3349 strcmp(te->desc, "SECURITY LABEL") == 0) &&
3350 strncmp(te->tag, "EVENT TRIGGER ", 14) == 0)
3351 return RESTORE_PASS_POST_ACL;
3352
3353 /*
3354 * If statistics data is dependent on materialized view data, it must be
3355 * deferred to RESTORE_PASS_POST_ACL. Those entries are already marked as
3356 * SECTION_POST_DATA, and some other stats entries (e.g., index stats)
3357 * will also be marked as SECTION_POST_DATA. Additionally, our lookahead
3358 * code in fetchAttributeStats() assumes that we dump all statistics data
3359 * entries in TOC order. To ensure this assumption holds, we move all
3360 * statistics data entries in SECTION_POST_DATA to RESTORE_PASS_POST_ACL.
3361 */
3362 if (strcmp(te->desc, "STATISTICS DATA") == 0 &&
3364 return RESTORE_PASS_POST_ACL;
3365
3366 /* All else can be handled in the main pass. */
3367 return RESTORE_PASS_MAIN;
3368}
3369
3370/*
3371 * Identify TOC entries that are ACLs.
3372 *
3373 * Note: it seems worth duplicating some code here to avoid a hard-wired
3374 * assumption that these are exactly the same entries that we restore during
3375 * the RESTORE_PASS_ACL phase.
3376 */
3377static bool
3379{
3380 /* "ACL LANGUAGE" was a crock emitted only in PG 7.4 */
3381 if (strcmp(te->desc, "ACL") == 0 ||
3382 strcmp(te->desc, "ACL LANGUAGE") == 0 ||
3383 strcmp(te->desc, "DEFAULT ACL") == 0)
3384 return true;
3385 return false;
3386}
3387
3388/*
3389 * Issue SET commands for parameters that we want to have set the same way
3390 * at all times during execution of a restore script.
3391 */
3392static void
3394{
3395 RestoreOptions *ropt = AH->public.ropt;
3396
3397 /*
3398 * Disable timeouts to allow for slow commands, idle parallel workers, etc
3399 */
3400 ahprintf(AH, "SET statement_timeout = 0;\n");
3401 ahprintf(AH, "SET lock_timeout = 0;\n");
3402 ahprintf(AH, "SET idle_in_transaction_session_timeout = 0;\n");
3403 ahprintf(AH, "SET transaction_timeout = 0;\n");
3404
3405 /* Select the correct character set encoding */
3406 ahprintf(AH, "SET client_encoding = '%s';\n",
3408
3409 /* Select the correct string literal syntax */
3410 ahprintf(AH, "SET standard_conforming_strings = %s;\n",
3411 AH->public.std_strings ? "on" : "off");
3412
3413 /* Select the role to be used during restore */
3414 if (ropt && ropt->use_role)
3415 ahprintf(AH, "SET ROLE %s;\n", fmtId(ropt->use_role));
3416
3417 /* Select the dump-time search_path */
3418 if (AH->public.searchpath)
3419 ahprintf(AH, "%s", AH->public.searchpath);
3420
3421 /* Make sure function checking is disabled */
3422 ahprintf(AH, "SET check_function_bodies = false;\n");
3423
3424 /* Ensure that all valid XML data will be accepted */
3425 ahprintf(AH, "SET xmloption = content;\n");
3426
3427 /* Avoid annoying notices etc */
3428 ahprintf(AH, "SET client_min_messages = warning;\n");
3429 if (!AH->public.std_strings)
3430 ahprintf(AH, "SET escape_string_warning = off;\n");
3431
3432 /* Adjust row-security state */
3433 if (ropt && ropt->enable_row_security)
3434 ahprintf(AH, "SET row_security = on;\n");
3435 else
3436 ahprintf(AH, "SET row_security = off;\n");
3437
3438 /*
3439 * In --transaction-size mode, we should always be in a transaction when
3440 * we begin to restore objects.
3441 */
3442 if (ropt && ropt->txn_size > 0)
3443 {
3444 if (AH->connection)
3446 else
3447 ahprintf(AH, "\nBEGIN;\n");
3448 AH->txnCount = 0;
3449 }
3450
3451 ahprintf(AH, "\n");
3452}
3453
3454/*
3455 * Issue a SET SESSION AUTHORIZATION command. Caller is responsible
3456 * for updating state if appropriate. If user is NULL or an empty string,
3457 * the specification DEFAULT will be used.
3458 */
3459static void
3461{
3463
3464 appendPQExpBufferStr(cmd, "SET SESSION AUTHORIZATION ");
3465
3466 /*
3467 * SQL requires a string literal here. Might as well be correct.
3468 */
3469 if (user && *user)
3470 appendStringLiteralAHX(cmd, user, AH);
3471 else
3472 appendPQExpBufferStr(cmd, "DEFAULT");
3473 appendPQExpBufferChar(cmd, ';');
3474
3475 if (RestoringToDB(AH))
3476 {
3477 PGresult *res;
3478
3479 res = PQexec(AH->connection, cmd->data);
3480
3481 if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
3482 /* NOT warn_or_exit_horribly... use -O instead to skip this. */
3483 pg_fatal("could not set session user to \"%s\": %s",
3485
3486 PQclear(res);
3487 }
3488 else
3489 ahprintf(AH, "%s\n\n", cmd->data);
3490
3491 destroyPQExpBuffer(cmd);
3492}
3493
3494
3495/*
3496 * Issue the commands to connect to the specified database.
3497 *
3498 * If we're currently restoring right into a database, this will
3499 * actually establish a connection. Otherwise it puts a \connect into
3500 * the script output.
3501 */
3502static void
3504{
3505 if (RestoringToDB(AH))
3507 else
3508 {
3509 PQExpBufferData connectbuf;
3510 RestoreOptions *ropt = AH->public.ropt;
3511
3512 /*
3513 * We must temporarily exit restricted mode for \connect, etc.
3514 * Anything added between this line and the following \restrict must
3515 * be careful to avoid any possible meta-command injection vectors.
3516 */
3517 ahprintf(AH, "\\unrestrict %s\n", ropt->restrict_key);
3518
3519 initPQExpBuffer(&connectbuf);
3520 appendPsqlMetaConnect(&connectbuf, dbname);
3521 ahprintf(AH, "%s", connectbuf.data);
3522 termPQExpBuffer(&connectbuf);
3523
3524 ahprintf(AH, "\\restrict %s\n\n", ropt->restrict_key);
3525 }
3526
3527 /*
3528 * NOTE: currUser keeps track of what the imaginary session user in our
3529 * script is. It's now effectively reset to the original userID.
3530 */
3531 free(AH->currUser);
3532 AH->currUser = NULL;
3533
3534 /* don't assume we still know the output schema, tablespace, etc either */
3535 free(AH->currSchema);
3536 AH->currSchema = NULL;
3537
3538 free(AH->currTableAm);
3539 AH->currTableAm = NULL;
3540
3541 free(AH->currTablespace);
3542 AH->currTablespace = NULL;
3543
3544 /* re-establish fixed state */
3546}
3547
3548/*
3549 * Become the specified user, and update state to avoid redundant commands
3550 *
3551 * NULL or empty argument is taken to mean restoring the session default
3552 */
3553static void
3555{
3556 if (!user)
3557 user = ""; /* avoid null pointers */
3558
3559 if (AH->currUser && strcmp(AH->currUser, user) == 0)
3560 return; /* no need to do anything */
3561
3563
3564 /*
3565 * NOTE: currUser keeps track of what the imaginary session user in our
3566 * script is
3567 */
3568 free(AH->currUser);
3569 AH->currUser = pg_strdup(user);
3570}
3571
3572/*
3573 * Become the owner of the given TOC entry object. If
3574 * changes in ownership are not allowed, this doesn't do anything.
3575 */
3576static void
3578{
3579 RestoreOptions *ropt = AH->public.ropt;
3580
3581 if (ropt && (ropt->noOwner || !ropt->use_setsessauth))
3582 return;
3583
3584 _becomeUser(AH, te->owner);
3585}
3586
3587
3588/*
3589 * Issue the commands to select the specified schema as the current schema
3590 * in the target database.
3591 */
3592static void
3593_selectOutputSchema(ArchiveHandle *AH, const char *schemaName)
3594{
3595 PQExpBuffer qry;
3596
3597 /*
3598 * If there was a SEARCHPATH TOC entry, we're supposed to just stay with
3599 * that search_path rather than switching to entry-specific paths.
3600 * Otherwise, it's an old archive that will not restore correctly unless
3601 * we set the search_path as it's expecting.
3602 */
3603 if (AH->public.searchpath)
3604 return;
3605
3606 if (!schemaName || *schemaName == '\0' ||
3607 (AH->currSchema && strcmp(AH->currSchema, schemaName) == 0))
3608 return; /* no need to do anything */
3609
3610 qry = createPQExpBuffer();
3611
3612 appendPQExpBuffer(qry, "SET search_path = %s",
3613 fmtId(schemaName));
3614 if (strcmp(schemaName, "pg_catalog") != 0)
3615 appendPQExpBufferStr(qry, ", pg_catalog");
3616
3617 if (RestoringToDB(AH))
3618 {
3619 PGresult *res;
3620
3621 res = PQexec(AH->connection, qry->data);
3622
3623 if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
3625 "could not set \"search_path\" to \"%s\": %s",
3626 schemaName, PQerrorMessage(AH->connection));
3627
3628 PQclear(res);
3629 }
3630 else
3631 ahprintf(AH, "%s;\n\n", qry->data);
3632
3633 free(AH->currSchema);
3634 AH->currSchema = pg_strdup(schemaName);
3635
3636 destroyPQExpBuffer(qry);
3637}
3638
3639/*
3640 * Issue the commands to select the specified tablespace as the current one
3641 * in the target database.
3642 */
3643static void
3645{
3646 RestoreOptions *ropt = AH->public.ropt;
3647 PQExpBuffer qry;
3648 const char *want,
3649 *have;
3650
3651 /* do nothing in --no-tablespaces mode */
3652 if (ropt->noTablespace)
3653 return;
3654
3655 have = AH->currTablespace;
3656 want = tablespace;
3657
3658 /* no need to do anything for non-tablespace object */
3659 if (!want)
3660 return;
3661
3662 if (have && strcmp(want, have) == 0)
3663 return; /* no need to do anything */
3664
3665 qry = createPQExpBuffer();
3666
3667 if (strcmp(want, "") == 0)
3668 {
3669 /* We want the tablespace to be the database's default */
3670 appendPQExpBufferStr(qry, "SET default_tablespace = ''");
3671 }
3672 else
3673 {
3674 /* We want an explicit tablespace */
3675 appendPQExpBuffer(qry, "SET default_tablespace = %s", fmtId(want));
3676 }
3677
3678 if (RestoringToDB(AH))
3679 {
3680 PGresult *res;
3681
3682 res = PQexec(AH->connection, qry->data);
3683
3684 if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
3686 "could not set \"default_tablespace\" to %s: %s",
3687 fmtId(want), PQerrorMessage(AH->connection));
3688
3689 PQclear(res);
3690 }
3691 else
3692 ahprintf(AH, "%s;\n\n", qry->data);
3693
3694 free(AH->currTablespace);
3695 AH->currTablespace = pg_strdup(want);
3696
3697 destroyPQExpBuffer(qry);
3698}
3699
3700/*
3701 * Set the proper default_table_access_method value for the table.
3702 */
3703static void
3705{
3706 RestoreOptions *ropt = AH->public.ropt;
3707 PQExpBuffer cmd;
3708 const char *want,
3709 *have;
3710
3711 /* do nothing in --no-table-access-method mode */
3712 if (ropt->noTableAm)
3713 return;
3714
3715 have = AH->currTableAm;
3716 want = tableam;
3717
3718 if (!want)
3719 return;
3720
3721 if (have && strcmp(want, have) == 0)
3722 return;
3723
3724 cmd = createPQExpBuffer();
3725 appendPQExpBuffer(cmd, "SET default_table_access_method = %s;", fmtId(want));
3726
3727 if (RestoringToDB(AH))
3728 {
3729 PGresult *res;
3730
3731 res = PQexec(AH->connection, cmd->data);
3732
3733 if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
3735 "could not set \"default_table_access_method\": %s",
3737
3738 PQclear(res);
3739 }
3740 else
3741 ahprintf(AH, "%s\n\n", cmd->data);
3742
3743 destroyPQExpBuffer(cmd);
3744
3745 free(AH->currTableAm);
3746 AH->currTableAm = pg_strdup(want);
3747}
3748
3749/*
3750 * Set the proper default table access method for a table without storage.
3751 * Currently, this is required only for partitioned tables with a table AM.
3752 */
3753static void
3755{
3756 RestoreOptions *ropt = AH->public.ropt;
3757 const char *tableam = te->tableam;
3758 PQExpBuffer cmd;
3759
3760 /* do nothing in --no-table-access-method mode */
3761 if (ropt->noTableAm)
3762 return;
3763
3764 if (!tableam)
3765 return;
3766
3767 Assert(te->relkind == RELKIND_PARTITIONED_TABLE);
3768
3769 cmd = createPQExpBuffer();
3770
3771 appendPQExpBufferStr(cmd, "ALTER TABLE ");
3772 appendPQExpBuffer(cmd, "%s ", fmtQualifiedId(te->namespace, te->tag));
3773 appendPQExpBuffer(cmd, "SET ACCESS METHOD %s;",
3774 fmtId(tableam));
3775
3776 if (RestoringToDB(AH))
3777 {
3778 PGresult *res;
3779
3780 res = PQexec(AH->connection, cmd->data);
3781
3782 if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
3784 "could not alter table access method: %s",
3786 PQclear(res);
3787 }
3788 else
3789 ahprintf(AH, "%s\n\n", cmd->data);
3790
3791 destroyPQExpBuffer(cmd);
3792}
3793
3794/*
3795 * Extract an object description for a TOC entry, and append it to buf.
3796 *
3797 * This is used for ALTER ... OWNER TO.
3798 *
3799 * If the object type has no owner, do nothing.
3800 */
3801static void
3803{
3804 const char *type = te->desc;
3805
3806 /* objects that don't require special decoration */
3807 if (strcmp(type, "COLLATION") == 0 ||
3808 strcmp(type, "CONVERSION") == 0 ||
3809 strcmp(type, "DOMAIN") == 0 ||
3810 strcmp(type, "FOREIGN TABLE") == 0 ||
3811 strcmp(type, "MATERIALIZED VIEW") == 0 ||
3812 strcmp(type, "SEQUENCE") == 0 ||
3813 strcmp(type, "STATISTICS") == 0 ||
3814 strcmp(type, "TABLE") == 0 ||
3815 strcmp(type, "TEXT SEARCH DICTIONARY") == 0 ||
3816 strcmp(type, "TEXT SEARCH CONFIGURATION") == 0 ||
3817 strcmp(type, "TYPE") == 0 ||
3818 strcmp(type, "VIEW") == 0 ||
3819 /* non-schema-specified objects */
3820 strcmp(type, "DATABASE") == 0 ||
3821 strcmp(type, "PROCEDURAL LANGUAGE") == 0 ||
3822 strcmp(type, "SCHEMA") == 0 ||
3823 strcmp(type, "EVENT TRIGGER") == 0 ||
3824 strcmp(type, "FOREIGN DATA WRAPPER") == 0 ||
3825 strcmp(type, "SERVER") == 0 ||
3826 strcmp(type, "PUBLICATION") == 0 ||
3827 strcmp(type, "SUBSCRIPTION") == 0)
3828 {
3829 appendPQExpBuffer(buf, "%s ", type);
3830 if (te->namespace && *te->namespace)
3831 appendPQExpBuffer(buf, "%s.", fmtId(te->namespace));
3833 }
3834 /* LOs just have a name, but it's numeric so must not use fmtId */
3835 else if (strcmp(type, "BLOB") == 0)
3836 {
3837 appendPQExpBuffer(buf, "LARGE OBJECT %s", te->tag);
3838 }
3839
3840 /*
3841 * These object types require additional decoration. Fortunately, the
3842 * information needed is exactly what's in the DROP command.
3843 */
3844 else if (strcmp(type, "AGGREGATE") == 0 ||
3845 strcmp(type, "FUNCTION") == 0 ||
3846 strcmp(type, "OPERATOR") == 0 ||
3847 strcmp(type, "OPERATOR CLASS") == 0 ||
3848 strcmp(type, "OPERATOR FAMILY") == 0 ||
3849 strcmp(type, "PROCEDURE") == 0)
3850 {
3851 /* Chop "DROP " off the front and make a modifiable copy */
3852 char *first = pg_strdup(te->dropStmt + 5);
3853 char *last;
3854
3855 /* point to last character in string */
3856 last = first + strlen(first) - 1;
3857
3858 /* Strip off any ';' or '\n' at the end */
3859 while (last >= first && (*last == '\n' || *last == ';'))
3860 last--;
3861 *(last + 1) = '\0';
3862
3863 appendPQExpBufferStr(buf, first);
3864
3865 free(first);
3866 return;
3867 }
3868 /* these object types don't have separate owners */
3869 else if (strcmp(type, "CAST") == 0 ||
3870 strcmp(type, "CHECK CONSTRAINT") == 0 ||
3871 strcmp(type, "CONSTRAINT") == 0 ||
3872 strcmp(type, "DATABASE PROPERTIES") == 0 ||
3873 strcmp(type, "DEFAULT") == 0 ||
3874 strcmp(type, "FK CONSTRAINT") == 0 ||
3875 strcmp(type, "INDEX") == 0 ||
3876 strcmp(type, "RULE") == 0 ||
3877 strcmp(type, "TRIGGER") == 0 ||
3878 strcmp(type, "ROW SECURITY") == 0 ||
3879 strcmp(type, "POLICY") == 0 ||
3880 strcmp(type, "USER MAPPING") == 0)
3881 {
3882 /* do nothing */
3883 }
3884 else
3885 pg_fatal("don't know how to set owner for object type \"%s\"", type);
3886}
3887
3888/*
3889 * Emit the SQL commands to create the object represented by a TOC entry
3890 *
3891 * This now also includes issuing an ALTER OWNER command to restore the
3892 * object's ownership, if wanted. But note that the object's permissions
3893 * will remain at default, until the matching ACL TOC entry is restored.
3894 */
3895static void
3896_printTocEntry(ArchiveHandle *AH, TocEntry *te, const char *pfx)
3897{
3898 RestoreOptions *ropt = AH->public.ropt;
3899
3900 /*
3901 * Select owner, schema, tablespace and default AM as necessary. The
3902 * default access method for partitioned tables is handled after
3903 * generating the object definition, as it requires an ALTER command
3904 * rather than SET.
3905 */
3906 _becomeOwner(AH, te);
3907 _selectOutputSchema(AH, te->namespace);
3909 if (te->relkind != RELKIND_PARTITIONED_TABLE)
3911
3912 /* Emit header comment for item */
3913 if (!AH->noTocComments)
3914 {
3915 char *sanitized_name;
3916 char *sanitized_schema;
3917 char *sanitized_owner;
3918
3919 ahprintf(AH, "--\n");
3920 if (AH->public.verbose)
3921 {
3922 ahprintf(AH, "-- TOC entry %d (class %u OID %u)\n",
3923 te->dumpId, te->catalogId.tableoid, te->catalogId.oid);
3924 if (te->nDeps > 0)
3925 {
3926 int i;
3927
3928 ahprintf(AH, "-- Dependencies:");
3929 for (i = 0; i < te->nDeps; i++)
3930 ahprintf(AH, " %d", te->dependencies[i]);
3931 ahprintf(AH, "\n");
3932 }
3933 }
3934
3935 sanitized_name = sanitize_line(te->tag, false);
3936 sanitized_schema = sanitize_line(te->namespace, true);
3937 sanitized_owner = sanitize_line(ropt->noOwner ? NULL : te->owner, true);
3938
3939 ahprintf(AH, "-- %sName: %s; Type: %s; Schema: %s; Owner: %s",
3940 pfx, sanitized_name, te->desc, sanitized_schema,
3941 sanitized_owner);
3942
3943 free(sanitized_name);
3944 free(sanitized_schema);
3945 free(sanitized_owner);
3946
3947 if (te->tablespace && strlen(te->tablespace) > 0 && !ropt->noTablespace)
3948 {
3949 char *sanitized_tablespace;
3950
3951 sanitized_tablespace = sanitize_line(te->tablespace, false);
3952 ahprintf(AH, "; Tablespace: %s", sanitized_tablespace);
3953 free(sanitized_tablespace);
3954 }
3955 ahprintf(AH, "\n");
3956
3957 if (AH->PrintExtraTocPtr != NULL)
3958 AH->PrintExtraTocPtr(AH, te);
3959 ahprintf(AH, "--\n\n");
3960 }
3961
3962 /*
3963 * Actually print the definition. Normally we can just print the defn
3964 * string if any, but we have four special cases:
3965 *
3966 * 1. A crude hack for suppressing AUTHORIZATION clause that old pg_dump
3967 * versions put into CREATE SCHEMA. Don't mutate the variant for schema
3968 * "public" that is a comment. We have to do this when --no-owner mode is
3969 * selected. This is ugly, but I see no other good way ...
3970 *
3971 * 2. BLOB METADATA entries need special processing since their defn
3972 * strings are just lists of OIDs, not complete SQL commands.
3973 *
3974 * 3. ACL LARGE OBJECTS entries need special processing because they
3975 * contain only one copy of the ACL GRANT/REVOKE commands, which we must
3976 * apply to each large object listed in the associated BLOB METADATA.
3977 *
3978 * 4. Entries with a defnDumper need to call it to generate the
3979 * definition. This is primarily intended to provide a way to save memory
3980 * for objects that would otherwise need a lot of it (e.g., statistics
3981 * data).
3982 */
3983 if (ropt->noOwner &&
3984 strcmp(te->desc, "SCHEMA") == 0 && strncmp(te->defn, "--", 2) != 0)
3985 {
3986 ahprintf(AH, "CREATE SCHEMA %s;\n\n\n", fmtId(te->tag));
3987 }
3988 else if (strcmp(te->desc, "BLOB METADATA") == 0)
3989 {
3990 IssueCommandPerBlob(AH, te, "SELECT pg_catalog.lo_create('", "')");
3991 }
3992 else if (strcmp(te->desc, "ACL") == 0 &&
3993 strncmp(te->tag, "LARGE OBJECTS", 13) == 0)
3994 {
3995 IssueACLPerBlob(AH, te);
3996 }
3997 else if (te->defnLen && AH->format != archTar)
3998 {
3999 /*
4000 * If defnLen is set, the defnDumper has already been called for this
4001 * TOC entry. We don't normally expect a defnDumper to be called for
4002 * a TOC entry a second time in _printTocEntry(), but there's an
4003 * exception. The tar format first calls WriteToc(), which scans the
4004 * entire TOC, and then it later calls RestoreArchive() to generate
4005 * restore.sql, which scans the TOC again. There doesn't appear to be
4006 * a good way to prevent a second defnDumper call in this case without
4007 * storing the definition in memory, which defeats the purpose. This
4008 * second defnDumper invocation should generate the same output as the
4009 * first, but even if it doesn't, the worst-case scenario is that
4010 * restore.sql might have different statistics data than the archive.
4011 *
4012 * In all other cases, encountering a TOC entry a second time in
4013 * _printTocEntry() is unexpected, so we fail because one of our
4014 * assumptions must no longer hold true.
4015 *
4016 * XXX This is a layering violation, but the alternative is an awkward
4017 * and complicated callback infrastructure for this special case. This
4018 * might be worth revisiting in the future.
4019 */
4020 pg_fatal("unexpected TOC entry in _printTocEntry(): %d %s %s",
4021 te->dumpId, te->desc, te->tag);
4022 }
4023 else if (te->defnDumper)
4024 {
4025 char *defn = te->defnDumper((Archive *) AH, te->defnDumperArg, te);
4026
4027 te->defnLen = ahprintf(AH, "%s\n\n", defn);
4028 pg_free(defn);
4029 }
4030 else if (te->defn && strlen(te->defn) > 0)
4031 {
4032 ahprintf(AH, "%s\n\n", te->defn);
4033
4034 /*
4035 * If the defn string contains multiple SQL commands, txn_size mode
4036 * should count it as N actions not one. But rather than build a full
4037 * SQL parser, approximate this by counting semicolons. One case
4038 * where that tends to be badly fooled is function definitions, so
4039 * ignore them. (restore_toc_entry will count one action anyway.)
4040 */
4041 if (ropt->txn_size > 0 &&
4042 strcmp(te->desc, "FUNCTION") != 0 &&
4043 strcmp(te->desc, "PROCEDURE") != 0)
4044 {
4045 const char *p = te->defn;
4046 int nsemis = 0;
4047
4048 while ((p = strchr(p, ';')) != NULL)
4049 {
4050 nsemis++;
4051 p++;
4052 }
4053 if (nsemis > 1)
4054 AH->txnCount += nsemis - 1;
4055 }
4056 }
4057
4058 /*
4059 * If we aren't using SET SESSION AUTH to determine ownership, we must
4060 * instead issue an ALTER OWNER command. Schema "public" is special; when
4061 * a dump emits a comment in lieu of creating it, we use ALTER OWNER even
4062 * when using SET SESSION for all other objects. We assume that anything
4063 * without a DROP command is not a separately ownable object.
4064 */
4065 if (!ropt->noOwner &&
4066 (!ropt->use_setsessauth ||
4067 (strcmp(te->desc, "SCHEMA") == 0 &&
4068 strncmp(te->defn, "--", 2) == 0)) &&
4069 te->owner && strlen(te->owner) > 0 &&
4070 te->dropStmt && strlen(te->dropStmt) > 0)
4071 {
4072 if (strcmp(te->desc, "BLOB METADATA") == 0)
4073 {
4074 /* BLOB METADATA needs special code to handle multiple LOs */
4075 char *cmdEnd = psprintf(" OWNER TO %s", fmtId(te->owner));
4076
4077 IssueCommandPerBlob(AH, te, "ALTER LARGE OBJECT ", cmdEnd);
4078 pg_free(cmdEnd);
4079 }
4080 else
4081 {
4082 /* For all other cases, we can use _getObjectDescription */
4083 PQExpBufferData temp;
4084
4085 initPQExpBuffer(&temp);
4086 _getObjectDescription(&temp, te);
4087
4088 /*
4089 * If _getObjectDescription() didn't fill the buffer, then there
4090 * is no owner.
4091 */
4092 if (temp.data[0])
4093 ahprintf(AH, "ALTER %s OWNER TO %s;\n\n",
4094 temp.data, fmtId(te->owner));
4095 termPQExpBuffer(&temp);
4096 }
4097 }
4098
4099 /*
4100 * Select a partitioned table's default AM, once the table definition has
4101 * been generated.
4102 */
4103 if (te->relkind == RELKIND_PARTITIONED_TABLE)
4105
4106 /*
4107 * If it's an ACL entry, it might contain SET SESSION AUTHORIZATION
4108 * commands, so we can no longer assume we know the current auth setting.
4109 */
4110 if (_tocEntryIsACL(te))
4111 {
4112 free(AH->currUser);
4113 AH->currUser = NULL;
4114 }
4115}
4116
4117/*
4118 * Write the file header for a custom-format archive
4119 */
4120void
4122{
4123 struct tm crtm;
4124
4125 AH->WriteBufPtr(AH, "PGDMP", 5); /* Magic code */
4126 AH->WriteBytePtr(AH, ARCHIVE_MAJOR(AH->version));
4127 AH->WriteBytePtr(AH, ARCHIVE_MINOR(AH->version));
4128 AH->WriteBytePtr(AH, ARCHIVE_REV(AH->version));
4129 AH->WriteBytePtr(AH, AH->intSize);
4130 AH->WriteBytePtr(AH, AH->offSize);
4131 AH->WriteBytePtr(AH, AH->format);
4133 crtm = *localtime(&AH->createDate);
4134 WriteInt(AH, crtm.tm_sec);
4135 WriteInt(AH, crtm.tm_min);
4136 WriteInt(AH, crtm.tm_hour);
4137 WriteInt(AH, crtm.tm_mday);
4138 WriteInt(AH, crtm.tm_mon);
4139 WriteInt(AH, crtm.tm_year);
4140 WriteInt(AH, crtm.tm_isdst);
4141 WriteStr(AH, PQdb(AH->connection));
4143 WriteStr(AH, PG_VERSION);
4144}
4145
4146void
4148{
4149 char *errmsg;
4150 char vmaj,
4151 vmin,
4152 vrev;
4153 int fmt;
4154
4155 /*
4156 * If we haven't already read the header, do so.
4157 *
4158 * NB: this code must agree with _discoverArchiveFormat(). Maybe find a
4159 * way to unify the cases?
4160 */
4161 if (!AH->readHeader)
4162 {
4163 char tmpMag[7];
4164
4165 AH->ReadBufPtr(AH, tmpMag, 5);
4166
4167 if (strncmp(tmpMag, "PGDMP", 5) != 0)
4168 pg_fatal("did not find magic string in file header");
4169 }
4170
4171 vmaj = AH->ReadBytePtr(AH);
4172 vmin = AH->ReadBytePtr(AH);
4173
4174 if (vmaj > 1 || (vmaj == 1 && vmin > 0)) /* Version > 1.0 */
4175 vrev = AH->ReadBytePtr(AH);
4176 else
4177 vrev = 0;
4178
4179 AH->version = MAKE_ARCHIVE_VERSION(vmaj, vmin, vrev);
4180
4181 if (AH->version < K_VERS_1_0 || AH->version > K_VERS_MAX)
4182 pg_fatal("unsupported version (%d.%d) in file header",
4183 vmaj, vmin);
4184
4185 AH->intSize = AH->ReadBytePtr(AH);
4186 if (AH->intSize > 32)
4187 pg_fatal("sanity check on integer size (%zu) failed", AH->intSize);
4188
4189 if (AH->intSize > sizeof(int))
4190 pg_log_warning("archive was made on a machine with larger integers, some operations might fail");
4191
4192 if (AH->version >= K_VERS_1_7)
4193 AH->offSize = AH->ReadBytePtr(AH);
4194 else
4195 AH->offSize = AH->intSize;
4196
4197 fmt = AH->ReadBytePtr(AH);
4198
4199 if (AH->format != fmt)
4200 pg_fatal("expected format (%d) differs from format found in file (%d)",
4201 AH->format, fmt);
4202
4203 if (AH->version >= K_VERS_1_15)
4205 else if (AH->version >= K_VERS_1_2)
4206 {
4207 /* Guess the compression method based on the level */
4208 if (AH->version < K_VERS_1_4)
4209 AH->compression_spec.level = AH->ReadBytePtr(AH);
4210 else
4211 AH->compression_spec.level = ReadInt(AH);
4212
4213 if (AH->compression_spec.level != 0)
4215 }
4216 else
4218
4220 if (errmsg)
4221 {
4222 pg_log_warning("archive is compressed, but this installation does not support compression (%s) -- no data will be available",
4223 errmsg);
4224 pg_free(errmsg);
4225 }
4226
4227 if (AH->version >= K_VERS_1_4)
4228 {
4229 struct tm crtm;
4230
4231 crtm.tm_sec = ReadInt(AH);
4232 crtm.tm_min = ReadInt(AH);
4233 crtm.tm_hour = ReadInt(AH);
4234 crtm.tm_mday = ReadInt(AH);
4235 crtm.tm_mon = ReadInt(AH);
4236 crtm.tm_year = ReadInt(AH);
4237 crtm.tm_isdst = ReadInt(AH);
4238
4239 /*
4240 * Newer versions of glibc have mktime() report failure if tm_isdst is
4241 * inconsistent with the prevailing timezone, e.g. tm_isdst = 1 when
4242 * TZ=UTC. This is problematic when restoring an archive under a
4243 * different timezone setting. If we get a failure, try again with
4244 * tm_isdst set to -1 ("don't know").
4245 *
4246 * XXX with or without this hack, we reconstruct createDate
4247 * incorrectly when the prevailing timezone is different from
4248 * pg_dump's. Next time we bump the archive version, we should flush
4249 * this representation and store a plain seconds-since-the-Epoch
4250 * timestamp instead.
4251 */
4252 AH->createDate = mktime(&crtm);
4253 if (AH->createDate == (time_t) -1)
4254 {
4255 crtm.tm_isdst = -1;
4256 AH->createDate = mktime(&crtm);
4257 if (AH->createDate == (time_t) -1)
4258 pg_log_warning("invalid creation date in header");
4259 }
4260 }
4261
4262 if (AH->version >= K_VERS_1_4)
4263 {
4264 AH->archdbname = ReadStr(AH);
4265 }
4266
4267 if (AH->version >= K_VERS_1_10)
4268 {
4269 AH->archiveRemoteVersion = ReadStr(AH);
4270 AH->archiveDumpVersion = ReadStr(AH);
4271 }
4272}
4273
4274
4275/*
4276 * checkSeek
4277 * check to see if ftell/fseek can be performed.
4278 */
4279bool
4280checkSeek(FILE *fp)
4281{
4282 pgoff_t tpos;
4283
4284 /* Check that ftello works on this file */
4285 tpos = ftello(fp);
4286 if (tpos < 0)
4287 return false;
4288
4289 /*
4290 * Check that fseeko(SEEK_SET) works, too. NB: we used to try to test
4291 * this with fseeko(fp, 0, SEEK_CUR). But some platforms treat that as a
4292 * successful no-op even on files that are otherwise unseekable.
4293 */
4294 if (fseeko(fp, tpos, SEEK_SET) != 0)
4295 return false;
4296
4297 return true;
4298}
4299
4300
4301/*
4302 * dumpTimestamp
4303 */
4304static void
4305dumpTimestamp(ArchiveHandle *AH, const char *msg, time_t tim)
4306{
4307 char buf[64];
4308
4309 if (strftime(buf, sizeof(buf), PGDUMP_STRFTIME_FMT, localtime(&tim)) != 0)
4310 ahprintf(AH, "-- %s %s\n\n", msg, buf);
4311}
4312
4313/*
4314 * Main engine for parallel restore.
4315 *
4316 * Parallel restore is done in three phases. In this first phase,
4317 * we'll process all SECTION_PRE_DATA TOC entries that are allowed to be
4318 * processed in the RESTORE_PASS_MAIN pass. (In practice, that's all
4319 * PRE_DATA items other than ACLs.) Entries we can't process now are
4320 * added to the pending_list for later phases to deal with.
4321 */
4322static void
4324{
4325 bool skipped_some;
4326 TocEntry *next_work_item;
4327
4328 pg_log_debug("entering restore_toc_entries_prefork");
4329
4330 /* Adjust dependency information */
4331 fix_dependencies(AH);
4332
4333 /*
4334 * Do all the early stuff in a single connection in the parent. There's no
4335 * great point in running it in parallel, in fact it will actually run
4336 * faster in a single connection because we avoid all the connection and
4337 * setup overhead. Also, pre-9.2 pg_dump versions were not very good
4338 * about showing all the dependencies of SECTION_PRE_DATA items, so we do
4339 * not risk trying to process them out-of-order.
4340 *
4341 * Stuff that we can't do immediately gets added to the pending_list.
4342 * Note: we don't yet filter out entries that aren't going to be restored.
4343 * They might participate in dependency chains connecting entries that
4344 * should be restored, so we treat them as live until we actually process
4345 * them.
4346 *
4347 * Note: as of 9.2, it should be guaranteed that all PRE_DATA items appear
4348 * before DATA items, and all DATA items before POST_DATA items. That is
4349 * not certain to be true in older archives, though, and in any case use
4350 * of a list file would destroy that ordering (cf. SortTocFromFile). So
4351 * this loop cannot assume that it holds.
4352 */
4354 skipped_some = false;
4355 for (next_work_item = AH->toc->next; next_work_item != AH->toc; next_work_item = next_work_item->next)
4356 {
4357 bool do_now = true;
4358
4359 if (next_work_item->section != SECTION_PRE_DATA)
4360 {
4361 /* DATA and POST_DATA items are just ignored for now */
4362 if (next_work_item->section == SECTION_DATA ||
4363 next_work_item->section == SECTION_POST_DATA)
4364 {
4365 do_now = false;
4366 skipped_some = true;
4367 }
4368 else
4369 {
4370 /*
4371 * SECTION_NONE items, such as comments, can be processed now
4372 * if we are still in the PRE_DATA part of the archive. Once
4373 * we've skipped any items, we have to consider whether the
4374 * comment's dependencies are satisfied, so skip it for now.
4375 */
4376 if (skipped_some)
4377 do_now = false;
4378 }
4379 }
4380
4381 /*
4382 * Also skip items that need to be forced into later passes. We need
4383 * not set skipped_some in this case, since by assumption no main-pass
4384 * items could depend on these.
4385 */
4386 if (_tocEntryRestorePass(next_work_item) != RESTORE_PASS_MAIN)
4387 do_now = false;
4388
4389 if (do_now)
4390 {
4391 /* OK, restore the item and update its dependencies */
4392 pg_log_info("processing item %d %s %s",
4393 next_work_item->dumpId,
4394 next_work_item->desc, next_work_item->tag);
4395
4396 (void) restore_toc_entry(AH, next_work_item, false);
4397
4398 /* Reduce dependencies, but don't move anything to ready_heap */
4399 reduce_dependencies(AH, next_work_item, NULL);
4400 }
4401 else
4402 {
4403 /* Nope, so add it to pending_list */
4404 pending_list_append(pending_list, next_work_item);
4405 }
4406 }
4407
4408 /*
4409 * In --transaction-size mode, we must commit the open transaction before
4410 * dropping the database connection. This also ensures that child workers
4411 * can see the objects we've created so far.
4412 */
4413 if (AH->public.ropt->txn_size > 0)
4415
4416 /*
4417 * Now close parent connection in prep for parallel steps. We do this
4418 * mainly to ensure that we don't exceed the specified number of parallel
4419 * connections.
4420 */
4422
4423 /* blow away any transient state from the old connection */
4424 free(AH->currUser);
4425 AH->currUser = NULL;
4426 free(AH->currSchema);
4427 AH->currSchema = NULL;
4428 free(AH->currTablespace);
4429 AH->currTablespace = NULL;
4430 free(AH->currTableAm);
4431 AH->currTableAm = NULL;
4432}
4433
4434/*
4435 * Main engine for parallel restore.
4436 *
4437 * Parallel restore is done in three phases. In this second phase,
4438 * we process entries by dispatching them to parallel worker children
4439 * (processes on Unix, threads on Windows), each of which connects
4440 * separately to the database. Inter-entry dependencies are respected,
4441 * and so is the RestorePass multi-pass structure. When we can no longer
4442 * make any entries ready to process, we exit. Normally, there will be
4443 * nothing left to do; but if there is, the third phase will mop up.
4444 */
4445static void
4447 TocEntry *pending_list)
4448{
4449 binaryheap *ready_heap;
4450 TocEntry *next_work_item;
4451
4452 pg_log_debug("entering restore_toc_entries_parallel");
4453
4454 /* Set up ready_heap with enough room for all known TocEntrys */
4455 ready_heap = binaryheap_allocate(AH->tocCount,
4457 NULL);
4458
4459 /*
4460 * The pending_list contains all items that we need to restore. Move all
4461 * items that are available to process immediately into the ready_heap.
4462 * After this setup, the pending list is everything that needs to be done
4463 * but is blocked by one or more dependencies, while the ready heap
4464 * contains items that have no remaining dependencies and are OK to
4465 * process in the current restore pass.
4466 */
4468 move_to_ready_heap(pending_list, ready_heap, AH->restorePass);
4469
4470 /*
4471 * main parent loop
4472 *
4473 * Keep going until there is no worker still running AND there is no work
4474 * left to be done. Note invariant: at top of loop, there should always
4475 * be at least one worker available to dispatch a job to.
4476 */
4477 pg_log_info("entering main parallel loop");
4478
4479 for (;;)
4480 {
4481 /* Look for an item ready to be dispatched to a worker */
4482 next_work_item = pop_next_work_item(ready_heap, pstate);
4483 if (next_work_item != NULL)
4484 {
4485 /* If not to be restored, don't waste time launching a worker */
4486 if ((next_work_item->reqs & (REQ_SCHEMA | REQ_DATA | REQ_STATS)) == 0)
4487 {
4488 pg_log_info("skipping item %d %s %s",
4489 next_work_item->dumpId,
4490 next_work_item->desc, next_work_item->tag);
4491 /* Update its dependencies as though we'd completed it */
4492 reduce_dependencies(AH, next_work_item, ready_heap);
4493 /* Loop around to see if anything else can be dispatched */
4494 continue;
4495 }
4496
4497 pg_log_info("launching item %d %s %s",
4498 next_work_item->dumpId,
4499 next_work_item->desc, next_work_item->tag);
4500
4501 /* Dispatch to some worker */
4502 DispatchJobForTocEntry(AH, pstate, next_work_item, ACT_RESTORE,
4503 mark_restore_job_done, ready_heap);
4504 }
4505 else if (IsEveryWorkerIdle(pstate))
4506 {
4507 /*
4508 * Nothing is ready and no worker is running, so we're done with
4509 * the current pass or maybe with the whole process.
4510 */
4511 if (AH->restorePass == RESTORE_PASS_LAST)
4512 break; /* No more parallel processing is possible */
4513
4514 /* Advance to next restore pass */
4515 AH->restorePass++;
4516 /* That probably allows some stuff to be made ready */
4517 move_to_ready_heap(pending_list, ready_heap, AH->restorePass);
4518 /* Loop around to see if anything's now ready */
4519 continue;
4520 }
4521 else
4522 {
4523 /*
4524 * We have nothing ready, but at least one child is working, so
4525 * wait for some subjob to finish.
4526 */
4527 }
4528
4529 /*
4530 * Before dispatching another job, check to see if anything has
4531 * finished. We should check every time through the loop so as to
4532 * reduce dependencies as soon as possible. If we were unable to
4533 * dispatch any job this time through, wait until some worker finishes
4534 * (and, hopefully, unblocks some pending item). If we did dispatch
4535 * something, continue as soon as there's at least one idle worker.
4536 * Note that in either case, there's guaranteed to be at least one
4537 * idle worker when we return to the top of the loop. This ensures we
4538 * won't block inside DispatchJobForTocEntry, which would be
4539 * undesirable: we'd rather postpone dispatching until we see what's
4540 * been unblocked by finished jobs.
4541 */
4542 WaitForWorkers(AH, pstate,
4543 next_work_item ? WFW_ONE_IDLE : WFW_GOT_STATUS);
4544 }
4545
4546 /* There should now be nothing in ready_heap. */
4547 Assert(binaryheap_empty(ready_heap));
4548
4549 binaryheap_free(ready_heap);
4550
4551 pg_log_info("finished main parallel loop");
4552}
4553
4554/*
4555 * Main engine for parallel restore.
4556 *
4557 * Parallel restore is done in three phases. In this third phase,
4558 * we mop up any remaining TOC entries by processing them serially.
4559 * This phase normally should have nothing to do, but if we've somehow
4560 * gotten stuck due to circular dependencies or some such, this provides
4561 * at least some chance of completing the restore successfully.
4562 */
4563static void
4565{
4566 RestoreOptions *ropt = AH->public.ropt;
4567 TocEntry *te;
4568
4569 pg_log_debug("entering restore_toc_entries_postfork");
4570
4571 /*
4572 * Now reconnect the single parent connection.
4573 */
4574 ConnectDatabaseAhx((Archive *) AH, &ropt->cparams, true);
4575
4576 /* re-establish fixed state */
4578
4579 /*
4580 * Make sure there is no work left due to, say, circular dependencies, or
4581 * some other pathological condition. If so, do it in the single parent
4582 * connection. We don't sweat about RestorePass ordering; it's likely we
4583 * already violated that.
4584 */
4585 for (te = pending_list->pending_next; te != pending_list; te = te->pending_next)
4586 {
4587 pg_log_info("processing missed item %d %s %s",
4588 te->dumpId, te->desc, te->tag);
4589 (void) restore_toc_entry(AH, te, false);
4590 }
4591}
4592
4593/*
4594 * Check if te1 has an exclusive lock requirement for an item that te2 also
4595 * requires, whether or not te2's requirement is for an exclusive lock.
4596 */
4597static bool
4599{
4600 int j,
4601 k;
4602
4603 for (j = 0; j < te1->nLockDeps; j++)
4604 {
4605 for (k = 0; k < te2->nDeps; k++)
4606 {
4607 if (te1->lockDeps[j] == te2->dependencies[k])
4608 return true;
4609 }
4610 }
4611 return false;
4612}
4613
4614
4615/*
4616 * Initialize the header of the pending-items list.
4617 *
4618 * This is a circular list with a dummy TocEntry as header, just like the
4619 * main TOC list; but we use separate list links so that an entry can be in
4620 * the main TOC list as well as in the pending list.
4621 */
4622static void
4624{
4625 l->pending_prev = l->pending_next = l;
4626}
4627
4628/* Append te to the end of the pending-list headed by l */
4629static void
4631{
4632 te->pending_prev = l->pending_prev;
4633 l->pending_prev->pending_next = te;
4634 l->pending_prev = te;
4635 te->pending_next = l;
4636}
4637
4638/* Remove te from the pending-list */
4639static void
4641{
4644 te->pending_prev = NULL;
4645 te->pending_next = NULL;
4646}
4647
4648
4649/* qsort comparator for sorting TocEntries by dataLength */
4650static int
4651TocEntrySizeCompareQsort(const void *p1, const void *p2)
4652{
4653 const TocEntry *te1 = *(const TocEntry *const *) p1;
4654 const TocEntry *te2 = *(const TocEntry *const *) p2;
4655
4656 /* Sort by decreasing dataLength */
4657 if (te1->dataLength > te2->dataLength)
4658 return -1;
4659 if (te1->dataLength < te2->dataLength)
4660 return 1;
4661
4662 /* For equal dataLengths, sort by dumpId, just to be stable */
4663 if (te1->dumpId < te2->dumpId)
4664 return -1;
4665 if (te1->dumpId > te2->dumpId)
4666 return 1;
4667
4668 return 0;
4669}
4670
4671/* binaryheap comparator for sorting TocEntries by dataLength */
4672static int
4673TocEntrySizeCompareBinaryheap(void *p1, void *p2, void *arg)
4674{
4675 /* return opposite of qsort comparator for max-heap */
4676 return -TocEntrySizeCompareQsort(&p1, &p2);
4677}
4678
4679
4680/*
4681 * Move all immediately-ready items from pending_list to ready_heap.
4682 *
4683 * Items are considered ready if they have no remaining dependencies and
4684 * they belong in the current restore pass. (See also reduce_dependencies,
4685 * which applies the same logic one-at-a-time.)
4686 */
4687static void
4689 binaryheap *ready_heap,
4690 RestorePass pass)
4691{
4692 TocEntry *te;
4693 TocEntry *next_te;
4694
4695 for (te = pending_list->pending_next; te != pending_list; te = next_te)
4696 {
4697 /* must save list link before possibly removing te from list */
4698 next_te = te->pending_next;
4699
4700 if (te->depCount == 0 &&
4701 _tocEntryRestorePass(te) == pass)
4702 {
4703 /* Remove it from pending_list ... */
4705 /* ... and add to ready_heap */
4706 binaryheap_add(ready_heap, te);
4707 }
4708 }
4709}
4710
4711/*
4712 * Find the next work item (if any) that is capable of being run now,
4713 * and remove it from the ready_heap.
4714 *
4715 * Returns the item, or NULL if nothing is runnable.
4716 *
4717 * To qualify, the item must have no remaining dependencies
4718 * and no requirements for locks that are incompatible with
4719 * items currently running. Items in the ready_heap are known to have
4720 * no remaining dependencies, but we have to check for lock conflicts.
4721 */
4722static TocEntry *
4724 ParallelState *pstate)
4725{
4726 /*
4727 * Search the ready_heap until we find a suitable item. Note that we do a
4728 * sequential scan through the heap nodes, so even though we will first
4729 * try to choose the highest-priority item, we might end up picking
4730 * something with a much lower priority. However, we expect that we will
4731 * typically be able to pick one of the first few items, which should
4732 * usually have a relatively high priority.
4733 */
4734 for (int i = 0; i < binaryheap_size(ready_heap); i++)
4735 {
4736 TocEntry *te = (TocEntry *) binaryheap_get_node(ready_heap, i);
4737 bool conflicts = false;
4738
4739 /*
4740 * Check to see if the item would need exclusive lock on something
4741 * that a currently running item also needs lock on, or vice versa. If
4742 * so, we don't want to schedule them together.
4743 */
4744 for (int k = 0; k < pstate->numWorkers; k++)
4745 {
4746 TocEntry *running_te = pstate->te[k];
4747
4748 if (running_te == NULL)
4749 continue;
4750 if (has_lock_conflicts(te, running_te) ||
4751 has_lock_conflicts(running_te, te))
4752 {
4753 conflicts = true;
4754 break;
4755 }
4756 }
4757
4758 if (conflicts)
4759 continue;
4760
4761 /* passed all tests, so this item can run */
4762 binaryheap_remove_node(ready_heap, i);
4763 return te;
4764 }
4765
4766 pg_log_debug("no item ready");
4767 return NULL;
4768}
4769
4770
4771/*
4772 * Restore a single TOC item in parallel with others
4773 *
4774 * this is run in the worker, i.e. in a thread (Windows) or a separate process
4775 * (everything else). A worker process executes several such work items during
4776 * a parallel backup or restore. Once we terminate here and report back that
4777 * our work is finished, the leader process will assign us a new work item.
4778 */
4779int
4781{
4782 int status;
4783
4784 Assert(AH->connection != NULL);
4785
4786 /* Count only errors associated with this TOC entry */
4787 AH->public.n_errors = 0;
4788
4789 /* Restore the TOC item */
4790 status = restore_toc_entry(AH, te, true);
4791
4792 return status;
4793}
4794
4795
4796/*
4797 * Callback function that's invoked in the leader process after a step has
4798 * been parallel restored.
4799 *
4800 * Update status and reduce the dependency count of any dependent items.
4801 */
4802static void
4804 TocEntry *te,
4805 int status,
4806 void *callback_data)
4807{
4808 binaryheap *ready_heap = (binaryheap *) callback_data;
4809
4810 pg_log_info("finished item %d %s %s",
4811 te->dumpId, te->desc, te->tag);
4812
4813 if (status == WORKER_CREATE_DONE)
4814 mark_create_done(AH, te);
4815 else if (status == WORKER_INHIBIT_DATA)
4816 {
4818 AH->public.n_errors++;
4819 }
4820 else if (status == WORKER_IGNORED_ERRORS)
4821 AH->public.n_errors++;
4822 else if (status != 0)
4823 pg_fatal("worker process failed: exit code %d",
4824 status);
4825
4826 reduce_dependencies(AH, te, ready_heap);
4827}
4828
4829
4830/*
4831 * Process the dependency information into a form useful for parallel restore.
4832 *
4833 * This function takes care of fixing up some missing or badly designed
4834 * dependencies, and then prepares subsidiary data structures that will be
4835 * used in the main parallel-restore logic, including:
4836 * 1. We build the revDeps[] arrays of incoming dependency dumpIds.
4837 * 2. We set up depCount fields that are the number of as-yet-unprocessed
4838 * dependencies for each TOC entry.
4839 *
4840 * We also identify locking dependencies so that we can avoid trying to
4841 * schedule conflicting items at the same time.
4842 */
4843static void
4845{
4846 TocEntry *te;
4847 int i;
4848
4849 /*
4850 * Initialize the depCount/revDeps/nRevDeps fields, and make sure the TOC
4851 * items are marked as not being in any parallel-processing list.
4852 */
4853 for (te = AH->toc->next; te != AH->toc; te = te->next)
4854 {
4855 te->depCount = te->nDeps;
4856 te->revDeps = NULL;
4857 te->nRevDeps = 0;
4858 te->pending_prev = NULL;
4859 te->pending_next = NULL;
4860 }
4861
4862 /*
4863 * POST_DATA items that are shown as depending on a table need to be
4864 * re-pointed to depend on that table's data, instead. This ensures they
4865 * won't get scheduled until the data has been loaded.
4866 */
4868
4869 /*
4870 * Pre-8.4 versions of pg_dump neglected to set up a dependency from BLOB
4871 * COMMENTS to BLOBS. Cope. (We assume there's only one BLOBS and only
4872 * one BLOB COMMENTS in such files.)
4873 */
4874 if (AH->version < K_VERS_1_11)
4875 {
4876 for (te = AH->toc->next; te != AH->toc; te = te->next)
4877 {
4878 if (strcmp(te->desc, "BLOB COMMENTS") == 0 && te->nDeps == 0)
4879 {
4880 TocEntry *te2;
4881
4882 for (te2 = AH->toc->next; te2 != AH->toc; te2 = te2->next)
4883 {
4884 if (strcmp(te2->desc, "BLOBS") == 0)
4885 {
4886 te->dependencies = (DumpId *) pg_malloc(sizeof(DumpId));
4887 te->dependencies[0] = te2->dumpId;
4888 te->nDeps++;
4889 te->depCount++;
4890 break;
4891 }
4892 }
4893 break;
4894 }
4895 }
4896 }
4897
4898 /*
4899 * At this point we start to build the revDeps reverse-dependency arrays,
4900 * so all changes of dependencies must be complete.
4901 */
4902
4903 /*
4904 * Count the incoming dependencies for each item. Also, it is possible
4905 * that the dependencies list items that are not in the archive at all
4906 * (that should not happen in 9.2 and later, but is highly likely in older
4907 * archives). Subtract such items from the depCounts.
4908 */
4909 for (te = AH->toc->next; te != AH->toc; te = te->next)
4910 {
4911 for (i = 0; i < te->nDeps; i++)
4912 {
4913 DumpId depid = te->dependencies[i];
4914
4915 if (depid <= AH->maxDumpId && AH->tocsByDumpId[depid] != NULL)
4916 AH->tocsByDumpId[depid]->nRevDeps++;
4917 else
4918 te->depCount--;
4919 }
4920 }
4921
4922 /*
4923 * Allocate space for revDeps[] arrays, and reset nRevDeps so we can use
4924 * it as a counter below.
4925 */
4926 for (te = AH->toc->next; te != AH->toc; te = te->next)
4927 {
4928 if (te->nRevDeps > 0)
4929 te->revDeps = (DumpId *) pg_malloc(te->nRevDeps * sizeof(DumpId));
4930 te->nRevDeps = 0;
4931 }
4932
4933 /*
4934 * Build the revDeps[] arrays of incoming-dependency dumpIds. This had
4935 * better agree with the loops above.
4936 */
4937 for (te = AH->toc->next; te != AH->toc; te = te->next)
4938 {
4939 for (i = 0; i < te->nDeps; i++)
4940 {
4941 DumpId depid = te->dependencies[i];
4942
4943 if (depid <= AH->maxDumpId && AH->tocsByDumpId[depid] != NULL)
4944 {
4945 TocEntry *otherte = AH->tocsByDumpId[depid];
4946
4947 otherte->revDeps[otherte->nRevDeps++] = te->dumpId;
4948 }
4949 }
4950 }
4951
4952 /*
4953 * Lastly, work out the locking dependencies.
4954 */
4955 for (te = AH->toc->next; te != AH->toc; te = te->next)
4956 {
4957 te->lockDeps = NULL;
4958 te->nLockDeps = 0;
4960 }
4961}
4962
4963/*
4964 * Change dependencies on table items to depend on table data items instead,
4965 * but only in POST_DATA items.
4966 *
4967 * Also, for any item having such dependency(s), set its dataLength to the
4968 * largest dataLength of the table data items it depends on. This ensures
4969 * that parallel restore will prioritize larger jobs (index builds, FK
4970 * constraint checks, etc) over smaller ones, avoiding situations where we
4971 * end a restore with only one active job working on a large table.
4972 */
4973static void
4975{
4976 TocEntry *te;
4977 int i;
4978 DumpId olddep;
4979
4980 for (te = AH->toc->next; te != AH->toc; te = te->next)
4981 {
4982 if (te->section != SECTION_POST_DATA)
4983 continue;
4984 for (i = 0; i < te->nDeps; i++)
4985 {
4986 olddep = te->dependencies[i];
4987 if (olddep <= AH->maxDumpId &&
4988 AH->tableDataId[olddep] != 0)
4989 {
4990 DumpId tabledataid = AH->tableDataId[olddep];
4991 TocEntry *tabledatate = AH->tocsByDumpId[tabledataid];
4992
4993 te->dependencies[i] = tabledataid;
4994 te->dataLength = Max(te->dataLength, tabledatate->dataLength);
4995 pg_log_debug("transferring dependency %d -> %d to %d",
4996 te->dumpId, olddep, tabledataid);
4997 }
4998 }
4999 }
5000}
5001
5002/*
5003 * Identify which objects we'll need exclusive lock on in order to restore
5004 * the given TOC entry (*other* than the one identified by the TOC entry
5005 * itself). Record their dump IDs in the entry's lockDeps[] array.
5006 */
5007static void
5009{
5010 DumpId *lockids;
5011 int nlockids;
5012 int i;
5013
5014 /*
5015 * We only care about this for POST_DATA items. PRE_DATA items are not
5016 * run in parallel, and DATA items are all independent by assumption.
5017 */
5018 if (te->section != SECTION_POST_DATA)
5019 return;
5020
5021 /* Quick exit if no dependencies at all */
5022 if (te->nDeps == 0)
5023 return;
5024
5025 /*
5026 * Most POST_DATA items are ALTER TABLEs or some moral equivalent of that,
5027 * and hence require exclusive lock. However, we know that CREATE INDEX
5028 * does not. (Maybe someday index-creating CONSTRAINTs will fall in that
5029 * category too ... but today is not that day.)
5030 */
5031 if (strcmp(te->desc, "INDEX") == 0)
5032 return;
5033
5034 /*
5035 * We assume the entry requires exclusive lock on each TABLE or TABLE DATA
5036 * item listed among its dependencies. Originally all of these would have
5037 * been TABLE items, but repoint_table_dependencies would have repointed
5038 * them to the TABLE DATA items if those are present (which they might not
5039 * be, eg in a schema-only dump). Note that all of the entries we are
5040 * processing here are POST_DATA; otherwise there might be a significant
5041 * difference between a dependency on a table and a dependency on its
5042 * data, so that closer analysis would be needed here.
5043 */
5044 lockids = (DumpId *) pg_malloc(te->nDeps * sizeof(DumpId));
5045 nlockids = 0;
5046 for (i = 0; i < te->nDeps; i++)
5047 {
5048 DumpId depid = te->dependencies[i];
5049
5050 if (depid <= AH->maxDumpId && AH->tocsByDumpId[depid] != NULL &&
5051 ((strcmp(AH->tocsByDumpId[depid]->desc, "TABLE DATA") == 0) ||
5052 strcmp(AH->tocsByDumpId[depid]->desc, "TABLE") == 0))
5053 lockids[nlockids++] = depid;
5054 }
5055
5056 if (nlockids == 0)
5057 {
5058 free(lockids);
5059 return;
5060 }
5061
5062 te->lockDeps = pg_realloc(lockids, nlockids * sizeof(DumpId));
5063 te->nLockDeps = nlockids;
5064}
5065
5066/*
5067 * Remove the specified TOC entry from the depCounts of items that depend on
5068 * it, thereby possibly making them ready-to-run. Any pending item that
5069 * becomes ready should be moved to the ready_heap, if that's provided.
5070 */
5071static void
5073 binaryheap *ready_heap)
5074{
5075 int i;
5076
5077 pg_log_debug("reducing dependencies for %d", te->dumpId);
5078
5079 for (i = 0; i < te->nRevDeps; i++)
5080 {
5081 TocEntry *otherte = AH->tocsByDumpId[te->revDeps[i]];
5082
5083 Assert(otherte->depCount > 0);
5084 otherte->depCount--;
5085
5086 /*
5087 * It's ready if it has no remaining dependencies, and it belongs in
5088 * the current restore pass, and it is currently a member of the
5089 * pending list (that check is needed to prevent double restore in
5090 * some cases where a list-file forces out-of-order restoring).
5091 * However, if ready_heap == NULL then caller doesn't want any list
5092 * memberships changed.
5093 */
5094 if (otherte->depCount == 0 &&
5095 _tocEntryRestorePass(otherte) == AH->restorePass &&
5096 otherte->pending_prev != NULL &&
5097 ready_heap != NULL)
5098 {
5099 /* Remove it from pending list ... */
5100 pending_list_remove(otherte);
5101 /* ... and add to ready_heap */
5102 binaryheap_add(ready_heap, otherte);
5103 }
5104 }
5105}
5106
5107/*
5108 * Set the created flag on the DATA member corresponding to the given
5109 * TABLE member
5110 */
5111static void
5113{
5114 if (AH->tableDataId[te->dumpId] != 0)
5115 {
5116 TocEntry *ted = AH->tocsByDumpId[AH->tableDataId[te->dumpId]];
5117
5118 ted->created = true;
5119 }
5120}
5121
5122/*
5123 * Mark the DATA member corresponding to the given TABLE member
5124 * as not wanted
5125 */
5126static void
5128{
5129 pg_log_info("table \"%s\" could not be created, will not restore its data",
5130 te->tag);
5131
5132 if (AH->tableDataId[te->dumpId] != 0)
5133 {
5134 TocEntry *ted = AH->tocsByDumpId[AH->tableDataId[te->dumpId]];
5135
5136 ted->reqs = 0;
5137 }
5138}
5139
5140/*
5141 * Clone and de-clone routines used in parallel restoration.
5142 *
5143 * Enough of the structure is cloned to ensure that there is no
5144 * conflict between different threads each with their own clone.
5145 */
5148{
5149 ArchiveHandle *clone;
5150
5151 /* Make a "flat" copy */
5152 clone = (ArchiveHandle *) pg_malloc(sizeof(ArchiveHandle));
5153 memcpy(clone, AH, sizeof(ArchiveHandle));
5154
5155 /* Likewise flat-copy the RestoreOptions, so we can alter them locally */
5156 clone->public.ropt = (RestoreOptions *) pg_malloc(sizeof(RestoreOptions));
5157 memcpy(clone->public.ropt, AH->public.ropt, sizeof(RestoreOptions));
5158
5159 /* Handle format-independent fields */
5160 memset(&(clone->sqlparse), 0, sizeof(clone->sqlparse));
5161
5162 /* The clone will have its own connection, so disregard connection state */
5163 clone->connection = NULL;
5164 clone->connCancel = NULL;
5165 clone->currUser = NULL;
5166 clone->currSchema = NULL;
5167 clone->currTableAm = NULL;
5168 clone->currTablespace = NULL;
5169
5170 /* savedPassword must be local in case we change it while connecting */
5171 if (clone->savedPassword)
5172 clone->savedPassword = pg_strdup(clone->savedPassword);
5173
5174 /* clone has its own error count, too */
5175 clone->public.n_errors = 0;
5176
5177 /* clones should not share lo_buf */
5178 clone->lo_buf = NULL;
5179
5180 /*
5181 * Clone connections disregard --transaction-size; they must commit after
5182 * each command so that the results are immediately visible to other
5183 * workers.
5184 */
5185 clone->public.ropt->txn_size = 0;
5186
5187 /*
5188 * Connect our new clone object to the database, using the same connection
5189 * parameters used for the original connection.
5190 */
5191 ConnectDatabaseAhx((Archive *) clone, &clone->public.ropt->cparams, true);
5192
5193 /* re-establish fixed state */
5194 if (AH->mode == archModeRead)
5196 /* in write case, setupDumpWorker will fix up connection state */
5197
5198 /* Let the format-specific code have a chance too */
5199 clone->ClonePtr(clone);
5200
5201 Assert(clone->connection != NULL);
5202 return clone;
5203}
5204
5205/*
5206 * Release clone-local storage.
5207 *
5208 * Note: we assume any clone-local connection was already closed.
5209 */
5210void
5212{
5213 /* Should not have an open database connection */
5214 Assert(AH->connection == NULL);
5215
5216 /* Clear format-specific state */
5217 AH->DeClonePtr(AH);
5218
5219 /* Clear state allocated by CloneArchive */
5220 if (AH->sqlparse.curCmd)
5222
5223 /* Clear any connection-local state */
5224 free(AH->currUser);
5225 free(AH->currSchema);
5226 free(AH->currTablespace);
5227 free(AH->currTableAm);
5228 free(AH->savedPassword);
5229
5230 free(AH);
5231}
int lo_write(int fd, const char *buf, int len)
Definition: be-fsstubs.c:182
void ParallelBackupEnd(ArchiveHandle *AH, ParallelState *pstate)
Definition: parallel.c:1061
void WaitForWorkers(ArchiveHandle *AH, ParallelState *pstate, WFW_WaitOption mode)
Definition: parallel.c:1453
ParallelState * ParallelBackupStart(ArchiveHandle *AH)
Definition: parallel.c:899
void DispatchJobForTocEntry(ArchiveHandle *AH, ParallelState *pstate, TocEntry *te, T_Action act, ParallelCompletionPtr callback, void *callback_data)
Definition: parallel.c:1207
bool IsEveryWorkerIdle(ParallelState *pstate)
Definition: parallel.c:1270
@ WFW_ALL_IDLE
Definition: parallel.h:35
@ WFW_GOT_STATUS
Definition: parallel.h:33
@ WFW_ONE_IDLE
Definition: parallel.h:34
void binaryheap_remove_node(binaryheap *heap, int n)
Definition: binaryheap.c:225
void binaryheap_add(binaryheap *heap, bh_node_type d)
Definition: binaryheap.c:154
void binaryheap_free(binaryheap *heap)
Definition: binaryheap.c:75
binaryheap * binaryheap_allocate(int capacity, binaryheap_comparator compare, void *arg)
Definition: binaryheap.c:39
#define binaryheap_size(h)
Definition: binaryheap.h:66
#define binaryheap_empty(h)
Definition: binaryheap.h:65
#define binaryheap_get_node(h, n)
Definition: binaryheap.h:67
#define PG_BINARY_R
Definition: c.h:1273
#define ngettext(s, p, n)
Definition: c.h:1179
#define PG_BINARY_A
Definition: c.h:1272
#define Max(x, y)
Definition: c.h:1010
#define PG_BINARY_W
Definition: c.h:1274
bool EndCompressFileHandle(CompressFileHandle *CFH)
Definition: compress_io.c:289
char * supports_compression(const pg_compress_specification compression_spec)
Definition: compress_io.c:87
CompressFileHandle * InitCompressFileHandle(const pg_compress_specification compression_spec)
Definition: compress_io.c:194
const char * get_compress_algorithm_name(pg_compress_algorithm algorithm)
Definition: compression.c:69
@ PG_COMPRESSION_GZIP
Definition: compression.h:24
@ PG_COMPRESSION_NONE
Definition: compression.h:23
char * sanitize_line(const char *str, bool want_hyphen)
Definition: dumputils.c:52
#define PGDUMP_STRFTIME_FMT
Definition: dumputils.h:34
int errmsg(const char *fmt,...)
Definition: elog.c:1080
char * PQdb(const PGconn *conn)
Definition: fe-connect.c:7538
char * PQerrorMessage(const PGconn *conn)
Definition: fe-connect.c:7704
PGresult * PQexec(PGconn *conn, const char *query)
Definition: fe-exec.c:2279
int lo_close(PGconn *conn, int fd)
Definition: fe-lobj.c:96
int lo_open(PGconn *conn, Oid lobjId, int mode)
Definition: fe-lobj.c:57
Oid lo_create(PGconn *conn, Oid lobjId)
Definition: fe-lobj.c:474
void * pg_malloc(size_t size)
Definition: fe_memutils.c:47
char * pg_strdup(const char *in)
Definition: fe_memutils.c:85
void * pg_malloc0(size_t size)
Definition: fe_memutils.c:53
void pg_free(void *ptr)
Definition: fe_memutils.c:105
void * pg_realloc(void *ptr, size_t size)
Definition: fe_memutils.c:65
DataDirSyncMethod
Definition: file_utils.h:28
@ DATA_DIR_SYNC_METHOD_FSYNC
Definition: file_utils.h:29
Assert(PointerIsAligned(start, uint64))
#define free(a)
Definition: header.h:65
int remaining
Definition: informix.c:692
char sign
Definition: informix.c:693
static DataDirSyncMethod sync_method
Definition: initdb.c:170
int b
Definition: isn.c:74
return true
Definition: isn.c:130
int j
Definition: isn.c:78
int i
Definition: isn.c:77
#define PQclear
Definition: libpq-be-fe.h:245
#define PQresultStatus
Definition: libpq-be-fe.h:247
@ PGRES_COMMAND_OK
Definition: libpq-fe.h:125
#define INV_WRITE
Definition: libpq-fs.h:21
static struct pg_tm tm
Definition: localtime.c:104
void pg_log_generic_v(enum pg_log_level level, enum pg_log_part part, const char *restrict fmt, va_list ap)
Definition: logging.c:219
#define pg_log_info(...)
Definition: logging.h:124
@ PG_LOG_PRIMARY
Definition: logging.h:67
@ PG_LOG_ERROR
Definition: logging.h:43
#define pg_log_debug(...)
Definition: logging.h:133
static AmcheckOptions opts
Definition: pg_amcheck.c:112
@ SECTION_NONE
Definition: pg_backup.h:57
@ SECTION_POST_DATA
Definition: pg_backup.h:60
@ SECTION_PRE_DATA
Definition: pg_backup.h:58
@ SECTION_DATA
Definition: pg_backup.h:59
int DumpId
Definition: pg_backup.h:284
void(* SetupWorkerPtrType)(Archive *AH)
Definition: pg_backup.h:291
enum _archiveFormat ArchiveFormat
void ConnectDatabaseAhx(Archive *AHX, const ConnParams *cparams, bool isReconnect)
Definition: pg_backup_db.c:109
@ archModeWrite
Definition: pg_backup.h:51
@ archModeAppend
Definition: pg_backup.h:50
@ archModeRead
Definition: pg_backup.h:52
void DisconnectDatabase(Archive *AHX)
Definition: pg_backup_db.c:164
enum _teSection teSection
@ archUnknown
Definition: pg_backup.h:41
@ archTar
Definition: pg_backup.h:43
@ archCustom
Definition: pg_backup.h:42
@ archDirectory
Definition: pg_backup.h:45
@ archNull
Definition: pg_backup.h:44
static void fix_dependencies(ArchiveHandle *AH)
static void repoint_table_dependencies(ArchiveHandle *AH)
void DeCloneArchive(ArchiveHandle *AH)
static int _discoverArchiveFormat(ArchiveHandle *AH)
#define TEXT_DUMPALL_HEADER
int TocIDRequired(ArchiveHandle *AH, DumpId id)
bool checkSeek(FILE *fp)
void ahwrite(const void *ptr, size_t size, size_t nmemb, ArchiveHandle *AH)
void warn_or_exit_horribly(ArchiveHandle *AH, const char *fmt,...)
void WriteDataChunksForTocEntry(ArchiveHandle *AH, TocEntry *te)
static void _becomeOwner(ArchiveHandle *AH, TocEntry *te)
void WriteHead(ArchiveHandle *AH)
int EndLO(Archive *AHX, Oid oid)
static CompressFileHandle * SaveOutput(ArchiveHandle *AH)
#define TOC_PREFIX_DATA
static void _becomeUser(ArchiveHandle *AH, const char *user)
static void pending_list_append(TocEntry *l, TocEntry *te)
size_t WriteInt(ArchiveHandle *AH, int i)
void ProcessArchiveRestoreOptions(Archive *AHX)
static int restore_toc_entry(ArchiveHandle *AH, TocEntry *te, bool is_parallel)
static void _moveBefore(TocEntry *pos, TocEntry *te)
RestoreOptions * NewRestoreOptions(void)
static bool _tocEntryIsACL(TocEntry *te)
static void move_to_ready_heap(TocEntry *pending_list, binaryheap *ready_heap, RestorePass pass)
char * ReadStr(ArchiveHandle *AH)
static void _getObjectDescription(PQExpBuffer buf, const TocEntry *te)
static void buildTocEntryArrays(ArchiveHandle *AH)
static void identify_locking_dependencies(ArchiveHandle *AH, TocEntry *te)
static void processEncodingEntry(ArchiveHandle *AH, TocEntry *te)
static void processSearchPathEntry(ArchiveHandle *AH, TocEntry *te)
int archprintf(Archive *AH, const char *fmt,...)
static void StrictNamesCheck(RestoreOptions *ropt)
static void mark_restore_job_done(ArchiveHandle *AH, TocEntry *te, int status, void *callback_data)
size_t WriteOffset(ArchiveHandle *AH, pgoff_t o, int wasSet)
static RestorePass _tocEntryRestorePass(TocEntry *te)
TocEntry * ArchiveEntry(Archive *AHX, CatalogId catalogId, DumpId dumpId, ArchiveOpts *opts)
int StartLO(Archive *AHX, Oid oid)
static void _printTocEntry(ArchiveHandle *AH, TocEntry *te, const char *pfx)
#define TOC_PREFIX_STATS
ArchiveHandle * CloneArchive(ArchiveHandle *AH)
static void setupRestoreWorker(Archive *AHX)
static void _reconnectToDB(ArchiveHandle *AH, const char *dbname)
static int TocEntrySizeCompareQsort(const void *p1, const void *p2)
Archive * OpenArchive(const char *FileSpec, const ArchiveFormat fmt)
void StartRestoreLOs(ArchiveHandle *AH)
void CloseArchive(Archive *AHX)
static void pending_list_header_init(TocEntry *l)
static void restore_toc_entries_parallel(ArchiveHandle *AH, ParallelState *pstate, TocEntry *pending_list)
static void SetOutput(ArchiveHandle *AH, const char *filename, const pg_compress_specification compression_spec)
TocEntry * getTocEntryByDumpId(ArchiveHandle *AH, DumpId id)
static void mark_create_done(ArchiveHandle *AH, TocEntry *te)
#define TEXT_DUMP_HEADER
static void _selectTableAccessMethod(ArchiveHandle *AH, const char *tableam)
void WriteDataChunks(ArchiveHandle *AH, ParallelState *pstate)
static void dumpTimestamp(ArchiveHandle *AH, const char *msg, time_t tim)
int ahprintf(ArchiveHandle *AH, const char *fmt,...)
static void _selectOutputSchema(ArchiveHandle *AH, const char *schemaName)
static void mark_dump_job_done(ArchiveHandle *AH, TocEntry *te, int status, void *callback_data)
static bool is_load_via_partition_root(TocEntry *te)
Archive * CreateArchive(const char *FileSpec, const ArchiveFormat fmt, const pg_compress_specification compression_spec, bool dosync, ArchiveMode mode, SetupWorkerPtrType setupDumpWorker, DataDirSyncMethod sync_method)
static void _enableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te)
DumpOptions * NewDumpOptions(void)
void SortTocFromFile(Archive *AHX)
int ReadOffset(ArchiveHandle *AH, pgoff_t *o)
static void inhibit_data_for_failed_table(ArchiveHandle *AH, TocEntry *te)
int ReadInt(ArchiveHandle *AH)
static void restore_toc_entries_prefork(ArchiveHandle *AH, TocEntry *pending_list)
static void _printTableAccessMethodNoStorage(ArchiveHandle *AH, TocEntry *te)
static void restore_toc_entries_postfork(ArchiveHandle *AH, TocEntry *pending_list)
#define TOC_PREFIX_NONE
static void RestoreOutput(ArchiveHandle *AH, CompressFileHandle *savedOutput)
static void _doSetFixedOutputState(ArchiveHandle *AH)
void PrintTOCSummary(Archive *AHX)
static void processStdStringsEntry(ArchiveHandle *AH, TocEntry *te)
static int TocEntrySizeCompareBinaryheap(void *p1, void *p2, void *arg)
static int RestoringToDB(ArchiveHandle *AH)
void ReadHead(ArchiveHandle *AH)
void SetArchiveOptions(Archive *AH, DumpOptions *dopt, RestoreOptions *ropt)
static void pending_list_remove(TocEntry *te)
static bool has_lock_conflicts(TocEntry *te1, TocEntry *te2)
void ReadToc(ArchiveHandle *AH)
static void reduce_dependencies(ArchiveHandle *AH, TocEntry *te, binaryheap *ready_heap)
void EndRestoreLO(ArchiveHandle *AH, Oid oid)
static void _selectTablespace(ArchiveHandle *AH, const char *tablespace)
void RestoreArchive(Archive *AHX)
void WriteToc(ArchiveHandle *AH)
void archputs(const char *s, Archive *AH)
static bool _fileExistsInDirectory(const char *dir, const char *filename)
DumpOptions * dumpOptionsFromRestoreOptions(RestoreOptions *ropt)
static int _tocEntryRequired(TocEntry *te, teSection curSection, ArchiveHandle *AH)
static void _doSetSessionAuth(ArchiveHandle *AH, const char *user)
void EndRestoreLOs(ArchiveHandle *AH)
void StartRestoreLO(ArchiveHandle *AH, Oid oid, bool drop)
void InitDumpOptions(DumpOptions *opts)
static ArchiveHandle * _allocAH(const char *FileSpec, const ArchiveFormat fmt, const pg_compress_specification compression_spec, bool dosync, ArchiveMode mode, SetupWorkerPtrType setupWorkerPtr, DataDirSyncMethod sync_method)
static void dump_lo_buf(ArchiveHandle *AH)
static TocEntry * pop_next_work_item(binaryheap *ready_heap, ParallelState *pstate)
void WriteData(Archive *AHX, const void *data, size_t dLen)
int parallel_restore(ArchiveHandle *AH, TocEntry *te)
size_t WriteStr(ArchiveHandle *AH, const char *c)
static void _disableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te)
#define K_VERS_1_15
void InitArchiveFmt_Null(ArchiveHandle *AH)
#define WORKER_CREATE_DONE
#define LOBBUFSIZE
void IssueACLPerBlob(ArchiveHandle *AH, TocEntry *te)
Definition: pg_backup_db.c:538
#define K_VERS_1_14
#define appendByteaLiteralAHX(buf, str, len, AH)
void struct _archiveOpts ArchiveOpts
void(* EndDataPtrType)(ArchiveHandle *AH, TocEntry *te)
#define K_VERS_SELF
#define K_VERS_1_10
void(* StartDataPtrType)(ArchiveHandle *AH, TocEntry *te)
#define ARCHIVE_MAJOR(version)
#define ARCHIVE_MINOR(version)
#define K_VERS_1_2
#define RESTORE_PASS_LAST
void InitArchiveFmt_Custom(ArchiveHandle *AH)
#define K_OFFSET_NO_DATA
void InitArchiveFmt_Tar(ArchiveHandle *AH)
#define REQ_SCHEMA
#define K_VERS_1_4
#define appendStringLiteralAHX(buf, str, AH)
void DropLOIfExists(ArchiveHandle *AH, Oid oid)
Definition: pg_backup_db.c:612
#define MAKE_ARCHIVE_VERSION(major, minor, rev)
#define K_VERS_1_5
void ReconnectToServer(ArchiveHandle *AH, const char *dbname)
Definition: pg_backup_db.c:73
#define ARCHIVE_REV(version)
#define REQ_STATS
#define WRITE_ERROR_EXIT
bool isValidTarHeader(char *header)
#define WORKER_IGNORED_ERRORS
#define REQ_SPECIAL
void IssueCommandPerBlob(ArchiveHandle *AH, TocEntry *te, const char *cmdBegin, const char *cmdEnd)
Definition: pg_backup_db.c:491
#define K_VERS_1_6
@ STAGE_INITIALIZING
@ STAGE_PROCESSING
@ STAGE_NONE
@ STAGE_FINALIZING
@ ACT_RESTORE
@ ACT_DUMP
#define K_VERS_1_0
#define K_OFFSET_POS_NOT_SET
#define K_OFFSET_POS_SET
#define K_VERS_MAX
#define K_VERS_1_8
#define WORKER_OK
@ OUTPUT_COPYDATA
@ OUTPUT_SQLCMDS
@ OUTPUT_OTHERDATA
#define K_VERS_1_12
#define REQ_DATA
#define READ_ERROR_EXIT(fd)
void InitArchiveFmt_Directory(ArchiveHandle *AH)
#define K_VERS_1_11
#define K_VERS_1_9
#define WORKER_INHIBIT_DATA
#define K_VERS_1_16
@ RESTORE_PASS_POST_ACL
@ RESTORE_PASS_ACL
@ RESTORE_PASS_MAIN
#define K_VERS_1_3
#define K_VERS_1_7
void EndDBCopyMode(Archive *AHX, const char *tocEntryTag)
Definition: pg_backup_db.c:439
int ExecuteSqlCommandBuf(Archive *AHX, const char *buf, size_t bufLen)
Definition: pg_backup_db.c:384
void exit_nicely(int code)
void * arg
#define DUMP_PRE_DATA
#define DUMP_DATA
#define DUMP_UNSECTIONED
#define pg_fatal(...)
#define DUMP_POST_DATA
static PgChecksumMode mode
Definition: pg_checksums.c:56
#define MAXPGPATH
const void size_t len
const void * data
static int sig
Definition: pg_ctl.c:81
int32 encoding
Definition: pg_database.h:41
static bool dosync
Definition: pg_dump.c:150
static void setupDumpWorker(Archive *AH)
Definition: pg_dump.c:1596
static char * filename
Definition: pg_dumpall.c:120
bool pg_get_line_buf(FILE *stream, StringInfo buf)
Definition: pg_get_line.c:95
static char * user
Definition: pg_regress.c:119
static char buf[DEFAULT_XLOG_SEG_SIZE]
Definition: pg_test_fsync.c:71
#define pg_encoding_to_char
Definition: pg_wchar.h:630
#define pg_char_to_encoding
Definition: pg_wchar.h:629
static char * tablespace
Definition: pgbench.c:217
#define pg_log_warning(...)
Definition: pgfnames.c:24
#define sprintf
Definition: port.h:262
#define snprintf
Definition: port.h:260
#define qsort(a, b, c, d)
Definition: port.h:499
#define pgoff_t
Definition: port.h:421
#define InvalidOid
Definition: postgres_ext.h:37
unsigned int Oid
Definition: postgres_ext.h:32
PQExpBuffer createPQExpBuffer(void)
Definition: pqexpbuffer.c:72
void initPQExpBuffer(PQExpBuffer str)
Definition: pqexpbuffer.c:90
void appendPQExpBuffer(PQExpBuffer str, const char *fmt,...)
Definition: pqexpbuffer.c:265
void destroyPQExpBuffer(PQExpBuffer str)
Definition: pqexpbuffer.c:114
void appendPQExpBufferChar(PQExpBuffer str, char ch)
Definition: pqexpbuffer.c:378
void appendPQExpBufferStr(PQExpBuffer str, const char *data)
Definition: pqexpbuffer.c:367
void termPQExpBuffer(PQExpBuffer str)
Definition: pqexpbuffer.c:129
char * c
size_t pvsnprintf(char *buf, size_t len, const char *fmt, va_list args)
Definition: psprintf.c:103
char * psprintf(const char *fmt,...)
Definition: psprintf.c:43
const char * simple_string_list_not_touched(SimpleStringList *list)
Definition: simple_list.c:144
bool simple_string_list_member(SimpleStringList *list, const char *val)
Definition: simple_list.c:87
char * dbname
Definition: streamutil.c:49
const char * fmtQualifiedId(const char *schema, const char *id)
Definition: string_utils.c:296
const char * fmtId(const char *rawid)
Definition: string_utils.c:248
void setFmtEncoding(int encoding)
Definition: string_utils.c:69
void appendPsqlMetaConnect(PQExpBuffer buf, const char *dbname)
Definition: string_utils.c:743
void initStringInfo(StringInfo str)
Definition: stringinfo.c:97
int minRemoteVersion
Definition: pg_backup.h:236
int remoteVersion
Definition: pg_backup.h:233
char * remoteVersionStr
Definition: pg_backup.h:232
DumpOptions * dopt
Definition: pg_backup.h:228
bool exit_on_error
Definition: pg_backup.h:251
char * searchpath
Definition: pg_backup.h:247
int maxRemoteVersion
Definition: pg_backup.h:237
int n_errors
Definition: pg_backup.h:252
bool std_strings
Definition: pg_backup.h:244
int numWorkers
Definition: pg_backup.h:239
int encoding
Definition: pg_backup.h:243
int verbose
Definition: pg_backup.h:231
RestoreOptions * ropt
Definition: pg_backup.h:229
Oid tableoid
Definition: pg_backup.h:280
bool(* open_func)(const char *path, int fd, const char *mode, CompressFileHandle *CFH)
Definition: compress_io.h:111
void(* write_func)(const void *ptr, size_t size, CompressFileHandle *CFH)
Definition: compress_io.h:140
TocEntry ** te
Definition: parallel.h:59
int numWorkers
Definition: parallel.h:57
SimpleStringListCell * head
Definition: simple_list.h:42
ArchiverStage stage
RestorePass restorePass
ArchiveFormat format
struct _tocEntry * toc
DeClonePtrType DeClonePtr
EndLOsPtrType EndLOsPtr
DataDirSyncMethod sync_method
struct _tocEntry * lastErrorTE
ReadExtraTocPtrType ReadExtraTocPtr
struct _tocEntry * currentTE
CustomOutPtrType CustomOutPtr
PGcancel *volatile connCancel
StartLOsPtrType StartLOsPtr
ArchiveEntryPtrType ArchiveEntryPtr
pg_compress_specification compression_spec
WriteDataPtrType WriteDataPtr
StartLOPtrType StartLOPtr
struct _tocEntry ** tocsByDumpId
ClonePtrType ClonePtr
WriteBufPtrType WriteBufPtr
PrepParallelRestorePtrType PrepParallelRestorePtr
EndLOPtrType EndLOPtr
WriteExtraTocPtrType WriteExtraTocPtr
ReadBytePtrType ReadBytePtr
PrintTocDataPtrType PrintTocDataPtr
struct _tocEntry * currToc
WriteBytePtrType WriteBytePtr
sqlparseInfo sqlparse
ReadBufPtrType ReadBufPtr
PrintExtraTocPtrType PrintExtraTocPtr
ArchiverStage lastErrorStage
StartDataPtrType StartDataPtr
ReopenPtrType ReopenPtr
ArchiverOutput outputKind
EndDataPtrType EndDataPtr
SetupWorkerPtrType SetupWorkerPtr
ClosePtrType ClosePtr
char * pgport
Definition: pg_backup.h:87
char * pghost
Definition: pg_backup.h:88
trivalue promptPassword
Definition: pg_backup.h:90
char * username
Definition: pg_backup.h:89
char * dbname
Definition: pg_backup.h:86
int dump_inserts
Definition: pg_backup.h:180
char * restrict_key
Definition: pg_backup.h:219
int column_inserts
Definition: pg_backup.h:184
int use_setsessauth
Definition: pg_backup.h:197
int outputCreateDB
Definition: pg_backup.h:205
bool include_everything
Definition: pg_backup.h:202
int sequence_data
Definition: pg_backup.h:211
int disable_dollar_quoting
Definition: pg_backup.h:183
bool dumpSchema
Definition: pg_backup.h:215
int no_comments
Definition: pg_backup.h:186
int outputNoTableAm
Definition: pg_backup.h:195
int enable_row_security
Definition: pg_backup.h:198
char * outputSuperuser
Definition: pg_backup.h:209
int dumpSections
Definition: pg_backup.h:177
int no_security_labels
Definition: pg_backup.h:189
bool dumpData
Definition: pg_backup.h:216
bool dumpStatistics
Definition: pg_backup.h:217
int no_publications
Definition: pg_backup.h:188
ConnParams cparams
Definition: pg_backup.h:172
const char * lockWaitTimeout
Definition: pg_backup.h:179
int no_subscriptions
Definition: pg_backup.h:190
bool aclsSkip
Definition: pg_backup.h:178
int outputClean
Definition: pg_backup.h:204
int no_policies
Definition: pg_backup.h:187
int outputNoTablespaces
Definition: pg_backup.h:196
int disable_triggers
Definition: pg_backup.h:194
int outputNoOwner
Definition: pg_backup.h:208
SimpleStringList schemaExcludeNames
Definition: pg_backup.h:140
int include_everything
Definition: pg_backup.h:125
bool * idWanted
Definition: pg_backup.h:157
int suppressDumpWarnings
Definition: pg_backup.h:151
ConnParams cparams
Definition: pg_backup.h:145
SimpleStringList functionNames
Definition: pg_backup.h:138
char * use_role
Definition: pg_backup.h:107
SimpleStringList tableNames
Definition: pg_backup.h:142
SimpleStringList indexNames
Definition: pg_backup.h:137
pg_compress_specification compression_spec
Definition: pg_backup.h:149
int no_subscriptions
Definition: pg_backup.h:117
SimpleStringList triggerNames
Definition: pg_backup.h:141
bool dumpStatistics
Definition: pg_backup.h:165
int disable_dollar_quoting
Definition: pg_backup.h:109
SimpleStringList schemaNames
Definition: pg_backup.h:139
char * restrict_key
Definition: pg_backup.h:167
const char * filename
Definition: pg_backup.h:120
int no_security_labels
Definition: pg_backup.h:116
char * tocFile
Definition: pg_backup.h:128
char * superuser
Definition: pg_backup.h:106
const char * lockWaitTimeout
Definition: pg_backup.h:124
int enable_row_security
Definition: pg_backup.h:158
int disable_triggers
Definition: pg_backup.h:102
int noDataForFailedTables
Definition: pg_backup.h:147
struct _tocEntry * pending_next
struct _tocEntry * prev
teSection section
struct _tocEntry * pending_prev
DefnDumperPtr defnDumper
DataDumperPtr dataDumper
pgoff_t dataLength
CatalogId catalogId
struct _tocEntry * next
const void * dataDumperArg
DumpId * revDeps
const void * defnDumperArg
DumpId * dependencies
DumpId * lockDeps
pg_compress_algorithm algorithm
Definition: compression.h:34
int tm_sec
Definition: pgtime.h:36
PQExpBuffer curCmd
unsigned short st_mode
Definition: win32_port.h:258
static void * fn(void *arg)
Definition: thread-alloc.c:119
@ TRI_DEFAULT
Definition: vacuumlo.c:36
const char * type
#define stat
Definition: win32_port.h:274
#define S_ISDIR(m)
Definition: win32_port.h:315
#define ftello(stream)
Definition: win32_port.h:209
#define S_ISREG(m)
Definition: win32_port.h:318
#define fseeko(stream, offset, origin)
Definition: win32_port.h:206
static void StartTransaction(void)
Definition: xact.c:2077
static void CommitTransaction(void)
Definition: xact.c:2241
ArchiveMode
Definition: xlog.h:64