PostgreSQL Source Code git master
basebackup_incremental.c
1/*-------------------------------------------------------------------------
2 *
3 * basebackup_incremental.c
4 * code for incremental backup support
5 *
6 * This code isn't actually in charge of taking an incremental backup;
7 * the actual construction of the incremental backup happens in
8 * basebackup.c. Here, we're concerned with providing the necessary
9 * support for that operation. In particular, we need to parse the
10 * backup manifest supplied by the user taking the incremental backup
11 * and extract the required information from it.
12 *
13 * Portions Copyright (c) 2010-2026, PostgreSQL Global Development Group
14 *
15 * IDENTIFICATION
16 * src/backend/backup/basebackup_incremental.c
17 *
18 *-------------------------------------------------------------------------
19 */
20#include "postgres.h"
21
22#include "access/timeline.h"
23#include "access/xlog.h"
25#include "backup/walsummary.h"
26#include "common/blkreftable.h"
27#include "common/hashfn.h"
28#include "common/int.h"
31
32#define BLOCKS_PER_READ 512
33
34/*
35 * We expect to find the last lines of the manifest, including the checksum,
36 * in the last MIN_CHUNK bytes of the manifest. We trigger an incremental
37 * parse step if we are about to overflow MAX_CHUNK bytes.
38 */
39#define MIN_CHUNK 1024
40#define MAX_CHUNK (128 * 1024)
41
42/*
43 * Details extracted from the WAL ranges present in the supplied backup manifest.
44 */
45typedef struct
46{
47 TimeLineID tli;
48 XLogRecPtr start_lsn;
49 XLogRecPtr end_lsn;
50} backup_wal_range;
51
52/*
53 * Details extracted from the file list present in the supplied backup manifest.
54 */
55typedef struct
56{
57 uint32 status; /* hash status */
58 const char *path;
59 uint64 size;
60} backup_file_entry;
61
62static uint32 hash_string_pointer(const char *s);
63#define SH_PREFIX backup_file
64#define SH_ELEMENT_TYPE backup_file_entry
65#define SH_KEY_TYPE const char *
66#define SH_KEY path
67#define SH_HASH_KEY(tb, key) hash_string_pointer(key)
68#define SH_EQUAL(tb, a, b) (strcmp(a, b) == 0)
69#define SH_SCOPE static inline
70#define SH_DECLARE
71#define SH_DEFINE
72#include "lib/simplehash.h"
73
74struct IncrementalBackupInfo
75{
76 /* Memory context for this object and its subsidiary objects. */
77 MemoryContext mcxt;
78
79 /* Temporary buffer for storing the manifest while parsing it. */
80 StringInfoData buf;
81
82 /* WAL ranges extracted from the backup manifest. */
83 List *manifest_wal_ranges;
84
85 /*
86 * Files extracted from the backup manifest.
87 *
88 * We don't really need this information, because we use WAL summaries to
89 * figure out what's changed. It would be unsafe to just rely on the list
90 * of files that existed before, because it's possible for a file to be
91 * removed and a new one created with the same name and different
92 * contents. In such cases, the whole file must still be sent. We can tell
93 * from the WAL summaries whether that happened, but not from the file
94 * list.
95 *
96 * Nonetheless, this data is useful for sanity checking. If a file that we
97 * think we shouldn't need to send is not present in the manifest for the
98 * prior backup, something has gone terribly wrong. We retain the file
99 * names and sizes, but not the checksums or last modified times, for
100 * which we have no use.
101 *
102 * One significant downside of storing this data is that it consumes
103 * memory. If that turns out to be a problem, we might have to decide not
104 * to retain this information, or to make it optional.
105 */
106 backup_file_hash *manifest_files;
107
108 /*
109 * Block-reference table for the incremental backup.
110 *
111 * It's possible that storing the entire block-reference table in memory
112 * will be a problem for some users. The in-memory format that we're using
113 * here is pretty efficient, converging to little more than 1 bit per
114 * block for relation forks with large numbers of modified blocks. It's
115 * possible, however, that if you try to perform an incremental backup of
116 * a database with a sufficiently large number of relations on a
117 * sufficiently small machine, you could run out of memory here. If that
118 * turns out to be a problem in practice, we'll need to be more clever.
119 */
120 BlockRefTable *brtab;
121
122 /*
123 * State object for incremental JSON parsing
124 */
125 JsonManifestParseIncrementalState *inc_state;
126};
127
128static void manifest_process_version(JsonManifestParseContext *context,
129 int manifest_version);
130static void manifest_process_system_identifier(JsonManifestParseContext *context,
131 uint64 manifest_system_identifier);
132static void manifest_process_file(JsonManifestParseContext *context,
133 const char *pathname,
134 uint64 size,
135 pg_checksum_type checksum_type,
136 int checksum_length,
137 uint8 *checksum_payload);
138static void manifest_process_wal_range(JsonManifestParseContext *context,
139 TimeLineID tli,
140 XLogRecPtr start_lsn,
141 XLogRecPtr end_lsn);
142static pg_noreturn void manifest_report_error(JsonManifestParseContext *context,
143 const char *fmt,...)
144 pg_attribute_printf(2, 3);
145static int compare_block_numbers(const void *a, const void *b);
146
147/*
148 * Create a new object for storing information extracted from the manifest
149 * supplied when creating an incremental backup.
150 */
151IncrementalBackupInfo *
152CreateIncrementalBackupInfo(MemoryContext mcxt)
153{
154 IncrementalBackupInfo *ib;
155 MemoryContext oldcontext;
156 JsonManifestParseContext *context;
157
158 oldcontext = MemoryContextSwitchTo(mcxt);
159
160 ib = palloc0(sizeof(IncrementalBackupInfo));
161 ib->mcxt = mcxt;
162 initStringInfo(&ib->buf);
163
164 /*
165 * It's hard to guess how many files a "typical" installation will have in
166 * the data directory, but a fresh initdb creates almost 1000 files as of
167 * this writing, so it seems to make sense for our estimate to be
168 * substantially higher.
169 */
170 ib->manifest_files = backup_file_create(mcxt, 10000, NULL);
171
172 context = palloc_object(JsonManifestParseContext);
173 /* Parse the manifest. */
174 context->private_data = ib;
175 context->version_cb = manifest_process_version;
176 context->system_identifier_cb = manifest_process_system_identifier;
177 context->per_file_cb = manifest_process_file;
178 context->per_wal_range_cb = manifest_process_wal_range;
179 context->error_cb = manifest_report_error;
180
181 ib->inc_state = json_parse_manifest_incremental_init(context);
182
183 MemoryContextSwitchTo(oldcontext);
184
185 return ib;
186}
187
188/*
189 * Before taking an incremental backup, the caller must supply the backup
190 * manifest from a prior backup. Each chunk of manifest data received
191 * from the client should be passed to this function.
192 */
193void
194AppendIncrementalManifestData(IncrementalBackupInfo *ib, const char *data,
195 int len)
196{
197 MemoryContext oldcontext;
198
199 /* Switch to our memory context. */
200 oldcontext = MemoryContextSwitchTo(ib->mcxt);
201
202 if (ib->buf.len > MIN_CHUNK && ib->buf.len + len > MAX_CHUNK)
203 {
204 /*
205 * Time for an incremental parse. We'll parse all but the last MIN_CHUNK
206 * bytes so that we have enough left for the final piece.
207 */
208 json_parse_manifest_incremental_chunk(ib->inc_state, ib->buf.data,
209 ib->buf.len - MIN_CHUNK, false);
210 /* now remove what we just parsed */
211 memmove(ib->buf.data, ib->buf.data + (ib->buf.len - MIN_CHUNK),
212 MIN_CHUNK + 1);
213 ib->buf.len = MIN_CHUNK;
214 }
215
216 appendBinaryStringInfo(&ib->buf, data, len);
217
218 /* Switch back to previous memory context. */
219 MemoryContextSwitchTo(oldcontext);
220}
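/*
 * A worked example of the buffering above (numbers purely illustrative):
 * suppose 120 kB of manifest data is already buffered and a 16 kB chunk
 * arrives. Since 120 kB > MIN_CHUNK and 120 kB + 16 kB > MAX_CHUNK, all but
 * the final MIN_CHUNK (1 kB) of the buffered data is handed to the
 * incremental JSON parser, the remaining 1 kB is moved to the front of the
 * buffer, and the new 16 kB chunk is then appended, leaving 17 kB buffered
 * for the next call.
 */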
221
222/*
223 * Finalize an IncrementalBackupInfo object after all manifest data has
224 * been supplied via calls to AppendIncrementalManifestData.
225 */
226void
227FinalizeIncrementalManifest(IncrementalBackupInfo *ib)
228{
229 MemoryContext oldcontext;
230
231 /* Switch to our memory context. */
232 oldcontext = MemoryContextSwitchTo(ib->mcxt);
233
234 /* Parse the last chunk of the manifest */
235 json_parse_manifest_incremental_chunk(ib->inc_state, ib->buf.data,
236 ib->buf.len, true);
237
238 /* Done with the buffer, so release memory. */
239 pfree(ib->buf.data);
240 ib->buf.data = NULL;
241
242 /* Done with inc_state, so release that memory too */
243 json_parse_manifest_incremental_shutdown(ib->inc_state);
244
245 /* Switch back to previous memory context. */
246 MemoryContextSwitchTo(oldcontext);
247}
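/*
 * For orientation, a rough sketch of the call sequence expected from
 * basebackup.c (argument handling elided; this is illustrative, not a quote
 * of the caller):
 *
 *     IncrementalBackupInfo *ib = CreateIncrementalBackupInfo(mcxt);
 *     while (another manifest chunk arrives from the client)
 *         AppendIncrementalManifestData(ib, chunk, chunklen);
 *     FinalizeIncrementalManifest(ib);
 *     PrepareForIncrementalBackup(ib, backup_state);
 *     ... then, for each candidate relation file ...
 *     method = GetFileBackupMethod(ib, path, dboid, spcoid, relfilenumber,
 *                                  forknum, segno, size, &num_blocks_required,
 *                                  relative_block_numbers,
 *                                  &truncation_block_length);
 */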
248
249/*
250 * Prepare to take an incremental backup.
251 *
252 * Before this function is called, AppendIncrementalManifestData and
253 * FinalizeIncrementalManifest should have already been called to pass all
254 * the manifest data to this object.
255 *
256 * This function performs sanity checks on the data extracted from the
257 * manifest and figures out for which WAL ranges we need summaries, and
258 * whether those summaries are available. Then, it reads and combines the
259 * data from those summary files. It also updates the backup_state with the
260 * reference TLI and LSN for the prior backup.
261 */
262void
263PrepareForIncrementalBackup(IncrementalBackupInfo *ib,
264 BackupState *backup_state)
265{
266 MemoryContext oldcontext;
267 List *expectedTLEs;
268 List *all_wslist,
269 *required_wslist = NIL;
270 ListCell *lc;
271 TimeLineHistoryEntry **tlep;
272 int num_wal_ranges;
273 int i;
274 bool found_backup_start_tli = false;
275 TimeLineID earliest_wal_range_tli = 0;
276 XLogRecPtr earliest_wal_range_start_lsn = InvalidXLogRecPtr;
277 TimeLineID latest_wal_range_tli = 0;
278
279 Assert(ib->buf.data == NULL);
280
281 /* Switch to our memory context. */
282 oldcontext = MemoryContextSwitchTo(ib->mcxt);
283
284 /*
285 * A valid backup manifest must always contain at least one WAL range
286 * (usually exactly one, unless the backup spanned a timeline switch).
287 */
288 num_wal_ranges = list_length(ib->manifest_wal_ranges);
289 if (num_wal_ranges == 0)
292 errmsg("manifest contains no required WAL ranges")));
293
294 /*
295 * Match up the TLIs that appear in the WAL ranges of the backup manifest
296 * with those that appear in this server's timeline history. We expect
297 * every backup_wal_range to match to a TimeLineHistoryEntry; if it does
298 * not, that's an error.
299 *
300 * This loop also decides which of the WAL ranges in the manifest is most
301 * ancient and which one is the newest, according to the timeline history
302 * of this server, and stores TLIs of those WAL ranges into
303 * earliest_wal_range_tli and latest_wal_range_tli. It also updates
304 * earliest_wal_range_start_lsn to the start LSN of the WAL range for
305 * earliest_wal_range_tli.
306 *
307 * Note that the return value of readTimeLineHistory puts the latest
308 * timeline at the beginning of the list, not the end. Hence, the earliest
309 * TLI is the one that occurs nearest the end of the list returned by
310 * readTimeLineHistory, and the latest TLI is the one that occurs closest
311 * to the beginning.
312 */
313 expectedTLEs = readTimeLineHistory(backup_state->starttli);
314 tlep = palloc0(num_wal_ranges * sizeof(TimeLineHistoryEntry *));
315 for (i = 0; i < num_wal_ranges; ++i)
316 {
317 backup_wal_range *range = list_nth(ib->manifest_wal_ranges, i);
318 bool saw_earliest_wal_range_tli = false;
319 bool saw_latest_wal_range_tli = false;
320
321 /* Search this server's history for this WAL range's TLI. */
322 foreach(lc, expectedTLEs)
323 {
324 TimeLineHistoryEntry *tle = lfirst(lc);
325
326 if (tle->tli == range->tli)
327 {
328 tlep[i] = tle;
329 break;
330 }
331
332 if (tle->tli == earliest_wal_range_tli)
333 saw_earliest_wal_range_tli = true;
334 if (tle->tli == latest_wal_range_tli)
335 saw_latest_wal_range_tli = true;
336 }
337
338 /*
339 * An incremental backup can only be taken relative to a backup that
340 * represents a previous state of this server. If the backup requires
341 * WAL from a timeline that's not in our history, that definitely
342 * isn't the case.
343 */
344 if (tlep[i] == NULL)
347 errmsg("timeline %u found in manifest, but not in this server's history",
348 range->tli)));
349
350 /*
351 * If we found this TLI in the server's history before encountering
352 * the latest TLI seen so far in the server's history, then this TLI
353 * is the latest one seen so far.
354 *
355 * If on the other hand we saw the earliest TLI seen so far before
356 * finding this TLI, this TLI is earlier than the earliest one seen so
357 * far. And if this is the first TLI for which we've searched, it's
358 * also the earliest one seen so far.
359 *
360 * On the first loop iteration, both things should necessarily be
361 * true.
362 */
363 if (i == 0 || !saw_latest_wal_range_tli)
364 latest_wal_range_tli = range->tli;
365 if (i == 0 || saw_earliest_wal_range_tli)
366 {
367 earliest_wal_range_tli = range->tli;
368 earliest_wal_range_start_lsn = range->start_lsn;
369 }
370 }
371
372 /*
373 * Propagate information about the prior backup into the backup_label that
374 * will be generated for this backup.
375 */
376 backup_state->istartpoint = earliest_wal_range_start_lsn;
377 backup_state->istarttli = earliest_wal_range_tli;
378
379 /*
380 * Sanity check start and end LSNs for the WAL ranges in the manifest.
381 *
382 * Commonly, there won't be any timeline switches during the prior backup
383 * at all, but if there are, they should happen at the same LSNs that this
384 * server switched timelines.
385 *
386 * Whether there are any timeline switches during the prior backup or not,
387 * the prior backup shouldn't require any WAL from a timeline prior to the
388 * start of that timeline. It also shouldn't require any WAL from later
389 * than the start of this backup.
390 *
391 * If any of these sanity checks fail, one possible explanation is that
392 * the user has generated WAL on the same timeline with the same LSNs more
393 * than once. For instance, if two standbys running on timeline 1 were
394 * both promoted and (due to a broken archiving setup) both selected new
395 * timeline ID 2, then it's possible that one of these checks might trip.
396 *
397 * Note that there are lots of ways for the user to do something very bad
398 * without tripping any of these checks, and they are not intended to be
399 * comprehensive. It's pretty hard to see how we could be certain of
400 * anything here. However, if there's a problem staring us right in the
401 * face, it's best to report it, so we do.
402 */
403 for (i = 0; i < num_wal_ranges; ++i)
404 {
405 backup_wal_range *range = list_nth(ib->manifest_wal_ranges, i);
406
407 if (range->tli == earliest_wal_range_tli)
408 {
409 if (range->start_lsn < tlep[i]->begin)
412 errmsg("manifest requires WAL from initial timeline %u starting at %X/%08X, but that timeline begins at %X/%08X",
413 range->tli,
414 LSN_FORMAT_ARGS(range->start_lsn),
415 LSN_FORMAT_ARGS(tlep[i]->begin))));
416 }
417 else
418 {
419 if (range->start_lsn != tlep[i]->begin)
422 errmsg("manifest requires WAL from continuation timeline %u starting at %X/%08X, but that timeline begins at %X/%08X",
423 range->tli,
424 LSN_FORMAT_ARGS(range->start_lsn),
425 LSN_FORMAT_ARGS(tlep[i]->begin))));
426 }
427
428 if (range->tli == latest_wal_range_tli)
429 {
430 if (range->end_lsn > backup_state->startpoint)
433 errmsg("manifest requires WAL from final timeline %u ending at %X/%08X, but this backup starts at %X/%08X",
434 range->tli,
435 LSN_FORMAT_ARGS(range->end_lsn),
436 LSN_FORMAT_ARGS(backup_state->startpoint)),
437 errhint("This can happen for incremental backups on a standby if there was little activity since the previous backup.")));
438 }
439 else
440 {
441 if (range->end_lsn != tlep[i]->end)
444 errmsg("manifest requires WAL from non-final timeline %u ending at %X/%08X, but this server switched timelines at %X/%08X",
445 range->tli,
446 LSN_FORMAT_ARGS(range->end_lsn),
447 LSN_FORMAT_ARGS(tlep[i]->end))));
448 }
449
450 }
451
452 /*
453 * Wait for WAL summarization to catch up to the backup start LSN. This
454 * will throw an error if the WAL summarizer appears to be stuck. If WAL
455 * summarization gets disabled while we're waiting, this will return
456 * immediately, and we'll error out further down if the WAL summaries are
457 * incomplete.
458 */
459 WaitForWalSummarization(backup_state->startpoint);
460
461 /*
462 * Retrieve a list of all WAL summaries on any timeline that overlap with
463 * the LSN range of interest. We could instead call GetWalSummaries() once
464 * per timeline in the loop that follows, but that would involve reading
465 * the directory multiple times. It should be mildly faster - and perhaps
466 * a bit safer - to do it just once.
467 */
468 all_wslist = GetWalSummaries(0, earliest_wal_range_start_lsn,
469 backup_state->startpoint);
470
471 /*
472 * We need WAL summaries for everything that happened during the prior
473 * backup and everything that happened afterward up until the point where
474 * the current backup started.
475 */
476 foreach(lc, expectedTLEs)
477 {
483
484 /*
485 * Working through the history of this server from the current
486 * timeline backwards, we skip everything until we find the timeline
487 * where this backup started. Most of the time, this means we won't
488 * skip anything at all, as it's unlikely that the timeline has
489 * changed since the beginning of the backup moments ago.
490 */
491 if (tle->tli == backup_state->starttli)
492 {
495 }
496 else if (!found_backup_start_tli)
497 continue;
498
499 /*
500 * Find the summaries that overlap the LSN range of interest for this
501 * timeline. If this is the earliest timeline involved, the range of
502 * interest begins with the start LSN of the prior backup; otherwise,
503 * it begins at the LSN at which this timeline came into existence. If
504 * this is the latest TLI involved, the range of interest ends at the
505 * start LSN of the current backup; otherwise, it ends at the point
506 * where we switched from this timeline to the next one.
507 */
508 if (tle->tli == earliest_wal_range_tli)
512
513 /*
514 * There is no guarantee that the WAL summaries we found cover the
515 * entire range of LSNs for which summaries are required, or indeed
516 * that we found any WAL summaries at all. Check whether we have a
517 * problem of that sort.
518 */
521 {
525 errmsg("WAL summaries are required on timeline %u from %X/%08X to %X/%08X, but no summaries for that timeline and LSN range exist",
526 tle->tli,
529 else
532 errmsg("WAL summaries are required on timeline %u from %X/%08X to %X/%08X, but the summaries for that timeline and LSN range are incomplete",
533 tle->tli,
536 errdetail("The first unsummarized LSN in this range is %X/%08X.",
538 }
539
540 /*
541 * Remember that we need to read these summaries.
542 *
543 * Technically, it's possible that this could read more files than
544 * required, since tli_wslist in theory could contain redundant
545 * summaries. For instance, if we have a summary from 0/10000000 to
546 * 0/20000000 and also one from 0/00000000 to 0/30000000, then the
547 * latter subsumes the former and the former could be ignored.
548 *
549 * We ignore this possibility because the WAL summarizer only tries to
550 * generate summaries that do not overlap. If somehow they exist,
551 * we'll do a bit of extra work but the results should still be
552 * correct.
553 */
554 required_wslist = list_concat(required_wslist, tli_wslist);
555
556 /*
557 * Timelines earlier than the one in which the prior backup began are
558 * not relevant.
559 */
560 if (tle->tli == earliest_wal_range_tli)
561 break;
562 }
563
564 /*
565 * Read all of the required block reference table files and merge all of
566 * the data into a single in-memory block reference table.
567 *
568 * See the comments for struct IncrementalBackupInfo for some thoughts on
569 * memory usage.
570 */
571 ib->brtab = CreateEmptyBlockRefTable();
572 foreach(lc, required_wslist)
573 {
574 WalSummaryFile *ws = lfirst(lc);
575 WalSummaryIO wsio;
576 BlockRefTableReader *reader;
577 RelFileLocator rlocator;
578 ForkNumber forknum;
579 BlockNumber limit_block;
580 BlockNumber blocks[BLOCKS_PER_READ];
581
582 wsio.file = OpenWalSummaryFile(ws, false);
583 wsio.filepos = 0;
584 ereport(DEBUG1,
585 (errmsg_internal("reading WAL summary file \"%s\"",
586 FilePathName(wsio.file))));
587 reader = CreateBlockRefTableReader(ReadWalSummary, &wsio,
588 FilePathName(wsio.file),
589 ReportWalSummaryError, NULL);
590 while (BlockRefTableReaderNextRelation(reader, &rlocator, &forknum,
591 &limit_block))
592 {
593 BlockRefTableSetLimitBlock(ib->brtab, &rlocator,
594 forknum, limit_block);
595
596 while (1)
597 {
598 unsigned nblocks;
599 unsigned i;
600
601 nblocks = BlockRefTableReaderGetBlocks(reader, blocks,
602 BLOCKS_PER_READ);
603 if (nblocks == 0)
604 break;
605
606 for (i = 0; i < nblocks; ++i)
607 BlockRefTableMarkBlockModified(ib->brtab, &rlocator,
608 forknum, blocks[i]);
609 }
610 }
611 DestroyBlockRefTableReader(reader);
612 FileClose(wsio.file);
613 }
614
615 /* Switch back to previous memory context. */
616 MemoryContextSwitchTo(oldcontext);
617}
618
619/*
620 * Get the pathname that should be used when a file is sent incrementally.
621 *
622 * The result is a palloc'd string.
623 */
624char *
625GetIncrementalFilePath(Oid dboid, Oid spcoid, RelFileNumber relfilenumber,
626 ForkNumber forknum, unsigned segno)
627{
628 RelPathStr path;
629 char *lastslash;
630 char *ipath;
631
632 path = GetRelationPath(dboid, spcoid, relfilenumber, INVALID_PROC_NUMBER,
633 forknum);
634
635 lastslash = strrchr(path.str, '/');
636 Assert(lastslash != NULL);
637 *lastslash = '\0';
638
639 if (segno > 0)
640 ipath = psprintf("%s/INCREMENTAL.%s.%u", path.str, lastslash + 1, segno);
641 else
642 ipath = psprintf("%s/INCREMENTAL.%s", path.str, lastslash + 1);
643
644 return ipath;
645}
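/*
 * Example (values chosen for illustration only): for a relation stored at
 * "base/5/16384", this returns "base/5/INCREMENTAL.16384" for segment 0 and
 * "base/5/INCREMENTAL.16384.2" for segment 2.
 */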
646
647/*
648 * How should we back up a particular file as part of an incremental backup?
649 *
650 * If the return value is BACK_UP_FILE_FULLY, caller should back up the whole
651 * file just as if this were not an incremental backup. The contents of the
652 * relative_block_numbers array are unspecified in this case.
653 *
654 * If the return value is BACK_UP_FILE_INCREMENTALLY, caller should include
655 * an incremental file in the backup instead of the entire file. On return,
656 * *num_blocks_required will be set to the number of blocks that need to be
657 * sent, and the actual block numbers will have been stored in
658 * relative_block_numbers, which should be an array of at least RELSEG_SIZE.
659 * In addition, *truncation_block_length will be set to the value that should
660 * be included in the incremental file.
661 */
664 Oid dboid, Oid spcoid,
665 RelFileNumber relfilenumber, ForkNumber forknum,
666 unsigned segno, size_t size,
667 unsigned *num_blocks_required,
668 BlockNumber *relative_block_numbers,
669 unsigned *truncation_block_length)
670{
671 BlockNumber limit_block;
672 BlockNumber start_blkno;
673 BlockNumber stop_blkno;
674 RelFileLocator rlocator;
675 BlockRefTableEntry *brtentry;
676 unsigned i;
677 unsigned nblocks;
678
679 /* Should only be called after PrepareForIncrementalBackup. */
680 Assert(ib->buf.data == NULL);
681
682 /*
683 * dboid could be InvalidOid if shared rel, but spcoid and relfilenumber
684 * should have legal values.
685 */
686 Assert(OidIsValid(spcoid));
687 Assert(RelFileNumberIsValid(relfilenumber));
688
689 /*
690 * If the file size is too large or not a multiple of BLCKSZ, then
691 * something weird is happening, so give up and send the whole file.
692 */
693 if ((size % BLCKSZ) != 0 || size / BLCKSZ > RELSEG_SIZE)
694 return BACK_UP_FILE_FULLY;
695
696 /*
697 * The free-space map fork is not properly WAL-logged, so we need to
698 * backup the entire file every time.
699 */
700 if (forknum == FSM_FORKNUM)
701 return BACK_UP_FILE_FULLY;
702
703 /*
704 * If this file was not part of the prior backup, back it up fully.
705 *
706 * If this file was created after the prior backup and before the start of
707 * the current backup, then the WAL summary information will tell us to
708 * back up the whole file. However, if this file was created after the
709 * start of the current backup, then the WAL summary won't know anything
710 * about it. Without this logic, we would erroneously conclude that it was
711 * OK to send it incrementally.
712 *
713 * Note that the file could have existed at the time of the prior backup,
714 * gotten deleted, and then a new file with the same name could have been
715 * created. In that case, this logic won't prevent the file from being
716 * backed up incrementally. But, if the deletion happened before the start
717 * of the current backup, the limit block will be 0, inducing a full
718 * backup. If the deletion happened after the start of the current backup,
719 * reconstruction will erroneously combine blocks from the current
720 * lifespan of the file with blocks from the previous lifespan -- but in
721 * this type of case, WAL replay to reach backup consistency should remove
722 * and recreate the file anyway, so the initial bogus contents should not
723 * matter.
724 */
725 if (backup_file_lookup(ib->manifest_files, path) == NULL)
726 {
727 char *ipath;
728
729 ipath = GetIncrementalFilePath(dboid, spcoid, relfilenumber,
730 forknum, segno);
731 if (backup_file_lookup(ib->manifest_files, ipath) == NULL)
732 return BACK_UP_FILE_FULLY;
733 }
734
735 /*
736 * Look up the special block reference table entry for the database as a
737 * whole.
738 */
739 rlocator.spcOid = spcoid;
740 rlocator.dbOid = dboid;
741 rlocator.relNumber = 0;
742 if (BlockRefTableGetEntry(ib->brtab, &rlocator, MAIN_FORKNUM,
743 &limit_block) != NULL)
744 {
745 /*
746 * According to the WAL summary, this database OID/tablespace OID
747 * pairing has been created since the previous backup. So, everything
748 * in it must be backed up fully.
749 */
750 return BACK_UP_FILE_FULLY;
751 }
752
753 /* Look up the block reference table entry for this relfilenode. */
754 rlocator.relNumber = relfilenumber;
755 brtentry = BlockRefTableGetEntry(ib->brtab, &rlocator, forknum,
756 &limit_block);
757
758 /*
759 * If there is no entry, then there have been no WAL-logged changes to the
760 * relation since the predecessor backup was taken, so we can back it up
761 * incrementally and need not include any modified blocks.
762 *
763 * However, if the file is zero-length, we should do a full backup,
764 * because an incremental file is always more than zero length, and it's
765 * silly to take an incremental backup when a full backup would be
766 * smaller.
767 */
768 if (brtentry == NULL)
769 {
770 if (size == 0)
771 return BACK_UP_FILE_FULLY;
772 *num_blocks_required = 0;
773 *truncation_block_length = size / BLCKSZ;
774 return BACK_UP_FILE_INCREMENTALLY;
775 }
776
777 /*
778 * If the limit_block is less than or equal to the point where this
779 * segment starts, send the whole file.
780 */
781 if (limit_block <= segno * RELSEG_SIZE)
782 return BACK_UP_FILE_FULLY;
783
784 /*
785 * Get relevant entries from the block reference table entry.
786 *
787 * We shouldn't overflow computing the start or stop block numbers, but if
788 * it manages to happen somehow, detect it and throw an error.
789 */
790 start_blkno = segno * RELSEG_SIZE;
791 stop_blkno = start_blkno + (size / BLCKSZ);
792 if (start_blkno / RELSEG_SIZE != segno || stop_blkno < start_blkno)
795 errmsg_internal("overflow computing block number bounds for segment %u with size %zu",
796 segno, size));
797
798 /*
799 * This will write *absolute* block numbers into the output array, but
800 * we'll transpose them below.
801 */
802 nblocks = BlockRefTableEntryGetBlocks(brtentry, start_blkno, stop_blkno,
803 relative_block_numbers, RELSEG_SIZE);
804 Assert(nblocks <= RELSEG_SIZE);
805
806 /*
807 * If we're going to have to send nearly all of the blocks, then just send
808 * the whole file, because that won't require much extra storage or
809 * transfer and will speed up and simplify backup restoration. It's not
810 * clear what threshold is most appropriate here and perhaps it ought to
811 * be configurable, but for now we're just going to say that if we'd need
812 * to send 90% of the blocks anyway, give up and send the whole file.
813 *
814 * NB: If you change the threshold here, at least make sure to back up the
815 * file fully when every single block must be sent, because there's
816 * nothing good about sending an incremental file in that case.
817 */
818 if (nblocks * BLCKSZ > size * 0.9)
819 return BACK_UP_FILE_FULLY;
820
821 /*
822 * Looks like we can send an incremental file, so sort the block numbers
823 * and then transpose them from absolute block numbers to relative block
824 * numbers if necessary.
825 *
826 * NB: If the block reference table was using the bitmap representation
827 * for a given chunk, the block numbers in that chunk will already be
828 * sorted, but when the array-of-offsets representation is used, we can
829 * receive block numbers here out of order.
830 */
831 qsort(relative_block_numbers, nblocks, sizeof(BlockNumber),
832 compare_block_numbers);
833 if (start_blkno != 0)
834 {
835 for (i = 0; i < nblocks; ++i)
836 relative_block_numbers[i] -= start_blkno;
837 }
838 *num_blocks_required = nblocks;
839
840 /*
841 * The truncation block length is the minimum length of the reconstructed
842 * file. Any block numbers below this threshold that are not present in
843 * the backup need to be fetched from the prior backup. At or above this
844 * threshold, blocks should only be included in the result if they are
845 * present in the backup. (This may require inserting zero blocks if the
846 * blocks included in the backup are non-consecutive.)
847 */
848 *truncation_block_length = size / BLCKSZ;
849 if (BlockNumberIsValid(limit_block))
850 {
851 unsigned relative_limit = limit_block - segno * RELSEG_SIZE;
852
853 /*
854 * We can't set a truncation_block_length in excess of the limit block
855 * number (relativized to the current segment). To do so would be to
856 * treat blocks from older backups as valid current contents even if
857 * they were subsequently truncated away.
858 */
859 if (*truncation_block_length < relative_limit)
860 *truncation_block_length = relative_limit;
861
862 /*
863 * We also can't set a truncation_block_length in excess of the
864 * segment size, since the reconstructed file can't be larger than
865 * that.
866 */
867 if (*truncation_block_length > RELSEG_SIZE)
868 *truncation_block_length = RELSEG_SIZE;
869 }
870
871 /* Send it incrementally. */
872 return BACK_UP_FILE_INCREMENTALLY;
873}
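/*
 * A worked example with illustrative values: consider segment 1 of a
 * relation, so start_blkno = RELSEG_SIZE (131072 with the default 1 GB
 * segment size). If the merged WAL summaries report only absolute blocks
 * 131075 and 131080 as modified, the segment is sent incrementally with
 * *num_blocks_required = 2 and relative block numbers 3 and 8. If instead
 * more than 90% of the segment's blocks had been modified, the whole
 * segment would be sent as a full file.
 */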
874
875/*
876 * Compute the size for a header of an incremental file containing a given
877 * number of blocks. The header is rounded to a multiple of BLCKSZ, but
878 * only if the file will store some block data.
879 */
880size_t
881GetIncrementalHeaderSize(unsigned num_blocks_required)
882{
883 size_t result;
884
885 /* Make sure we're not going to overflow. */
886 Assert(num_blocks_required <= RELSEG_SIZE);
887
888 /*
889 * Three four byte quantities (magic number, truncation block length,
890 * block count) followed by block numbers.
891 */
892 result = 3 * sizeof(uint32) + (sizeof(BlockNumber) * num_blocks_required);
893
894 /*
895 * Round the header size to a multiple of BLCKSZ - when not a multiple of
896 * BLCKSZ, add the missing fraction of a block. But do this only if the
897 * file will store data for some blocks, otherwise keep it small.
898 */
899 if ((num_blocks_required > 0) && (result % BLCKSZ != 0))
900 result += BLCKSZ - (result % BLCKSZ);
901
902 return result;
903}
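/*
 * Worked example with the default BLCKSZ of 8192: for 10 blocks the raw
 * header is 3 * 4 + 10 * 4 = 52 bytes, which is then padded out to 8192
 * because the file will contain block data; for 0 blocks the header stays
 * at just 12 bytes.
 */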
904
905/*
906 * Compute the size for an incremental file containing a given number of blocks.
907 */
908size_t
909GetIncrementalFileSize(unsigned num_blocks_required)
910{
911 size_t result;
912
913 /* Make sure we're not going to overflow. */
914 Assert(num_blocks_required <= RELSEG_SIZE);
915
916 /*
917 * Header with three four byte quantities (magic number, truncation block
918 * length, block count) followed by block numbers, rounded to a multiple
919 * of BLCKSZ (for files with block data), followed by block contents.
920 */
921 result = GetIncrementalHeaderSize(num_blocks_required);
922 result += BLCKSZ * num_blocks_required;
923
924 return result;
925}
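/*
 * Continuing the example above (BLCKSZ = 8192): an incremental file holding
 * 10 block images occupies 8192 (padded header) + 10 * 8192 = 90112 bytes,
 * compared with up to 1 GB for a full copy of a complete segment.
 */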
926
927/*
928 * Helper function for filemap hash table.
929 */
930static uint32
931hash_string_pointer(const char *s)
932{
933 const unsigned char *ss = (const unsigned char *) s;
934
935 return hash_bytes(ss, strlen(s));
936}
937
938/*
939 * This callback validates the manifest version for incremental backup.
940 */
941static void
942manifest_process_version(JsonManifestParseContext *context,
943 int manifest_version)
944{
945 /* Incremental backups don't work with manifest version 1 */
946 if (manifest_version == 1)
947 context->error_cb(context,
948 "backup manifest version 1 does not support incremental backup");
949}
950
951/*
952 * This callback validates the manifest system identifier against the current
953 * database server.
954 */
955static void
956manifest_process_system_identifier(JsonManifestParseContext *context,
957 uint64 manifest_system_identifier)
958{
959 uint64 system_identifier;
960
961 /* Get system identifier of current system */
962 system_identifier = GetSystemIdentifier();
963
964 if (manifest_system_identifier != system_identifier)
965 context->error_cb(context,
966 "system identifier in backup manifest is %" PRIu64 ", but database system identifier is %" PRIu64,
967 manifest_system_identifier,
968 system_identifier);
969}
970
971/*
972 * This callback is invoked for each file mentioned in the backup manifest.
973 *
974 * We store the path to each file and the size of each file for sanity-checking
975 * purposes. For further details, see comments for IncrementalBackupInfo.
976 */
977static void
978manifest_process_file(JsonManifestParseContext *context,
979 const char *pathname, uint64 size,
980 pg_checksum_type checksum_type,
981 int checksum_length,
982 uint8 *checksum_payload)
983{
984 IncrementalBackupInfo *ib = context->private_data;
985 backup_file_entry *entry;
986 bool found;
987
988 entry = backup_file_insert(ib->manifest_files, pathname, &found);
989 if (!found)
990 {
991 entry->path = MemoryContextStrdup(ib->manifest_files->ctx,
992 pathname);
993 entry->size = size;
994 }
995}
996
997/*
998 * This callback is invoked for each WAL range mentioned in the backup
999 * manifest.
1000 *
1001 * We're just interested in learning the oldest LSN and the corresponding TLI
1002 * that appear in any WAL range.
1003 */
1004static void
1005manifest_process_wal_range(JsonManifestParseContext *context,
1006 TimeLineID tli, XLogRecPtr start_lsn,
1007 XLogRecPtr end_lsn)
1008{
1009 IncrementalBackupInfo *ib = context->private_data;
1010 backup_wal_range *range = palloc0_object(backup_wal_range);
1011
1012 range->tli = tli;
1013 range->start_lsn = start_lsn;
1014 range->end_lsn = end_lsn;
1015 ib->manifest_wal_ranges = lappend(ib->manifest_wal_ranges, range);
1016}
1017
1018/*
1019 * This callback is invoked if an error occurs while parsing the backup
1020 * manifest.
1021 */
1022static void
1023manifest_report_error(JsonManifestParseContext *context, const char *fmt,...)
1024{
1025 StringInfoData errbuf;
1026
1027 initStringInfo(&errbuf);
1028
1029 for (;;)
1030 {
1031 va_list ap;
1032 int needed;
1033
1034 va_start(ap, fmt);
1035 needed = appendStringInfoVA(&errbuf, fmt, ap);
1036 va_end(ap);
1037 if (needed == 0)
1038 break;
1039 enlargeStringInfo(&errbuf, needed);
1040 }
1041
1042 ereport(ERROR,
1043 errmsg_internal("%s", errbuf.data));
1044}
1045
1046/*
1047 * Quicksort comparator for block numbers.
1048 */
1049static int
1050compare_block_numbers(const void *a, const void *b)
1051{
1052 BlockNumber aa = *(BlockNumber *) a;
1053 BlockNumber bb = *(BlockNumber *) b;
1054
1055 return pg_cmp_u32(aa, bb);
1056}