PostgreSQL Source Code git master
Loading...
Searching...
No Matches
ginfast.c File Reference
#include "postgres.h"
#include "access/gin_private.h"
#include "access/ginxlog.h"
#include "access/xlog.h"
#include "access/xloginsert.h"
#include "catalog/pg_am.h"
#include "commands/vacuum.h"
#include "miscadmin.h"
#include "port/pg_bitutils.h"
#include "postmaster/autovacuum.h"
#include "storage/indexfsm.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
#include "utils/acl.h"
#include "utils/fmgrprotos.h"
#include "utils/memutils.h"
#include "utils/rel.h"
Include dependency graph for ginfast.c:

Go to the source code of this file.

Data Structures

struct  KeyArray
 

Macros

#define GIN_PAGE_FREESIZE    ( (Size) BLCKSZ - MAXALIGN(SizeOfPageHeaderData) - MAXALIGN(sizeof(GinPageOpaqueData)) )
 

Typedefs

typedef struct KeyArray KeyArray
 

Functions

static int32 writeListPage (Relation index, Buffer buffer, const IndexTuple *tuples, int32 ntuples, BlockNumber rightlink)
 
static void makeSublist (Relation index, IndexTuple *tuples, int32 ntuples, GinMetaPageData *res)
 
void ginHeapTupleFastInsert (GinState *ginstate, GinTupleCollector *collector)
 
void ginHeapTupleFastCollect (GinState *ginstate, GinTupleCollector *collector, OffsetNumber attnum, Datum value, bool isNull, ItemPointer ht_ctid)
 
static void shiftList (Relation index, Buffer metabuffer, BlockNumber newHead, bool fill_fsm, IndexBulkDeleteResult *stats)
 
static void initKeyArray (KeyArray *keys, int32 maxvalues)
 
static void addDatum (KeyArray *keys, Datum datum, GinNullCategory category)
 
static void processPendingPage (BuildAccumulator *accum, KeyArray *ka, Page page, OffsetNumber startoff)
 
void ginInsertCleanup (GinState *ginstate, bool full_clean, bool fill_fsm, bool forceCleanup, IndexBulkDeleteResult *stats)
 
Datum gin_clean_pending_list (PG_FUNCTION_ARGS)
 

Variables

int gin_pending_list_limit = 0
 

Macro Definition Documentation

◆ GIN_PAGE_FREESIZE

Definition at line 41 of file ginfast.c.

44{
45 Datum *keys; /* expansible array */
46 GinNullCategory *categories; /* another expansible array */
47 int32 nvalues; /* current number of valid entries */
48 int32 maxvalues; /* allocated size of arrays */
49} KeyArray;
50
51
52/*
53 * Build a pending-list page from the given array of tuples, and write it out.
54 *
55 * Returns amount of free space left on the page.
56 */
57static int32
59 const IndexTuple *tuples, int32 ntuples, BlockNumber rightlink)
60{
61 Page page = BufferGetPage(buffer);
62 int32 i,
64 size = 0;
66 off;
67 PGAlignedBlock workspace;
68 char *ptr;
69
71
72 GinInitBuffer(buffer, GIN_LIST);
73
75 ptr = workspace.data;
76
77 for (i = 0; i < ntuples; i++)
78 {
79 int this_size = IndexTupleSize(tuples[i]);
80
81 memcpy(ptr, tuples[i], this_size);
82 ptr += this_size;
83 size += this_size;
84
85 l = PageAddItem(page, tuples[i], this_size, off, false, false);
86
87 if (l == InvalidOffsetNumber)
88 elog(ERROR, "failed to add item to index page in \"%s\"",
90
91 off++;
92 }
93
94 Assert(size <= BLCKSZ); /* else we overran workspace */
95
96 GinPageGetOpaque(page)->rightlink = rightlink;
97
98 /*
99 * tail page may contain only whole row(s) or final part of row placed on
100 * previous pages (a "row" here meaning all the index tuples generated for
101 * one heap tuple)
102 */
103 if (rightlink == InvalidBlockNumber)
104 {
105 GinPageSetFullRow(page);
106 GinPageGetOpaque(page)->maxoff = 1;
107 }
108 else
109 {
110 GinPageGetOpaque(page)->maxoff = 0;
111 }
112
113 MarkBufferDirty(buffer);
114
116 {
119
120 data.rightlink = rightlink;
121 data.ntuples = ntuples;
122
125
127 XLogRegisterBufData(0, workspace.data, size);
128
130 PageSetLSN(page, recptr);
131 }
132
133 /* get free space before releasing buffer */
135
136 UnlockReleaseBuffer(buffer);
137
139
140 return freesize;
141}
142
143static void
144makeSublist(Relation index, IndexTuple *tuples, int32 ntuples,
145 GinMetaPageData *res)
146{
149 int i,
150 size = 0,
151 tupsize;
152 int startTuple = 0;
153
154 Assert(ntuples > 0);
155
156 /*
157 * Split tuples into pages
158 */
159 for (i = 0; i < ntuples; i++)
160 {
162 {
164
166 {
167 res->nPendingPages++;
169 tuples + startTuple,
170 i - startTuple,
172 }
173 else
174 {
176 }
177
179 startTuple = i;
180 size = 0;
181 }
182
183 tupsize = MAXALIGN(IndexTupleSize(tuples[i])) + sizeof(ItemIdData);
184
185 if (size + tupsize > GinListPageSize)
186 {
187 /* won't fit, force a new page and reprocess */
188 i--;
190 }
191 else
192 {
193 size += tupsize;
194 }
195 }
196
197 /*
198 * Write last page
199 */
202 tuples + startTuple,
203 ntuples - startTuple,
205 res->nPendingPages++;
206 /* that was only one heap tuple */
207 res->nPendingHeapTuples = 1;
208}
209
210/*
211 * Write the index tuples contained in *collector into the index's
212 * pending list.
213 *
214 * Function guarantees that all these tuples will be inserted consecutively,
215 * preserving order
216 */
217void
219{
220 Relation index = ginstate->index;
223 GinMetaPageData *metadata = NULL;
224 Buffer buffer = InvalidBuffer;
225 Page page = NULL;
227 bool separateList = false;
228 bool needCleanup = false;
229 int cleanupSize;
230 bool needWal;
231
232 if (collector->ntuples == 0)
233 return;
234
236
237 data.locator = index->rd_locator;
238 data.ntuples = 0;
239 data.newRightlink = data.prevTail = InvalidBlockNumber;
240
243
244 /*
245 * An insertion to the pending list could logically belong anywhere in the
246 * tree, so it conflicts with all serializable scans. All scans acquire a
247 * predicate lock on the metabuffer to represent that. Therefore we'll
248 * check for conflicts in, but not until we have the page locked and are
249 * ready to modify the page.
250 */
251
252 if (collector->sumsize + collector->ntuples * sizeof(ItemIdData) > GinListPageSize)
253 {
254 /*
255 * Total size is greater than one page => make sublist
256 */
257 separateList = true;
258 }
259 else
260 {
262 metadata = GinPageGetMeta(metapage);
263
264 if (metadata->head == InvalidBlockNumber ||
265 collector->sumsize + collector->ntuples * sizeof(ItemIdData) > metadata->tailFreeSize)
266 {
267 /*
268 * Pending list is empty or total size is greater than freespace
269 * on tail page => make sublist
270 *
271 * We unlock metabuffer to keep high concurrency
272 */
273 separateList = true;
275 }
276 }
277
278 if (separateList)
279 {
280 /*
281 * We should make sublist separately and append it to the tail
282 */
284
285 memset(&sublist, 0, sizeof(GinMetaPageData));
286 makeSublist(index, collector->tuples, collector->ntuples, &sublist);
287
288 /*
289 * metapage was unlocked, see above
290 */
292 metadata = GinPageGetMeta(metapage);
293
295
296 if (metadata->head == InvalidBlockNumber)
297 {
298 /*
299 * Main list is empty, so just insert sublist as main list
300 */
302
303 metadata->head = sublist.head;
304 metadata->tail = sublist.tail;
305 metadata->tailFreeSize = sublist.tailFreeSize;
306
307 metadata->nPendingPages = sublist.nPendingPages;
308 metadata->nPendingHeapTuples = sublist.nPendingHeapTuples;
309
310 if (needWal)
312 }
313 else
314 {
315 /*
316 * Merge lists
317 */
318 data.prevTail = metadata->tail;
319 data.newRightlink = sublist.head;
320
321 buffer = ReadBuffer(index, metadata->tail);
322 LockBuffer(buffer, GIN_EXCLUSIVE);
323 page = BufferGetPage(buffer);
324
325 Assert(GinPageGetOpaque(page)->rightlink == InvalidBlockNumber);
326
328
329 GinPageGetOpaque(page)->rightlink = sublist.head;
330
331 MarkBufferDirty(buffer);
332
333 metadata->tail = sublist.tail;
334 metadata->tailFreeSize = sublist.tailFreeSize;
335
336 metadata->nPendingPages += sublist.nPendingPages;
337 metadata->nPendingHeapTuples += sublist.nPendingHeapTuples;
338
339 if (needWal)
340 {
343 }
344 }
345 }
346 else
347 {
348 /*
349 * Insert into tail page. Metapage is already locked
350 */
351 OffsetNumber l,
352 off;
353 int i,
354 tupsize;
355 char *ptr;
356 char *collectordata;
357
359
360 buffer = ReadBuffer(index, metadata->tail);
361 LockBuffer(buffer, GIN_EXCLUSIVE);
362 page = BufferGetPage(buffer);
363
364 off = (PageIsEmpty(page)) ? FirstOffsetNumber :
366
367 collectordata = ptr = (char *) palloc(collector->sumsize);
368
369 data.ntuples = collector->ntuples;
370
372
373 if (needWal)
375
376 /*
377 * Increase counter of heap tuples
378 */
379 Assert(GinPageGetOpaque(page)->maxoff <= metadata->nPendingHeapTuples);
380 GinPageGetOpaque(page)->maxoff++;
381 metadata->nPendingHeapTuples++;
382
383 for (i = 0; i < collector->ntuples; i++)
384 {
385 tupsize = IndexTupleSize(collector->tuples[i]);
386 l = PageAddItem(page, collector->tuples[i], tupsize, off, false, false);
387
388 if (l == InvalidOffsetNumber)
389 elog(ERROR, "failed to add item to index page in \"%s\"",
391
392 memcpy(ptr, collector->tuples[i], tupsize);
393 ptr += tupsize;
394
395 off++;
396 }
397
398 Assert((ptr - collectordata) <= collector->sumsize);
399
400 MarkBufferDirty(buffer);
401
402 if (needWal)
403 {
406 }
407
408 metadata->tailFreeSize = PageGetExactFreeSpace(page);
409 }
410
411 /*
412 * Set pd_lower just past the end of the metadata. This is essential,
413 * because without doing so, metadata will be lost if xlog.c compresses
414 * the page. (We must do this here because pre-v11 versions of PG did not
415 * set the metapage's pd_lower correctly, so a pg_upgraded index might
416 * contain the wrong value.)
417 */
418 ((PageHeader) metapage)->pd_lower =
419 ((char *) metadata + sizeof(GinMetaPageData)) - (char *) metapage;
420
421 /*
422 * Write metabuffer, make xlog entry
423 */
425
426 if (needWal)
427 {
429
430 memcpy(&data.metadata, metadata, sizeof(GinMetaPageData));
431
434
437
438 if (buffer != InvalidBuffer)
439 {
440 PageSetLSN(page, recptr);
441 }
442 }
443
444 if (buffer != InvalidBuffer)
445 UnlockReleaseBuffer(buffer);
446
447 /*
448 * Force pending list cleanup when it becomes too long. And,
449 * ginInsertCleanup could take significant amount of time, so we prefer to
450 * call it when it can do all the work in a single collection cycle. In
451 * non-vacuum mode, it shouldn't require maintenance_work_mem, so fire it
452 * while pending list is still small enough to fit into
453 * gin_pending_list_limit.
454 *
455 * ginInsertCleanup() should not be called inside our CRIT_SECTION.
456 */
458 if (metadata->nPendingPages * GIN_PAGE_FREESIZE > cleanupSize * (Size) 1024)
459 needCleanup = true;
460
462
464
465 /*
 466 * Since it could contend with a concurrent cleanup process, we don't
 467 * clean up the pending list forcibly.
468 */
469 if (needCleanup)
470 ginInsertCleanup(ginstate, false, true, false, NULL);
471}
472
473/*
474 * Create temporary index tuples for a single indexable item (one index column
475 * for the heap tuple specified by ht_ctid), and append them to the array
476 * in *collector. They will subsequently be written out using
477 * ginHeapTupleFastInsert. Note that to guarantee consistent state, all
478 * temp tuples for a given heap tuple must be written in one call to
479 * ginHeapTupleFastInsert.
480 */
481void
484 OffsetNumber attnum, Datum value, bool isNull,
486{
487 Datum *entries;
488 GinNullCategory *categories;
489 int32 i,
490 nentries;
491
492 /*
493 * Extract the key values that need to be inserted in the index
494 */
495 entries = ginExtractEntries(ginstate, attnum, value, isNull,
496 &nentries, &categories);
497
498 /*
499 * Protect against integer overflow in allocation calculations
500 */
501 if (nentries < 0 ||
502 collector->ntuples + nentries > MaxAllocSize / sizeof(IndexTuple))
503 elog(ERROR, "too many entries for GIN index");
504
505 /*
506 * Allocate/reallocate memory for storing collected tuples
507 */
508 if (collector->tuples == NULL)
509 {
510 /*
511 * Determine the number of elements to allocate in the tuples array
512 * initially. Make it a power of 2 to avoid wasting memory when
513 * resizing (since palloc likes powers of 2).
514 */
515 collector->lentuples = pg_nextpower2_32(Max(16, nentries));
516 collector->tuples = palloc_array(IndexTuple, collector->lentuples);
517 }
518 else if (collector->lentuples < collector->ntuples + nentries)
519 {
520 /*
521 * Advance lentuples to the next suitable power of 2. This won't
522 * overflow, though we could get to a value that exceeds
523 * MaxAllocSize/sizeof(IndexTuple), causing an error in repalloc.
524 */
525 collector->lentuples = pg_nextpower2_32(collector->ntuples + nentries);
526 collector->tuples = repalloc_array(collector->tuples,
527 IndexTuple, collector->lentuples);
528 }
529
530 /*
531 * Build an index tuple for each key value, and add to array. In pending
532 * tuples we just stick the heap TID into t_tid.
533 */
534 for (i = 0; i < nentries; i++)
535 {
536 IndexTuple itup;
537
538 itup = GinFormTuple(ginstate, attnum, entries[i], categories[i],
539 NULL, 0, 0, true);
540 itup->t_tid = *ht_ctid;
541 collector->tuples[collector->ntuples++] = itup;
542 collector->sumsize += IndexTupleSize(itup);
543 }
544}
545
546/*
547 * Deletes pending list pages up to (not including) newHead page.
548 * If newHead == InvalidBlockNumber then function drops the whole list.
549 *
550 * metapage is pinned and exclusive-locked throughout this function.
551 */
552static void
554 bool fill_fsm, IndexBulkDeleteResult *stats)
555{
557 GinMetaPageData *metadata;
559
561 metadata = GinPageGetMeta(metapage);
562 blknoToDelete = metadata->head;
563
564 do
565 {
566 Page page;
567 int i;
572
573 data.ndeleted = 0;
574 while (data.ndeleted < GIN_NDELETE_AT_ONCE && blknoToDelete != newHead)
575 {
576 freespace[data.ndeleted] = blknoToDelete;
577 buffers[data.ndeleted] = ReadBuffer(index, blknoToDelete);
578 LockBuffer(buffers[data.ndeleted], GIN_EXCLUSIVE);
579 page = BufferGetPage(buffers[data.ndeleted]);
580
581 data.ndeleted++;
582
583 Assert(!GinPageIsDeleted(page));
584
585 nDeletedHeapTuples += GinPageGetOpaque(page)->maxoff;
586 blknoToDelete = GinPageGetOpaque(page)->rightlink;
587 }
588
589 if (stats)
590 stats->pages_deleted += data.ndeleted;
591
592 /*
593 * This operation touches an unusually large number of pages, so
594 * prepare the XLogInsert machinery for that before entering the
595 * critical section.
596 */
598 XLogEnsureRecordSpace(data.ndeleted, 0);
599
601
602 metadata->head = blknoToDelete;
603
604 Assert(metadata->nPendingPages >= data.ndeleted);
605 metadata->nPendingPages -= data.ndeleted;
608
610 {
611 metadata->tail = InvalidBlockNumber;
612 metadata->tailFreeSize = 0;
613 metadata->nPendingPages = 0;
614 metadata->nPendingHeapTuples = 0;
615 }
616
617 /*
618 * Set pd_lower just past the end of the metadata. This is essential,
619 * because without doing so, metadata will be lost if xlog.c
620 * compresses the page. (We must do this here because pre-v11
621 * versions of PG did not set the metapage's pd_lower correctly, so a
622 * pg_upgraded index might contain the wrong value.)
623 */
624 ((PageHeader) metapage)->pd_lower =
625 ((char *) metadata + sizeof(GinMetaPageData)) - (char *) metapage;
626
628
629 for (i = 0; i < data.ndeleted; i++)
630 {
631 page = BufferGetPage(buffers[i]);
632 GinPageGetOpaque(page)->flags = GIN_DELETED;
633 MarkBufferDirty(buffers[i]);
634 }
635
637 {
639
643 for (i = 0; i < data.ndeleted; i++)
644 XLogRegisterBuffer(i + 1, buffers[i], REGBUF_WILL_INIT);
645
646 memcpy(&data.metadata, metadata, sizeof(GinMetaPageData));
647
649 sizeof(ginxlogDeleteListPages));
650
653
654 for (i = 0; i < data.ndeleted; i++)
655 {
656 page = BufferGetPage(buffers[i]);
657 PageSetLSN(page, recptr);
658 }
659 }
660
661 for (i = 0; i < data.ndeleted; i++)
662 UnlockReleaseBuffer(buffers[i]);
663
665
666 for (i = 0; fill_fsm && i < data.ndeleted; i++)
667 RecordFreeIndexPage(index, freespace[i]);
668
669 } while (blknoToDelete != newHead);
670}
671
672/* Initialize empty KeyArray */
673static void
674initKeyArray(KeyArray *keys, int32 maxvalues)
675{
676 keys->keys = palloc_array(Datum, maxvalues);
677 keys->categories = palloc_array(GinNullCategory, maxvalues);
678 keys->nvalues = 0;
679 keys->maxvalues = maxvalues;
680}
681
682/* Add datum to KeyArray, resizing if needed */
683static void
684addDatum(KeyArray *keys, Datum datum, GinNullCategory category)
685{
686 if (keys->nvalues >= keys->maxvalues)
687 {
688 keys->maxvalues *= 2;
689 keys->keys = repalloc_array(keys->keys, Datum, keys->maxvalues);
691 }
692
693 keys->keys[keys->nvalues] = datum;
694 keys->categories[keys->nvalues] = category;
695 keys->nvalues++;
696}
697
698/*
699 * Collect data from a pending-list page in preparation for insertion into
700 * the main index.
701 *
702 * Go through all tuples >= startoff on page and collect values in accum
703 *
704 * Note that ka is just workspace --- it does not carry any state across
705 * calls.
706 */
707static void
710{
713 maxoff;
714 OffsetNumber attrnum;
715
716 /* reset *ka to empty */
717 ka->nvalues = 0;
718
719 maxoff = PageGetMaxOffsetNumber(page);
720 Assert(maxoff >= FirstOffsetNumber);
722 attrnum = 0;
723
724 for (i = startoff; i <= maxoff; i = OffsetNumberNext(i))
725 {
726 IndexTuple itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, i));
730
731 /* Check for change of heap TID or attnum */
733
735 {
736 heapptr = itup->t_tid;
737 attrnum = curattnum;
738 }
739 else if (!(ItemPointerEquals(&heapptr, &itup->t_tid) &&
740 curattnum == attrnum))
741 {
742 /*
743 * ginInsertBAEntries can insert several datums per call, but only
744 * for one heap tuple and one column. So call it at a boundary,
745 * and reset ka.
746 */
747 ginInsertBAEntries(accum, &heapptr, attrnum,
748 ka->keys, ka->categories, ka->nvalues);
749 ka->nvalues = 0;
750 heapptr = itup->t_tid;
751 attrnum = curattnum;
752 }
753
754 /* Add key to KeyArray */
757 }
758
759 /* Dump out all remaining keys */
760 ginInsertBAEntries(accum, &heapptr, attrnum,
761 ka->keys, ka->categories, ka->nvalues);
762}
763
764/*
765 * Move tuples from pending pages into regular GIN structure.
766 *
 767 * At first glance this looks completely non-crash-safe. But if we crash
768 * after posting entries to the main index and before removing them from the
769 * pending list, it's okay because when we redo the posting later on, nothing
770 * bad will happen.
771 *
 772 * fill_fsm indicates that ginInsertCleanup should add deleted pages to
 773 * the FSM; otherwise, the caller is responsible for putting deleted
 774 * pages into the FSM.
775 *
776 * If stats isn't null, we count deleted pending pages into the counts.
777 */
778void
779ginInsertCleanup(GinState *ginstate, bool full_clean,
780 bool fill_fsm, bool forceCleanup,
782{
783 Relation index = ginstate->index;
785 buffer;
787 page;
788 GinMetaPageData *metadata;
790 oldCtx;
791 BuildAccumulator accum;
792 KeyArray datums;
793 BlockNumber blkno,
795 bool cleanupFinish = false;
796 bool fsm_vac = false;
797 int workMemory;
798
799 /*
 800 * We would like to prevent concurrent cleanup processes. For that we
 801 * lock the metapage in exclusive mode using a LockPage() call. Nobody
 802 * else uses that lock on the metapage, so concurrent insertion into the
 803 * pending list remains possible.
804 */
805
806 if (forceCleanup)
807 {
808 /*
809 * We are called from [auto]vacuum/analyze or gin_clean_pending_list()
 810 * and we would like to wait for any concurrent cleanup to finish.
811 */
813 workMemory =
816 }
817 else
818 {
819 /*
 820 * We are called from a regular insert; if we see a concurrent cleanup
 821 * in progress, just exit in the hope that the concurrent process will
 822 * clean up the pending list.
823 */
825 return;
827 }
828
832 metadata = GinPageGetMeta(metapage);
833
834 if (metadata->head == InvalidBlockNumber)
835 {
836 /* Nothing to do */
839 return;
840 }
841
842 /*
843 * Remember a tail page to prevent infinite cleanup if other backends add
 844 * new tuples faster than we can clean them up.
845 */
846 blknoFinish = metadata->tail;
847
848 /*
849 * Read and lock head of pending list
850 */
851 blkno = metadata->head;
852 buffer = ReadBuffer(index, blkno);
853 LockBuffer(buffer, GIN_SHARE);
854 page = BufferGetPage(buffer);
855
857
858 /*
859 * Initialize. All temporary space will be in opCtx
860 */
862 "GIN insert cleanup temporary context",
864
866
867 initKeyArray(&datums, 128);
868 ginInitBA(&accum);
869 accum.ginstate = ginstate;
870
871 /*
872 * At the top of this loop, we have pin and lock on the current page of
873 * the pending list. However, we'll release that before exiting the loop.
874 * Note we also have pin but not lock on the metapage.
875 */
876 for (;;)
877 {
878 Assert(!GinPageIsDeleted(page));
879
880 /*
 881 * Are we walking through the page that we remember was the tail when
 882 * we started our cleanup? If the caller asks us to clean up the whole
 883 * pending list, ignore the old tail; we will work until the list
 884 * becomes empty.
885 */
886 if (blkno == blknoFinish && full_clean == false)
887 cleanupFinish = true;
888
889 /*
890 * read page's datums into accum
891 */
892 processPendingPage(&accum, &datums, page, FirstOffsetNumber);
893
894 vacuum_delay_point(false);
895
896 /*
897 * Is it time to flush memory to disk? Flush if we are at the end of
898 * the pending list, or if we have a full row and memory is getting
899 * full.
900 */
901 if (GinPageGetOpaque(page)->rightlink == InvalidBlockNumber ||
902 (GinPageHasFullRow(page) &&
903 accum.allocatedMemory >= workMemory * (Size) 1024))
904 {
906 uint32 nlist;
907 Datum key;
908 GinNullCategory category;
909 OffsetNumber maxoff,
910 attnum;
911
912 /*
913 * Unlock current page to increase performance. Changes of page
914 * will be checked later by comparing maxoff after completion of
915 * memory flush.
916 */
917 maxoff = PageGetMaxOffsetNumber(page);
918 LockBuffer(buffer, GIN_UNLOCK);
919
920 /*
921 * Moving collected data into regular structure can take
922 * significant amount of time - so, run it without locking pending
923 * list.
924 */
925 ginBeginBAScan(&accum);
926 while ((list = ginGetBAEntry(&accum,
927 &attnum, &key, &category, &nlist)) != NULL)
928 {
929 ginEntryInsert(ginstate, attnum, key, category,
930 list, nlist, NULL);
931 vacuum_delay_point(false);
932 }
933
934 /*
935 * Lock the whole list to remove pages
936 */
938 LockBuffer(buffer, GIN_SHARE);
939
940 Assert(!GinPageIsDeleted(page));
941
942 /*
943 * While we left the page unlocked, more stuff might have gotten
944 * added to it. If so, process those entries immediately. There
945 * shouldn't be very many, so we don't worry about the fact that
946 * we're doing this with exclusive lock. Insertion algorithm
947 * guarantees that inserted row(s) will not continue on next page.
948 * NOTE: intentionally no vacuum_delay_point in this loop.
949 */
950 if (PageGetMaxOffsetNumber(page) != maxoff)
951 {
952 ginInitBA(&accum);
953 processPendingPage(&accum, &datums, page, maxoff + 1);
954
955 ginBeginBAScan(&accum);
956 while ((list = ginGetBAEntry(&accum,
957 &attnum, &key, &category, &nlist)) != NULL)
958 ginEntryInsert(ginstate, attnum, key, category,
959 list, nlist, NULL);
960 }
961
962 /*
963 * Remember next page - it will become the new list head
964 */
965 blkno = GinPageGetOpaque(page)->rightlink;
966 UnlockReleaseBuffer(buffer); /* shiftList will do exclusive
967 * locking */
968
969 /*
970 * remove read pages from pending list, at this point all content
971 * of read pages is in regular structure
972 */
973 shiftList(index, metabuffer, blkno, fill_fsm, stats);
974
975 /* At this point, some pending pages have been freed up */
976 fsm_vac = true;
977
978 Assert(blkno == metadata->head);
980
981 /*
982 * if we removed the whole pending list or we cleanup tail (which
983 * we remembered on start our cleanup process) then just exit
984 */
985 if (blkno == InvalidBlockNumber || cleanupFinish)
986 break;
987
988 /*
989 * release memory used so far and reinit state
990 */
992 initKeyArray(&datums, datums.maxvalues);
993 ginInitBA(&accum);
994 }
995 else
996 {
997 blkno = GinPageGetOpaque(page)->rightlink;
998 UnlockReleaseBuffer(buffer);
999 }
1000
1001 /*
1002 * Read next page in pending list
1003 */
1004 vacuum_delay_point(false);
1005 buffer = ReadBuffer(index, blkno);
1006 LockBuffer(buffer, GIN_SHARE);
1007 page = BufferGetPage(buffer);
1008 }
1009
1012
1013 /*
1014 * As pending list pages can have a high churn rate, it is desirable to
1015 * recycle them immediately to the FreeSpaceMap when ordinary backends
1016 * clean the list.
1017 */
1018 if (fsm_vac && fill_fsm)
1020
1021 /* Clean up temporary space */
1024}
1025
1026/*
1027 * SQL-callable function to clean the insert pending list
1028 */
1029Datum
1031{
1032 Oid indexoid = PG_GETARG_OID(0);
1033 Relation indexRel = index_open(indexoid, RowExclusiveLock);
1035
1036 if (RecoveryInProgress())
1037 ereport(ERROR,
1039 errmsg("recovery is in progress"),
1040 errhint("GIN pending list cannot be cleaned up during recovery.")));
1041
1042 /* Must be a GIN index */
1043 if (indexRel->rd_rel->relkind != RELKIND_INDEX ||
1044 indexRel->rd_rel->relam != GIN_AM_OID)
1045 ereport(ERROR,
1047 errmsg("\"%s\" is not a GIN index",
1048 RelationGetRelationName(indexRel))));
1049
1050 /*
1051 * Reject attempts to read non-local temporary relations; we would be
1052 * likely to get wrong data since we have no visibility into the owning
1053 * session's local buffers.
1054 */
1055 if (RELATION_IS_OTHER_TEMP(indexRel))
1056 ereport(ERROR,
1058 errmsg("cannot access temporary indexes of other sessions")));
1059
1060 /* User must own the index (comparable to privileges needed for VACUUM) */
1063 RelationGetRelationName(indexRel));
1064
1065 memset(&stats, 0, sizeof(stats));
1066
1067 /*
1068 * Can't assume anything about the content of an !indisready index. Make
1069 * those a no-op, not an error, so users can just run this function on all
1070 * indexes of the access method. Since an indisready&&!indisvalid index
1071 * is merely awaiting missed aminsert calls, we're capable of processing
1072 * it. Decline to do so, out of an abundance of caution.
1073 */
1074 if (indexRel->rd_index->indisvalid)
1075 {
1076 GinState ginstate;
1077
1078 initGinState(&ginstate, indexRel);
1079 ginInsertCleanup(&ginstate, true, true, true, &stats);
1080 }
1081 else
1084 errmsg("index \"%s\" is not valid",
1085 RelationGetRelationName(indexRel))));
1086
1087 index_close(indexRel, RowExclusiveLock);
1088
1090}
@ ACLCHECK_NOT_OWNER
Definition acl.h:185
void aclcheck_error(AclResult aclerr, ObjectType objtype, const char *objectname)
Definition aclchk.c:2654
bool object_ownercheck(Oid classid, Oid objectid, Oid roleid)
Definition aclchk.c:4090
int autovacuum_work_mem
Definition autovacuum.c:120
uint32 BlockNumber
Definition block.h:31
#define InvalidBlockNumber
Definition block.h:33
int Buffer
Definition buf.h:23
#define InvalidBuffer
Definition buf.h:25
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition bufmgr.c:4356
void ReleaseBuffer(Buffer buffer)
Definition bufmgr.c:5501
void UnlockReleaseBuffer(Buffer buffer)
Definition bufmgr.c:5518
void MarkBufferDirty(Buffer buffer)
Definition bufmgr.c:3056
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition bufmgr.c:864
static Page BufferGetPage(Buffer buffer)
Definition bufmgr.h:466
static void LockBuffer(Buffer buffer, BufferLockMode mode)
Definition bufmgr.h:328
Size PageGetExactFreeSpace(const PageData *page)
Definition bufpage.c:957
static bool PageIsEmpty(const PageData *page)
Definition bufpage.h:223
PageHeaderData * PageHeader
Definition bufpage.h:173
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition bufpage.h:243
static void * PageGetItem(PageData *page, const ItemIdData *itemId)
Definition bufpage.h:353
static void PageSetLSN(Page page, XLogRecPtr lsn)
Definition bufpage.h:390
PageData * Page
Definition bufpage.h:81
#define PageAddItem(page, item, size, offsetNumber, overwrite, is_heap)
Definition bufpage.h:471
static OffsetNumber PageGetMaxOffsetNumber(const PageData *page)
Definition bufpage.h:371
#define MAXALIGN(LEN)
Definition c.h:826
#define Max(x, y)
Definition c.h:991
#define Assert(condition)
Definition c.h:873
int64_t int64
Definition c.h:543
int32_t int32
Definition c.h:542
uint32_t uint32
Definition c.h:546
size_t Size
Definition c.h:619
int errhint(const char *fmt,...)
Definition elog.c:1330
int errcode(int sqlerrcode)
Definition elog.c:863
int errmsg(const char *fmt,...)
Definition elog.c:1080
#define DEBUG1
Definition elog.h:30
#define ERROR
Definition elog.h:39
#define elog(elevel,...)
Definition elog.h:226
#define ereport(elevel,...)
Definition elog.h:150
#define MaxAllocSize
Definition fe_memutils.h:22
#define repalloc_array(pointer, type, count)
Definition fe_memutils.h:78
#define palloc_array(type, count)
Definition fe_memutils.h:76
#define PG_GETARG_OID(n)
Definition fmgr.h:275
#define PG_RETURN_INT64(x)
Definition fmgr.h:370
#define PG_FUNCTION_ARGS
Definition fmgr.h:193
#define GinGetPendingListCleanupSize(relation)
Definition gin_private.h:40
#define GIN_UNLOCK
Definition gin_private.h:50
#define GIN_EXCLUSIVE
Definition gin_private.h:52
#define GIN_SHARE
Definition gin_private.h:51
#define GinListPageSize
Definition ginblock.h:327
#define GIN_METAPAGE_BLKNO
Definition ginblock.h:51
#define GinPageHasFullRow(page)
Definition ginblock.h:119
#define GinPageGetOpaque(page)
Definition ginblock.h:110
#define GIN_DELETED
Definition ginblock.h:43
#define GIN_LIST
Definition ginblock.h:45
signed char GinNullCategory
Definition ginblock.h:206
#define GinPageGetMeta(p)
Definition ginblock.h:104
#define GinPageIsDeleted(page)
Definition ginblock.h:124
#define GinPageSetFullRow(page)
Definition ginblock.h:120
void ginBeginBAScan(BuildAccumulator *accum)
Definition ginbulk.c:256
ItemPointerData * ginGetBAEntry(BuildAccumulator *accum, OffsetNumber *attnum, Datum *key, GinNullCategory *category, uint32 *n)
Definition ginbulk.c:267
void ginInsertBAEntries(BuildAccumulator *accum, ItemPointer heapptr, OffsetNumber attnum, Datum *entries, GinNullCategory *categories, int32 nentries)
Definition ginbulk.c:209
void ginInitBA(BuildAccumulator *accum)
Definition ginbulk.c:109
IndexTuple GinFormTuple(GinState *ginstate, OffsetNumber attnum, Datum key, GinNullCategory category, Pointer data, Size dataSize, int nipd, bool errorTooBig)
#define GIN_PAGE_FREESIZE
Definition ginfast.c:41
static int32 writeListPage(Relation index, Buffer buffer, const IndexTuple *tuples, int32 ntuples, BlockNumber rightlink)
Definition ginfast.c:59
Datum gin_clean_pending_list(PG_FUNCTION_ARGS)
Definition ginfast.c:1031
void ginInsertCleanup(GinState *ginstate, bool full_clean, bool fill_fsm, bool forceCleanup, IndexBulkDeleteResult *stats)
Definition ginfast.c:780
void ginHeapTupleFastCollect(GinState *ginstate, GinTupleCollector *collector, OffsetNumber attnum, Datum value, bool isNull, ItemPointer ht_ctid)
Definition ginfast.c:483
static void processPendingPage(BuildAccumulator *accum, KeyArray *ka, Page page, OffsetNumber startoff)
Definition ginfast.c:709
static void initKeyArray(KeyArray *keys, int32 maxvalues)
Definition ginfast.c:675
static void makeSublist(Relation index, IndexTuple *tuples, int32 ntuples, GinMetaPageData *res)
Definition ginfast.c:145
static void shiftList(Relation index, Buffer metabuffer, BlockNumber newHead, bool fill_fsm, IndexBulkDeleteResult *stats)
Definition ginfast.c:554
static void addDatum(KeyArray *keys, Datum datum, GinNullCategory category)
Definition ginfast.c:685
void ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector)
Definition ginfast.c:219
void ginEntryInsert(GinState *ginstate, OffsetNumber attnum, Datum key, GinNullCategory category, ItemPointerData *items, uint32 nitem, GinStatsData *buildStats)
Definition gininsert.c:346
OffsetNumber gintuple_get_attrnum(GinState *ginstate, IndexTuple tuple)
Definition ginutil.c:232
Buffer GinNewBuffer(Relation index)
Definition ginutil.c:306
void GinInitBuffer(Buffer b, uint32 f)
Definition ginutil.c:356
Datum * ginExtractEntries(GinState *ginstate, OffsetNumber attnum, Datum value, bool isNull, int32 *nentries, GinNullCategory **categories)
Definition ginutil.c:451
Datum gintuple_get_key(GinState *ginstate, IndexTuple tuple, GinNullCategory *category)
Definition ginutil.c:265
void initGinState(GinState *state, Relation index)
Definition ginutil.c:103
static MemoryContext opCtx
Definition ginxlog.c:22
#define XLOG_GIN_UPDATE_META_PAGE
Definition ginxlog.h:162
#define GIN_NDELETE_AT_ONCE
Definition ginxlog.h:205
#define XLOG_GIN_INSERT_LISTPAGE
Definition ginxlog.h:180
#define XLOG_GIN_DELETE_LISTPAGE
Definition ginxlog.h:197
int maintenance_work_mem
Definition globals.c:133
int work_mem
Definition globals.c:131
void index_close(Relation relation, LOCKMODE lockmode)
Definition indexam.c:177
Relation index_open(Oid relationId, LOCKMODE lockmode)
Definition indexam.c:133
void IndexFreeSpaceMapVacuum(Relation rel)
Definition indexfsm.c:71
void RecordFreeIndexPage(Relation rel, BlockNumber freeBlock)
Definition indexfsm.c:52
static struct @172 value
int i
Definition isn.c:77
bool ItemPointerEquals(const ItemPointerData *pointer1, const ItemPointerData *pointer2)
Definition itemptr.c:35
static void ItemPointerSetInvalid(ItemPointerData *pointer)
Definition itemptr.h:184
static bool ItemPointerIsValid(const ItemPointerData *pointer)
Definition itemptr.h:83
IndexTupleData * IndexTuple
Definition itup.h:53
static Size IndexTupleSize(const IndexTupleData *itup)
Definition itup.h:71
bool ConditionalLockPage(Relation relation, BlockNumber blkno, LOCKMODE lockmode)
Definition lmgr.c:526
void LockPage(Relation relation, BlockNumber blkno, LOCKMODE lockmode)
Definition lmgr.c:507
void UnlockPage(Relation relation, BlockNumber blkno, LOCKMODE lockmode)
Definition lmgr.c:542
#define ExclusiveLock
Definition lockdefs.h:42
#define RowExclusiveLock
Definition lockdefs.h:38
void MemoryContextReset(MemoryContext context)
Definition mcxt.c:403
void * palloc(Size size)
Definition mcxt.c:1387
MemoryContext CurrentMemoryContext
Definition mcxt.c:160
void MemoryContextDelete(MemoryContext context)
Definition mcxt.c:472
#define AllocSetContextCreate
Definition memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition memutils.h:160
#define AmAutoVacuumWorkerProcess()
Definition miscadmin.h:383
#define START_CRIT_SECTION()
Definition miscadmin.h:150
#define END_CRIT_SECTION()
Definition miscadmin.h:152
Oid GetUserId(void)
Definition miscinit.c:469
#define InvalidOffsetNumber
Definition off.h:26
#define OffsetNumberNext(offsetNumber)
Definition off.h:52
uint16 OffsetNumber
Definition off.h:24
#define FirstOffsetNumber
Definition off.h:27
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition palloc.h:124
@ OBJECT_INDEX
int16 attnum
static uint32 pg_nextpower2_32(uint32 num)
const void * data
uint64_t Datum
Definition postgres.h:70
unsigned int Oid
void CheckForSerializableConflictIn(Relation relation, const ItemPointerData *tid, BlockNumber blkno)
Definition predicate.c:4334
static int fb(int x)
#define RelationGetRelationName(relation)
Definition rel.h:548
#define RelationNeedsWAL(relation)
Definition rel.h:637
#define RELATION_IS_OTHER_TEMP(relation)
Definition rel.h:667
GinState * ginstate
BlockNumber tail
Definition ginblock.h:62
uint32 tailFreeSize
Definition ginblock.h:67
BlockNumber nPendingPages
Definition ginblock.h:73
int64 nPendingHeapTuples
Definition ginblock.h:74
BlockNumber head
Definition ginblock.h:61
Relation index
Definition gin_private.h:60
BlockNumber pages_deleted
Definition genam.h:88
ItemPointerData t_tid
Definition itup.h:37
Datum * keys
Definition ginfast.c:46
GinNullCategory * categories
Definition ginfast.c:47
int32 nvalues
Definition ginfast.c:48
int32 maxvalues
Definition ginfast.c:49
char data[BLCKSZ]
Definition c.h:1110
Form_pg_index rd_index
Definition rel.h:192
Form_pg_class rd_rel
Definition rel.h:111
BlockNumber rightlink
Definition ginxlog.h:187
Definition type.h:96
void vacuum_delay_point(bool is_analyze)
Definition vacuum.c:2426
bool RecoveryInProgress(void)
Definition xlog.c:6460
uint64 XLogRecPtr
Definition xlogdefs.h:21
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition xloginsert.c:478
void XLogRegisterBufData(uint8 block_id, const void *data, uint32 len)
Definition xloginsert.c:409
void XLogRegisterData(const void *data, uint32 len)
Definition xloginsert.c:368
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition xloginsert.c:245
void XLogBeginInsert(void)
Definition xloginsert.c:152
void XLogEnsureRecordSpace(int max_block_id, int ndatas)
Definition xloginsert.c:178
#define REGBUF_STANDARD
Definition xloginsert.h:35
#define REGBUF_WILL_INIT
Definition xloginsert.h:34

Typedef Documentation

◆ KeyArray

Function Documentation

◆ addDatum()

static void addDatum ( KeyArray keys,
Datum  datum,
GinNullCategory  category 
)
static

Definition at line 685 of file ginfast.c.

686{
687 if (keys->nvalues >= keys->maxvalues)
688 {
689 keys->maxvalues *= 2;
690 keys->keys = repalloc_array(keys->keys, Datum, keys->maxvalues);
692 }
693
694 keys->keys[keys->nvalues] = datum;
695 keys->categories[keys->nvalues] = category;
696 keys->nvalues++;
697}

References KeyArray::categories, KeyArray::keys, KeyArray::maxvalues, KeyArray::nvalues, and repalloc_array.

Referenced by processPendingPage().

◆ gin_clean_pending_list()

Datum gin_clean_pending_list ( PG_FUNCTION_ARGS  )

Definition at line 1031 of file ginfast.c.

1032{
1033 Oid indexoid = PG_GETARG_OID(0);
1034 Relation indexRel = index_open(indexoid, RowExclusiveLock);
1036
1037 if (RecoveryInProgress())
1038 ereport(ERROR,
1040 errmsg("recovery is in progress"),
1041 errhint("GIN pending list cannot be cleaned up during recovery.")));
1042
1043 /* Must be a GIN index */
1044 if (indexRel->rd_rel->relkind != RELKIND_INDEX ||
1045 indexRel->rd_rel->relam != GIN_AM_OID)
1046 ereport(ERROR,
1048 errmsg("\"%s\" is not a GIN index",
1049 RelationGetRelationName(indexRel))));
1050
1051 /*
1052 * Reject attempts to read non-local temporary relations; we would be
1053 * likely to get wrong data since we have no visibility into the owning
1054 * session's local buffers.
1055 */
1056 if (RELATION_IS_OTHER_TEMP(indexRel))
1057 ereport(ERROR,
1059 errmsg("cannot access temporary indexes of other sessions")));
1060
1061 /* User must own the index (comparable to privileges needed for VACUUM) */
1064 RelationGetRelationName(indexRel));
1065
1066 memset(&stats, 0, sizeof(stats));
1067
1068 /*
1069 * Can't assume anything about the content of an !indisready index. Make
1070 * those a no-op, not an error, so users can just run this function on all
1071 * indexes of the access method. Since an indisready&&!indisvalid index
1072 * is merely awaiting missed aminsert calls, we're capable of processing
1073 * it. Decline to do so, out of an abundance of caution.
1074 */
1075 if (indexRel->rd_index->indisvalid)
1076 {
1077 GinState ginstate;
1078
1079 initGinState(&ginstate, indexRel);
1080 ginInsertCleanup(&ginstate, true, true, true, &stats);
1081 }
1082 else
1085 errmsg("index \"%s\" is not valid",
1086 RelationGetRelationName(indexRel))));
1087
1088 index_close(indexRel, RowExclusiveLock);
1089
1091}

References aclcheck_error(), ACLCHECK_NOT_OWNER, DEBUG1, ereport, errcode(), errhint(), errmsg(), ERROR, fb(), GetUserId(), ginInsertCleanup(), index_close(), index_open(), initGinState(), OBJECT_INDEX, object_ownercheck(), IndexBulkDeleteResult::pages_deleted, PG_GETARG_OID, PG_RETURN_INT64, RelationData::rd_index, RelationData::rd_rel, RecoveryInProgress(), RELATION_IS_OTHER_TEMP, RelationGetRelationName, and RowExclusiveLock.

◆ ginHeapTupleFastCollect()

void ginHeapTupleFastCollect ( GinState ginstate,
GinTupleCollector collector,
OffsetNumber  attnum,
Datum  value,
bool  isNull,
ItemPointer  ht_ctid 
)

Definition at line 483 of file ginfast.c.

487{
488 Datum *entries;
489 GinNullCategory *categories;
490 int32 i,
491 nentries;
492
493 /*
494 * Extract the key values that need to be inserted in the index
495 */
496 entries = ginExtractEntries(ginstate, attnum, value, isNull,
497 &nentries, &categories);
498
499 /*
500 * Protect against integer overflow in allocation calculations
501 */
502 if (nentries < 0 ||
503 collector->ntuples + nentries > MaxAllocSize / sizeof(IndexTuple))
504 elog(ERROR, "too many entries for GIN index");
505
506 /*
507 * Allocate/reallocate memory for storing collected tuples
508 */
509 if (collector->tuples == NULL)
510 {
511 /*
512 * Determine the number of elements to allocate in the tuples array
513 * initially. Make it a power of 2 to avoid wasting memory when
514 * resizing (since palloc likes powers of 2).
515 */
516 collector->lentuples = pg_nextpower2_32(Max(16, nentries));
517 collector->tuples = palloc_array(IndexTuple, collector->lentuples);
518 }
519 else if (collector->lentuples < collector->ntuples + nentries)
520 {
521 /*
522 * Advance lentuples to the next suitable power of 2. This won't
523 * overflow, though we could get to a value that exceeds
524 * MaxAllocSize/sizeof(IndexTuple), causing an error in repalloc.
525 */
526 collector->lentuples = pg_nextpower2_32(collector->ntuples + nentries);
527 collector->tuples = repalloc_array(collector->tuples,
528 IndexTuple, collector->lentuples);
529 }
530
531 /*
532 * Build an index tuple for each key value, and add to array. In pending
533 * tuples we just stick the heap TID into t_tid.
534 */
535 for (i = 0; i < nentries; i++)
536 {
537 IndexTuple itup;
538
539 itup = GinFormTuple(ginstate, attnum, entries[i], categories[i],
540 NULL, 0, 0, true);
541 itup->t_tid = *ht_ctid;
542 collector->tuples[collector->ntuples++] = itup;
543 collector->sumsize += IndexTupleSize(itup);
544 }
545}

References attnum, elog, ERROR, fb(), ginExtractEntries(), GinFormTuple(), i, IndexTupleSize(), Max, MaxAllocSize, palloc_array, pg_nextpower2_32(), repalloc_array, IndexTupleData::t_tid, and value.

Referenced by gininsert().

◆ ginHeapTupleFastInsert()

void ginHeapTupleFastInsert ( GinState ginstate,
GinTupleCollector collector 
)

Definition at line 219 of file ginfast.c.

220{
221 Relation index = ginstate->index;
224 GinMetaPageData *metadata = NULL;
225 Buffer buffer = InvalidBuffer;
226 Page page = NULL;
228 bool separateList = false;
229 bool needCleanup = false;
230 int cleanupSize;
231 bool needWal;
232
233 if (collector->ntuples == 0)
234 return;
235
237
238 data.locator = index->rd_locator;
239 data.ntuples = 0;
240 data.newRightlink = data.prevTail = InvalidBlockNumber;
241
244
245 /*
246 * An insertion to the pending list could logically belong anywhere in the
247 * tree, so it conflicts with all serializable scans. All scans acquire a
248 * predicate lock on the metabuffer to represent that. Therefore we'll
249 * check for conflicts in, but not until we have the page locked and are
250 * ready to modify the page.
251 */
252
253 if (collector->sumsize + collector->ntuples * sizeof(ItemIdData) > GinListPageSize)
254 {
255 /*
256 * Total size is greater than one page => make sublist
257 */
258 separateList = true;
259 }
260 else
261 {
263 metadata = GinPageGetMeta(metapage);
264
265 if (metadata->head == InvalidBlockNumber ||
266 collector->sumsize + collector->ntuples * sizeof(ItemIdData) > metadata->tailFreeSize)
267 {
268 /*
269 * Pending list is empty or total size is greater than freespace
270 * on tail page => make sublist
271 *
272 * We unlock metabuffer to keep high concurrency
273 */
274 separateList = true;
276 }
277 }
278
279 if (separateList)
280 {
281 /*
282 * We should make sublist separately and append it to the tail
283 */
285
286 memset(&sublist, 0, sizeof(GinMetaPageData));
287 makeSublist(index, collector->tuples, collector->ntuples, &sublist);
288
289 /*
290 * metapage was unlocked, see above
291 */
293 metadata = GinPageGetMeta(metapage);
294
296
297 if (metadata->head == InvalidBlockNumber)
298 {
299 /*
300 * Main list is empty, so just insert sublist as main list
301 */
303
304 metadata->head = sublist.head;
305 metadata->tail = sublist.tail;
306 metadata->tailFreeSize = sublist.tailFreeSize;
307
308 metadata->nPendingPages = sublist.nPendingPages;
309 metadata->nPendingHeapTuples = sublist.nPendingHeapTuples;
310
311 if (needWal)
313 }
314 else
315 {
316 /*
317 * Merge lists
318 */
319 data.prevTail = metadata->tail;
320 data.newRightlink = sublist.head;
321
322 buffer = ReadBuffer(index, metadata->tail);
323 LockBuffer(buffer, GIN_EXCLUSIVE);
324 page = BufferGetPage(buffer);
325
326 Assert(GinPageGetOpaque(page)->rightlink == InvalidBlockNumber);
327
329
330 GinPageGetOpaque(page)->rightlink = sublist.head;
331
332 MarkBufferDirty(buffer);
333
334 metadata->tail = sublist.tail;
335 metadata->tailFreeSize = sublist.tailFreeSize;
336
337 metadata->nPendingPages += sublist.nPendingPages;
338 metadata->nPendingHeapTuples += sublist.nPendingHeapTuples;
339
340 if (needWal)
341 {
344 }
345 }
346 }
347 else
348 {
349 /*
350 * Insert into tail page. Metapage is already locked
351 */
352 OffsetNumber l,
353 off;
354 int i,
355 tupsize;
356 char *ptr;
357 char *collectordata;
358
360
361 buffer = ReadBuffer(index, metadata->tail);
362 LockBuffer(buffer, GIN_EXCLUSIVE);
363 page = BufferGetPage(buffer);
364
365 off = (PageIsEmpty(page)) ? FirstOffsetNumber :
367
368 collectordata = ptr = (char *) palloc(collector->sumsize);
369
370 data.ntuples = collector->ntuples;
371
373
374 if (needWal)
376
377 /*
378 * Increase counter of heap tuples
379 */
380 Assert(GinPageGetOpaque(page)->maxoff <= metadata->nPendingHeapTuples);
381 GinPageGetOpaque(page)->maxoff++;
382 metadata->nPendingHeapTuples++;
383
384 for (i = 0; i < collector->ntuples; i++)
385 {
386 tupsize = IndexTupleSize(collector->tuples[i]);
387 l = PageAddItem(page, collector->tuples[i], tupsize, off, false, false);
388
389 if (l == InvalidOffsetNumber)
390 elog(ERROR, "failed to add item to index page in \"%s\"",
392
393 memcpy(ptr, collector->tuples[i], tupsize);
394 ptr += tupsize;
395
396 off++;
397 }
398
399 Assert((ptr - collectordata) <= collector->sumsize);
400
401 MarkBufferDirty(buffer);
402
403 if (needWal)
404 {
407 }
408
409 metadata->tailFreeSize = PageGetExactFreeSpace(page);
410 }
411
412 /*
413 * Set pd_lower just past the end of the metadata. This is essential,
414 * because without doing so, metadata will be lost if xlog.c compresses
415 * the page. (We must do this here because pre-v11 versions of PG did not
416 * set the metapage's pd_lower correctly, so a pg_upgraded index might
417 * contain the wrong value.)
418 */
419 ((PageHeader) metapage)->pd_lower =
420 ((char *) metadata + sizeof(GinMetaPageData)) - (char *) metapage;
421
422 /*
423 * Write metabuffer, make xlog entry
424 */
426
427 if (needWal)
428 {
430
431 memcpy(&data.metadata, metadata, sizeof(GinMetaPageData));
432
435
438
439 if (buffer != InvalidBuffer)
440 {
441 PageSetLSN(page, recptr);
442 }
443 }
444
445 if (buffer != InvalidBuffer)
446 UnlockReleaseBuffer(buffer);
447
448 /*
449 * Force pending list cleanup when it becomes too long. And,
450 * ginInsertCleanup could take significant amount of time, so we prefer to
451 * call it when it can do all the work in a single collection cycle. In
452 * non-vacuum mode, it shouldn't require maintenance_work_mem, so fire it
453 * while pending list is still small enough to fit into
454 * gin_pending_list_limit.
455 *
456 * ginInsertCleanup() should not be called inside our CRIT_SECTION.
457 */
459 if (metadata->nPendingPages * GIN_PAGE_FREESIZE > cleanupSize * (Size) 1024)
460 needCleanup = true;
461
463
465
466 /*
467 * Since it could contend with concurrent cleanup process we cleanup
468 * pending list not forcibly.
469 */
470 if (needCleanup)
471 ginInsertCleanup(ginstate, false, true, false, NULL);
472}

References Assert, BufferGetPage(), CheckForSerializableConflictIn(), data, elog, END_CRIT_SECTION, ERROR, fb(), FirstOffsetNumber, GIN_EXCLUSIVE, GIN_METAPAGE_BLKNO, GIN_PAGE_FREESIZE, GIN_UNLOCK, GinGetPendingListCleanupSize, ginInsertCleanup(), GinListPageSize, GinPageGetMeta, GinPageGetOpaque, GinMetaPageData::head, i, GinState::index, IndexTupleSize(), InvalidBlockNumber, InvalidBuffer, InvalidOffsetNumber, LockBuffer(), makeSublist(), MarkBufferDirty(), GinMetaPageData::nPendingHeapTuples, GinMetaPageData::nPendingPages, OffsetNumberNext, PageAddItem, PageGetExactFreeSpace(), PageGetMaxOffsetNumber(), PageIsEmpty(), PageSetLSN(), palloc(), ReadBuffer(), REGBUF_STANDARD, REGBUF_WILL_INIT, RelationGetRelationName, RelationNeedsWAL, START_CRIT_SECTION, GinMetaPageData::tail, GinMetaPageData::tailFreeSize, UnlockReleaseBuffer(), XLOG_GIN_UPDATE_META_PAGE, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by gininsert().

◆ ginInsertCleanup()

void ginInsertCleanup ( GinState ginstate,
bool  full_clean,
bool  fill_fsm,
bool  forceCleanup,
IndexBulkDeleteResult stats 
)

Definition at line 780 of file ginfast.c.

783{
784 Relation index = ginstate->index;
786 buffer;
788 page;
789 GinMetaPageData *metadata;
791 oldCtx;
792 BuildAccumulator accum;
793 KeyArray datums;
794 BlockNumber blkno,
796 bool cleanupFinish = false;
797 bool fsm_vac = false;
798 int workMemory;
799
800 /*
801 * We would like to prevent concurrent cleanup process. For that we will
802 * lock metapage in exclusive mode using LockPage() call. Nobody other
803 * will use that lock for metapage, so we keep possibility of concurrent
804 * insertion into pending list
805 */
806
807 if (forceCleanup)
808 {
809 /*
810 * We are called from [auto]vacuum/analyze or gin_clean_pending_list()
811 * and we would like to wait concurrent cleanup to finish.
812 */
814 workMemory =
817 }
818 else
819 {
820 /*
821 * We are called from regular insert and if we see concurrent cleanup
822 * just exit in hope that concurrent process will clean up pending
823 * list.
824 */
826 return;
828 }
829
833 metadata = GinPageGetMeta(metapage);
834
835 if (metadata->head == InvalidBlockNumber)
836 {
837 /* Nothing to do */
840 return;
841 }
842
843 /*
844 * Remember a tail page to prevent infinite cleanup if other backends add
845 * new tuples faster than we can cleanup.
846 */
847 blknoFinish = metadata->tail;
848
849 /*
850 * Read and lock head of pending list
851 */
852 blkno = metadata->head;
853 buffer = ReadBuffer(index, blkno);
854 LockBuffer(buffer, GIN_SHARE);
855 page = BufferGetPage(buffer);
856
858
859 /*
860 * Initialize. All temporary space will be in opCtx
861 */
863 "GIN insert cleanup temporary context",
865
867
868 initKeyArray(&datums, 128);
869 ginInitBA(&accum);
870 accum.ginstate = ginstate;
871
872 /*
873 * At the top of this loop, we have pin and lock on the current page of
874 * the pending list. However, we'll release that before exiting the loop.
875 * Note we also have pin but not lock on the metapage.
876 */
877 for (;;)
878 {
879 Assert(!GinPageIsDeleted(page));
880
881 /*
 882 * Are we walking through the page that we remember was the tail when
 883 * we started our cleanup? If the caller asks us to clean up the whole
 884 * pending list, then ignore the old tail; we will work until the list
 885 * becomes empty.
886 */
887 if (blkno == blknoFinish && full_clean == false)
888 cleanupFinish = true;
889
890 /*
891 * read page's datums into accum
892 */
893 processPendingPage(&accum, &datums, page, FirstOffsetNumber);
894
895 vacuum_delay_point(false);
896
897 /*
898 * Is it time to flush memory to disk? Flush if we are at the end of
899 * the pending list, or if we have a full row and memory is getting
900 * full.
901 */
902 if (GinPageGetOpaque(page)->rightlink == InvalidBlockNumber ||
903 (GinPageHasFullRow(page) &&
904 accum.allocatedMemory >= workMemory * (Size) 1024))
905 {
907 uint32 nlist;
908 Datum key;
909 GinNullCategory category;
910 OffsetNumber maxoff,
911 attnum;
912
913 /*
914 * Unlock current page to increase performance. Changes of page
915 * will be checked later by comparing maxoff after completion of
916 * memory flush.
917 */
918 maxoff = PageGetMaxOffsetNumber(page);
919 LockBuffer(buffer, GIN_UNLOCK);
920
921 /*
922 * Moving collected data into regular structure can take
923 * significant amount of time - so, run it without locking pending
924 * list.
925 */
926 ginBeginBAScan(&accum);
927 while ((list = ginGetBAEntry(&accum,
928 &attnum, &key, &category, &nlist)) != NULL)
929 {
930 ginEntryInsert(ginstate, attnum, key, category,
931 list, nlist, NULL);
932 vacuum_delay_point(false);
933 }
934
935 /*
936 * Lock the whole list to remove pages
937 */
939 LockBuffer(buffer, GIN_SHARE);
940
941 Assert(!GinPageIsDeleted(page));
942
943 /*
944 * While we left the page unlocked, more stuff might have gotten
945 * added to it. If so, process those entries immediately. There
946 * shouldn't be very many, so we don't worry about the fact that
947 * we're doing this with exclusive lock. Insertion algorithm
948 * guarantees that inserted row(s) will not continue on next page.
949 * NOTE: intentionally no vacuum_delay_point in this loop.
950 */
951 if (PageGetMaxOffsetNumber(page) != maxoff)
952 {
953 ginInitBA(&accum);
954 processPendingPage(&accum, &datums, page, maxoff + 1);
955
956 ginBeginBAScan(&accum);
957 while ((list = ginGetBAEntry(&accum,
958 &attnum, &key, &category, &nlist)) != NULL)
959 ginEntryInsert(ginstate, attnum, key, category,
960 list, nlist, NULL);
961 }
962
963 /*
964 * Remember next page - it will become the new list head
965 */
966 blkno = GinPageGetOpaque(page)->rightlink;
967 UnlockReleaseBuffer(buffer); /* shiftList will do exclusive
968 * locking */
969
970 /*
971 * remove read pages from pending list, at this point all content
972 * of read pages is in regular structure
973 */
974 shiftList(index, metabuffer, blkno, fill_fsm, stats);
975
976 /* At this point, some pending pages have been freed up */
977 fsm_vac = true;
978
979 Assert(blkno == metadata->head);
981
982 /*
983 * if we removed the whole pending list or we cleanup tail (which
984 * we remembered on start our cleanup process) then just exit
985 */
986 if (blkno == InvalidBlockNumber || cleanupFinish)
987 break;
988
989 /*
990 * release memory used so far and reinit state
991 */
993 initKeyArray(&datums, datums.maxvalues);
994 ginInitBA(&accum);
995 }
996 else
997 {
998 blkno = GinPageGetOpaque(page)->rightlink;
999 UnlockReleaseBuffer(buffer);
1000 }
1001
1002 /*
1003 * Read next page in pending list
1004 */
1005 vacuum_delay_point(false);
1006 buffer = ReadBuffer(index, blkno);
1007 LockBuffer(buffer, GIN_SHARE);
1008 page = BufferGetPage(buffer);
1009 }
1010
1013
1014 /*
1015 * As pending list pages can have a high churn rate, it is desirable to
1016 * recycle them immediately to the FreeSpaceMap when ordinary backends
1017 * clean the list.
1018 */
1019 if (fsm_vac && fill_fsm)
1021
1022 /* Clean up temporary space */
1025}

References BuildAccumulator::allocatedMemory, ALLOCSET_DEFAULT_SIZES, AllocSetContextCreate, AmAutoVacuumWorkerProcess, Assert, attnum, autovacuum_work_mem, BufferGetPage(), ConditionalLockPage(), CurrentMemoryContext, ExclusiveLock, fb(), FirstOffsetNumber, GIN_EXCLUSIVE, GIN_METAPAGE_BLKNO, GIN_SHARE, GIN_UNLOCK, ginBeginBAScan(), ginEntryInsert(), ginGetBAEntry(), ginInitBA(), GinPageGetMeta, GinPageGetOpaque, GinPageHasFullRow, GinPageIsDeleted, BuildAccumulator::ginstate, GinMetaPageData::head, GinState::index, IndexFreeSpaceMapVacuum(), initKeyArray(), InvalidBlockNumber, LockBuffer(), LockPage(), maintenance_work_mem, KeyArray::maxvalues, MemoryContextDelete(), MemoryContextReset(), MemoryContextSwitchTo(), opCtx, PageGetMaxOffsetNumber(), processPendingPage(), ReadBuffer(), ReleaseBuffer(), shiftList(), GinMetaPageData::tail, UnlockPage(), UnlockReleaseBuffer(), vacuum_delay_point(), and work_mem.

Referenced by gin_clean_pending_list(), ginbulkdelete(), ginHeapTupleFastInsert(), and ginvacuumcleanup().

◆ initKeyArray()

static void initKeyArray ( KeyArray keys,
int32  maxvalues 
)
static

Definition at line 675 of file ginfast.c.

676{
677 keys->keys = palloc_array(Datum, maxvalues);
678 keys->categories = palloc_array(GinNullCategory, maxvalues);
679 keys->nvalues = 0;
680 keys->maxvalues = maxvalues;
681}

References KeyArray::categories, KeyArray::keys, KeyArray::maxvalues, KeyArray::nvalues, and palloc_array.

Referenced by ginInsertCleanup().

◆ makeSublist()

static void makeSublist ( Relation  index,
IndexTuple tuples,
int32  ntuples,
GinMetaPageData res 
)
static

Definition at line 145 of file ginfast.c.

147{
150 int i,
151 size = 0,
152 tupsize;
153 int startTuple = 0;
154
155 Assert(ntuples > 0);
156
157 /*
158 * Split tuples into pages
159 */
160 for (i = 0; i < ntuples; i++)
161 {
163 {
165
167 {
168 res->nPendingPages++;
170 tuples + startTuple,
171 i - startTuple,
173 }
174 else
175 {
177 }
178
180 startTuple = i;
181 size = 0;
182 }
183
184 tupsize = MAXALIGN(IndexTupleSize(tuples[i])) + sizeof(ItemIdData);
185
186 if (size + tupsize > GinListPageSize)
187 {
188 /* won't fit, force a new page and reprocess */
189 i--;
191 }
192 else
193 {
194 size += tupsize;
195 }
196 }
197
198 /*
199 * Write last page
200 */
203 tuples + startTuple,
204 ntuples - startTuple,
206 res->nPendingPages++;
207 /* that was only one heap tuple */
208 res->nPendingHeapTuples = 1;
209}

References Assert, BufferGetBlockNumber(), fb(), GinListPageSize, GinNewBuffer(), GinMetaPageData::head, i, IndexTupleSize(), InvalidBlockNumber, InvalidBuffer, MAXALIGN, GinMetaPageData::nPendingHeapTuples, GinMetaPageData::nPendingPages, GinMetaPageData::tail, GinMetaPageData::tailFreeSize, and writeListPage().

Referenced by ginHeapTupleFastInsert().

◆ processPendingPage()

static void processPendingPage ( BuildAccumulator accum,
KeyArray ka,
Page  page,
OffsetNumber  startoff 
)
static

Definition at line 709 of file ginfast.c.

711{
714 maxoff;
715 OffsetNumber attrnum;
716
717 /* reset *ka to empty */
718 ka->nvalues = 0;
719
720 maxoff = PageGetMaxOffsetNumber(page);
721 Assert(maxoff >= FirstOffsetNumber);
723 attrnum = 0;
724
725 for (i = startoff; i <= maxoff; i = OffsetNumberNext(i))
726 {
727 IndexTuple itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, i));
731
732 /* Check for change of heap TID or attnum */
734
736 {
737 heapptr = itup->t_tid;
738 attrnum = curattnum;
739 }
740 else if (!(ItemPointerEquals(&heapptr, &itup->t_tid) &&
741 curattnum == attrnum))
742 {
743 /*
744 * ginInsertBAEntries can insert several datums per call, but only
745 * for one heap tuple and one column. So call it at a boundary,
746 * and reset ka.
747 */
748 ginInsertBAEntries(accum, &heapptr, attrnum,
749 ka->keys, ka->categories, ka->nvalues);
750 ka->nvalues = 0;
751 heapptr = itup->t_tid;
752 attrnum = curattnum;
753 }
754
755 /* Add key to KeyArray */
758 }
759
760 /* Dump out all remaining keys */
761 ginInsertBAEntries(accum, &heapptr, attrnum,
762 ka->keys, ka->categories, ka->nvalues);
763}

References addDatum(), Assert, fb(), FirstOffsetNumber, ginInsertBAEntries(), BuildAccumulator::ginstate, gintuple_get_attrnum(), gintuple_get_key(), i, ItemPointerEquals(), ItemPointerIsValid(), ItemPointerSetInvalid(), OffsetNumberNext, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), and IndexTupleData::t_tid.

Referenced by ginInsertCleanup().

◆ shiftList()

static void shiftList ( Relation  index,
Buffer  metabuffer,
BlockNumber  newHead,
bool  fill_fsm,
IndexBulkDeleteResult stats 
)
static

Definition at line 554 of file ginfast.c.

556{
558 GinMetaPageData *metadata;
560
562 metadata = GinPageGetMeta(metapage);
563 blknoToDelete = metadata->head;
564
565 do
566 {
567 Page page;
568 int i;
573
574 data.ndeleted = 0;
575 while (data.ndeleted < GIN_NDELETE_AT_ONCE && blknoToDelete != newHead)
576 {
577 freespace[data.ndeleted] = blknoToDelete;
578 buffers[data.ndeleted] = ReadBuffer(index, blknoToDelete);
579 LockBuffer(buffers[data.ndeleted], GIN_EXCLUSIVE);
580 page = BufferGetPage(buffers[data.ndeleted]);
581
582 data.ndeleted++;
583
584 Assert(!GinPageIsDeleted(page));
585
586 nDeletedHeapTuples += GinPageGetOpaque(page)->maxoff;
587 blknoToDelete = GinPageGetOpaque(page)->rightlink;
588 }
589
590 if (stats)
591 stats->pages_deleted += data.ndeleted;
592
593 /*
594 * This operation touches an unusually large number of pages, so
595 * prepare the XLogInsert machinery for that before entering the
596 * critical section.
597 */
599 XLogEnsureRecordSpace(data.ndeleted, 0);
600
602
603 metadata->head = blknoToDelete;
604
605 Assert(metadata->nPendingPages >= data.ndeleted);
606 metadata->nPendingPages -= data.ndeleted;
609
611 {
612 metadata->tail = InvalidBlockNumber;
613 metadata->tailFreeSize = 0;
614 metadata->nPendingPages = 0;
615 metadata->nPendingHeapTuples = 0;
616 }
617
618 /*
619 * Set pd_lower just past the end of the metadata. This is essential,
620 * because without doing so, metadata will be lost if xlog.c
621 * compresses the page. (We must do this here because pre-v11
622 * versions of PG did not set the metapage's pd_lower correctly, so a
623 * pg_upgraded index might contain the wrong value.)
624 */
625 ((PageHeader) metapage)->pd_lower =
626 ((char *) metadata + sizeof(GinMetaPageData)) - (char *) metapage;
627
629
630 for (i = 0; i < data.ndeleted; i++)
631 {
632 page = BufferGetPage(buffers[i]);
633 GinPageGetOpaque(page)->flags = GIN_DELETED;
634 MarkBufferDirty(buffers[i]);
635 }
636
638 {
640
644 for (i = 0; i < data.ndeleted; i++)
645 XLogRegisterBuffer(i + 1, buffers[i], REGBUF_WILL_INIT);
646
647 memcpy(&data.metadata, metadata, sizeof(GinMetaPageData));
648
650 sizeof(ginxlogDeleteListPages));
651
654
655 for (i = 0; i < data.ndeleted; i++)
656 {
657 page = BufferGetPage(buffers[i]);
658 PageSetLSN(page, recptr);
659 }
660 }
661
662 for (i = 0; i < data.ndeleted; i++)
663 UnlockReleaseBuffer(buffers[i]);
664
666
667 for (i = 0; fill_fsm && i < data.ndeleted; i++)
668 RecordFreeIndexPage(index, freespace[i]);
669
670 } while (blknoToDelete != newHead);
671}

References Assert, BufferGetPage(), data, END_CRIT_SECTION, fb(), GIN_DELETED, GIN_EXCLUSIVE, GIN_NDELETE_AT_ONCE, GinPageGetMeta, GinPageGetOpaque, GinPageIsDeleted, GinMetaPageData::head, i, InvalidBlockNumber, LockBuffer(), MarkBufferDirty(), ginxlogDeleteListPages::ndeleted, GinMetaPageData::nPendingHeapTuples, GinMetaPageData::nPendingPages, IndexBulkDeleteResult::pages_deleted, PageSetLSN(), ReadBuffer(), RecordFreeIndexPage(), REGBUF_STANDARD, REGBUF_WILL_INIT, RelationNeedsWAL, START_CRIT_SECTION, GinMetaPageData::tail, GinMetaPageData::tailFreeSize, UnlockReleaseBuffer(), XLOG_GIN_DELETE_LISTPAGE, XLogBeginInsert(), XLogEnsureRecordSpace(), XLogInsert(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by ginInsertCleanup().

◆ writeListPage()

static int32 writeListPage ( Relation  index,
Buffer  buffer,
const IndexTuple tuples,
int32  ntuples,
BlockNumber  rightlink 
)
static

Definition at line 59 of file ginfast.c.

61{
62 Page page = BufferGetPage(buffer);
63 int32 i,
65 size = 0;
67 off;
68 PGAlignedBlock workspace;
69 char *ptr;
70
72
73 GinInitBuffer(buffer, GIN_LIST);
74
76 ptr = workspace.data;
77
78 for (i = 0; i < ntuples; i++)
79 {
80 int this_size = IndexTupleSize(tuples[i]);
81
82 memcpy(ptr, tuples[i], this_size);
83 ptr += this_size;
84 size += this_size;
85
86 l = PageAddItem(page, tuples[i], this_size, off, false, false);
87
88 if (l == InvalidOffsetNumber)
89 elog(ERROR, "failed to add item to index page in \"%s\"",
91
92 off++;
93 }
94
95 Assert(size <= BLCKSZ); /* else we overran workspace */
96
97 GinPageGetOpaque(page)->rightlink = rightlink;
98
99 /*
100 * tail page may contain only whole row(s) or final part of row placed on
101 * previous pages (a "row" here meaning all the index tuples generated for
102 * one heap tuple)
103 */
104 if (rightlink == InvalidBlockNumber)
105 {
106 GinPageSetFullRow(page);
107 GinPageGetOpaque(page)->maxoff = 1;
108 }
109 else
110 {
111 GinPageGetOpaque(page)->maxoff = 0;
112 }
113
114 MarkBufferDirty(buffer);
115
117 {
120
121 data.rightlink = rightlink;
122 data.ntuples = ntuples;
123
126
128 XLogRegisterBufData(0, workspace.data, size);
129
131 PageSetLSN(page, recptr);
132 }
133
134 /* get free space before releasing buffer */
136
137 UnlockReleaseBuffer(buffer);
138
140
141 return freesize;
142}

References Assert, BufferGetPage(), PGAlignedBlock::data, data, elog, END_CRIT_SECTION, ERROR, fb(), FirstOffsetNumber, GIN_LIST, GinInitBuffer(), GinPageGetOpaque, GinPageSetFullRow, i, IndexTupleSize(), InvalidBlockNumber, InvalidOffsetNumber, MarkBufferDirty(), PageAddItem, PageGetExactFreeSpace(), PageSetLSN(), REGBUF_WILL_INIT, RelationGetRelationName, RelationNeedsWAL, ginxlogInsertListPage::rightlink, START_CRIT_SECTION, UnlockReleaseBuffer(), XLOG_GIN_INSERT_LISTPAGE, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by makeSublist().

Variable Documentation

◆ gin_pending_list_limit

int gin_pending_list_limit = 0

Definition at line 39 of file ginfast.c.