PostgreSQL Source Code (git master)
localbuf.c
1/*-------------------------------------------------------------------------
2 *
3 * localbuf.c
4 * local buffer manager. Fast buffer manager for temporary tables,
5 * which never need to be WAL-logged or checkpointed, etc.
6 *
7 * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
8 * Portions Copyright (c) 1994-5, Regents of the University of California
9 *
10 *
11 * IDENTIFICATION
12 * src/backend/storage/buffer/localbuf.c
13 *
14 *-------------------------------------------------------------------------
15 */
16#include "postgres.h"
17
18#include "access/parallel.h"
19#include "executor/instrument.h"
20#include "pgstat.h"
22#include "storage/bufmgr.h"
23#include "storage/fd.h"
24#include "utils/guc_hooks.h"
25#include "utils/memutils.h"
26#include "utils/resowner.h"
27
28
29/*#define LBDEBUG*/
30
31/* entry for buffer lookup hashtable */
32typedef struct
33{
34 BufferTag key; /* Tag of a disk page */
35 int id; /* Associated local buffer's index */
36} LocalBufferLookupEnt;
37
38/* Note: this macro only works on local buffers, not shared ones! */
39#define LocalBufHdrGetBlock(bufHdr) \
40 LocalBufferBlockPointers[-((bufHdr)->buf_id + 2)]
41
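To make the macro concrete: local buffer slot i is published to callers as Buffer -(i + 1) and stores buf_id = -(i + 2) in its header, so the expression above recovers the array index from the header. A minimal standalone sketch of that arithmetic (hypothetical helper names, not part of localbuf.c):

#include <assert.h>
#include <stdio.h>

/* Hypothetical helpers mirroring the numbering convention described above. */
static int buf_id_from_index(int i)   { return -i - 2; }   /* header buf_id    */
static int buffer_from_index(int i)   { return -i - 1; }   /* external Buffer  */
static int index_from_buffer(int buf) { return -buf - 1; } /* back to the slot */
static int block_index_from_buf_id(int buf_id) { return -(buf_id + 2); }

int
main(void)
{
    for (int i = 0; i < 4; i++)
    {
        int buf_id = buf_id_from_index(i);
        int buffer = buffer_from_index(i);

        /* BufferDescriptorGetBuffer adds 1 to buf_id, giving Buffer -(i+1) */
        assert(buf_id + 1 == buffer);
        /* LocalBufHdrGetBlock recovers the block-pointer slot from buf_id */
        assert(block_index_from_buf_id(buf_id) == i);
        /* callers map a Buffer back to the local arrays with -buffer - 1 */
        assert(index_from_buffer(buffer) == i);

        printf("slot %d: buf_id=%d Buffer=%d\n", i, buf_id, buffer);
    }
    return 0;
}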
42int NLocBuffer = 0; /* until buffers are initialized */
43
44BufferDesc *LocalBufferDescriptors = NULL;
45Block *LocalBufferBlockPointers = NULL;
46int32 *LocalRefCount = NULL;
47
48static int nextFreeLocalBufId = 0;
49
50static HTAB *LocalBufHash = NULL;
51
52/* number of local buffers pinned at least once */
53static int NLocalPinnedBuffers = 0;
54
55
56static void InitLocalBuffers(void);
57static Block GetLocalBufferStorage(void);
58static Buffer GetLocalVictimBuffer(void);
59
60
61/*
62 * PrefetchLocalBuffer -
63 * initiate asynchronous read of a block of a relation
64 *
65 * Do PrefetchBuffer's work for temporary relations.
66 * No-op if prefetching isn't compiled in.
67 */
68PrefetchBufferResult
69PrefetchLocalBuffer(SMgrRelation smgr, ForkNumber forkNum,
70 BlockNumber blockNum)
71{
72 PrefetchBufferResult result = {InvalidBuffer, false};
73 BufferTag newTag; /* identity of requested block */
74 LocalBufferLookupEnt *hresult;
75
76 InitBufferTag(&newTag, &smgr->smgr_rlocator.locator, forkNum, blockNum);
77
78 /* Initialize local buffers if first request in this session */
79 if (LocalBufHash == NULL)
80 InitLocalBuffers();
81
82 /* See if the desired buffer already exists */
83 hresult = (LocalBufferLookupEnt *)
84 hash_search(LocalBufHash, &newTag, HASH_FIND, NULL);
85
86 if (hresult)
87 {
88 /* Yes, so nothing to do */
89 result.recent_buffer = -hresult->id - 1;
90 }
91 else
92 {
93#ifdef USE_PREFETCH
94 /* Not in buffers, so initiate prefetch */
95 if ((io_direct_flags & IO_DIRECT_DATA) == 0 &&
96 smgrprefetch(smgr, forkNum, blockNum, 1))
97 {
98 result.initiated_io = true;
99 }
100#endif /* USE_PREFETCH */
101 }
102
103 return result;
104}
105
106
107/*
108 * LocalBufferAlloc -
109 * Find or create a local buffer for the given page of the given relation.
110 *
111 * API is similar to bufmgr.c's BufferAlloc, except that we do not need to do
112 * any locking since this is all local. We support only default access
113 * strategy (hence, usage_count is always advanced).
114 */
115BufferDesc *
116LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum,
117 bool *foundPtr)
118{
119 BufferTag newTag; /* identity of requested block */
120 LocalBufferLookupEnt *hresult;
121 BufferDesc *bufHdr;
122 Buffer victim_buffer;
123 int bufid;
124 bool found;
125
126 InitBufferTag(&newTag, &smgr->smgr_rlocator.locator, forkNum, blockNum);
127
128 /* Initialize local buffers if first request in this session */
129 if (LocalBufHash == NULL)
130 InitLocalBuffers();
131
132 ResourceOwnerEnlarge(CurrentResourceOwner);
133
134 /* See if the desired buffer already exists */
135 hresult = (LocalBufferLookupEnt *)
136 hash_search(LocalBufHash, &newTag, HASH_FIND, NULL);
137
138 if (hresult)
139 {
140 bufid = hresult->id;
141 bufHdr = GetLocalBufferDescriptor(bufid);
142 Assert(BufferTagsEqual(&bufHdr->tag, &newTag));
143
144 *foundPtr = PinLocalBuffer(bufHdr, true);
145 }
146 else
147 {
148 uint32 buf_state;
149
150 victim_buffer = GetLocalVictimBuffer();
151 bufid = -victim_buffer - 1;
152 bufHdr = GetLocalBufferDescriptor(bufid);
153
154 hresult = (LocalBufferLookupEnt *)
155 hash_search(LocalBufHash, &newTag, HASH_ENTER, &found);
156 if (found) /* shouldn't happen */
157 elog(ERROR, "local buffer hash table corrupted");
158 hresult->id = bufid;
159
160 /*
161 * it's all ours now.
162 */
163 bufHdr->tag = newTag;
164
165 buf_state = pg_atomic_read_u32(&bufHdr->state);
166 buf_state &= ~(BUF_FLAG_MASK | BUF_USAGECOUNT_MASK);
167 buf_state |= BM_TAG_VALID | BUF_USAGECOUNT_ONE;
168 pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
169
170 *foundPtr = false;
171 }
172
173 return bufHdr;
174}
175
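The lookup above hinges on hash_search's HASH_ENTER mode, which returns the existing entry when the tag is already present and otherwise creates a fresh one, reporting which case happened through found. A rough standalone illustration of that find-or-enter idiom with a toy linear-probing table (hypothetical names; the real table is a dynahash keyed by BufferTag):

#include <stdbool.h>
#include <stdio.h>

#define TABLE_SIZE 8

typedef struct
{
    bool        used;
    unsigned    key;            /* stands in for a BufferTag */
    int         id;             /* stands in for the local buffer index */
} ToyEnt;

static ToyEnt table[TABLE_SIZE];

/* Find the entry for key, creating it if needed; report whether it existed. */
static ToyEnt *
toy_enter(unsigned key, bool *found)
{
    unsigned    slot = key % TABLE_SIZE;

    while (table[slot].used && table[slot].key != key)
        slot = (slot + 1) % TABLE_SIZE;     /* linear probing */

    *found = table[slot].used;
    if (!*found)
    {
        table[slot].used = true;
        table[slot].key = key;
        table[slot].id = -1;    /* caller fills this in, as LocalBufferAlloc does */
    }
    return &table[slot];
}

int
main(void)
{
    bool        found;
    ToyEnt     *ent = toy_enter(42, &found);

    ent->id = 3;
    printf("first lookup: found=%d id=%d\n", found, ent->id);   /* found=0 */
    ent = toy_enter(42, &found);
    printf("second lookup: found=%d id=%d\n", found, ent->id);  /* found=1 id=3 */
    return 0;
}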
176static Buffer
177GetLocalVictimBuffer(void)
178{
179 int victim_bufid;
180 int trycounter;
181 uint32 buf_state;
182 BufferDesc *bufHdr;
183
184 ResourceOwnerEnlarge(CurrentResourceOwner);
185
186 /*
187 * Need to get a new buffer. We use a clock sweep algorithm (essentially
188 * the same as what freelist.c does now...)
189 */
190 trycounter = NLocBuffer;
191 for (;;)
192 {
193 victim_bufid = nextFreeLocalBufId;
194
195 if (++nextFreeLocalBufId >= NLocBuffer)
196 nextFreeLocalBufId = 0;
197
198 bufHdr = GetLocalBufferDescriptor(victim_bufid);
199
200 if (LocalRefCount[victim_bufid] == 0)
201 {
202 buf_state = pg_atomic_read_u32(&bufHdr->state);
203
204 if (BUF_STATE_GET_USAGECOUNT(buf_state) > 0)
205 {
206 buf_state -= BUF_USAGECOUNT_ONE;
207 pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
208 trycounter = NLocBuffer;
209 }
210 else
211 {
212 /* Found a usable buffer */
213 PinLocalBuffer(bufHdr, false);
214 break;
215 }
216 }
217 else if (--trycounter == 0)
218 ereport(ERROR,
219 (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
220 errmsg("no empty local buffer available")));
221 }
222
223 /*
224 * lazy memory allocation: allocate space on first use of a buffer.
225 */
226 if (LocalBufHdrGetBlock(bufHdr) == NULL)
227 {
228 /* Set pointer for use by BufferGetBlock() macro */
229 LocalBufHdrGetBlock(bufHdr) = GetLocalBufferStorage();
230 }
231
232 /*
233 * this buffer is not referenced but it might still be dirty. if that's
234 * the case, write it out before reusing it!
235 */
236 if (buf_state & BM_DIRTY)
237 {
238 instr_time io_start;
239 SMgrRelation oreln;
240 Page localpage = (char *) LocalBufHdrGetBlock(bufHdr);
241
242 /* Find smgr relation for buffer */
243 oreln = smgropen(BufTagGetRelFileLocator(&bufHdr->tag), MyProcNumber);
244
245 PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
246
247 io_start = pgstat_prepare_io_time(track_io_timing);
248
249 /* And write... */
250 smgrwrite(oreln,
251 BufTagGetForkNum(&bufHdr->tag),
252 bufHdr->tag.blockNum,
253 localpage,
254 false);
255
256 /* Temporary table I/O does not use Buffer Access Strategies */
257 pgstat_count_io_op_time(IOOBJECT_TEMP_RELATION, IOCONTEXT_NORMAL,
258 IOOP_WRITE, io_start, 1);
259
260 /* Mark not-dirty now in case we error out below */
261 buf_state &= ~BM_DIRTY;
262 pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
263
264 pgBufferUsage.local_blks_written++;
265 }
266
267 /*
268 * Remove the victim buffer from the hashtable and mark as invalid.
269 */
270 if (buf_state & BM_TAG_VALID)
271 {
272 LocalBufferLookupEnt *hresult;
273
274 hresult = (LocalBufferLookupEnt *)
275 hash_search(LocalBufHash, &bufHdr->tag, HASH_REMOVE, NULL);
276 if (!hresult) /* shouldn't happen */
277 elog(ERROR, "local buffer hash table corrupted");
278 /* mark buffer invalid just in case hash insert fails */
279 ClearBufferTag(&bufHdr->tag);
280 buf_state &= ~(BUF_FLAG_MASK | BUF_USAGECOUNT_MASK);
281 pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
282 pgstat_count_io_op(IOOBJECT_TEMP_RELATION, IOCONTEXT_NORMAL, IOOP_EVICT);
283 }
284
285 return BufferDescriptorGetBuffer(bufHdr);
286}
287
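The victim search above is a clock sweep: as the hand passes an unpinned buffer its usage count is decremented, a buffer is chosen once its count reaches zero, and if a full pass finds only pinned buffers the search fails. A simplified standalone sketch under those assumptions (toy arrays rather than the real BufferDesc state word):

#include <stdio.h>

#define NBUFFERS 4

static int  usage_count[NBUFFERS] = {2, 0, 1, 3};
static int  ref_count[NBUFFERS]   = {0, 1, 0, 0};   /* buffer 1 is pinned */
static int  next_victim = 0;

/* Returns a victim buffer index, or -1 if every buffer stays pinned. */
static int
clock_sweep(void)
{
    int         tries = NBUFFERS;

    for (;;)
    {
        int         candidate = next_victim;

        if (++next_victim >= NBUFFERS)
            next_victim = 0;

        if (ref_count[candidate] == 0)
        {
            if (usage_count[candidate] > 0)
            {
                usage_count[candidate]--;   /* give it another chance */
                tries = NBUFFERS;           /* progress was made; reset */
            }
            else
                return candidate;           /* unpinned and unused: victim */
        }
        else if (--tries == 0)
            return -1;                      /* everything is pinned */
    }
}

int
main(void)
{
    printf("victim: %d\n", clock_sweep());  /* prints "victim: 2" for this data */
    return 0;
}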
288/* see LimitAdditionalPins() */
289void
290LimitAdditionalLocalPins(uint32 *additional_pins)
291{
292 uint32 max_pins;
293
294 if (*additional_pins <= 1)
295 return;
296
297 /*
298 * In contrast to LimitAdditionalPins() other backends don't play a role
299 * here. We can allow up to NLocBuffer pins in total, but it might not be
300 * initialized yet so read num_temp_buffers.
301 */
302 max_pins = num_temp_buffers - NLocalPinnedBuffers;
303
304 if (*additional_pins >= max_pins)
305 *additional_pins = max_pins;
306}
307
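A quick standalone illustration of the clamp performed above, with hypothetical stand-ins for num_temp_buffers and NLocalPinnedBuffers:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for num_temp_buffers and NLocalPinnedBuffers. */
static int  temp_buffers = 1024;
static int  pinned_already = 1000;

static void
limit_additional_pins(uint32_t *additional_pins)
{
    uint32_t    max_pins;

    if (*additional_pins <= 1)
        return;                 /* a single pin is always allowed */

    max_pins = temp_buffers - pinned_already;
    if (*additional_pins >= max_pins)
        *additional_pins = max_pins;
}

int
main(void)
{
    uint32_t    want = 64;

    limit_additional_pins(&want);
    printf("granted %u additional pins\n", want);   /* 24 with the values above */
    return 0;
}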
308/*
309 * Implementation of ExtendBufferedRelBy() and ExtendBufferedRelTo() for
310 * temporary buffers.
311 */
312BlockNumber
313ExtendBufferedRelLocal(BufferManagerRelation bmr,
314 ForkNumber fork,
315 uint32 flags,
316 uint32 extend_by,
317 BlockNumber extend_upto,
318 Buffer *buffers,
319 uint32 *extended_by)
320{
321 BlockNumber first_block;
322 instr_time io_start;
323
324 /* Initialize local buffers if first request in this session */
325 if (LocalBufHash == NULL)
326 InitLocalBuffers();
327
328 LimitAdditionalLocalPins(&extend_by);
329
330 for (uint32 i = 0; i < extend_by; i++)
331 {
332 BufferDesc *buf_hdr;
333 Block buf_block;
334
335 buffers[i] = GetLocalVictimBuffer();
336 buf_hdr = GetLocalBufferDescriptor(-buffers[i] - 1);
337 buf_block = LocalBufHdrGetBlock(buf_hdr);
338
339 /* new buffers are zero-filled */
340 MemSet((char *) buf_block, 0, BLCKSZ);
341 }
342
343 first_block = smgrnblocks(bmr.smgr, fork);
344
345 if (extend_upto != InvalidBlockNumber)
346 {
347 /*
348 * In contrast to shared relations, nothing could change the relation
349 * size concurrently. Thus we shouldn't end up finding that we don't
350 * need to do anything.
351 */
352 Assert(first_block <= extend_upto);
353
354 Assert((uint64) first_block + extend_by <= extend_upto);
355 }
356
357 /* Fail if relation is already at maximum possible length */
358 if ((uint64) first_block + extend_by >= MaxBlockNumber)
359 ereport(ERROR,
360 (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
361 errmsg("cannot extend relation %s beyond %u blocks",
362 relpath(bmr.smgr->smgr_rlocator, fork),
363 MaxBlockNumber)));
364
365 for (uint32 i = 0; i < extend_by; i++)
366 {
367 int victim_buf_id;
368 BufferDesc *victim_buf_hdr;
369 BufferTag tag;
370 LocalBufferLookupEnt *hresult;
371 bool found;
372
373 victim_buf_id = -buffers[i] - 1;
374 victim_buf_hdr = GetLocalBufferDescriptor(victim_buf_id);
375
376 /* in case we need to pin an existing buffer below */
377 ResourceOwnerEnlarge(CurrentResourceOwner);
378
379 InitBufferTag(&tag, &bmr.smgr->smgr_rlocator.locator, fork, first_block + i);
380
381 hresult = (LocalBufferLookupEnt *)
382 hash_search(LocalBufHash, &tag, HASH_ENTER, &found);
383 if (found)
384 {
385 BufferDesc *existing_hdr;
386 uint32 buf_state;
387
388 UnpinLocalBuffer(BufferDescriptorGetBuffer(victim_buf_hdr));
389
390 existing_hdr = GetLocalBufferDescriptor(hresult->id);
391 PinLocalBuffer(existing_hdr, false);
392 buffers[i] = BufferDescriptorGetBuffer(existing_hdr);
393
394 buf_state = pg_atomic_read_u32(&existing_hdr->state);
395 Assert(buf_state & BM_TAG_VALID);
396 Assert(!(buf_state & BM_DIRTY));
397 buf_state &= ~BM_VALID;
398 pg_atomic_unlocked_write_u32(&existing_hdr->state, buf_state);
399 }
400 else
401 {
402 uint32 buf_state = pg_atomic_read_u32(&victim_buf_hdr->state);
403
404 Assert(!(buf_state & (BM_VALID | BM_TAG_VALID | BM_DIRTY | BM_JUST_DIRTIED)));
405
406 victim_buf_hdr->tag = tag;
407
408 buf_state |= BM_TAG_VALID | BUF_USAGECOUNT_ONE;
409
410 pg_atomic_unlocked_write_u32(&victim_buf_hdr->state, buf_state);
411
412 hresult->id = victim_buf_id;
413 }
414 }
415
416 io_start = pgstat_prepare_io_time(track_io_timing);
417
418 /* actually extend relation */
419 smgrzeroextend(bmr.smgr, fork, first_block, extend_by, false);
420
421 pgstat_count_io_op_time(IOOBJECT_TEMP_RELATION, IOCONTEXT_NORMAL, IOOP_EXTEND,
422 io_start, extend_by);
423
424 for (uint32 i = 0; i < extend_by; i++)
425 {
426 Buffer buf = buffers[i];
427 BufferDesc *buf_hdr;
428 uint32 buf_state;
429
430 buf_hdr = GetLocalBufferDescriptor(-buf - 1);
431
432 buf_state = pg_atomic_read_u32(&buf_hdr->state);
433 buf_state |= BM_VALID;
434 pg_atomic_unlocked_write_u32(&buf_hdr->state, buf_state);
435 }
436
437 *extended_by = extend_by;
438
439 pgBufferUsage.local_blks_written += extend_by;
440
441 return first_block;
442}
443
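One detail worth noting in the function above is the overflow guard: first_block is cast to uint64 before adding extend_by, because a 32-bit sum could wrap past MaxBlockNumber and slip through the check. A small standalone demonstration with illustrative values:

#include <stdint.h>
#include <stdio.h>

#define MAX_BLOCK_NUMBER 0xFFFFFFFEU    /* MaxBlockNumber from block.h */

int
main(void)
{
    uint32_t    first_block = 0xFFFFFFF0U;  /* already near the limit */
    uint32_t    extend_by = 32;

    /* a 32-bit addition wraps around and the guard would not fire */
    uint32_t    wrong = first_block + extend_by;

    /* the cast used in ExtendBufferedRelLocal keeps the true value */
    uint64_t    right = (uint64_t) first_block + extend_by;

    printf("32-bit sum: %u (>= limit? %d)\n",
           (unsigned) wrong, wrong >= MAX_BLOCK_NUMBER);
    printf("64-bit sum: %llu (>= limit? %d)\n",
           (unsigned long long) right, right >= MAX_BLOCK_NUMBER);
    return 0;
}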
444/*
445 * MarkLocalBufferDirty -
446 * mark a local buffer dirty
447 */
448void
449MarkLocalBufferDirty(Buffer buffer)
450{
451 int bufid;
452 BufferDesc *bufHdr;
453 uint32 buf_state;
454
455 Assert(BufferIsLocal(buffer));
456
457#ifdef LBDEBUG
458 fprintf(stderr, "LB DIRTY %d\n", buffer);
459#endif
460
461 bufid = -buffer - 1;
462
463 Assert(LocalRefCount[bufid] > 0);
464
465 bufHdr = GetLocalBufferDescriptor(bufid);
466
467 buf_state = pg_atomic_read_u32(&bufHdr->state);
468
469 if (!(buf_state & BM_DIRTY))
470 pgBufferUsage.local_blks_dirtied++;
471
472 buf_state |= BM_DIRTY;
473
474 pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
475}
476
477/*
478 * DropRelationLocalBuffers
479 * This function removes from the buffer pool all the pages of the
480 * specified relation that have block numbers >= firstDelBlock.
481 * (In particular, with firstDelBlock = 0, all pages are removed.)
482 * Dirty pages are simply dropped, without bothering to write them
483 * out first. Therefore, this is NOT rollback-able, and so should be
484 * used only with extreme caution!
485 *
486 * See DropRelationBuffers in bufmgr.c for more notes.
487 */
488void
489DropRelationLocalBuffers(RelFileLocator rlocator, ForkNumber forkNum,
490 BlockNumber firstDelBlock)
491{
492 int i;
493
494 for (i = 0; i < NLocBuffer; i++)
495 {
496 BufferDesc *bufHdr = GetLocalBufferDescriptor(i);
497 LocalBufferLookupEnt *hresult;
498 uint32 buf_state;
499
500 buf_state = pg_atomic_read_u32(&bufHdr->state);
501
502 if ((buf_state & BM_TAG_VALID) &&
503 BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator) &&
504 BufTagGetForkNum(&bufHdr->tag) == forkNum &&
505 bufHdr->tag.blockNum >= firstDelBlock)
506 {
507 if (LocalRefCount[i] != 0)
508 elog(ERROR, "block %u of %s is still referenced (local %u)",
509 bufHdr->tag.blockNum,
510 relpathbackend(BufTagGetRelFileLocator(&bufHdr->tag),
511 MyProcNumber,
512 BufTagGetForkNum(&bufHdr->tag)),
513 LocalRefCount[i]);
514
515 /* Remove entry from hashtable */
516 hresult = (LocalBufferLookupEnt *)
517 hash_search(LocalBufHash, &bufHdr->tag, HASH_REMOVE, NULL);
518 if (!hresult) /* shouldn't happen */
519 elog(ERROR, "local buffer hash table corrupted");
520 /* Mark buffer invalid */
521 ClearBufferTag(&bufHdr->tag);
522 buf_state &= ~BUF_FLAG_MASK;
523 buf_state &= ~BUF_USAGECOUNT_MASK;
524 pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
525 }
526 }
527}
528
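A toy standalone version of that scan: every slot is visited, and entries of the given relation with a block number at or beyond firstDelBlock are simply forgotten (hypothetical structures; the real code also removes the hashtable entry and clears the header state):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct
{
    bool        valid;
    int         rel;            /* stands in for the RelFileLocator + fork */
    uint32_t    blockNum;
} ToyBuf;

static ToyBuf bufs[] = {
    {true, 1, 0}, {true, 1, 7}, {true, 2, 3}, {false, 0, 0},
};

static void
drop_rel_buffers(int rel, uint32_t firstDelBlock)
{
    for (size_t i = 0; i < sizeof(bufs) / sizeof(bufs[0]); i++)
    {
        if (bufs[i].valid && bufs[i].rel == rel &&
            bufs[i].blockNum >= firstDelBlock)
            bufs[i].valid = false;  /* dirty or not, the page is just dropped */
    }
}

int
main(void)
{
    drop_rel_buffers(1, 5);     /* keeps block 0, drops block 7 of relation 1 */
    for (size_t i = 0; i < sizeof(bufs) / sizeof(bufs[0]); i++)
        printf("slot %zu valid=%d\n", i, bufs[i].valid);
    return 0;
}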
529/*
530 * DropRelationAllLocalBuffers
531 * This function removes from the buffer pool all pages of all forks
532 * of the specified relation.
533 *
534 * See DropRelationsAllBuffers in bufmgr.c for more notes.
535 */
536void
537DropRelationAllLocalBuffers(RelFileLocator rlocator)
538{
539 int i;
540
541 for (i = 0; i < NLocBuffer; i++)
542 {
543 BufferDesc *bufHdr = GetLocalBufferDescriptor(i);
544 LocalBufferLookupEnt *hresult;
545 uint32 buf_state;
546
547 buf_state = pg_atomic_read_u32(&bufHdr->state);
548
549 if ((buf_state & BM_TAG_VALID) &&
550 BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator))
551 {
552 if (LocalRefCount[i] != 0)
553 elog(ERROR, "block %u of %s is still referenced (local %u)",
554 bufHdr->tag.blockNum,
555 relpathbackend(BufTagGetRelFileLocator(&bufHdr->tag),
556 MyProcNumber,
557 BufTagGetForkNum(&bufHdr->tag)),
558 LocalRefCount[i]);
559 /* Remove entry from hashtable */
560 hresult = (LocalBufferLookupEnt *)
561 hash_search(LocalBufHash, &bufHdr->tag, HASH_REMOVE, NULL);
562 if (!hresult) /* shouldn't happen */
563 elog(ERROR, "local buffer hash table corrupted");
564 /* Mark buffer invalid */
565 ClearBufferTag(&bufHdr->tag);
566 buf_state &= ~BUF_FLAG_MASK;
567 buf_state &= ~BUF_USAGECOUNT_MASK;
568 pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
569 }
570 }
571}
572
573/*
574 * InitLocalBuffers -
575 * init the local buffer cache. Since most queries (esp. multi-user ones)
576 * don't involve local buffers, we delay allocating actual memory for the
577 * buffers until we need them; just make the buffer headers here.
578 */
579static void
580InitLocalBuffers(void)
581{
582 int nbufs = num_temp_buffers;
583 HASHCTL info;
584 int i;
585
586 /*
587 * Parallel workers can't access data in temporary tables, because they
588 * have no visibility into the local buffers of their leader. This is a
589 * convenient, low-cost place to provide a backstop check for that. Note
590 * that we don't wish to prevent a parallel worker from accessing catalog
591 * metadata about a temp table, so checks at higher levels would be
592 * inappropriate.
593 */
594 if (IsParallelWorker())
595 ereport(ERROR,
596 (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
597 errmsg("cannot access temporary tables during a parallel operation")));
598
599 /* Allocate and zero buffer headers and auxiliary arrays */
600 LocalBufferDescriptors = (BufferDesc *) calloc(nbufs, sizeof(BufferDesc));
601 LocalBufferBlockPointers = (Block *) calloc(nbufs, sizeof(Block));
602 LocalRefCount = (int32 *) calloc(nbufs, sizeof(int32));
603 if (!LocalBufferDescriptors || !LocalBufferBlockPointers || !LocalRefCount)
604 ereport(FATAL,
605 (errcode(ERRCODE_OUT_OF_MEMORY),
606 errmsg("out of memory")));
607
608 nextFreeLocalBufId = 0;
609
610 /* initialize fields that need to start off nonzero */
611 for (i = 0; i < nbufs; i++)
612 {
613 BufferDesc *buf = GetLocalBufferDescriptor(i);
614
615 /*
616 * negative to indicate local buffer. This is tricky: shared buffers
617 * start with 0. We have to start with -2. (Note that the routine
618 * BufferDescriptorGetBuffer adds 1 to buf_id so our first buffer id
619 * is -1.)
620 */
621 buf->buf_id = -i - 2;
622
623 /*
624 * Intentionally do not initialize the buffer's atomic variable
625 * (besides zeroing the underlying memory above). That way we get
626 * errors on platforms without atomics, if somebody (re-)introduces
627 * atomic operations for local buffers.
628 */
629 }
630
631 /* Create the lookup hash table */
632 info.keysize = sizeof(BufferTag);
633 info.entrysize = sizeof(LocalBufferLookupEnt);
634
635 LocalBufHash = hash_create("Local Buffer Lookup Table",
636 nbufs,
637 &info,
638 HASH_ELEM | HASH_BLOBS);
639
640 if (!LocalBufHash)
641 elog(ERROR, "could not initialize local buffer hash table");
642
643 /* Initialization done, mark buffers allocated */
644 NLocBuffer = nbufs;
645}
646
647/*
648 * XXX: We could have a slightly more efficient version of PinLocalBuffer()
649 * that does not support adjusting the usagecount - but so far it does not
650 * seem worth the trouble.
651 *
652 * Note that ResourceOwnerEnlarge() must have been done already.
653 */
654bool
655PinLocalBuffer(BufferDesc *buf_hdr, bool adjust_usagecount)
656{
657 uint32 buf_state;
658 Buffer buffer = BufferDescriptorGetBuffer(buf_hdr);
659 int bufid = -buffer - 1;
660
661 buf_state = pg_atomic_read_u32(&buf_hdr->state);
662
663 if (LocalRefCount[bufid] == 0)
664 {
665 NLocalPinnedBuffers++;
666 if (adjust_usagecount &&
667 BUF_STATE_GET_USAGECOUNT(buf_state) < BM_MAX_USAGE_COUNT)
668 {
669 buf_state += BUF_USAGECOUNT_ONE;
670 pg_atomic_unlocked_write_u32(&buf_hdr->state, buf_state);
671 }
672 }
673 LocalRefCount[bufid]++;
674 ResourceOwnerRememberBuffer(CurrentResourceOwner,
675 BufferDescriptorGetBuffer(buf_hdr));
676
677 return buf_state & BM_VALID;
678}
679
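PinLocalBuffer advances the usage count only on the transition from zero to one pin, and never beyond BM_MAX_USAGE_COUNT, so repeated pin/unpin cycles while a buffer is already held do not make the page look artificially hot to the clock sweep. A standalone sketch of just that accounting (assuming BM_MAX_USAGE_COUNT's current value of 5):

#include <stdio.h>

#define MAX_USAGE_COUNT 5       /* BM_MAX_USAGE_COUNT is currently 5 */

static int  ref_count;          /* plays the role of LocalRefCount[bufid] */
static int  usage_count;        /* usage count kept in the state word */

static void
pin(void)
{
    /* usage only advances on the first pin of an unpinned buffer */
    if (ref_count == 0 && usage_count < MAX_USAGE_COUNT)
        usage_count++;
    ref_count++;
}

static void
unpin(void)
{
    ref_count--;
}

int
main(void)
{
    for (int i = 0; i < 10; i++)    /* pin ten times in a row */
        pin();
    printf("refs=%d usage=%d\n", ref_count, usage_count);  /* refs=10 usage=1 */

    while (ref_count > 0)
        unpin();
    pin();                          /* a fresh pin bumps the count again */
    printf("refs=%d usage=%d\n", ref_count, usage_count);  /* refs=1 usage=2 */
    return 0;
}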
680void
681UnpinLocalBuffer(Buffer buffer)
682{
683 UnpinLocalBufferNoOwner(buffer);
684 ResourceOwnerForgetBuffer(CurrentResourceOwner, buffer);
685}
686
687void
688UnpinLocalBufferNoOwner(Buffer buffer)
689{
690 int buffid = -buffer - 1;
691
692 Assert(BufferIsLocal(buffer));
693 Assert(LocalRefCount[buffid] > 0);
694 Assert(NLocalPinnedBuffers > 0);
695
696 if (--LocalRefCount[buffid] == 0)
697 NLocalPinnedBuffers--;
698}
699
700/*
701 * GUC check_hook for temp_buffers
702 */
703bool
704check_temp_buffers(int *newval, void **extra, GucSource source)
705{
706 /*
707 * Once local buffers have been initialized, it's too late to change this.
708 * However, if this is only a test call, allow it.
709 */
710 if (source != PGC_S_TEST && NLocBuffer && NLocBuffer != *newval)
711 {
712 GUC_check_errdetail("\"temp_buffers\" cannot be changed after any temporary tables have been accessed in the session.");
713 return false;
714 }
715 return true;
716}
717
718/*
719 * GetLocalBufferStorage - allocate memory for a local buffer
720 *
721 * The idea of this function is to aggregate our requests for storage
722 * so that the memory manager doesn't see a whole lot of relatively small
723 * requests. Since we'll never give back a local buffer once it's created
724 * within a particular process, no point in burdening memmgr with separately
725 * managed chunks.
726 */
727static Block
728GetLocalBufferStorage(void)
729{
730 static char *cur_block = NULL;
731 static int next_buf_in_block = 0;
732 static int num_bufs_in_block = 0;
733 static int total_bufs_allocated = 0;
734 static MemoryContext LocalBufferContext = NULL;
735
736 char *this_buf;
737
738 Assert(total_bufs_allocated < NLocBuffer);
739
740 if (next_buf_in_block >= num_bufs_in_block)
741 {
742 /* Need to make a new request to memmgr */
743 int num_bufs;
744
745 /*
746 * We allocate local buffers in a context of their own, so that the
747 * space eaten for them is easily recognizable in MemoryContextStats
748 * output. Create the context on first use.
749 */
750 if (LocalBufferContext == NULL)
751 LocalBufferContext =
752 AllocSetContextCreate(TopMemoryContext,
753 "LocalBufferContext",
754 ALLOCSET_DEFAULT_SIZES);
755
756 /* Start with a 16-buffer request; subsequent ones double each time */
757 num_bufs = Max(num_bufs_in_block * 2, 16);
758 /* But not more than what we need for all remaining local bufs */
759 num_bufs = Min(num_bufs, NLocBuffer - total_bufs_allocated);
760 /* And don't overflow MaxAllocSize, either */
761 num_bufs = Min(num_bufs, MaxAllocSize / BLCKSZ);
762
763 /* Buffers should be I/O aligned. */
764 cur_block = (char *)
765 TYPEALIGN(PG_IO_ALIGN_SIZE,
766 MemoryContextAlloc(LocalBufferContext,
767 num_bufs * BLCKSZ + PG_IO_ALIGN_SIZE));
768 next_buf_in_block = 0;
769 num_bufs_in_block = num_bufs;
770 }
771
772 /* Allocate next buffer in current memory block */
773 this_buf = cur_block + next_buf_in_block * BLCKSZ;
774 next_buf_in_block++;
775 total_bufs_allocated++;
776
777 return (Block) this_buf;
778}
779
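The allocation policy above starts at 16 buffers per request and doubles each time, capped by the buffers still needed and by MaxAllocSize, so even a large temp_buffers setting is covered by a handful of allocator calls. A standalone sketch of the resulting request sizes (assuming the default BLCKSZ of 8192):

#include <stdio.h>

#define BLCKSZ          8192
#define MAX_ALLOC_SIZE  0x3FFFFFFF      /* MaxAllocSize: roughly 1 GB */

static int
min_int(int a, int b)
{
    return a < b ? a : b;
}

int
main(void)
{
    int         n_loc_buffer = 1024;    /* e.g. temp_buffers = 1024 */
    int         allocated = 0;
    int         chunk = 0;
    int         requests = 0;

    while (allocated < n_loc_buffer)
    {
        /* start with 16 buffers, then double, as GetLocalBufferStorage does */
        chunk = chunk * 2 > 16 ? chunk * 2 : 16;
        chunk = min_int(chunk, n_loc_buffer - allocated);
        chunk = min_int(chunk, MAX_ALLOC_SIZE / BLCKSZ);

        allocated += chunk;
        requests++;
        printf("request %d: %d buffers (%d kB)\n",
               requests, chunk, chunk * BLCKSZ / 1024);
    }
    return 0;
}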
780/*
781 * CheckForLocalBufferLeaks - ensure this backend holds no local buffer pins
782 *
783 * This is just like CheckForBufferLeaks(), but for local buffers.
784 */
785static void
786CheckForLocalBufferLeaks(void)
787{
788#ifdef USE_ASSERT_CHECKING
789 if (LocalRefCount)
790 {
791 int RefCountErrors = 0;
792 int i;
793
794 for (i = 0; i < NLocBuffer; i++)
795 {
796 if (LocalRefCount[i] != 0)
797 {
798 Buffer b = -i - 1;
799 char *s;
800
801 s = DebugPrintBufferRefcount(b);
802 elog(WARNING, "local buffer refcount leak: %s", s);
803 pfree(s);
804
805 RefCountErrors++;
806 }
807 }
808 Assert(RefCountErrors == 0);
809 }
810#endif
811}
812
813/*
814 * AtEOXact_LocalBuffers - clean up at end of transaction.
815 *
816 * This is just like AtEOXact_Buffers, but for local buffers.
817 */
818void
819AtEOXact_LocalBuffers(bool isCommit)
820{
821 CheckForLocalBufferLeaks();
822}
823
824/*
825 * AtProcExit_LocalBuffers - ensure we have dropped pins during backend exit.
826 *
827 * This is just like AtProcExit_Buffers, but for local buffers.
828 */
829void
830AtProcExit_LocalBuffers(void)
831{
832 /*
833 * We shouldn't be holding any remaining pins; if we are, and assertions
834 * aren't enabled, we'll fail later in DropRelationBuffers while trying to
835 * drop the temp rels.
836 */
837 CheckForLocalBufferLeaks();
838}