PostgreSQL Source Code git master
localbuf.c
/*-------------------------------------------------------------------------
 *
 * localbuf.c
 *    local buffer manager. Fast buffer manager for temporary tables,
 *    which never need to be WAL-logged or checkpointed, etc.
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994-5, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/storage/buffer/localbuf.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/parallel.h"
#include "executor/instrument.h"
#include "pgstat.h"
#include "storage/buf_internals.h"
#include "storage/bufmgr.h"
#include "storage/fd.h"
#include "utils/guc_hooks.h"
#include "utils/memutils.h"
#include "utils/resowner.h"

/*#define LBDEBUG*/

/* entry for buffer lookup hashtable */
typedef struct
{
    BufferTag   key;            /* Tag of a disk page */
    int         id;             /* Associated local buffer's index */
} LocalBufferLookupEnt;

/* Note: this macro only works on local buffers, not shared ones! */
#define LocalBufHdrGetBlock(bufHdr) \
    LocalBufferBlockPointers[-((bufHdr)->buf_id + 2)]

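/*
 * Illustrative sketch (not part of the original file): how the negative-id
 * scheme behind LocalBufHdrGetBlock() fits together.  For the i'th local
 * buffer (0-based array index), the descriptor's buf_id is -i - 2, the
 * Buffer value handed to callers is buf_id + 1 = -i - 1, and the macro
 * above recovers i as -(buf_id + 2).  The helper names are hypothetical.
 */
static inline int
sketch_local_buffer_to_index(Buffer buffer)
{
    Assert(BufferIsLocal(buffer));      /* local Buffers are negative */
    return -buffer - 1;                 /* Buffer -1 -> index 0, -2 -> 1, ... */
}

static inline Buffer
sketch_index_to_local_buffer(int index)
{
    return -index - 1;                  /* inverse of the mapping above */
}
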
int         NLocBuffer = 0;     /* until buffers are initialized */

BufferDesc *LocalBufferDescriptors = NULL;
Block      *LocalBufferBlockPointers = NULL;
int32      *LocalRefCount = NULL;

static int  nextFreeLocalBufId = 0;

static HTAB *LocalBufHash = NULL;

/* number of local buffers pinned at least once */
static int  NLocalPinnedBuffers = 0;


static void InitLocalBuffers(void);
static Block GetLocalBufferStorage(void);
static Buffer GetLocalVictimBuffer(void);

/*
 * PrefetchLocalBuffer -
 *    initiate asynchronous read of a block of a relation
 *
 * Do PrefetchBuffer's work for temporary relations.
 * No-op if prefetching isn't compiled in.
 */
PrefetchBufferResult
PrefetchLocalBuffer(SMgrRelation smgr, ForkNumber forkNum,
                    BlockNumber blockNum)
{
    PrefetchBufferResult result = {InvalidBuffer, false};
    BufferTag   newTag;         /* identity of requested block */
    LocalBufferLookupEnt *hresult;

    InitBufferTag(&newTag, &smgr->smgr_rlocator.locator, forkNum, blockNum);

    /* Initialize local buffers if first request in this session */
    if (LocalBufHash == NULL)
        InitLocalBuffers();

    /* See if the desired buffer already exists */
    hresult = (LocalBufferLookupEnt *)
        hash_search(LocalBufHash, &newTag, HASH_FIND, NULL);

    if (hresult)
    {
        /* Yes, so nothing to do */
        result.recent_buffer = -hresult->id - 1;
    }
    else
    {
#ifdef USE_PREFETCH
        /* Not in buffers, so initiate prefetch */
        if ((io_direct_flags & IO_DIRECT_DATA) == 0 &&
            smgrprefetch(smgr, forkNum, blockNum, 1))
        {
            result.initiated_io = true;
        }
#endif                          /* USE_PREFETCH */
    }

    return result;
}

/*
 * LocalBufferAlloc -
 *    Find or create a local buffer for the given page of the given relation.
 *
 * API is similar to bufmgr.c's BufferAlloc, except that we do not need to do
 * any locking since this is all local.  We support only default access
 * strategy (hence, usage_count is always advanced).
 */
BufferDesc *
LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum,
                 bool *foundPtr)
{
    BufferTag   newTag;         /* identity of requested block */
    LocalBufferLookupEnt *hresult;
    BufferDesc *bufHdr;
    Buffer      victim_buffer;
    int         bufid;
    bool        found;

    InitBufferTag(&newTag, &smgr->smgr_rlocator.locator, forkNum, blockNum);

    /* Initialize local buffers if first request in this session */
    if (LocalBufHash == NULL)
        InitLocalBuffers();

    ResourceOwnerEnlarge(CurrentResourceOwner);

    /* See if the desired buffer already exists */
    hresult = (LocalBufferLookupEnt *)
        hash_search(LocalBufHash, &newTag, HASH_FIND, NULL);

    if (hresult)
    {
        bufid = hresult->id;
        bufHdr = GetLocalBufferDescriptor(bufid);
        Assert(BufferTagsEqual(&bufHdr->tag, &newTag));

        *foundPtr = PinLocalBuffer(bufHdr, true);
    }
    else
    {
        uint32      buf_state;

        victim_buffer = GetLocalVictimBuffer();
        bufid = -victim_buffer - 1;
        bufHdr = GetLocalBufferDescriptor(bufid);

        hresult = (LocalBufferLookupEnt *)
            hash_search(LocalBufHash, &newTag, HASH_ENTER, &found);
        if (found)              /* shouldn't happen */
            elog(ERROR, "local buffer hash table corrupted");
        hresult->id = bufid;

        /*
         * it's all ours now.
         */
        bufHdr->tag = newTag;

        buf_state = pg_atomic_read_u32(&bufHdr->state);
        buf_state &= ~(BUF_FLAG_MASK | BUF_USAGECOUNT_MASK);
        buf_state |= BM_TAG_VALID | BUF_USAGECOUNT_ONE;
        pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);

        *foundPtr = false;
    }

    return bufHdr;
}

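/*
 * Illustrative sketch (not part of the original file): how a caller such as
 * bufmgr.c's ReadBuffer machinery consumes LocalBufferAlloc().  When *foundPtr
 * comes back false, the caller is responsible for reading or zeroing the page
 * itself; only the lookup step is shown, and the function name is
 * hypothetical.
 */
static inline Block
sketch_lookup_local_block(SMgrRelation smgr, ForkNumber forkNum,
                          BlockNumber blockNum, bool *valid)
{
    BufferDesc *bufHdr = LocalBufferAlloc(smgr, forkNum, blockNum, valid);

    /* the block storage is already allocated and pinned at this point */
    return LocalBufHdrGetBlock(bufHdr);
}
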
static Buffer
GetLocalVictimBuffer(void)
{
    int         victim_bufid;
    int         trycounter;
    uint32      buf_state;
    BufferDesc *bufHdr;

    ResourceOwnerEnlarge(CurrentResourceOwner);

    /*
     * Need to get a new buffer.  We use a clock sweep algorithm (essentially
     * the same as what freelist.c does now...)
     */
    trycounter = NLocBuffer;
    for (;;)
    {
        victim_bufid = nextFreeLocalBufId;

        if (++nextFreeLocalBufId >= NLocBuffer)
            nextFreeLocalBufId = 0;

        bufHdr = GetLocalBufferDescriptor(victim_bufid);

        if (LocalRefCount[victim_bufid] == 0)
        {
            buf_state = pg_atomic_read_u32(&bufHdr->state);

            if (BUF_STATE_GET_USAGECOUNT(buf_state) > 0)
            {
                buf_state -= BUF_USAGECOUNT_ONE;
                pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
                trycounter = NLocBuffer;
            }
            else
            {
                /* Found a usable buffer */
                PinLocalBuffer(bufHdr, false);
                break;
            }
        }
        else if (--trycounter == 0)
            ereport(ERROR,
                    (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
                     errmsg("no empty local buffer available")));
    }

    /*
     * lazy memory allocation: allocate space on first use of a buffer.
     */
    if (LocalBufHdrGetBlock(bufHdr) == NULL)
    {
        /* Set pointer for use by BufferGetBlock() macro */
        LocalBufHdrGetBlock(bufHdr) = GetLocalBufferStorage();
    }

    /*
     * this buffer is not referenced but it might still be dirty. if that's
     * the case, write it out before reusing it!
     */
    if (buf_state & BM_DIRTY)
    {
        instr_time  io_start;
        SMgrRelation oreln;
        Page        localpage = (char *) LocalBufHdrGetBlock(bufHdr);

        /* Find smgr relation for buffer */
        oreln = smgropen(BufTagGetRelFileLocator(&bufHdr->tag), MyProcNumber);

        PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);

        io_start = pgstat_prepare_io_time(track_io_timing);

        /* And write... */
        smgrwrite(oreln,
                  BufTagGetForkNum(&bufHdr->tag),
                  bufHdr->tag.blockNum,
                  localpage,
                  false);

        /* Temporary table I/O does not use Buffer Access Strategies */
        pgstat_count_io_op_time(IOOBJECT_TEMP_RELATION, IOCONTEXT_NORMAL,
                                IOOP_WRITE, io_start, 1, BLCKSZ);

        /* Mark not-dirty now in case we error out below */
        buf_state &= ~BM_DIRTY;
        pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);

        pgBufferUsage.local_blks_written++;
    }

    /*
     * Remove the victim buffer from the hashtable and mark as invalid.
     */
    if (buf_state & BM_TAG_VALID)
    {
        LocalBufferLookupEnt *hresult;

        hresult = (LocalBufferLookupEnt *)
            hash_search(LocalBufHash, &bufHdr->tag, HASH_REMOVE, NULL);
        if (!hresult)           /* shouldn't happen */
            elog(ERROR, "local buffer hash table corrupted");
        /* mark buffer invalid just in case hash insert fails */
        ClearBufferTag(&bufHdr->tag);
        buf_state &= ~(BUF_FLAG_MASK | BUF_USAGECOUNT_MASK);
        pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);

        pgstat_count_io_op(IOOBJECT_TEMP_RELATION, IOCONTEXT_NORMAL,
                           IOOP_EVICT, 1, 0);
    }

    return BufferDescriptorGetBuffer(bufHdr);
}

/* see LimitAdditionalPins() */
void
LimitAdditionalLocalPins(uint32 *additional_pins)
{
    uint32      max_pins;

    if (*additional_pins <= 1)
        return;

    /*
     * In contrast to LimitAdditionalPins() other backends don't play a role
     * here. We can allow up to NLocBuffer pins in total, but it might not be
     * initialized yet so read num_temp_buffers.
     */
    max_pins = (num_temp_buffers - NLocalPinnedBuffers);

    if (*additional_pins >= max_pins)
        *additional_pins = max_pins;
}

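/*
 * Illustrative sketch (not part of the original file): the clamping done by
 * LimitAdditionalLocalPins() above.  For example, with num_temp_buffers = 1024
 * and NLocalPinnedBuffers = 1000, a request for 64 additional pins is clamped
 * to 24.  The wrapper name is hypothetical.
 */
static inline uint32
sketch_clamped_extra_local_pins(uint32 wanted)
{
    LimitAdditionalLocalPins(&wanted);  /* clamps the request in place */
    return wanted;
}
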
/*
 * Implementation of ExtendBufferedRelBy() and ExtendBufferedRelTo() for
 * temporary buffers.
 */
BlockNumber
ExtendBufferedRelLocal(BufferManagerRelation bmr,
                       ForkNumber fork,
                       uint32 flags,
                       uint32 extend_by,
                       BlockNumber extend_upto,
                       Buffer *buffers,
                       uint32 *extended_by)
{
    BlockNumber first_block;
    instr_time  io_start;

    /* Initialize local buffers if first request in this session */
    if (LocalBufHash == NULL)
        InitLocalBuffers();

    LimitAdditionalLocalPins(&extend_by);

    for (uint32 i = 0; i < extend_by; i++)
    {
        BufferDesc *buf_hdr;
        Block       buf_block;

        buffers[i] = GetLocalVictimBuffer();
        buf_hdr = GetLocalBufferDescriptor(-buffers[i] - 1);
        buf_block = LocalBufHdrGetBlock(buf_hdr);

        /* new buffers are zero-filled */
        MemSet((char *) buf_block, 0, BLCKSZ);
    }

    first_block = smgrnblocks(bmr.smgr, fork);

    if (extend_upto != InvalidBlockNumber)
    {
        /*
         * In contrast to shared relations, nothing could change the relation
         * size concurrently. Thus we shouldn't end up finding that we don't
         * need to do anything.
         */
        Assert(first_block <= extend_upto);

        Assert((uint64) first_block + extend_by <= extend_upto);
    }

    /* Fail if relation is already at maximum possible length */
    if ((uint64) first_block + extend_by >= MaxBlockNumber)
        ereport(ERROR,
                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                 errmsg("cannot extend relation %s beyond %u blocks",
                        relpath(bmr.smgr->smgr_rlocator, fork),
                        MaxBlockNumber)));

    for (uint32 i = 0; i < extend_by; i++)
    {
        int         victim_buf_id;
        BufferDesc *victim_buf_hdr;
        BufferTag   tag;
        LocalBufferLookupEnt *hresult;
        bool        found;

        victim_buf_id = -buffers[i] - 1;
        victim_buf_hdr = GetLocalBufferDescriptor(victim_buf_id);

        /* in case we need to pin an existing buffer below */
        ResourceOwnerEnlarge(CurrentResourceOwner);

        InitBufferTag(&tag, &bmr.smgr->smgr_rlocator.locator, fork, first_block + i);

        hresult = (LocalBufferLookupEnt *)
            hash_search(LocalBufHash, &tag, HASH_ENTER, &found);
        if (found)
        {
            BufferDesc *existing_hdr;
            uint32      buf_state;

            UnpinLocalBuffer(BufferDescriptorGetBuffer(victim_buf_hdr));

            existing_hdr = GetLocalBufferDescriptor(hresult->id);
            PinLocalBuffer(existing_hdr, false);
            buffers[i] = BufferDescriptorGetBuffer(existing_hdr);

            buf_state = pg_atomic_read_u32(&existing_hdr->state);
            Assert(buf_state & BM_TAG_VALID);
            Assert(!(buf_state & BM_DIRTY));
            buf_state &= ~BM_VALID;
            pg_atomic_unlocked_write_u32(&existing_hdr->state, buf_state);
        }
        else
        {
            uint32      buf_state = pg_atomic_read_u32(&victim_buf_hdr->state);

            Assert(!(buf_state & (BM_VALID | BM_TAG_VALID | BM_DIRTY | BM_JUST_DIRTIED)));

            victim_buf_hdr->tag = tag;

            buf_state |= BM_TAG_VALID | BUF_USAGECOUNT_ONE;

            pg_atomic_unlocked_write_u32(&victim_buf_hdr->state, buf_state);

            hresult->id = victim_buf_id;
        }
    }

    io_start = pgstat_prepare_io_time(track_io_timing);

    /* actually extend relation */
    smgrzeroextend(bmr.smgr, fork, first_block, extend_by, false);

    pgstat_count_io_op_time(IOOBJECT_TEMP_RELATION, IOCONTEXT_NORMAL, IOOP_EXTEND,
                            io_start, 1, extend_by * BLCKSZ);

    for (uint32 i = 0; i < extend_by; i++)
    {
        Buffer      buf = buffers[i];
        BufferDesc *buf_hdr;
        uint32      buf_state;

        buf_hdr = GetLocalBufferDescriptor(-buf - 1);

        buf_state = pg_atomic_read_u32(&buf_hdr->state);
        buf_state |= BM_VALID;
        pg_atomic_unlocked_write_u32(&buf_hdr->state, buf_state);
    }

    *extended_by = extend_by;

    pgBufferUsage.local_blks_written += extend_by;

    return first_block;
}

/*
 * MarkLocalBufferDirty -
 *    mark a local buffer dirty
 */
void
MarkLocalBufferDirty(Buffer buffer)
{
    int         bufid;
    BufferDesc *bufHdr;
    uint32      buf_state;

    Assert(BufferIsLocal(buffer));

#ifdef LBDEBUG
    fprintf(stderr, "LB DIRTY %d\n", buffer);
#endif

    bufid = -buffer - 1;

    Assert(LocalRefCount[bufid] > 0);

    bufHdr = GetLocalBufferDescriptor(bufid);

    buf_state = pg_atomic_read_u32(&bufHdr->state);

    if (!(buf_state & BM_DIRTY))
        pgBufferUsage.local_blks_dirtied++;

    buf_state |= BM_DIRTY;

    pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
}

/*
 * DropRelationLocalBuffers
 *    This function removes from the buffer pool all the pages of the
 *    specified relation that have block numbers >= firstDelBlock.
 *    (In particular, with firstDelBlock = 0, all pages are removed.)
 *    Dirty pages are simply dropped, without bothering to write them
 *    out first.  Therefore, this is NOT rollback-able, and so should be
 *    used only with extreme caution!
 *
 * See DropRelationBuffers in bufmgr.c for more notes.
 */
void
DropRelationLocalBuffers(RelFileLocator rlocator, ForkNumber forkNum,
                         BlockNumber firstDelBlock)
{
    int         i;

    for (i = 0; i < NLocBuffer; i++)
    {
        BufferDesc *bufHdr = GetLocalBufferDescriptor(i);
        LocalBufferLookupEnt *hresult;
        uint32      buf_state;

        buf_state = pg_atomic_read_u32(&bufHdr->state);

        if ((buf_state & BM_TAG_VALID) &&
            BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator) &&
            BufTagGetForkNum(&bufHdr->tag) == forkNum &&
            bufHdr->tag.blockNum >= firstDelBlock)
        {
            if (LocalRefCount[i] != 0)
                elog(ERROR, "block %u of %s is still referenced (local %u)",
                     bufHdr->tag.blockNum,
                     relpathbackend(BufTagGetRelFileLocator(&bufHdr->tag),
                                    MyProcNumber,
                                    BufTagGetForkNum(&bufHdr->tag)),
                     LocalRefCount[i]);

            /* Remove entry from hashtable */
            hresult = (LocalBufferLookupEnt *)
                hash_search(LocalBufHash, &bufHdr->tag, HASH_REMOVE, NULL);
            if (!hresult)       /* shouldn't happen */
                elog(ERROR, "local buffer hash table corrupted");
            /* Mark buffer invalid */
            ClearBufferTag(&bufHdr->tag);
            buf_state &= ~BUF_FLAG_MASK;
            buf_state &= ~BUF_USAGECOUNT_MASK;
            pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
        }
    }
}

/*
 * DropRelationAllLocalBuffers
 *    This function removes from the buffer pool all pages of all forks
 *    of the specified relation.
 *
 * See DropRelationsAllBuffers in bufmgr.c for more notes.
 */
void
DropRelationAllLocalBuffers(RelFileLocator rlocator)
{
    int         i;

    for (i = 0; i < NLocBuffer; i++)
    {
        BufferDesc *bufHdr = GetLocalBufferDescriptor(i);
        LocalBufferLookupEnt *hresult;
        uint32      buf_state;

        buf_state = pg_atomic_read_u32(&bufHdr->state);

        if ((buf_state & BM_TAG_VALID) &&
            BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator))
        {
            if (LocalRefCount[i] != 0)
                elog(ERROR, "block %u of %s is still referenced (local %u)",
                     bufHdr->tag.blockNum,
                     relpathbackend(BufTagGetRelFileLocator(&bufHdr->tag),
                                    MyProcNumber,
                                    BufTagGetForkNum(&bufHdr->tag)),
                     LocalRefCount[i]);
            /* Remove entry from hashtable */
            hresult = (LocalBufferLookupEnt *)
                hash_search(LocalBufHash, &bufHdr->tag, HASH_REMOVE, NULL);
            if (!hresult)       /* shouldn't happen */
                elog(ERROR, "local buffer hash table corrupted");
            /* Mark buffer invalid */
            ClearBufferTag(&bufHdr->tag);
            buf_state &= ~BUF_FLAG_MASK;
            buf_state &= ~BUF_USAGECOUNT_MASK;
            pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
        }
    }
}

/*
 * InitLocalBuffers -
 *    init the local buffer cache. Since most queries (esp. multi-user ones)
 *    don't involve local buffers, we delay allocating actual memory for the
 *    buffers until we need them; just make the buffer headers here.
 */
static void
InitLocalBuffers(void)
{
    int         nbufs = num_temp_buffers;
    HASHCTL     info;
    int         i;

    /*
     * Parallel workers can't access data in temporary tables, because they
     * have no visibility into the local buffers of their leader.  This is a
     * convenient, low-cost place to provide a backstop check for that.  Note
     * that we don't wish to prevent a parallel worker from accessing catalog
     * metadata about a temp table, so checks at higher levels would be
     * inappropriate.
     */
    if (IsParallelWorker())
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
                 errmsg("cannot access temporary tables during a parallel operation")));

    /* Allocate and zero buffer headers and auxiliary arrays */
    LocalBufferDescriptors = (BufferDesc *) calloc(nbufs, sizeof(BufferDesc));
    LocalBufferBlockPointers = (Block *) calloc(nbufs, sizeof(Block));
    LocalRefCount = (int32 *) calloc(nbufs, sizeof(int32));
    if (!LocalBufferDescriptors || !LocalBufferBlockPointers || !LocalRefCount)
        ereport(FATAL,
                (errcode(ERRCODE_OUT_OF_MEMORY),
                 errmsg("out of memory")));

    nextFreeLocalBufId = 0;

    /* initialize fields that need to start off nonzero */
    for (i = 0; i < nbufs; i++)
    {
        BufferDesc *buf = GetLocalBufferDescriptor(i);

        /*
         * negative to indicate local buffer. This is tricky: shared buffers
         * start with 0. We have to start with -2. (Note that the routine
         * BufferDescriptorGetBuffer adds 1 to buf_id so our first buffer id
         * is -1.)
         */
        buf->buf_id = -i - 2;

        /*
         * Intentionally do not initialize the buffer's atomic variable
         * (besides zeroing the underlying memory above). That way we get
         * errors on platforms without atomics, if somebody (re-)introduces
         * atomic operations for local buffers.
         */
    }

    /* Create the lookup hash table */
    info.keysize = sizeof(BufferTag);
    info.entrysize = sizeof(LocalBufferLookupEnt);

    LocalBufHash = hash_create("Local Buffer Lookup Table",
                               nbufs,
                               &info,
                               HASH_ELEM | HASH_BLOBS);

    if (!LocalBufHash)
        elog(ERROR, "could not initialize local buffer hash table");

    /* Initialization done, mark buffers allocated */
    NLocBuffer = nbufs;
}

/*
 * XXX: We could have a slightly more efficient version of PinLocalBuffer()
 * that does not support adjusting the usagecount - but so far it does not
 * seem worth the trouble.
 *
 * Note that ResourceOwnerEnlarge() must have been done already.
 */
bool
PinLocalBuffer(BufferDesc *buf_hdr, bool adjust_usagecount)
{
    uint32      buf_state;
    Buffer      buffer = BufferDescriptorGetBuffer(buf_hdr);
    int         bufid = -buffer - 1;

    buf_state = pg_atomic_read_u32(&buf_hdr->state);

    if (LocalRefCount[bufid] == 0)
    {
        NLocalPinnedBuffers++;
        if (adjust_usagecount &&
            BUF_STATE_GET_USAGECOUNT(buf_state) < BM_MAX_USAGE_COUNT)
        {
            buf_state += BUF_USAGECOUNT_ONE;
            pg_atomic_unlocked_write_u32(&buf_hdr->state, buf_state);
        }
    }
    LocalRefCount[bufid]++;
    ResourceOwnerRememberBuffer(CurrentResourceOwner,
                                BufferDescriptorGetBuffer(buf_hdr));

    return buf_state & BM_VALID;
}

void
UnpinLocalBuffer(Buffer buffer)
{
    UnpinLocalBufferNoOwner(buffer);
    ResourceOwnerForgetBuffer(CurrentResourceOwner, buffer);
}

void
UnpinLocalBufferNoOwner(Buffer buffer)
{
    int         buffid = -buffer - 1;

    Assert(BufferIsLocal(buffer));
    Assert(LocalRefCount[buffid] > 0);
    Assert(NLocalPinnedBuffers > 0);

    if (--LocalRefCount[buffid] == 0)
        NLocalPinnedBuffers--;
}

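/*
 * Illustrative sketch (not part of the original file): the expected pairing
 * when pinning a local buffer directly, mirroring what GetLocalVictimBuffer()
 * and ExtendBufferedRelLocal() do above.  ResourceOwnerEnlarge() must come
 * first because PinLocalBuffer() records the pin with the current resource
 * owner and must not fail at that point.  The function name is hypothetical.
 */
static inline void
sketch_pin_then_unpin(BufferDesc *buf_hdr)
{
    Buffer      buffer = BufferDescriptorGetBuffer(buf_hdr);

    ResourceOwnerEnlarge(CurrentResourceOwner);
    (void) PinLocalBuffer(buf_hdr, false);

    /* ... work with the page via LocalBufHdrGetBlock(buf_hdr) ... */

    UnpinLocalBuffer(buffer);
}
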
/*
 * GUC check_hook for temp_buffers
 */
bool
check_temp_buffers(int *newval, void **extra, GucSource source)
{
    /*
     * Once local buffers have been initialized, it's too late to change this.
     * However, if this is only a test call, allow it.
     */
    if (source != PGC_S_TEST && NLocBuffer && NLocBuffer != *newval)
    {
        GUC_check_errdetail("\"temp_buffers\" cannot be changed after any temporary tables have been accessed in the session.");
        return false;
    }
    return true;
}

/*
 * GetLocalBufferStorage - allocate memory for a local buffer
 *
 * The idea of this function is to aggregate our requests for storage
 * so that the memory manager doesn't see a whole lot of relatively small
 * requests.  Since we'll never give back a local buffer once it's created
 * within a particular process, no point in burdening memmgr with separately
 * managed chunks.
 */
static Block
GetLocalBufferStorage(void)
{
    static char *cur_block = NULL;
    static int  next_buf_in_block = 0;
    static int  num_bufs_in_block = 0;
    static int  total_bufs_allocated = 0;
    static MemoryContext LocalBufferContext = NULL;

    char       *this_buf;

    Assert(total_bufs_allocated < NLocBuffer);

    if (next_buf_in_block >= num_bufs_in_block)
    {
        /* Need to make a new request to memmgr */
        int         num_bufs;

        /*
         * We allocate local buffers in a context of their own, so that the
         * space eaten for them is easily recognizable in MemoryContextStats
         * output.  Create the context on first use.
         */
        if (LocalBufferContext == NULL)
            LocalBufferContext =
                AllocSetContextCreate(TopMemoryContext,
                                      "LocalBufferContext",
                                      ALLOCSET_DEFAULT_SIZES);

        /* Start with a 16-buffer request; subsequent ones double each time */
        num_bufs = Max(num_bufs_in_block * 2, 16);
        /* But not more than what we need for all remaining local bufs */
        num_bufs = Min(num_bufs, NLocBuffer - total_bufs_allocated);
        /* And don't overflow MaxAllocSize, either */
        num_bufs = Min(num_bufs, MaxAllocSize / BLCKSZ);

        /* Buffers should be I/O aligned. */
        cur_block = (char *)
            TYPEALIGN(PG_IO_ALIGN_SIZE,
                      MemoryContextAlloc(LocalBufferContext,
                                         num_bufs * BLCKSZ + PG_IO_ALIGN_SIZE));
        next_buf_in_block = 0;
        num_bufs_in_block = num_bufs;
    }

    /* Allocate next buffer in current memory block */
    this_buf = cur_block + next_buf_in_block * BLCKSZ;
    next_buf_in_block++;
    total_bufs_allocated++;

    return (Block) this_buf;
}

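/*
 * Illustrative sketch (not part of the original file): the chunk sizes
 * GetLocalBufferStorage() ends up requesting.  With NLocBuffer = 100 the
 * requests are 16, 32 and 52 buffers, the last one clamped to what is still
 * needed.  The function name is hypothetical.
 */
static inline void
sketch_local_buffer_chunk_sizes(int nlocbuffer)
{
    int         allocated = 0;
    int         chunk = 0;

    while (allocated < nlocbuffer)
    {
        chunk = Max(chunk * 2, 16);                 /* start at 16, then double */
        chunk = Min(chunk, nlocbuffer - allocated); /* never more than remaining */
        chunk = Min(chunk, MaxAllocSize / BLCKSZ);  /* respect MaxAllocSize */
        elog(DEBUG1, "would request a chunk of %d buffers", chunk);
        allocated += chunk;
    }
}
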
/*
 * CheckForLocalBufferLeaks - ensure this backend holds no local buffer pins
 *
 * This is just like CheckForBufferLeaks(), but for local buffers.
 */
static void
CheckForLocalBufferLeaks(void)
{
#ifdef USE_ASSERT_CHECKING
    if (LocalRefCount)
    {
        int         RefCountErrors = 0;
        int         i;

        for (i = 0; i < NLocBuffer; i++)
        {
            if (LocalRefCount[i] != 0)
            {
                Buffer      b = -i - 1;
                char       *s;

                s = DebugPrintBufferRefcount(b);
                elog(WARNING, "local buffer refcount leak: %s", s);
                pfree(s);

                RefCountErrors++;
            }
        }
        Assert(RefCountErrors == 0);
    }
#endif
}

/*
 * AtEOXact_LocalBuffers - clean up at end of transaction.
 *
 * This is just like AtEOXact_Buffers, but for local buffers.
 */
void
AtEOXact_LocalBuffers(bool isCommit)
{
    CheckForLocalBufferLeaks();
}

/*
 * AtProcExit_LocalBuffers - ensure we have dropped pins during backend exit.
 *
 * This is just like AtProcExit_Buffers, but for local buffers.
 */
void
AtProcExit_LocalBuffers(void)
{
    /*
     * We shouldn't be holding any remaining pins; if we are, and assertions
     * aren't enabled, we'll fail later in DropRelationBuffers while trying to
     * drop the temp rels.
     */
    CheckForLocalBufferLeaks();
}