PostgreSQL Source Code  git master
localbuf.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * localbuf.c
4  * local buffer manager. Fast buffer manager for temporary tables,
5  * which never need to be WAL-logged or checkpointed, etc.
6  *
7  * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994-5, Regents of the University of California
9  *
10  *
11  * IDENTIFICATION
12  * src/backend/storage/buffer/localbuf.c
13  *
14  *-------------------------------------------------------------------------
15  */
16 #include "postgres.h"
17 
18 #include "access/parallel.h"
19 #include "catalog/catalog.h"
20 #include "executor/instrument.h"
21 #include "pgstat.h"
22 #include "storage/buf_internals.h"
23 #include "storage/bufmgr.h"
24 #include "utils/guc_hooks.h"
25 #include "utils/memutils.h"
26 #include "utils/resowner_private.h"
27 
28 
29 /*#define LBDEBUG*/
30 
31 /* entry for buffer lookup hashtable */
32 typedef struct
33 {
34  BufferTag key; /* Tag of a disk page */
35  int id; /* Associated local buffer's index */
37 
/*
 * Map a local buffer descriptor to its in-memory page.  Local descriptors
 * carry buf_id = -i - 2 (see InitLocalBuffers), so -(buf_id + 2) recovers
 * the array index i.
 *
 * Note: this macro only works on local buffers, not shared ones!
 */
#define LocalBufHdrGetBlock(bufHdr) \
	LocalBufferBlockPointers[-((bufHdr)->buf_id + 2)]
41 
42 int NLocBuffer = 0; /* until buffers are initialized */
43 
47 
48 static int nextFreeLocalBufId = 0;
49 
50 static HTAB *LocalBufHash = NULL;
51 
52 /* number of local buffers pinned at least once */
53 static int NLocalPinnedBuffers = 0;
54 
55 
56 static void InitLocalBuffers(void);
57 static Block GetLocalBufferStorage(void);
58 static Buffer GetLocalVictimBuffer(void);
59 
60 
61 /*
62  * PrefetchLocalBuffer -
63  * initiate asynchronous read of a block of a relation
64  *
65  * Do PrefetchBuffer's work for temporary relations.
66  * No-op if prefetching isn't compiled in.
67  */
70  BlockNumber blockNum)
71 {
72  PrefetchBufferResult result = {InvalidBuffer, false};
73  BufferTag newTag; /* identity of requested block */
74  LocalBufferLookupEnt *hresult;
75 
76  InitBufferTag(&newTag, &smgr->smgr_rlocator.locator, forkNum, blockNum);
77 
78  /* Initialize local buffers if first request in this session */
79  if (LocalBufHash == NULL)
81 
82  /* See if the desired buffer already exists */
83  hresult = (LocalBufferLookupEnt *)
84  hash_search(LocalBufHash, &newTag, HASH_FIND, NULL);
85 
86  if (hresult)
87  {
88  /* Yes, so nothing to do */
89  result.recent_buffer = -hresult->id - 1;
90  }
91  else
92  {
93 #ifdef USE_PREFETCH
94  /* Not in buffers, so initiate prefetch */
95  if ((io_direct_flags & IO_DIRECT_DATA) == 0 &&
96  smgrprefetch(smgr, forkNum, blockNum))
97  {
98  result.initiated_io = true;
99  }
100 #endif /* USE_PREFETCH */
101  }
102 
103  return result;
104 }
105 
106 
107 /*
108  * LocalBufferAlloc -
109  * Find or create a local buffer for the given page of the given relation.
110  *
111  * API is similar to bufmgr.c's BufferAlloc, except that we do not need
112  * to do any locking since this is all local. Also, IO_IN_PROGRESS
113  * does not get set. Lastly, we support only default access strategy
114  * (hence, usage_count is always advanced).
115  */
116 BufferDesc *
118  bool *foundPtr)
119 {
120  BufferTag newTag; /* identity of requested block */
121  LocalBufferLookupEnt *hresult;
122  BufferDesc *bufHdr;
123  Buffer victim_buffer;
124  int bufid;
125  bool found;
126 
127  InitBufferTag(&newTag, &smgr->smgr_rlocator.locator, forkNum, blockNum);
128 
129  /* Initialize local buffers if first request in this session */
130  if (LocalBufHash == NULL)
132 
133  /* See if the desired buffer already exists */
134  hresult = (LocalBufferLookupEnt *)
135  hash_search(LocalBufHash, &newTag, HASH_FIND, NULL);
136 
137  if (hresult)
138  {
139  bufid = hresult->id;
140  bufHdr = GetLocalBufferDescriptor(bufid);
141  Assert(BufferTagsEqual(&bufHdr->tag, &newTag));
142 
143  *foundPtr = PinLocalBuffer(bufHdr, true);
144  }
145  else
146  {
147  uint32 buf_state;
148 
149  victim_buffer = GetLocalVictimBuffer();
150  bufid = -victim_buffer - 1;
151  bufHdr = GetLocalBufferDescriptor(bufid);
152 
153  hresult = (LocalBufferLookupEnt *)
154  hash_search(LocalBufHash, &newTag, HASH_ENTER, &found);
155  if (found) /* shouldn't happen */
156  elog(ERROR, "local buffer hash table corrupted");
157  hresult->id = bufid;
158 
159  /*
160  * it's all ours now.
161  */
162  bufHdr->tag = newTag;
163 
164  buf_state = pg_atomic_read_u32(&bufHdr->state);
165  buf_state &= ~(BUF_FLAG_MASK | BUF_USAGECOUNT_MASK);
166  buf_state |= BM_TAG_VALID | BUF_USAGECOUNT_ONE;
167  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
168 
169  *foundPtr = false;
170  }
171 
172  return bufHdr;
173 }
174 
175 static Buffer
177 {
178  int victim_bufid;
179  int trycounter;
180  uint32 buf_state;
181  BufferDesc *bufHdr;
182 
184 
185  /*
186  * Need to get a new buffer. We use a clock sweep algorithm (essentially
187  * the same as what freelist.c does now...)
188  */
189  trycounter = NLocBuffer;
190  for (;;)
191  {
192  victim_bufid = nextFreeLocalBufId;
193 
195  nextFreeLocalBufId = 0;
196 
197  bufHdr = GetLocalBufferDescriptor(victim_bufid);
198 
199  if (LocalRefCount[victim_bufid] == 0)
200  {
201  buf_state = pg_atomic_read_u32(&bufHdr->state);
202 
203  if (BUF_STATE_GET_USAGECOUNT(buf_state) > 0)
204  {
205  buf_state -= BUF_USAGECOUNT_ONE;
206  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
207  trycounter = NLocBuffer;
208  }
209  else
210  {
211  /* Found a usable buffer */
212  PinLocalBuffer(bufHdr, false);
213  break;
214  }
215  }
216  else if (--trycounter == 0)
217  ereport(ERROR,
218  (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
219  errmsg("no empty local buffer available")));
220  }
221 
222  /*
223  * lazy memory allocation: allocate space on first use of a buffer.
224  */
225  if (LocalBufHdrGetBlock(bufHdr) == NULL)
226  {
227  /* Set pointer for use by BufferGetBlock() macro */
229  }
230 
231  /*
232  * this buffer is not referenced but it might still be dirty. if that's
233  * the case, write it out before reusing it!
234  */
235  if (buf_state & BM_DIRTY)
236  {
237  instr_time io_start;
238  SMgrRelation oreln;
239  Page localpage = (char *) LocalBufHdrGetBlock(bufHdr);
240 
241  /* Find smgr relation for buffer */
242  oreln = smgropen(BufTagGetRelFileLocator(&bufHdr->tag), MyBackendId);
243 
244  PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
245 
246  io_start = pgstat_prepare_io_time();
247 
248  /* And write... */
249  smgrwrite(oreln,
250  BufTagGetForkNum(&bufHdr->tag),
251  bufHdr->tag.blockNum,
252  localpage,
253  false);
254 
255  /* Temporary table I/O does not use Buffer Access Strategies */
257  IOOP_WRITE, io_start, 1);
258 
259  /* Mark not-dirty now in case we error out below */
260  buf_state &= ~BM_DIRTY;
261  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
262 
264  }
265 
266  /*
267  * Remove the victim buffer from the hashtable and mark as invalid.
268  */
269  if (buf_state & BM_TAG_VALID)
270  {
271  LocalBufferLookupEnt *hresult;
272 
273  hresult = (LocalBufferLookupEnt *)
274  hash_search(LocalBufHash, &bufHdr->tag, HASH_REMOVE, NULL);
275  if (!hresult) /* shouldn't happen */
276  elog(ERROR, "local buffer hash table corrupted");
277  /* mark buffer invalid just in case hash insert fails */
278  ClearBufferTag(&bufHdr->tag);
279  buf_state &= ~(BUF_FLAG_MASK | BUF_USAGECOUNT_MASK);
280  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
282  }
283 
284  return BufferDescriptorGetBuffer(bufHdr);
285 }
286 
287 /* see LimitAdditionalPins() */
288 static void
290 {
291  uint32 max_pins;
292 
293  if (*additional_pins <= 1)
294  return;
295 
296  /*
297  * In contrast to LimitAdditionalPins() other backends don't play a role
298  * here. We can allow up to NLocBuffer pins in total.
299  */
300  max_pins = (NLocBuffer - NLocalPinnedBuffers);
301 
302  if (*additional_pins >= max_pins)
303  *additional_pins = max_pins;
304 }
305 
306 /*
307  * Implementation of ExtendBufferedRelBy() and ExtendBufferedRelTo() for
308  * temporary buffers.
309  */
312  ForkNumber fork,
313  uint32 flags,
314  uint32 extend_by,
315  BlockNumber extend_upto,
316  Buffer *buffers,
317  uint32 *extended_by)
318 {
319  BlockNumber first_block;
320  instr_time io_start;
321 
322  /* Initialize local buffers if first request in this session */
323  if (LocalBufHash == NULL)
325 
326  LimitAdditionalLocalPins(&extend_by);
327 
328  for (uint32 i = 0; i < extend_by; i++)
329  {
330  BufferDesc *buf_hdr;
331  Block buf_block;
332 
333  buffers[i] = GetLocalVictimBuffer();
334  buf_hdr = GetLocalBufferDescriptor(-buffers[i] - 1);
335  buf_block = LocalBufHdrGetBlock(buf_hdr);
336 
337  /* new buffers are zero-filled */
338  MemSet((char *) buf_block, 0, BLCKSZ);
339  }
340 
341  first_block = smgrnblocks(bmr.smgr, fork);
342 
343  if (extend_upto != InvalidBlockNumber)
344  {
345  /*
346  * In contrast to shared relations, nothing could change the relation
347  * size concurrently. Thus we shouldn't end up finding that we don't
348  * need to do anything.
349  */
350  Assert(first_block <= extend_upto);
351 
352  Assert((uint64) first_block + extend_by <= extend_upto);
353  }
354 
355  /* Fail if relation is already at maximum possible length */
356  if ((uint64) first_block + extend_by >= MaxBlockNumber)
357  ereport(ERROR,
358  (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
359  errmsg("cannot extend relation %s beyond %u blocks",
360  relpath(bmr.smgr->smgr_rlocator, fork),
361  MaxBlockNumber)));
362 
363  for (uint32 i = 0; i < extend_by; i++)
364  {
365  int victim_buf_id;
366  BufferDesc *victim_buf_hdr;
367  BufferTag tag;
368  LocalBufferLookupEnt *hresult;
369  bool found;
370 
371  victim_buf_id = -buffers[i] - 1;
372  victim_buf_hdr = GetLocalBufferDescriptor(victim_buf_id);
373 
374  InitBufferTag(&tag, &bmr.smgr->smgr_rlocator.locator, fork, first_block + i);
375 
376  hresult = (LocalBufferLookupEnt *)
377  hash_search(LocalBufHash, (void *) &tag, HASH_ENTER, &found);
378  if (found)
379  {
380  BufferDesc *existing_hdr = GetLocalBufferDescriptor(hresult->id);
381  uint32 buf_state;
382 
384 
385  existing_hdr = GetLocalBufferDescriptor(hresult->id);
386  PinLocalBuffer(existing_hdr, false);
387  buffers[i] = BufferDescriptorGetBuffer(existing_hdr);
388 
389  buf_state = pg_atomic_read_u32(&existing_hdr->state);
390  Assert(buf_state & BM_TAG_VALID);
391  Assert(!(buf_state & BM_DIRTY));
392  buf_state &= BM_VALID;
393  pg_atomic_unlocked_write_u32(&existing_hdr->state, buf_state);
394  }
395  else
396  {
397  uint32 buf_state = pg_atomic_read_u32(&victim_buf_hdr->state);
398 
399  Assert(!(buf_state & (BM_VALID | BM_TAG_VALID | BM_DIRTY | BM_JUST_DIRTIED)));
400 
401  victim_buf_hdr->tag = tag;
402 
403  buf_state |= BM_TAG_VALID | BUF_USAGECOUNT_ONE;
404 
405  pg_atomic_unlocked_write_u32(&victim_buf_hdr->state, buf_state);
406 
407  hresult->id = victim_buf_id;
408  }
409  }
410 
411  io_start = pgstat_prepare_io_time();
412 
413  /* actually extend relation */
414  smgrzeroextend(bmr.smgr, fork, first_block, extend_by, false);
415 
417  io_start, extend_by);
418 
419  for (uint32 i = 0; i < extend_by; i++)
420  {
421  Buffer buf = buffers[i];
422  BufferDesc *buf_hdr;
423  uint32 buf_state;
424 
425  buf_hdr = GetLocalBufferDescriptor(-buf - 1);
426 
427  buf_state = pg_atomic_read_u32(&buf_hdr->state);
428  buf_state |= BM_VALID;
429  pg_atomic_unlocked_write_u32(&buf_hdr->state, buf_state);
430  }
431 
432  *extended_by = extend_by;
433 
434  pgBufferUsage.local_blks_written += extend_by;
435 
436  return first_block;
437 }
438 
439 /*
440  * MarkLocalBufferDirty -
441  * mark a local buffer dirty
442  */
443 void
445 {
446  int bufid;
447  BufferDesc *bufHdr;
448  uint32 buf_state;
449 
450  Assert(BufferIsLocal(buffer));
451 
452 #ifdef LBDEBUG
453  fprintf(stderr, "LB DIRTY %d\n", buffer);
454 #endif
455 
456  bufid = -buffer - 1;
457 
458  Assert(LocalRefCount[bufid] > 0);
459 
460  bufHdr = GetLocalBufferDescriptor(bufid);
461 
462  buf_state = pg_atomic_read_u32(&bufHdr->state);
463 
464  if (!(buf_state & BM_DIRTY))
466 
467  buf_state |= BM_DIRTY;
468 
469  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
470 }
471 
472 /*
473  * DropRelationLocalBuffers
474  * This function removes from the buffer pool all the pages of the
475  * specified relation that have block numbers >= firstDelBlock.
476  * (In particular, with firstDelBlock = 0, all pages are removed.)
477  * Dirty pages are simply dropped, without bothering to write them
478  * out first. Therefore, this is NOT rollback-able, and so should be
479  * used only with extreme caution!
480  *
481  * See DropRelationBuffers in bufmgr.c for more notes.
482  */
483 void
485  BlockNumber firstDelBlock)
486 {
487  int i;
488 
489  for (i = 0; i < NLocBuffer; i++)
490  {
492  LocalBufferLookupEnt *hresult;
493  uint32 buf_state;
494 
495  buf_state = pg_atomic_read_u32(&bufHdr->state);
496 
497  if ((buf_state & BM_TAG_VALID) &&
498  BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator) &&
499  BufTagGetForkNum(&bufHdr->tag) == forkNum &&
500  bufHdr->tag.blockNum >= firstDelBlock)
501  {
502  if (LocalRefCount[i] != 0)
503  elog(ERROR, "block %u of %s is still referenced (local %u)",
504  bufHdr->tag.blockNum,
506  MyBackendId,
507  BufTagGetForkNum(&bufHdr->tag)),
508  LocalRefCount[i]);
509 
510  /* Remove entry from hashtable */
511  hresult = (LocalBufferLookupEnt *)
512  hash_search(LocalBufHash, &bufHdr->tag, HASH_REMOVE, NULL);
513  if (!hresult) /* shouldn't happen */
514  elog(ERROR, "local buffer hash table corrupted");
515  /* Mark buffer invalid */
516  ClearBufferTag(&bufHdr->tag);
517  buf_state &= ~BUF_FLAG_MASK;
518  buf_state &= ~BUF_USAGECOUNT_MASK;
519  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
520  }
521  }
522 }
523 
524 /*
525  * DropRelationAllLocalBuffers
526  * This function removes from the buffer pool all pages of all forks
527  * of the specified relation.
528  *
529  * See DropRelationsAllBuffers in bufmgr.c for more notes.
530  */
531 void
533 {
534  int i;
535 
536  for (i = 0; i < NLocBuffer; i++)
537  {
539  LocalBufferLookupEnt *hresult;
540  uint32 buf_state;
541 
542  buf_state = pg_atomic_read_u32(&bufHdr->state);
543 
544  if ((buf_state & BM_TAG_VALID) &&
545  BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator))
546  {
547  if (LocalRefCount[i] != 0)
548  elog(ERROR, "block %u of %s is still referenced (local %u)",
549  bufHdr->tag.blockNum,
551  MyBackendId,
552  BufTagGetForkNum(&bufHdr->tag)),
553  LocalRefCount[i]);
554  /* Remove entry from hashtable */
555  hresult = (LocalBufferLookupEnt *)
556  hash_search(LocalBufHash, &bufHdr->tag, HASH_REMOVE, NULL);
557  if (!hresult) /* shouldn't happen */
558  elog(ERROR, "local buffer hash table corrupted");
559  /* Mark buffer invalid */
560  ClearBufferTag(&bufHdr->tag);
561  buf_state &= ~BUF_FLAG_MASK;
562  buf_state &= ~BUF_USAGECOUNT_MASK;
563  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
564  }
565  }
566 }
567 
568 /*
569  * InitLocalBuffers -
570  * init the local buffer cache. Since most queries (esp. multi-user ones)
571  * don't involve local buffers, we delay allocating actual memory for the
572  * buffers until we need them; just make the buffer headers here.
573  */
574 static void
576 {
577  int nbufs = num_temp_buffers;
578  HASHCTL info;
579  int i;
580 
581  /*
582  * Parallel workers can't access data in temporary tables, because they
583  * have no visibility into the local buffers of their leader. This is a
584  * convenient, low-cost place to provide a backstop check for that. Note
585  * that we don't wish to prevent a parallel worker from accessing catalog
586  * metadata about a temp table, so checks at higher levels would be
587  * inappropriate.
588  */
589  if (IsParallelWorker())
590  ereport(ERROR,
591  (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
592  errmsg("cannot access temporary tables during a parallel operation")));
593 
594  /* Allocate and zero buffer headers and auxiliary arrays */
595  LocalBufferDescriptors = (BufferDesc *) calloc(nbufs, sizeof(BufferDesc));
596  LocalBufferBlockPointers = (Block *) calloc(nbufs, sizeof(Block));
597  LocalRefCount = (int32 *) calloc(nbufs, sizeof(int32));
599  ereport(FATAL,
600  (errcode(ERRCODE_OUT_OF_MEMORY),
601  errmsg("out of memory")));
602 
603  nextFreeLocalBufId = 0;
604 
605  /* initialize fields that need to start off nonzero */
606  for (i = 0; i < nbufs; i++)
607  {
609 
610  /*
611  * negative to indicate local buffer. This is tricky: shared buffers
612  * start with 0. We have to start with -2. (Note that the routine
613  * BufferDescriptorGetBuffer adds 1 to buf_id so our first buffer id
614  * is -1.)
615  */
616  buf->buf_id = -i - 2;
617 
618  /*
619  * Intentionally do not initialize the buffer's atomic variable
620  * (besides zeroing the underlying memory above). That way we get
621  * errors on platforms without atomics, if somebody (re-)introduces
622  * atomic operations for local buffers.
623  */
624  }
625 
626  /* Create the lookup hash table */
627  info.keysize = sizeof(BufferTag);
628  info.entrysize = sizeof(LocalBufferLookupEnt);
629 
630  LocalBufHash = hash_create("Local Buffer Lookup Table",
631  nbufs,
632  &info,
634 
635  if (!LocalBufHash)
636  elog(ERROR, "could not initialize local buffer hash table");
637 
638  /* Initialization done, mark buffers allocated */
639  NLocBuffer = nbufs;
640 }
641 
642 /*
643  * XXX: We could have a slightly more efficient version of PinLocalBuffer()
644  * that does not support adjusting the usagecount - but so far it does not
645  * seem worth the trouble.
646  */
647 bool
648 PinLocalBuffer(BufferDesc *buf_hdr, bool adjust_usagecount)
649 {
650  uint32 buf_state;
651  Buffer buffer = BufferDescriptorGetBuffer(buf_hdr);
652  int bufid = -buffer - 1;
653 
654  buf_state = pg_atomic_read_u32(&buf_hdr->state);
655 
656  if (LocalRefCount[bufid] == 0)
657  {
659  if (adjust_usagecount &&
661  {
662  buf_state += BUF_USAGECOUNT_ONE;
663  pg_atomic_unlocked_write_u32(&buf_hdr->state, buf_state);
664  }
665  }
666  LocalRefCount[bufid]++;
668  BufferDescriptorGetBuffer(buf_hdr));
669 
670  return buf_state & BM_VALID;
671 }
672 
673 void
675 {
676  int buffid = -buffer - 1;
677 
678  Assert(BufferIsLocal(buffer));
679  Assert(LocalRefCount[buffid] > 0);
681 
683  if (--LocalRefCount[buffid] == 0)
685 }
686 
687 /*
688  * GUC check_hook for temp_buffers
689  */
690 bool
692 {
693  /*
694  * Once local buffers have been initialized, it's too late to change this.
695  * However, if this is only a test call, allow it.
696  */
697  if (source != PGC_S_TEST && NLocBuffer && NLocBuffer != *newval)
698  {
699  GUC_check_errdetail("\"temp_buffers\" cannot be changed after any temporary tables have been accessed in the session.");
700  return false;
701  }
702  return true;
703 }
704 
705 /*
706  * GetLocalBufferStorage - allocate memory for a local buffer
707  *
708  * The idea of this function is to aggregate our requests for storage
709  * so that the memory manager doesn't see a whole lot of relatively small
710  * requests. Since we'll never give back a local buffer once it's created
711  * within a particular process, no point in burdening memmgr with separately
712  * managed chunks.
713  */
714 static Block
716 {
717  static char *cur_block = NULL;
718  static int next_buf_in_block = 0;
719  static int num_bufs_in_block = 0;
720  static int total_bufs_allocated = 0;
721  static MemoryContext LocalBufferContext = NULL;
722 
723  char *this_buf;
724 
725  Assert(total_bufs_allocated < NLocBuffer);
726 
727  if (next_buf_in_block >= num_bufs_in_block)
728  {
729  /* Need to make a new request to memmgr */
730  int num_bufs;
731 
732  /*
733  * We allocate local buffers in a context of their own, so that the
734  * space eaten for them is easily recognizable in MemoryContextStats
735  * output. Create the context on first use.
736  */
737  if (LocalBufferContext == NULL)
738  LocalBufferContext =
740  "LocalBufferContext",
742 
743  /* Start with a 16-buffer request; subsequent ones double each time */
744  num_bufs = Max(num_bufs_in_block * 2, 16);
745  /* But not more than what we need for all remaining local bufs */
746  num_bufs = Min(num_bufs, NLocBuffer - total_bufs_allocated);
747  /* And don't overflow MaxAllocSize, either */
748  num_bufs = Min(num_bufs, MaxAllocSize / BLCKSZ);
749 
750  /* Buffers should be I/O aligned. */
751  cur_block = (char *)
753  MemoryContextAlloc(LocalBufferContext,
754  num_bufs * BLCKSZ + PG_IO_ALIGN_SIZE));
755  next_buf_in_block = 0;
756  num_bufs_in_block = num_bufs;
757  }
758 
759  /* Allocate next buffer in current memory block */
760  this_buf = cur_block + next_buf_in_block * BLCKSZ;
761  next_buf_in_block++;
762  total_bufs_allocated++;
763 
764  return (Block) this_buf;
765 }
766 
/*
 * CheckForLocalBufferLeaks - ensure this backend holds no local buffer pins
 *
 * This is just like CheckForBufferLeaks(), but for local buffers.
 * Compiled out entirely unless assertions are enabled.
 */
static void
CheckForLocalBufferLeaks(void)
{
#ifdef USE_ASSERT_CHECKING
	if (LocalRefCount)
	{
		int			RefCountErrors = 0;
		int			i;

		for (i = 0; i < NLocBuffer; i++)
		{
			if (LocalRefCount[i] != 0)
			{
				Buffer		b = -i - 1;

				PrintBufferLeakWarning(b);
				RefCountErrors++;
			}
		}
		Assert(RefCountErrors == 0);
	}
#endif
}
795 
796 /*
797  * AtEOXact_LocalBuffers - clean up at end of transaction.
798  *
799  * This is just like AtEOXact_Buffers, but for local buffers.
800  */
801 void
802 AtEOXact_LocalBuffers(bool isCommit)
803 {
805 }
806 
/*
 * AtProcExit_LocalBuffers - ensure we have dropped pins during backend exit.
 *
 * This is just like AtProcExit_Buffers, but for local buffers.
 */
void
AtProcExit_LocalBuffers(void)
{
	/*
	 * We shouldn't be holding any remaining pins; if we are, and assertions
	 * aren't enabled, we'll fail later in DropRelationBuffers while trying to
	 * drop the temp rels.
	 */
	CheckForLocalBufferLeaks();
}
static void pg_atomic_unlocked_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:272
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:236
uint32 BlockNumber
Definition: block.h:31
#define InvalidBlockNumber
Definition: block.h:33
#define MaxBlockNumber
Definition: block.h:35
int Buffer
Definition: buf.h:23
#define InvalidBuffer
Definition: buf.h:25
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BM_MAX_USAGE_COUNT
Definition: buf_internals.h:77
static void InitBufferTag(BufferTag *tag, const RelFileLocator *rlocator, ForkNumber forkNum, BlockNumber blockNum)
#define BM_TAG_VALID
Definition: buf_internals.h:62
#define BUF_USAGECOUNT_MASK
Definition: buf_internals.h:44
static ForkNumber BufTagGetForkNum(const BufferTag *tag)
static BufferDesc * GetLocalBufferDescriptor(uint32 id)
static bool BufferTagsEqual(const BufferTag *tag1, const BufferTag *tag2)
static bool BufTagMatchesRelFileLocator(const BufferTag *tag, const RelFileLocator *rlocator)
#define BUF_FLAG_MASK
Definition: buf_internals.h:47
#define BM_DIRTY
Definition: buf_internals.h:60
#define BM_JUST_DIRTIED
Definition: buf_internals.h:65
#define BUF_STATE_GET_USAGECOUNT(state)
Definition: buf_internals.h:51
static void ClearBufferTag(BufferTag *tag)
struct buftag BufferTag
#define BUF_USAGECOUNT_ONE
Definition: buf_internals.h:45
static RelFileLocator BufTagGetRelFileLocator(const BufferTag *tag)
#define BM_VALID
Definition: buf_internals.h:61
static Buffer BufferDescriptorGetBuffer(const BufferDesc *bdesc)
void PrintBufferLeakWarning(Buffer buffer)
Definition: bufmgr.c:3232
void * Block
Definition: bufmgr.h:24
void PageSetChecksumInplace(Page page, BlockNumber blkno)
Definition: bufpage.c:1542
Pointer Page
Definition: bufpage.h:78
unsigned int uint32
Definition: c.h:495
#define Min(x, y)
Definition: c.h:993
#define TYPEALIGN(ALIGNVAL, LEN)
Definition: c.h:793
signed int int32
Definition: c.h:483
#define Max(x, y)
Definition: c.h:987
#define MemSet(start, val, len)
Definition: c.h:1009
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:953
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:350
int errcode(int sqlerrcode)
Definition: elog.c:858
int errmsg(const char *fmt,...)
Definition: elog.c:1069
#define FATAL
Definition: elog.h:41
#define ERROR
Definition: elog.h:39
#define ereport(elevel,...)
Definition: elog.h:149
int io_direct_flags
Definition: fd.c:168
#define IO_DIRECT_DATA
Definition: fd.h:52
BackendId MyBackendId
Definition: globals.c:85
#define newval
#define GUC_check_errdetail
Definition: guc.h:436
GucSource
Definition: guc.h:108
@ PGC_S_TEST
Definition: guc.h:121
int num_temp_buffers
Definition: guc_tables.c:533
#define calloc(a, b)
Definition: header.h:55
@ HASH_FIND
Definition: hsearch.h:113
@ HASH_REMOVE
Definition: hsearch.h:115
@ HASH_ENTER
Definition: hsearch.h:114
#define HASH_ELEM
Definition: hsearch.h:95
#define HASH_BLOBS
Definition: hsearch.h:97
#define IsParallelWorker()
Definition: parallel.h:61
BufferUsage pgBufferUsage
Definition: instrument.c:20
int b
Definition: isn.c:70
int i
Definition: isn.c:73
Assert(fmt[strlen(fmt) - 1] !='\n')
int32 * LocalRefCount
Definition: localbuf.c:46
void UnpinLocalBuffer(Buffer buffer)
Definition: localbuf.c:674
BufferDesc * LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum, bool *foundPtr)
Definition: localbuf.c:117
static HTAB * LocalBufHash
Definition: localbuf.c:50
static int NLocalPinnedBuffers
Definition: localbuf.c:53
void AtEOXact_LocalBuffers(bool isCommit)
Definition: localbuf.c:802
#define LocalBufHdrGetBlock(bufHdr)
Definition: localbuf.c:39
static void CheckForLocalBufferLeaks(void)
Definition: localbuf.c:773
void DropRelationLocalBuffers(RelFileLocator rlocator, ForkNumber forkNum, BlockNumber firstDelBlock)
Definition: localbuf.c:484
static Block GetLocalBufferStorage(void)
Definition: localbuf.c:715
static void LimitAdditionalLocalPins(uint32 *additional_pins)
Definition: localbuf.c:289
static int nextFreeLocalBufId
Definition: localbuf.c:48
bool check_temp_buffers(int *newval, void **extra, GucSource source)
Definition: localbuf.c:691
void AtProcExit_LocalBuffers(void)
Definition: localbuf.c:813
bool PinLocalBuffer(BufferDesc *buf_hdr, bool adjust_usagecount)
Definition: localbuf.c:648
static void InitLocalBuffers(void)
Definition: localbuf.c:575
static Buffer GetLocalVictimBuffer(void)
Definition: localbuf.c:176
void MarkLocalBufferDirty(Buffer buffer)
Definition: localbuf.c:444
void DropRelationAllLocalBuffers(RelFileLocator rlocator)
Definition: localbuf.c:532
int NLocBuffer
Definition: localbuf.c:42
PrefetchBufferResult PrefetchLocalBuffer(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum)
Definition: localbuf.c:69
BlockNumber ExtendBufferedRelLocal(BufferManagerRelation bmr, ForkNumber fork, uint32 flags, uint32 extend_by, BlockNumber extend_upto, Buffer *buffers, uint32 *extended_by)
Definition: localbuf.c:311
Block * LocalBufferBlockPointers
Definition: localbuf.c:45
BufferDesc * LocalBufferDescriptors
Definition: localbuf.c:44
MemoryContext TopMemoryContext
Definition: mcxt.c:141
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:1021
#define AllocSetContextCreate
Definition: memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:153
#define MaxAllocSize
Definition: memutils.h:40
#define PG_IO_ALIGN_SIZE
static rewind_source * source
Definition: pg_rewind.c:89
static char * buf
Definition: pg_test_fsync.c:67
@ IOOBJECT_TEMP_RELATION
Definition: pgstat.h:279
@ IOCONTEXT_NORMAL
Definition: pgstat.h:288
@ IOOP_EXTEND
Definition: pgstat.h:297
@ IOOP_EVICT
Definition: pgstat.h:296
@ IOOP_WRITE
Definition: pgstat.h:302
void pgstat_count_io_op_time(IOObject io_object, IOContext io_context, IOOp io_op, instr_time start_time, uint32 cnt)
Definition: pgstat_io.c:112
instr_time pgstat_prepare_io_time(void)
Definition: pgstat_io.c:96
void pgstat_count_io_op(IOObject io_object, IOContext io_context, IOOp io_op)
Definition: pgstat_io.c:77
#define fprintf
Definition: port.h:242
ForkNumber
Definition: relpath.h:48
#define relpath(rlocator, forknum)
Definition: relpath.h:94
#define relpathbackend(rlocator, backend, forknum)
Definition: relpath.h:85
void ResourceOwnerForgetBuffer(ResourceOwner owner, Buffer buffer)
Definition: resowner.c:994
ResourceOwner CurrentResourceOwner
Definition: resowner.c:147
void ResourceOwnerEnlargeBuffers(ResourceOwner owner)
Definition: resowner.c:972
void ResourceOwnerRememberBuffer(ResourceOwner owner, Buffer buffer)
Definition: resowner.c:985
BlockNumber smgrnblocks(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:609
bool smgrprefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum)
Definition: smgr.c:548
void smgrwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, const void *buffer, bool skipFsync)
Definition: smgr.c:584
SMgrRelation smgropen(RelFileLocator rlocator, BackendId backend)
Definition: smgr.c:150
void smgrzeroextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, int nblocks, bool skipFsync)
Definition: smgr.c:523
BufferTag tag
pg_atomic_uint32 state
struct SMgrRelationData * smgr
Definition: bufmgr.h:102
int64 local_blks_written
Definition: instrument.h:33
int64 local_blks_dirtied
Definition: instrument.h:32
Size keysize
Definition: hsearch.h:75
Size entrysize
Definition: hsearch.h:76
Definition: dynahash.c:220
Buffer recent_buffer
Definition: bufmgr.h:59
RelFileLocator locator
RelFileLocatorBackend smgr_rlocator
Definition: smgr.h:42
BlockNumber blockNum
Definition: buf_internals.h:97