PostgreSQL Source Code  git master
localbuf.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * localbuf.c
4  * local buffer manager. Fast buffer manager for temporary tables,
5  * which never need to be WAL-logged or checkpointed, etc.
6  *
7  * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994-5, Regents of the University of California
9  *
10  *
11  * IDENTIFICATION
12  * src/backend/storage/buffer/localbuf.c
13  *
14  *-------------------------------------------------------------------------
15  */
16 #include "postgres.h"
17 
18 #include "access/parallel.h"
19 #include "catalog/catalog.h"
20 #include "executor/instrument.h"
21 #include "pgstat.h"
22 #include "storage/buf_internals.h"
23 #include "storage/bufmgr.h"
24 #include "storage/fd.h"
25 #include "utils/guc_hooks.h"
26 #include "utils/memutils.h"
27 #include "utils/resowner.h"
28 
29 
30 /*#define LBDEBUG*/
31 
32 /* entry for buffer lookup hashtable */
33 typedef struct
34 {
35  BufferTag key; /* Tag of a disk page */
36  int id; /* Associated local buffer's index */
38 
/*
 * Note: this macro only works on local buffers, not shared ones!
 *
 * Local buffer descriptors carry negative buf_ids starting at -2 (see
 * InitLocalBuffers), so -(buf_id + 2) maps the descriptor back to its slot
 * in LocalBufferBlockPointers.
 */
#define LocalBufHdrGetBlock(bufHdr) \
	LocalBufferBlockPointers[-((bufHdr)->buf_id + 2)]
42 
43 int NLocBuffer = 0; /* until buffers are initialized */
44 
48 
49 static int nextFreeLocalBufId = 0;
50 
51 static HTAB *LocalBufHash = NULL;
52 
53 /* number of local buffers pinned at least once */
54 static int NLocalPinnedBuffers = 0;
55 
56 
57 static void InitLocalBuffers(void);
58 static Block GetLocalBufferStorage(void);
59 static Buffer GetLocalVictimBuffer(void);
60 
61 
62 /*
63  * PrefetchLocalBuffer -
64  * initiate asynchronous read of a block of a relation
65  *
66  * Do PrefetchBuffer's work for temporary relations.
67  * No-op if prefetching isn't compiled in.
68  */
71  BlockNumber blockNum)
72 {
73  PrefetchBufferResult result = {InvalidBuffer, false};
74  BufferTag newTag; /* identity of requested block */
75  LocalBufferLookupEnt *hresult;
76 
77  InitBufferTag(&newTag, &smgr->smgr_rlocator.locator, forkNum, blockNum);
78 
79  /* Initialize local buffers if first request in this session */
80  if (LocalBufHash == NULL)
82 
83  /* See if the desired buffer already exists */
84  hresult = (LocalBufferLookupEnt *)
85  hash_search(LocalBufHash, &newTag, HASH_FIND, NULL);
86 
87  if (hresult)
88  {
89  /* Yes, so nothing to do */
90  result.recent_buffer = -hresult->id - 1;
91  }
92  else
93  {
94 #ifdef USE_PREFETCH
95  /* Not in buffers, so initiate prefetch */
96  if ((io_direct_flags & IO_DIRECT_DATA) == 0 &&
97  smgrprefetch(smgr, forkNum, blockNum, 1))
98  {
99  result.initiated_io = true;
100  }
101 #endif /* USE_PREFETCH */
102  }
103 
104  return result;
105 }
106 
107 
108 /*
109  * LocalBufferAlloc -
110  * Find or create a local buffer for the given page of the given relation.
111  *
112  * API is similar to bufmgr.c's BufferAlloc, except that we do not need
113  * to do any locking since this is all local. Also, IO_IN_PROGRESS
114  * does not get set. Lastly, we support only default access strategy
115  * (hence, usage_count is always advanced).
116  */
117 BufferDesc *
119  bool *foundPtr)
120 {
121  BufferTag newTag; /* identity of requested block */
122  LocalBufferLookupEnt *hresult;
123  BufferDesc *bufHdr;
124  Buffer victim_buffer;
125  int bufid;
126  bool found;
127 
128  InitBufferTag(&newTag, &smgr->smgr_rlocator.locator, forkNum, blockNum);
129 
130  /* Initialize local buffers if first request in this session */
131  if (LocalBufHash == NULL)
133 
135 
136  /* See if the desired buffer already exists */
137  hresult = (LocalBufferLookupEnt *)
138  hash_search(LocalBufHash, &newTag, HASH_FIND, NULL);
139 
140  if (hresult)
141  {
142  bufid = hresult->id;
143  bufHdr = GetLocalBufferDescriptor(bufid);
144  Assert(BufferTagsEqual(&bufHdr->tag, &newTag));
145 
146  *foundPtr = PinLocalBuffer(bufHdr, true);
147  }
148  else
149  {
150  uint32 buf_state;
151 
152  victim_buffer = GetLocalVictimBuffer();
153  bufid = -victim_buffer - 1;
154  bufHdr = GetLocalBufferDescriptor(bufid);
155 
156  hresult = (LocalBufferLookupEnt *)
157  hash_search(LocalBufHash, &newTag, HASH_ENTER, &found);
158  if (found) /* shouldn't happen */
159  elog(ERROR, "local buffer hash table corrupted");
160  hresult->id = bufid;
161 
162  /*
163  * it's all ours now.
164  */
165  bufHdr->tag = newTag;
166 
167  buf_state = pg_atomic_read_u32(&bufHdr->state);
168  buf_state &= ~(BUF_FLAG_MASK | BUF_USAGECOUNT_MASK);
169  buf_state |= BM_TAG_VALID | BUF_USAGECOUNT_ONE;
170  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
171 
172  *foundPtr = false;
173  }
174 
175  return bufHdr;
176 }
177 
178 static Buffer
180 {
181  int victim_bufid;
182  int trycounter;
183  uint32 buf_state;
184  BufferDesc *bufHdr;
185 
187 
188  /*
189  * Need to get a new buffer. We use a clock sweep algorithm (essentially
190  * the same as what freelist.c does now...)
191  */
192  trycounter = NLocBuffer;
193  for (;;)
194  {
195  victim_bufid = nextFreeLocalBufId;
196 
198  nextFreeLocalBufId = 0;
199 
200  bufHdr = GetLocalBufferDescriptor(victim_bufid);
201 
202  if (LocalRefCount[victim_bufid] == 0)
203  {
204  buf_state = pg_atomic_read_u32(&bufHdr->state);
205 
206  if (BUF_STATE_GET_USAGECOUNT(buf_state) > 0)
207  {
208  buf_state -= BUF_USAGECOUNT_ONE;
209  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
210  trycounter = NLocBuffer;
211  }
212  else
213  {
214  /* Found a usable buffer */
215  PinLocalBuffer(bufHdr, false);
216  break;
217  }
218  }
219  else if (--trycounter == 0)
220  ereport(ERROR,
221  (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
222  errmsg("no empty local buffer available")));
223  }
224 
225  /*
226  * lazy memory allocation: allocate space on first use of a buffer.
227  */
228  if (LocalBufHdrGetBlock(bufHdr) == NULL)
229  {
230  /* Set pointer for use by BufferGetBlock() macro */
232  }
233 
234  /*
235  * this buffer is not referenced but it might still be dirty. if that's
236  * the case, write it out before reusing it!
237  */
238  if (buf_state & BM_DIRTY)
239  {
240  instr_time io_start;
241  SMgrRelation oreln;
242  Page localpage = (char *) LocalBufHdrGetBlock(bufHdr);
243 
244  /* Find smgr relation for buffer */
245  oreln = smgropen(BufTagGetRelFileLocator(&bufHdr->tag), MyBackendId);
246 
247  PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
248 
250 
251  /* And write... */
252  smgrwrite(oreln,
253  BufTagGetForkNum(&bufHdr->tag),
254  bufHdr->tag.blockNum,
255  localpage,
256  false);
257 
258  /* Temporary table I/O does not use Buffer Access Strategies */
260  IOOP_WRITE, io_start, 1);
261 
262  /* Mark not-dirty now in case we error out below */
263  buf_state &= ~BM_DIRTY;
264  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
265 
267  }
268 
269  /*
270  * Remove the victim buffer from the hashtable and mark as invalid.
271  */
272  if (buf_state & BM_TAG_VALID)
273  {
274  LocalBufferLookupEnt *hresult;
275 
276  hresult = (LocalBufferLookupEnt *)
277  hash_search(LocalBufHash, &bufHdr->tag, HASH_REMOVE, NULL);
278  if (!hresult) /* shouldn't happen */
279  elog(ERROR, "local buffer hash table corrupted");
280  /* mark buffer invalid just in case hash insert fails */
281  ClearBufferTag(&bufHdr->tag);
282  buf_state &= ~(BUF_FLAG_MASK | BUF_USAGECOUNT_MASK);
283  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
285  }
286 
287  return BufferDescriptorGetBuffer(bufHdr);
288 }
289 
290 /* see LimitAdditionalPins() */
291 static void
293 {
294  uint32 max_pins;
295 
296  if (*additional_pins <= 1)
297  return;
298 
299  /*
300  * In contrast to LimitAdditionalPins() other backends don't play a role
301  * here. We can allow up to NLocBuffer pins in total.
302  */
303  max_pins = (NLocBuffer - NLocalPinnedBuffers);
304 
305  if (*additional_pins >= max_pins)
306  *additional_pins = max_pins;
307 }
308 
309 /*
310  * Implementation of ExtendBufferedRelBy() and ExtendBufferedRelTo() for
311  * temporary buffers.
312  */
315  ForkNumber fork,
316  uint32 flags,
317  uint32 extend_by,
318  BlockNumber extend_upto,
319  Buffer *buffers,
320  uint32 *extended_by)
321 {
322  BlockNumber first_block;
323  instr_time io_start;
324 
325  /* Initialize local buffers if first request in this session */
326  if (LocalBufHash == NULL)
328 
329  LimitAdditionalLocalPins(&extend_by);
330 
331  for (uint32 i = 0; i < extend_by; i++)
332  {
333  BufferDesc *buf_hdr;
334  Block buf_block;
335 
336  buffers[i] = GetLocalVictimBuffer();
337  buf_hdr = GetLocalBufferDescriptor(-buffers[i] - 1);
338  buf_block = LocalBufHdrGetBlock(buf_hdr);
339 
340  /* new buffers are zero-filled */
341  MemSet((char *) buf_block, 0, BLCKSZ);
342  }
343 
344  first_block = smgrnblocks(bmr.smgr, fork);
345 
346  if (extend_upto != InvalidBlockNumber)
347  {
348  /*
349  * In contrast to shared relations, nothing could change the relation
350  * size concurrently. Thus we shouldn't end up finding that we don't
351  * need to do anything.
352  */
353  Assert(first_block <= extend_upto);
354 
355  Assert((uint64) first_block + extend_by <= extend_upto);
356  }
357 
358  /* Fail if relation is already at maximum possible length */
359  if ((uint64) first_block + extend_by >= MaxBlockNumber)
360  ereport(ERROR,
361  (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
362  errmsg("cannot extend relation %s beyond %u blocks",
363  relpath(bmr.smgr->smgr_rlocator, fork),
364  MaxBlockNumber)));
365 
366  for (uint32 i = 0; i < extend_by; i++)
367  {
368  int victim_buf_id;
369  BufferDesc *victim_buf_hdr;
370  BufferTag tag;
371  LocalBufferLookupEnt *hresult;
372  bool found;
373 
374  victim_buf_id = -buffers[i] - 1;
375  victim_buf_hdr = GetLocalBufferDescriptor(victim_buf_id);
376 
377  /* in case we need to pin an existing buffer below */
379 
380  InitBufferTag(&tag, &bmr.smgr->smgr_rlocator.locator, fork, first_block + i);
381 
382  hresult = (LocalBufferLookupEnt *)
383  hash_search(LocalBufHash, (void *) &tag, HASH_ENTER, &found);
384  if (found)
385  {
386  BufferDesc *existing_hdr;
387  uint32 buf_state;
388 
390 
391  existing_hdr = GetLocalBufferDescriptor(hresult->id);
392  PinLocalBuffer(existing_hdr, false);
393  buffers[i] = BufferDescriptorGetBuffer(existing_hdr);
394 
395  buf_state = pg_atomic_read_u32(&existing_hdr->state);
396  Assert(buf_state & BM_TAG_VALID);
397  Assert(!(buf_state & BM_DIRTY));
398  buf_state &= ~BM_VALID;
399  pg_atomic_unlocked_write_u32(&existing_hdr->state, buf_state);
400  }
401  else
402  {
403  uint32 buf_state = pg_atomic_read_u32(&victim_buf_hdr->state);
404 
405  Assert(!(buf_state & (BM_VALID | BM_TAG_VALID | BM_DIRTY | BM_JUST_DIRTIED)));
406 
407  victim_buf_hdr->tag = tag;
408 
409  buf_state |= BM_TAG_VALID | BUF_USAGECOUNT_ONE;
410 
411  pg_atomic_unlocked_write_u32(&victim_buf_hdr->state, buf_state);
412 
413  hresult->id = victim_buf_id;
414  }
415  }
416 
418 
419  /* actually extend relation */
420  smgrzeroextend(bmr.smgr, fork, first_block, extend_by, false);
421 
423  io_start, extend_by);
424 
425  for (uint32 i = 0; i < extend_by; i++)
426  {
427  Buffer buf = buffers[i];
428  BufferDesc *buf_hdr;
429  uint32 buf_state;
430 
431  buf_hdr = GetLocalBufferDescriptor(-buf - 1);
432 
433  buf_state = pg_atomic_read_u32(&buf_hdr->state);
434  buf_state |= BM_VALID;
435  pg_atomic_unlocked_write_u32(&buf_hdr->state, buf_state);
436  }
437 
438  *extended_by = extend_by;
439 
440  pgBufferUsage.local_blks_written += extend_by;
441 
442  return first_block;
443 }
444 
445 /*
446  * MarkLocalBufferDirty -
447  * mark a local buffer dirty
448  */
449 void
451 {
452  int bufid;
453  BufferDesc *bufHdr;
454  uint32 buf_state;
455 
456  Assert(BufferIsLocal(buffer));
457 
458 #ifdef LBDEBUG
459  fprintf(stderr, "LB DIRTY %d\n", buffer);
460 #endif
461 
462  bufid = -buffer - 1;
463 
464  Assert(LocalRefCount[bufid] > 0);
465 
466  bufHdr = GetLocalBufferDescriptor(bufid);
467 
468  buf_state = pg_atomic_read_u32(&bufHdr->state);
469 
470  if (!(buf_state & BM_DIRTY))
472 
473  buf_state |= BM_DIRTY;
474 
475  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
476 }
477 
478 /*
479  * DropRelationLocalBuffers
480  * This function removes from the buffer pool all the pages of the
481  * specified relation that have block numbers >= firstDelBlock.
482  * (In particular, with firstDelBlock = 0, all pages are removed.)
483  * Dirty pages are simply dropped, without bothering to write them
484  * out first. Therefore, this is NOT rollback-able, and so should be
485  * used only with extreme caution!
486  *
487  * See DropRelationBuffers in bufmgr.c for more notes.
488  */
489 void
491  BlockNumber firstDelBlock)
492 {
493  int i;
494 
495  for (i = 0; i < NLocBuffer; i++)
496  {
498  LocalBufferLookupEnt *hresult;
499  uint32 buf_state;
500 
501  buf_state = pg_atomic_read_u32(&bufHdr->state);
502 
503  if ((buf_state & BM_TAG_VALID) &&
504  BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator) &&
505  BufTagGetForkNum(&bufHdr->tag) == forkNum &&
506  bufHdr->tag.blockNum >= firstDelBlock)
507  {
508  if (LocalRefCount[i] != 0)
509  elog(ERROR, "block %u of %s is still referenced (local %u)",
510  bufHdr->tag.blockNum,
512  MyBackendId,
513  BufTagGetForkNum(&bufHdr->tag)),
514  LocalRefCount[i]);
515 
516  /* Remove entry from hashtable */
517  hresult = (LocalBufferLookupEnt *)
518  hash_search(LocalBufHash, &bufHdr->tag, HASH_REMOVE, NULL);
519  if (!hresult) /* shouldn't happen */
520  elog(ERROR, "local buffer hash table corrupted");
521  /* Mark buffer invalid */
522  ClearBufferTag(&bufHdr->tag);
523  buf_state &= ~BUF_FLAG_MASK;
524  buf_state &= ~BUF_USAGECOUNT_MASK;
525  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
526  }
527  }
528 }
529 
530 /*
531  * DropRelationAllLocalBuffers
532  * This function removes from the buffer pool all pages of all forks
533  * of the specified relation.
534  *
535  * See DropRelationsAllBuffers in bufmgr.c for more notes.
536  */
537 void
539 {
540  int i;
541 
542  for (i = 0; i < NLocBuffer; i++)
543  {
545  LocalBufferLookupEnt *hresult;
546  uint32 buf_state;
547 
548  buf_state = pg_atomic_read_u32(&bufHdr->state);
549 
550  if ((buf_state & BM_TAG_VALID) &&
551  BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator))
552  {
553  if (LocalRefCount[i] != 0)
554  elog(ERROR, "block %u of %s is still referenced (local %u)",
555  bufHdr->tag.blockNum,
557  MyBackendId,
558  BufTagGetForkNum(&bufHdr->tag)),
559  LocalRefCount[i]);
560  /* Remove entry from hashtable */
561  hresult = (LocalBufferLookupEnt *)
562  hash_search(LocalBufHash, &bufHdr->tag, HASH_REMOVE, NULL);
563  if (!hresult) /* shouldn't happen */
564  elog(ERROR, "local buffer hash table corrupted");
565  /* Mark buffer invalid */
566  ClearBufferTag(&bufHdr->tag);
567  buf_state &= ~BUF_FLAG_MASK;
568  buf_state &= ~BUF_USAGECOUNT_MASK;
569  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
570  }
571  }
572 }
573 
574 /*
575  * InitLocalBuffers -
576  * init the local buffer cache. Since most queries (esp. multi-user ones)
577  * don't involve local buffers, we delay allocating actual memory for the
578  * buffers until we need them; just make the buffer headers here.
579  */
580 static void
582 {
583  int nbufs = num_temp_buffers;
584  HASHCTL info;
585  int i;
586 
587  /*
588  * Parallel workers can't access data in temporary tables, because they
589  * have no visibility into the local buffers of their leader. This is a
590  * convenient, low-cost place to provide a backstop check for that. Note
591  * that we don't wish to prevent a parallel worker from accessing catalog
592  * metadata about a temp table, so checks at higher levels would be
593  * inappropriate.
594  */
595  if (IsParallelWorker())
596  ereport(ERROR,
597  (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
598  errmsg("cannot access temporary tables during a parallel operation")));
599 
600  /* Allocate and zero buffer headers and auxiliary arrays */
601  LocalBufferDescriptors = (BufferDesc *) calloc(nbufs, sizeof(BufferDesc));
602  LocalBufferBlockPointers = (Block *) calloc(nbufs, sizeof(Block));
603  LocalRefCount = (int32 *) calloc(nbufs, sizeof(int32));
605  ereport(FATAL,
606  (errcode(ERRCODE_OUT_OF_MEMORY),
607  errmsg("out of memory")));
608 
609  nextFreeLocalBufId = 0;
610 
611  /* initialize fields that need to start off nonzero */
612  for (i = 0; i < nbufs; i++)
613  {
615 
616  /*
617  * negative to indicate local buffer. This is tricky: shared buffers
618  * start with 0. We have to start with -2. (Note that the routine
619  * BufferDescriptorGetBuffer adds 1 to buf_id so our first buffer id
620  * is -1.)
621  */
622  buf->buf_id = -i - 2;
623 
624  /*
625  * Intentionally do not initialize the buffer's atomic variable
626  * (besides zeroing the underlying memory above). That way we get
627  * errors on platforms without atomics, if somebody (re-)introduces
628  * atomic operations for local buffers.
629  */
630  }
631 
632  /* Create the lookup hash table */
633  info.keysize = sizeof(BufferTag);
634  info.entrysize = sizeof(LocalBufferLookupEnt);
635 
636  LocalBufHash = hash_create("Local Buffer Lookup Table",
637  nbufs,
638  &info,
640 
641  if (!LocalBufHash)
642  elog(ERROR, "could not initialize local buffer hash table");
643 
644  /* Initialization done, mark buffers allocated */
645  NLocBuffer = nbufs;
646 }
647 
648 /*
649  * XXX: We could have a slightly more efficient version of PinLocalBuffer()
650  * that does not support adjusting the usagecount - but so far it does not
651  * seem worth the trouble.
652  *
653  * Note that ResourceOwnerEnlarge() must have been done already.
654  */
655 bool
656 PinLocalBuffer(BufferDesc *buf_hdr, bool adjust_usagecount)
657 {
658  uint32 buf_state;
659  Buffer buffer = BufferDescriptorGetBuffer(buf_hdr);
660  int bufid = -buffer - 1;
661 
662  buf_state = pg_atomic_read_u32(&buf_hdr->state);
663 
664  if (LocalRefCount[bufid] == 0)
665  {
667  if (adjust_usagecount &&
669  {
670  buf_state += BUF_USAGECOUNT_ONE;
671  pg_atomic_unlocked_write_u32(&buf_hdr->state, buf_state);
672  }
673  }
674  LocalRefCount[bufid]++;
676  BufferDescriptorGetBuffer(buf_hdr));
677 
678  return buf_state & BM_VALID;
679 }
680 
681 void
683 {
684  UnpinLocalBufferNoOwner(buffer);
686 }
687 
688 void
690 {
691  int buffid = -buffer - 1;
692 
693  Assert(BufferIsLocal(buffer));
694  Assert(LocalRefCount[buffid] > 0);
696 
697  if (--LocalRefCount[buffid] == 0)
699 }
700 
701 /*
702  * GUC check_hook for temp_buffers
703  */
704 bool
706 {
707  /*
708  * Once local buffers have been initialized, it's too late to change this.
709  * However, if this is only a test call, allow it.
710  */
711  if (source != PGC_S_TEST && NLocBuffer && NLocBuffer != *newval)
712  {
713  GUC_check_errdetail("temp_buffers cannot be changed after any temporary tables have been accessed in the session.");
714  return false;
715  }
716  return true;
717 }
718 
719 /*
720  * GetLocalBufferStorage - allocate memory for a local buffer
721  *
722  * The idea of this function is to aggregate our requests for storage
723  * so that the memory manager doesn't see a whole lot of relatively small
724  * requests. Since we'll never give back a local buffer once it's created
725  * within a particular process, no point in burdening memmgr with separately
726  * managed chunks.
727  */
728 static Block
730 {
731  static char *cur_block = NULL;
732  static int next_buf_in_block = 0;
733  static int num_bufs_in_block = 0;
734  static int total_bufs_allocated = 0;
735  static MemoryContext LocalBufferContext = NULL;
736 
737  char *this_buf;
738 
739  Assert(total_bufs_allocated < NLocBuffer);
740 
741  if (next_buf_in_block >= num_bufs_in_block)
742  {
743  /* Need to make a new request to memmgr */
744  int num_bufs;
745 
746  /*
747  * We allocate local buffers in a context of their own, so that the
748  * space eaten for them is easily recognizable in MemoryContextStats
749  * output. Create the context on first use.
750  */
751  if (LocalBufferContext == NULL)
752  LocalBufferContext =
754  "LocalBufferContext",
756 
757  /* Start with a 16-buffer request; subsequent ones double each time */
758  num_bufs = Max(num_bufs_in_block * 2, 16);
759  /* But not more than what we need for all remaining local bufs */
760  num_bufs = Min(num_bufs, NLocBuffer - total_bufs_allocated);
761  /* And don't overflow MaxAllocSize, either */
762  num_bufs = Min(num_bufs, MaxAllocSize / BLCKSZ);
763 
764  /* Buffers should be I/O aligned. */
765  cur_block = (char *)
767  MemoryContextAlloc(LocalBufferContext,
768  num_bufs * BLCKSZ + PG_IO_ALIGN_SIZE));
769  next_buf_in_block = 0;
770  num_bufs_in_block = num_bufs;
771  }
772 
773  /* Allocate next buffer in current memory block */
774  this_buf = cur_block + next_buf_in_block * BLCKSZ;
775  next_buf_in_block++;
776  total_bufs_allocated++;
777 
778  return (Block) this_buf;
779 }
780 
/*
 * CheckForLocalBufferLeaks - ensure this backend holds no local buffer pins
 *
 * This is just like CheckForBufferLeaks(), but for local buffers.
 */
static void
CheckForLocalBufferLeaks(void)
{
#ifdef USE_ASSERT_CHECKING
	if (LocalRefCount)
	{
		int			RefCountErrors = 0;
		int			i;

		for (i = 0; i < NLocBuffer; i++)
		{
			if (LocalRefCount[i] != 0)
			{
				Buffer		b = -i - 1;
				char	   *s;

				s = DebugPrintBufferRefcount(b);
				elog(WARNING, "local buffer refcount leak: %s", s);
				pfree(s);

				RefCountErrors++;
			}
		}
		Assert(RefCountErrors == 0);
	}
#endif
}
813 
814 /*
815  * AtEOXact_LocalBuffers - clean up at end of transaction.
816  *
817  * This is just like AtEOXact_Buffers, but for local buffers.
818  */
819 void
820 AtEOXact_LocalBuffers(bool isCommit)
821 {
823 }
824 
/*
 * AtProcExit_LocalBuffers - ensure we have dropped pins during backend exit.
 *
 * This is just like AtProcExit_Buffers, but for local buffers.
 */
void
AtProcExit_LocalBuffers(void)
{
	/*
	 * We shouldn't be holding any remaining pins; if we are, and assertions
	 * aren't enabled, we'll fail later in DropRelationBuffers while trying to
	 * drop the temp rels.
	 */
	CheckForLocalBufferLeaks();
}
static void pg_atomic_unlocked_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:272
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:236
uint32 BlockNumber
Definition: block.h:31
#define InvalidBlockNumber
Definition: block.h:33
#define MaxBlockNumber
Definition: block.h:35
int Buffer
Definition: buf.h:23
#define InvalidBuffer
Definition: buf.h:25
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BM_MAX_USAGE_COUNT
Definition: buf_internals.h:78
static void InitBufferTag(BufferTag *tag, const RelFileLocator *rlocator, ForkNumber forkNum, BlockNumber blockNum)
#define BM_TAG_VALID
Definition: buf_internals.h:63
#define BUF_USAGECOUNT_MASK
Definition: buf_internals.h:45
static ForkNumber BufTagGetForkNum(const BufferTag *tag)
static BufferDesc * GetLocalBufferDescriptor(uint32 id)
static bool BufferTagsEqual(const BufferTag *tag1, const BufferTag *tag2)
static bool BufTagMatchesRelFileLocator(const BufferTag *tag, const RelFileLocator *rlocator)
#define BUF_FLAG_MASK
Definition: buf_internals.h:48
#define BM_DIRTY
Definition: buf_internals.h:61
#define BM_JUST_DIRTIED
Definition: buf_internals.h:66
#define BUF_STATE_GET_USAGECOUNT(state)
Definition: buf_internals.h:52
static void ClearBufferTag(BufferTag *tag)
struct buftag BufferTag
#define BUF_USAGECOUNT_ONE
Definition: buf_internals.h:46
static RelFileLocator BufTagGetRelFileLocator(const BufferTag *tag)
#define BM_VALID
Definition: buf_internals.h:62
static Buffer BufferDescriptorGetBuffer(const BufferDesc *bdesc)
bool track_io_timing
Definition: bufmgr.c:139
char * DebugPrintBufferRefcount(Buffer buffer)
Definition: bufmgr.c:3319
void * Block
Definition: bufmgr.h:24
void PageSetChecksumInplace(Page page, BlockNumber blkno)
Definition: bufpage.c:1542
Pointer Page
Definition: bufpage.h:78
unsigned int uint32
Definition: c.h:495
#define Min(x, y)
Definition: c.h:993
#define TYPEALIGN(ALIGNVAL, LEN)
Definition: c.h:793
signed int int32
Definition: c.h:483
#define Max(x, y)
Definition: c.h:987
#define MemSet(start, val, len)
Definition: c.h:1009
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:953
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:350
int errcode(int sqlerrcode)
Definition: elog.c:860
int errmsg(const char *fmt,...)
Definition: elog.c:1075
#define FATAL
Definition: elog.h:41
#define WARNING
Definition: elog.h:36
#define ERROR
Definition: elog.h:39
#define ereport(elevel,...)
Definition: elog.h:149
int io_direct_flags
Definition: fd.c:168
#define IO_DIRECT_DATA
Definition: fd.h:54
BackendId MyBackendId
Definition: globals.c:86
#define newval
#define GUC_check_errdetail
Definition: guc.h:446
GucSource
Definition: guc.h:108
@ PGC_S_TEST
Definition: guc.h:121
int num_temp_buffers
Definition: guc_tables.c:535
#define calloc(a, b)
Definition: header.h:55
@ HASH_FIND
Definition: hsearch.h:113
@ HASH_REMOVE
Definition: hsearch.h:115
@ HASH_ENTER
Definition: hsearch.h:114
#define HASH_ELEM
Definition: hsearch.h:95
#define HASH_BLOBS
Definition: hsearch.h:97
#define IsParallelWorker()
Definition: parallel.h:61
BufferUsage pgBufferUsage
Definition: instrument.c:20
int b
Definition: isn.c:70
int i
Definition: isn.c:73
Assert(fmt[strlen(fmt) - 1] !='\n')
int32 * LocalRefCount
Definition: localbuf.c:47
void UnpinLocalBuffer(Buffer buffer)
Definition: localbuf.c:682
BufferDesc * LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum, bool *foundPtr)
Definition: localbuf.c:118
static HTAB * LocalBufHash
Definition: localbuf.c:51
static int NLocalPinnedBuffers
Definition: localbuf.c:54
void AtEOXact_LocalBuffers(bool isCommit)
Definition: localbuf.c:820
#define LocalBufHdrGetBlock(bufHdr)
Definition: localbuf.c:40
static void CheckForLocalBufferLeaks(void)
Definition: localbuf.c:787
void DropRelationLocalBuffers(RelFileLocator rlocator, ForkNumber forkNum, BlockNumber firstDelBlock)
Definition: localbuf.c:490
static Block GetLocalBufferStorage(void)
Definition: localbuf.c:729
static void LimitAdditionalLocalPins(uint32 *additional_pins)
Definition: localbuf.c:292
static int nextFreeLocalBufId
Definition: localbuf.c:49
bool check_temp_buffers(int *newval, void **extra, GucSource source)
Definition: localbuf.c:705
void AtProcExit_LocalBuffers(void)
Definition: localbuf.c:831
bool PinLocalBuffer(BufferDesc *buf_hdr, bool adjust_usagecount)
Definition: localbuf.c:656
static void InitLocalBuffers(void)
Definition: localbuf.c:581
static Buffer GetLocalVictimBuffer(void)
Definition: localbuf.c:179
void MarkLocalBufferDirty(Buffer buffer)
Definition: localbuf.c:450
void DropRelationAllLocalBuffers(RelFileLocator rlocator)
Definition: localbuf.c:538
int NLocBuffer
Definition: localbuf.c:43
PrefetchBufferResult PrefetchLocalBuffer(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum)
Definition: localbuf.c:70
BlockNumber ExtendBufferedRelLocal(BufferManagerRelation bmr, ForkNumber fork, uint32 flags, uint32 extend_by, BlockNumber extend_upto, Buffer *buffers, uint32 *extended_by)
Definition: localbuf.c:314
Block * LocalBufferBlockPointers
Definition: localbuf.c:46
void UnpinLocalBufferNoOwner(Buffer buffer)
Definition: localbuf.c:689
BufferDesc * LocalBufferDescriptors
Definition: localbuf.c:45
void pfree(void *pointer)
Definition: mcxt.c:1431
MemoryContext TopMemoryContext
Definition: mcxt.c:141
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:1034
#define AllocSetContextCreate
Definition: memutils.h:128
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:152
#define MaxAllocSize
Definition: memutils.h:40
#define PG_IO_ALIGN_SIZE
static rewind_source * source
Definition: pg_rewind.c:89
static char * buf
Definition: pg_test_fsync.c:73
@ IOOBJECT_TEMP_RELATION
Definition: pgstat.h:281
@ IOCONTEXT_NORMAL
Definition: pgstat.h:290
@ IOOP_EXTEND
Definition: pgstat.h:299
@ IOOP_EVICT
Definition: pgstat.h:298
@ IOOP_WRITE
Definition: pgstat.h:304
instr_time pgstat_prepare_io_time(bool track_io_guc)
Definition: pgstat_io.c:100
void pgstat_count_io_op_time(IOObject io_object, IOContext io_context, IOOp io_op, instr_time start_time, uint32 cnt)
Definition: pgstat_io.c:122
void pgstat_count_io_op(IOObject io_object, IOContext io_context, IOOp io_op)
Definition: pgstat_io.c:77
#define fprintf
Definition: port.h:242
ForkNumber
Definition: relpath.h:48
#define relpath(rlocator, forknum)
Definition: relpath.h:94
#define relpathbackend(rlocator, backend, forknum)
Definition: relpath.h:85
ResourceOwner CurrentResourceOwner
Definition: resowner.c:165
void ResourceOwnerEnlarge(ResourceOwner owner)
Definition: resowner.c:442
void ResourceOwnerForgetBuffer(ResourceOwner owner, Buffer buffer)
void ResourceOwnerRememberBuffer(ResourceOwner owner, Buffer buffer)
BlockNumber smgrnblocks(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:648
SMgrRelation smgropen(RelFileLocator rlocator, BackendId backend)
Definition: smgr.c:197
void smgrzeroextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, int nblocks, bool skipFsync)
Definition: smgr.c:561
bool smgrprefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, int nblocks)
Definition: smgr.c:586
static void smgrwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, const void *buffer, bool skipFsync)
Definition: smgr.h:120
BufferTag tag
pg_atomic_uint32 state
struct SMgrRelationData * smgr
Definition: bufmgr.h:102
int64 local_blks_written
Definition: instrument.h:33
int64 local_blks_dirtied
Definition: instrument.h:32
Size keysize
Definition: hsearch.h:75
Size entrysize
Definition: hsearch.h:76
Definition: dynahash.c:220
Buffer recent_buffer
Definition: bufmgr.h:59
RelFileLocator locator
RelFileLocatorBackend smgr_rlocator
Definition: smgr.h:37
BlockNumber blockNum
Definition: buf_internals.h:98