PostgreSQL Source Code  git master
localbuf.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * localbuf.c
4  * local buffer manager. Fast buffer manager for temporary tables,
5  * which never need to be WAL-logged or checkpointed, etc.
6  *
7  * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994-5, Regents of the University of California
9  *
10  *
11  * IDENTIFICATION
12  * src/backend/storage/buffer/localbuf.c
13  *
14  *-------------------------------------------------------------------------
15  */
16 #include "postgres.h"
17 
18 #include "access/parallel.h"
19 #include "catalog/catalog.h"
20 #include "executor/instrument.h"
21 #include "storage/buf_internals.h"
22 #include "storage/bufmgr.h"
23 #include "utils/guc.h"
24 #include "utils/memutils.h"
25 #include "utils/resowner_private.h"
26 
27 
28 /*#define LBDEBUG*/
29 
30 /* entry for buffer lookup hashtable */
31 typedef struct
32 {
33  BufferTag key; /* Tag of a disk page */
34  int id; /* Associated local buffer's index */
36 
/* Note: this macro only works on local buffers, not shared ones! */
/* Local buffer ids are negative (-2, -3, ...); negate-and-offset maps them */
/* onto the 0-based LocalBufferBlockPointers array. */
#define LocalBufHdrGetBlock(bufHdr) \
	LocalBufferBlockPointers[-((bufHdr)->buf_id + 2)]
40 
41 int NLocBuffer = 0; /* until buffers are initialized */
42 
46 
47 static int nextFreeLocalBuf = 0;
48 
49 static HTAB *LocalBufHash = NULL;
50 
51 
52 static void InitLocalBuffers(void);
53 static Block GetLocalBufferStorage(void);
54 
55 
56 /*
57  * PrefetchLocalBuffer -
58  * initiate asynchronous read of a block of a relation
59  *
60  * Do PrefetchBuffer's work for temporary relations.
61  * No-op if prefetching isn't compiled in.
62  */
65  BlockNumber blockNum)
66 {
67  PrefetchBufferResult result = {InvalidBuffer, false};
68  BufferTag newTag; /* identity of requested block */
69  LocalBufferLookupEnt *hresult;
70 
71  INIT_BUFFERTAG(newTag, smgr->smgr_rnode.node, forkNum, blockNum);
72 
73  /* Initialize local buffers if first request in this session */
74  if (LocalBufHash == NULL)
76 
77  /* See if the desired buffer already exists */
78  hresult = (LocalBufferLookupEnt *)
79  hash_search(LocalBufHash, (void *) &newTag, HASH_FIND, NULL);
80 
81  if (hresult)
82  {
83  /* Yes, so nothing to do */
84  result.recent_buffer = -hresult->id - 1;
85  }
86  else
87  {
88 #ifdef USE_PREFETCH
89  /* Not in buffers, so initiate prefetch */
90  smgrprefetch(smgr, forkNum, blockNum);
91  result.initiated_io = true;
92 #endif /* USE_PREFETCH */
93  }
94 
95  return result;
96 }
97 
98 
99 /*
100  * LocalBufferAlloc -
101  * Find or create a local buffer for the given page of the given relation.
102  *
103  * API is similar to bufmgr.c's BufferAlloc, except that we do not need
104  * to do any locking since this is all local. Also, IO_IN_PROGRESS
105  * does not get set. Lastly, we support only default access strategy
106  * (hence, usage_count is always advanced).
107  */
108 BufferDesc *
110  bool *foundPtr)
111 {
112  BufferTag newTag; /* identity of requested block */
113  LocalBufferLookupEnt *hresult;
114  BufferDesc *bufHdr;
115  int b;
116  int trycounter;
117  bool found;
118  uint32 buf_state;
119 
120  INIT_BUFFERTAG(newTag, smgr->smgr_rnode.node, forkNum, blockNum);
121 
122  /* Initialize local buffers if first request in this session */
123  if (LocalBufHash == NULL)
125 
126  /* See if the desired buffer already exists */
127  hresult = (LocalBufferLookupEnt *)
128  hash_search(LocalBufHash, (void *) &newTag, HASH_FIND, NULL);
129 
130  if (hresult)
131  {
132  b = hresult->id;
133  bufHdr = GetLocalBufferDescriptor(b);
134  Assert(BUFFERTAGS_EQUAL(bufHdr->tag, newTag));
135 #ifdef LBDEBUG
136  fprintf(stderr, "LB ALLOC (%u,%d,%d) %d\n",
137  smgr->smgr_rnode.node.relNode, forkNum, blockNum, -b - 1);
138 #endif
139  buf_state = pg_atomic_read_u32(&bufHdr->state);
140 
141  /* this part is equivalent to PinBuffer for a shared buffer */
142  if (LocalRefCount[b] == 0)
143  {
145  {
146  buf_state += BUF_USAGECOUNT_ONE;
147  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
148  }
149  }
150  LocalRefCount[b]++;
152  BufferDescriptorGetBuffer(bufHdr));
153  if (buf_state & BM_VALID)
154  *foundPtr = true;
155  else
156  {
157  /* Previous read attempt must have failed; try again */
158  *foundPtr = false;
159  }
160  return bufHdr;
161  }
162 
163 #ifdef LBDEBUG
164  fprintf(stderr, "LB ALLOC (%u,%d,%d) %d\n",
165  smgr->smgr_rnode.node.relNode, forkNum, blockNum,
166  -nextFreeLocalBuf - 1);
167 #endif
168 
169  /*
170  * Need to get a new buffer. We use a clock sweep algorithm (essentially
171  * the same as what freelist.c does now...)
172  */
173  trycounter = NLocBuffer;
174  for (;;)
175  {
176  b = nextFreeLocalBuf;
177 
178  if (++nextFreeLocalBuf >= NLocBuffer)
179  nextFreeLocalBuf = 0;
180 
181  bufHdr = GetLocalBufferDescriptor(b);
182 
183  if (LocalRefCount[b] == 0)
184  {
185  buf_state = pg_atomic_read_u32(&bufHdr->state);
186 
187  if (BUF_STATE_GET_USAGECOUNT(buf_state) > 0)
188  {
189  buf_state -= BUF_USAGECOUNT_ONE;
190  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
191  trycounter = NLocBuffer;
192  }
193  else
194  {
195  /* Found a usable buffer */
196  LocalRefCount[b]++;
198  BufferDescriptorGetBuffer(bufHdr));
199  break;
200  }
201  }
202  else if (--trycounter == 0)
203  ereport(ERROR,
204  (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
205  errmsg("no empty local buffer available")));
206  }
207 
208  /*
209  * this buffer is not referenced but it might still be dirty. if that's
210  * the case, write it out before reusing it!
211  */
212  if (buf_state & BM_DIRTY)
213  {
214  SMgrRelation oreln;
215  Page localpage = (char *) LocalBufHdrGetBlock(bufHdr);
216 
217  /* Find smgr relation for buffer */
218  oreln = smgropen(bufHdr->tag.rnode, MyBackendId);
219 
220  PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
221 
222  /* And write... */
223  smgrwrite(oreln,
224  bufHdr->tag.forkNum,
225  bufHdr->tag.blockNum,
226  localpage,
227  false);
228 
229  /* Mark not-dirty now in case we error out below */
230  buf_state &= ~BM_DIRTY;
231  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
232 
234  }
235 
236  /*
237  * lazy memory allocation: allocate space on first use of a buffer.
238  */
239  if (LocalBufHdrGetBlock(bufHdr) == NULL)
240  {
241  /* Set pointer for use by BufferGetBlock() macro */
243  }
244 
245  /*
246  * Update the hash table: remove old entry, if any, and make new one.
247  */
248  if (buf_state & BM_TAG_VALID)
249  {
250  hresult = (LocalBufferLookupEnt *)
251  hash_search(LocalBufHash, (void *) &bufHdr->tag,
252  HASH_REMOVE, NULL);
253  if (!hresult) /* shouldn't happen */
254  elog(ERROR, "local buffer hash table corrupted");
255  /* mark buffer invalid just in case hash insert fails */
256  CLEAR_BUFFERTAG(bufHdr->tag);
257  buf_state &= ~(BM_VALID | BM_TAG_VALID);
258  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
259  }
260 
261  hresult = (LocalBufferLookupEnt *)
262  hash_search(LocalBufHash, (void *) &newTag, HASH_ENTER, &found);
263  if (found) /* shouldn't happen */
264  elog(ERROR, "local buffer hash table corrupted");
265  hresult->id = b;
266 
267  /*
268  * it's all ours now.
269  */
270  bufHdr->tag = newTag;
271  buf_state &= ~(BM_VALID | BM_DIRTY | BM_JUST_DIRTIED | BM_IO_ERROR);
272  buf_state |= BM_TAG_VALID;
273  buf_state &= ~BUF_USAGECOUNT_MASK;
274  buf_state += BUF_USAGECOUNT_ONE;
275  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
276 
277  *foundPtr = false;
278  return bufHdr;
279 }
280 
281 /*
282  * MarkLocalBufferDirty -
283  * mark a local buffer dirty
284  */
285 void
287 {
288  int bufid;
289  BufferDesc *bufHdr;
290  uint32 buf_state;
291 
292  Assert(BufferIsLocal(buffer));
293 
294 #ifdef LBDEBUG
295  fprintf(stderr, "LB DIRTY %d\n", buffer);
296 #endif
297 
298  bufid = -(buffer + 1);
299 
300  Assert(LocalRefCount[bufid] > 0);
301 
302  bufHdr = GetLocalBufferDescriptor(bufid);
303 
304  buf_state = pg_atomic_read_u32(&bufHdr->state);
305 
306  if (!(buf_state & BM_DIRTY))
308 
309  buf_state |= BM_DIRTY;
310 
311  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
312 }
313 
314 /*
315  * DropRelFileNodeLocalBuffers
316  * This function removes from the buffer pool all the pages of the
317  * specified relation that have block numbers >= firstDelBlock.
318  * (In particular, with firstDelBlock = 0, all pages are removed.)
319  * Dirty pages are simply dropped, without bothering to write them
320  * out first. Therefore, this is NOT rollback-able, and so should be
321  * used only with extreme caution!
322  *
323  * See DropRelFileNodeBuffers in bufmgr.c for more notes.
324  */
325 void
327  BlockNumber firstDelBlock)
328 {
329  int i;
330 
331  for (i = 0; i < NLocBuffer; i++)
332  {
334  LocalBufferLookupEnt *hresult;
335  uint32 buf_state;
336 
337  buf_state = pg_atomic_read_u32(&bufHdr->state);
338 
339  if ((buf_state & BM_TAG_VALID) &&
340  RelFileNodeEquals(bufHdr->tag.rnode, rnode) &&
341  bufHdr->tag.forkNum == forkNum &&
342  bufHdr->tag.blockNum >= firstDelBlock)
343  {
344  if (LocalRefCount[i] != 0)
345  elog(ERROR, "block %u of %s is still referenced (local %u)",
346  bufHdr->tag.blockNum,
348  bufHdr->tag.forkNum),
349  LocalRefCount[i]);
350  /* Remove entry from hashtable */
351  hresult = (LocalBufferLookupEnt *)
352  hash_search(LocalBufHash, (void *) &bufHdr->tag,
353  HASH_REMOVE, NULL);
354  if (!hresult) /* shouldn't happen */
355  elog(ERROR, "local buffer hash table corrupted");
356  /* Mark buffer invalid */
357  CLEAR_BUFFERTAG(bufHdr->tag);
358  buf_state &= ~BUF_FLAG_MASK;
359  buf_state &= ~BUF_USAGECOUNT_MASK;
360  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
361  }
362  }
363 }
364 
365 /*
366  * DropRelFileNodeAllLocalBuffers
367  * This function removes from the buffer pool all pages of all forks
368  * of the specified relation.
369  *
370  * See DropRelFileNodesAllBuffers in bufmgr.c for more notes.
371  */
372 void
374 {
375  int i;
376 
377  for (i = 0; i < NLocBuffer; i++)
378  {
380  LocalBufferLookupEnt *hresult;
381  uint32 buf_state;
382 
383  buf_state = pg_atomic_read_u32(&bufHdr->state);
384 
385  if ((buf_state & BM_TAG_VALID) &&
386  RelFileNodeEquals(bufHdr->tag.rnode, rnode))
387  {
388  if (LocalRefCount[i] != 0)
389  elog(ERROR, "block %u of %s is still referenced (local %u)",
390  bufHdr->tag.blockNum,
392  bufHdr->tag.forkNum),
393  LocalRefCount[i]);
394  /* Remove entry from hashtable */
395  hresult = (LocalBufferLookupEnt *)
396  hash_search(LocalBufHash, (void *) &bufHdr->tag,
397  HASH_REMOVE, NULL);
398  if (!hresult) /* shouldn't happen */
399  elog(ERROR, "local buffer hash table corrupted");
400  /* Mark buffer invalid */
401  CLEAR_BUFFERTAG(bufHdr->tag);
402  buf_state &= ~BUF_FLAG_MASK;
403  buf_state &= ~BUF_USAGECOUNT_MASK;
404  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
405  }
406  }
407 }
408 
409 /*
410  * InitLocalBuffers -
411  * init the local buffer cache. Since most queries (esp. multi-user ones)
412  * don't involve local buffers, we delay allocating actual memory for the
413  * buffers until we need them; just make the buffer headers here.
414  */
415 static void
417 {
418  int nbufs = num_temp_buffers;
419  HASHCTL info;
420  int i;
421 
422  /*
423  * Parallel workers can't access data in temporary tables, because they
424  * have no visibility into the local buffers of their leader. This is a
425  * convenient, low-cost place to provide a backstop check for that. Note
426  * that we don't wish to prevent a parallel worker from accessing catalog
427  * metadata about a temp table, so checks at higher levels would be
428  * inappropriate.
429  */
430  if (IsParallelWorker())
431  ereport(ERROR,
432  (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
433  errmsg("cannot access temporary tables during a parallel operation")));
434 
435  /* Allocate and zero buffer headers and auxiliary arrays */
436  LocalBufferDescriptors = (BufferDesc *) calloc(nbufs, sizeof(BufferDesc));
437  LocalBufferBlockPointers = (Block *) calloc(nbufs, sizeof(Block));
438  LocalRefCount = (int32 *) calloc(nbufs, sizeof(int32));
439  if (!LocalBufferDescriptors || !LocalBufferBlockPointers || !LocalRefCount)
440  ereport(FATAL,
441  (errcode(ERRCODE_OUT_OF_MEMORY),
442  errmsg("out of memory")));
443 
444  nextFreeLocalBuf = 0;
445 
446  /* initialize fields that need to start off nonzero */
447  for (i = 0; i < nbufs; i++)
448  {
450 
451  /*
452  * negative to indicate local buffer. This is tricky: shared buffers
453  * start with 0. We have to start with -2. (Note that the routine
454  * BufferDescriptorGetBuffer adds 1 to buf_id so our first buffer id
455  * is -1.)
456  */
457  buf->buf_id = -i - 2;
458 
459  /*
460  * Intentionally do not initialize the buffer's atomic variable
461  * (besides zeroing the underlying memory above). That way we get
462  * errors on platforms without atomics, if somebody (re-)introduces
463  * atomic operations for local buffers.
464  */
465  }
466 
467  /* Create the lookup hash table */
468  MemSet(&info, 0, sizeof(info));
469  info.keysize = sizeof(BufferTag);
470  info.entrysize = sizeof(LocalBufferLookupEnt);
471 
472  LocalBufHash = hash_create("Local Buffer Lookup Table",
473  nbufs,
474  &info,
476 
477  if (!LocalBufHash)
478  elog(ERROR, "could not initialize local buffer hash table");
479 
480  /* Initialization done, mark buffers allocated */
481  NLocBuffer = nbufs;
482 }
483 
484 /*
485  * GetLocalBufferStorage - allocate memory for a local buffer
486  *
487  * The idea of this function is to aggregate our requests for storage
488  * so that the memory manager doesn't see a whole lot of relatively small
489  * requests. Since we'll never give back a local buffer once it's created
490  * within a particular process, no point in burdening memmgr with separately
491  * managed chunks.
492  */
493 static Block
495 {
496  static char *cur_block = NULL;
497  static int next_buf_in_block = 0;
498  static int num_bufs_in_block = 0;
499  static int total_bufs_allocated = 0;
500  static MemoryContext LocalBufferContext = NULL;
501 
502  char *this_buf;
503 
504  Assert(total_bufs_allocated < NLocBuffer);
505 
506  if (next_buf_in_block >= num_bufs_in_block)
507  {
508  /* Need to make a new request to memmgr */
509  int num_bufs;
510 
511  /*
512  * We allocate local buffers in a context of their own, so that the
513  * space eaten for them is easily recognizable in MemoryContextStats
514  * output. Create the context on first use.
515  */
516  if (LocalBufferContext == NULL)
517  LocalBufferContext =
519  "LocalBufferContext",
521 
522  /* Start with a 16-buffer request; subsequent ones double each time */
523  num_bufs = Max(num_bufs_in_block * 2, 16);
524  /* But not more than what we need for all remaining local bufs */
525  num_bufs = Min(num_bufs, NLocBuffer - total_bufs_allocated);
526  /* And don't overflow MaxAllocSize, either */
527  num_bufs = Min(num_bufs, MaxAllocSize / BLCKSZ);
528 
529  cur_block = (char *) MemoryContextAlloc(LocalBufferContext,
530  num_bufs * BLCKSZ);
531  next_buf_in_block = 0;
532  num_bufs_in_block = num_bufs;
533  }
534 
535  /* Allocate next buffer in current memory block */
536  this_buf = cur_block + next_buf_in_block * BLCKSZ;
537  next_buf_in_block++;
538  total_bufs_allocated++;
539 
540  return (Block) this_buf;
541 }
542 
/*
 * CheckForLocalBufferLeaks - ensure this backend holds no local buffer pins
 *
 * This is just like CheckForBufferLeaks(), but for local buffers.
 */
static void
CheckForLocalBufferLeaks(void)
{
#ifdef USE_ASSERT_CHECKING
	if (LocalRefCount)
	{
		int			RefCountErrors = 0;
		int			i;

		for (i = 0; i < NLocBuffer; i++)
		{
			if (LocalRefCount[i] != 0)
			{
				Buffer		b = -i - 1;

				PrintBufferLeakWarning(b);
				RefCountErrors++;
			}
		}
		Assert(RefCountErrors == 0);
	}
#endif
}
571 
572 /*
573  * AtEOXact_LocalBuffers - clean up at end of transaction.
574  *
575  * This is just like AtEOXact_Buffers, but for local buffers.
576  */
577 void
578 AtEOXact_LocalBuffers(bool isCommit)
579 {
581 }
582 
/*
 * AtProcExit_LocalBuffers - ensure we have dropped pins during backend exit.
 *
 * This is just like AtProcExit_Buffers, but for local buffers.
 */
void
AtProcExit_LocalBuffers(void)
{
	/*
	 * We shouldn't be holding any remaining pins; if we are, and assertions
	 * aren't enabled, we'll fail later in DropRelFileNodeBuffers while trying
	 * to drop the temp rels.
	 */
	CheckForLocalBufferLeaks();
}
#define calloc(a, b)
Definition: header.h:55
BufferDesc * LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum, bool *foundPtr)
Definition: localbuf.c:109
#define AllocSetContextCreate
Definition: memutils.h:170
long local_blks_dirtied
Definition: instrument.h:27
BackendId MyBackendId
Definition: globals.c:81
#define BM_TAG_VALID
Definition: buf_internals.h:60
void PrintBufferLeakWarning(Buffer buffer)
Definition: bufmgr.c:2594
ForkNumber forkNum
Definition: buf_internals.h:93
#define HASH_ELEM
Definition: hsearch.h:85
int num_temp_buffers
Definition: guc.c:553
static int nextFreeLocalBuf
Definition: localbuf.c:47
ResourceOwner CurrentResourceOwner
Definition: resowner.c:142
static void CheckForLocalBufferLeaks(void)
Definition: localbuf.c:549
#define Min(x, y)
Definition: c.h:927
PrefetchBufferResult PrefetchLocalBuffer(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum)
Definition: localbuf.c:64
#define InvalidBuffer
Definition: buf.h:25
Size entrysize
Definition: hsearch.h:72
void DropRelFileNodeLocalBuffers(RelFileNode rnode, ForkNumber forkNum, BlockNumber firstDelBlock)
Definition: localbuf.c:326
#define GetLocalBufferDescriptor(id)
struct buftag BufferTag
int errcode(int sqlerrcode)
Definition: elog.c:610
#define MemSet(start, val, len)
Definition: c.h:949
uint32 BlockNumber
Definition: block.h:31
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:919
#define BM_DIRTY
Definition: buf_internals.h:58
#define fprintf
Definition: port.h:197
void ResourceOwnerRememberBuffer(ResourceOwner owner, Buffer buffer)
Definition: resowner.c:930
Buffer recent_buffer
Definition: bufmgr.h:54
#define LocalBufHdrGetBlock(bufHdr)
Definition: localbuf.c:38
signed int int32
Definition: c.h:362
Definition: dynahash.c:218
void DropRelFileNodeAllLocalBuffers(RelFileNode rnode)
Definition: localbuf.c:373
#define ERROR
Definition: elog.h:43
#define FATAL
Definition: elog.h:52
BufferDesc * LocalBufferDescriptors
Definition: localbuf.c:43
#define BUF_FLAG_MASK
Definition: buf_internals.h:45
int NLocBuffer
Definition: localbuf.c:41
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:192
RelFileNodeBackend smgr_rnode
Definition: smgr.h:42
static void InitLocalBuffers(void)
Definition: localbuf.c:416
void AtProcExit_LocalBuffers(void)
Definition: localbuf.c:589
static char * buf
Definition: pg_test_fsync.c:68
void smgrwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, char *buffer, bool skipFsync)
Definition: smgr.c:524
#define BUF_USAGECOUNT_ONE
Definition: buf_internals.h:43
#define BM_JUST_DIRTIED
Definition: buf_internals.h:63
unsigned int uint32
Definition: c.h:374
#define IsParallelWorker()
Definition: parallel.h:61
MemoryContext TopMemoryContext
Definition: mcxt.c:44
#define BUFFERTAGS_EQUAL(a, b)
SMgrRelation smgropen(RelFileNode rnode, BackendId backend)
Definition: smgr.c:146
ForkNumber
Definition: relpath.h:40
void MarkLocalBufferDirty(Buffer buffer)
Definition: localbuf.c:286
#define MaxAllocSize
Definition: memutils.h:40
#define HASH_BLOBS
Definition: hsearch.h:86
#define BM_VALID
Definition: buf_internals.h:59
HTAB * hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
Definition: dynahash.c:326
Size keysize
Definition: hsearch.h:71
#define ereport(elevel,...)
Definition: elog.h:144
RelFileNode node
Definition: relfilenode.h:74
#define Max(x, y)
Definition: c.h:921
#define Assert(condition)
Definition: c.h:745
#define CLEAR_BUFFERTAG(a)
Definition: buf_internals.h:97
#define INIT_BUFFERTAG(a, xx_rnode, xx_forkNum, xx_blockNum)
#define BUF_USAGECOUNT_MASK
Definition: buf_internals.h:42
static HTAB * LocalBufHash
Definition: localbuf.c:49
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferDescriptorGetBuffer(bdesc)
void PageSetChecksumInplace(Page page, BlockNumber blkno)
Definition: bufpage.c:1414
BlockNumber blockNum
Definition: buf_internals.h:94
bool smgrprefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum)
Definition: smgr.c:488
RelFileNode rnode
Definition: buf_internals.h:92
static Block GetLocalBufferStorage(void)
Definition: localbuf.c:494
#define BM_MAX_USAGE_COUNT
Definition: buf_internals.h:76
#define BM_IO_ERROR
Definition: buf_internals.h:62
BufferTag tag
int errmsg(const char *fmt,...)
Definition: elog.c:824
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:797
long local_blks_written
Definition: instrument.h:28
Block * LocalBufferBlockPointers
Definition: localbuf.c:44
#define elog(elevel,...)
Definition: elog.h:214
int i
static void pg_atomic_unlocked_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:277
pg_atomic_uint32 state
#define BUF_STATE_GET_USAGECOUNT(state)
Definition: buf_internals.h:49
#define relpathbackend(rnode, backend, forknum)
Definition: relpath.h:78
int Buffer
Definition: buf.h:23
int32 * LocalRefCount
Definition: localbuf.c:45
Pointer Page
Definition: bufpage.h:78
void AtEOXact_LocalBuffers(bool isCommit)
Definition: localbuf.c:578
#define RelFileNodeEquals(node1, node2)
Definition: relfilenode.h:88
BufferUsage pgBufferUsage
Definition: instrument.c:20
void * Block
Definition: bufmgr.h:24
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:241