buf_init.c
/*-------------------------------------------------------------------------
 *
 * buf_init.c
 *	  buffer manager initialization routines
 *
 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/storage/buffer/buf_init.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "storage/buf_internals.h"
#include "storage/bufmgr.h"

BufferDescPadded *BufferDescriptors;
char	   *BufferBlocks;
LWLockMinimallyPadded *BufferIOLWLockArray = NULL;
WritebackContext BackendWritebackContext;
CkptSortItem *CkptBufferIds;

/*
 * Data Structures:
 *		buffers live in a freelist and a lookup data structure.
 *
 *
 * Buffer Lookup:
 *		Two important notes.  First, the buffer has to be
 *		available for lookup BEFORE an IO begins.  Otherwise
 *		a second process trying to read the buffer will
 *		allocate its own copy and the buffer pool will
 *		become inconsistent.
 *
 * Buffer Replacement:
 *		see freelist.c.  A buffer cannot be replaced while in
 *		use either by data manager or during IO.
 *
 *
 * Synchronization/Locking:
 *
 * IO_IN_PROGRESS -- this is a flag in the buffer descriptor.
 *		It must be set when an IO is initiated and cleared at
 *		the end of the IO.  It is there to make sure that one
 *		process doesn't start to use a buffer while another is
 *		faulting it in.  see WaitIO and related routines.
 *
 * refcount -- Counts the number of processes holding pins on a buffer.
 *		A buffer is pinned during IO and immediately after a BufferAlloc().
 *		Pins must be released before end of transaction.  For efficiency the
 *		shared refcount isn't increased if an individual backend pins a buffer
 *		multiple times.  Check the PrivateRefCount infrastructure in bufmgr.c.
 */

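The pin-count rule above is easiest to see in isolation. The following standalone sketch is illustrative only and is not part of buf_init.c or bufmgr.c: ToyBufferDesc, toy_pin(), toy_unpin() and toy_can_evict() are made-up names, and the real code packs the pin count into the descriptor's atomic state word alongside per-backend PrivateRefCount bookkeeping. The sketch only demonstrates the invariant that a buffer with a nonzero pin count must not be evicted.

/* Illustrative sketch only -- not PostgreSQL code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct ToyBufferDesc
{
	atomic_uint refcount;		/* number of pins currently held */
} ToyBufferDesc;

static void
toy_pin(ToyBufferDesc *buf)
{
	atomic_fetch_add(&buf->refcount, 1);
}

static void
toy_unpin(ToyBufferDesc *buf)
{
	atomic_fetch_sub(&buf->refcount, 1);
}

static bool
toy_can_evict(ToyBufferDesc *buf)
{
	/* replacement (see freelist.c) must skip any buffer that is pinned */
	return atomic_load(&buf->refcount) == 0;
}

int
main(void)
{
	ToyBufferDesc buf = {0};

	toy_pin(&buf);
	printf("evictable while pinned: %d\n", toy_can_evict(&buf));	/* prints 0 */
	toy_unpin(&buf);
	printf("evictable after unpin:  %d\n", toy_can_evict(&buf));	/* prints 1 */
	return 0;
}
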
/*
 * Initialize shared buffer pool
 *
 * This is called once during shared-memory initialization (either in the
 * postmaster, or in a standalone backend).
 */
void
InitBufferPool(void)
{
	bool		foundBufs,
				foundDescs,
				foundIOLocks,
				foundBufCkpt;

	/* Align descriptors to a cacheline boundary. */
	BufferDescriptors = (BufferDescPadded *)
		ShmemInitStruct("Buffer Descriptors",
						NBuffers * sizeof(BufferDescPadded),
						&foundDescs);

	BufferBlocks = (char *)
		ShmemInitStruct("Buffer Blocks",
						NBuffers * (Size) BLCKSZ, &foundBufs);

	/* Align lwlocks to cacheline boundary */
	BufferIOLWLockArray = (LWLockMinimallyPadded *)
		ShmemInitStruct("Buffer IO Locks",
						NBuffers * (Size) sizeof(LWLockMinimallyPadded),
						&foundIOLocks);

	LWLockRegisterTranche(LWTRANCHE_BUFFER_IO_IN_PROGRESS, "buffer_io");
	LWLockRegisterTranche(LWTRANCHE_BUFFER_CONTENT, "buffer_content");

	/*
	 * The array used to sort to-be-checkpointed buffer ids is located in
	 * shared memory, to avoid having to allocate significant amounts of
	 * memory at runtime. As that'd be in the middle of a checkpoint, or when
	 * the checkpointer is restarted, memory allocation failures would be
	 * painful.
	 */
	CkptBufferIds = (CkptSortItem *)
		ShmemInitStruct("Checkpoint BufferIds",
						NBuffers * sizeof(CkptSortItem), &foundBufCkpt);

	if (foundDescs || foundBufs || foundIOLocks || foundBufCkpt)
	{
		/* should find all of these, or none of them */
		Assert(foundDescs && foundBufs && foundIOLocks && foundBufCkpt);
		/* note: this path is only taken in EXEC_BACKEND case */
	}
	else
	{
		int			i;

		/*
		 * Initialize all the buffer headers.
		 */
		for (i = 0; i < NBuffers; i++)
		{
			BufferDesc *buf = GetBufferDescriptor(i);

			CLEAR_BUFFERTAG(buf->tag);

			pg_atomic_init_u32(&buf->state, 0);
			buf->wait_backend_pid = 0;

			buf->buf_id = i;

			/*
			 * Initially link all the buffers together as unused. Subsequent
			 * management of this list is done by freelist.c.
			 */
			buf->freeNext = i + 1;

			LWLockInitialize(BufferDescriptorGetContentLock(buf),
							 LWTRANCHE_BUFFER_CONTENT);

			LWLockInitialize(BufferDescriptorGetIOLock(buf),
							 LWTRANCHE_BUFFER_IO_IN_PROGRESS);
		}

		/* Correct last entry of linked list */
		GetBufferDescriptor(NBuffers - 1)->freeNext = FREENEXT_END_OF_LIST;
	}

	/* Init other shared buffer-management stuff */
	StrategyInitialize(!foundDescs);

	/* Initialize per-backend file flush context */
	WritebackContextInit(&BackendWritebackContext,
						 &backend_flush_after);
}

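A small aside on the freelist linking performed in InitBufferPool() above: after initialization, descriptor 0 points at 1, 1 at 2, and so on, with the last descriptor marked end-of-list; freelist.c then hands out and relinks entries from this chain. The standalone sketch below is illustrative only (TOY_NBUFFERS and TOY_END_OF_LIST are made-up stand-ins for NBuffers and FREENEXT_END_OF_LIST) and simply replays that linking for a four-buffer pool.

/* Illustrative sketch only -- not PostgreSQL code. */
#include <stdio.h>

#define TOY_NBUFFERS	4
#define TOY_END_OF_LIST	(-1)

int
main(void)
{
	int			freeNext[TOY_NBUFFERS];
	int			i;

	/* link every buffer to its successor, as InitBufferPool() does */
	for (i = 0; i < TOY_NBUFFERS; i++)
		freeNext[i] = i + 1;

	/* correct the last entry of the linked list */
	freeNext[TOY_NBUFFERS - 1] = TOY_END_OF_LIST;

	/* walk the chain: prints "0 -> 1 -> 2 -> 3 -> END" */
	for (i = 0; i != TOY_END_OF_LIST; i = freeNext[i])
		printf("%d -> ", i);
	printf("END\n");
	return 0;
}
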
/*
 * BufferShmemSize
 *
 * compute the size of shared memory for the buffer pool including
 * data pages, buffer descriptors, hash tables, etc.
 */
Size
BufferShmemSize(void)
{
	Size		size = 0;

	/* size of buffer descriptors */
	size = add_size(size, mul_size(NBuffers, sizeof(BufferDescPadded)));
	/* to allow aligning buffer descriptors */
	size = add_size(size, PG_CACHE_LINE_SIZE);

	/* size of data pages */
	size = add_size(size, mul_size(NBuffers, BLCKSZ));

	/* size of stuff controlled by freelist.c */
	size = add_size(size, StrategyShmemSize());

	/*
	 * It would be nice to include the I/O locks in the BufferDesc, but that
	 * would increase the size of a BufferDesc to more than one cache line,
	 * and benchmarking has shown that keeping every BufferDesc aligned on a
	 * cache line boundary is important for performance.  So, instead, the
	 * array of I/O locks is allocated in a separate tranche.  Because those
	 * locks are not highly contended, we lay out the array with minimal
	 * padding.
	 */
	size = add_size(size, mul_size(NBuffers, sizeof(LWLockMinimallyPadded)));
	/* to allow aligning the above */
	size = add_size(size, PG_CACHE_LINE_SIZE);

	/* size of checkpoint sort array in bufmgr.c */
	size = add_size(size, mul_size(NBuffers, sizeof(CkptSortItem)));

	return size;
}
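For a rough sense of scale, the sketch below mirrors the additions in BufferShmemSize() for a hypothetical pool of 16384 buffers (the 128MB shared_buffers default with 8kB blocks). It is illustrative only: the sizeof values are assumed numbers rather than the real build-dependent ones, and the StrategyShmemSize() contribution and the two cache-line alignment allowances are omitted. The point is simply that the NBuffers * BLCKSZ term for the data pages dominates the total.

/* Illustrative back-of-envelope only -- not PostgreSQL code. */
#include <stdio.h>

int
main(void)
{
	size_t		nbuffers = 16384;	/* 128MB of shared_buffers at 8kB/page */
	size_t		blcksz = 8192;		/* BLCKSZ */
	size_t		desc_sz = 64;		/* assumed sizeof(BufferDescPadded) */
	size_t		iolock_sz = 32;		/* assumed sizeof(LWLockMinimallyPadded) */
	size_t		ckpt_item_sz = 20;	/* assumed sizeof(CkptSortItem) */
	size_t		total = 0;

	total += nbuffers * desc_sz;		/* buffer descriptors: 1 MB */
	total += nbuffers * blcksz;			/* data pages: 128 MB, the dominant term */
	total += nbuffers * iolock_sz;		/* buffer I/O locks: 512 kB */
	total += nbuffers * ckpt_item_sz;	/* checkpoint sort array: 320 kB */

	printf("approx. %zu MB\n", total / (1024 * 1024));	/* ~129 MB */
	return 0;
}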