PostgreSQL Source Code  git master
bulk_write.c File Reference
#include "postgres.h"
#include "access/xloginsert.h"
#include "access/xlogrecord.h"
#include "storage/bufmgr.h"
#include "storage/bufpage.h"
#include "storage/bulk_write.h"
#include "storage/proc.h"
#include "storage/smgr.h"
#include "utils/rel.h"

Data Structures

struct  PendingWrite
 
struct  BulkWriteState
 

Macros

#define MAX_PENDING_WRITES   XLR_MAX_BLOCK_ID
 

Typedefs

typedef struct PendingWrite PendingWrite
 

Functions

static void smgr_bulk_flush (BulkWriteState *bulkstate)
 
BulkWriteState * smgr_bulk_start_rel (Relation rel, ForkNumber forknum)
 
BulkWriteState * smgr_bulk_start_smgr (SMgrRelation smgr, ForkNumber forknum, bool use_wal)
 
void smgr_bulk_finish (BulkWriteState *bulkstate)
 
static int buffer_cmp (const void *a, const void *b)
 
void smgr_bulk_write (BulkWriteState *bulkstate, BlockNumber blocknum, BulkWriteBuffer buf, bool page_std)
 
BulkWriteBuffer smgr_bulk_get_buf (BulkWriteState *bulkstate)
 

Variables

static const PGIOAlignedBlock zero_buffer = {{0}}
 

Macro Definition Documentation

◆ MAX_PENDING_WRITES

#define MAX_PENDING_WRITES   XLR_MAX_BLOCK_ID

Definition at line 46 of file bulk_write.c.

Typedef Documentation

◆ PendingWrite

typedef struct PendingWrite PendingWrite

Function Documentation

◆ buffer_cmp()

static int buffer_cmp ( const void *  a,
const void *  b 
)
static

Definition at line 224 of file bulk_write.c.

225 {
226  const PendingWrite *bufa = (const PendingWrite *) a;
227  const PendingWrite *bufb = (const PendingWrite *) b;
228 
229  /* We should not see duplicated writes for the same block */
230  Assert(bufa->blkno != bufb->blkno);
231  if (bufa->blkno > bufb->blkno)
232  return 1;
233  else
234  return -1;
235 }

References a, Assert, b, and PendingWrite::blkno.

Referenced by smgr_bulk_flush().

◆ smgr_bulk_finish()

void smgr_bulk_finish ( BulkWriteState *  bulkstate)

Definition at line 129 of file bulk_write.c.

130 {
131  /* WAL-log and flush any remaining pages */
132  smgr_bulk_flush(bulkstate);
133 
134  /*
135  * Fsync the relation, or register it for the next checkpoint, if
136  * necessary.
137  */
138  if (SmgrIsTemp(bulkstate->smgr))
139  {
140  /* Temporary relations don't need to be fsync'd, ever */
141  }
142  else if (!bulkstate->use_wal)
143  {
144  /*----------
145  * This is either an unlogged relation, or a permanent relation but we
146  * skipped WAL-logging because wal_level=minimal:
147  *
148  * A) Unlogged relation
149  *
150  * Unlogged relations will go away on crash, but they need to be
151  * fsync'd on a clean shutdown. It's sufficient to call
152  * smgrregistersync(), that ensures that the checkpointer will
153  * flush it at the shutdown checkpoint. (It will flush it on the
154  * next online checkpoint too, which is not strictly necessary.)
155  *
156  * Note that the init-fork of an unlogged relation is not
157  * considered unlogged for our purposes. It's treated like a
158  * regular permanent relation. The callers will pass use_wal=true
159  * for the init fork.
160  *
161  * B) Permanent relation, WAL-logging skipped because wal_level=minimal
162  *
163  * This is a new relation, and we didn't WAL-log the pages as we
164  * wrote, but they need to be fsync'd before commit.
165  *
166  * We don't need to do that here, however. The fsync() is done at
167  * commit, by smgrDoPendingSyncs() (*).
168  *
169  * (*) smgrDoPendingSyncs() might decide to WAL-log the whole
170  * relation at commit instead of fsyncing it, if the relation was
171  * very small, but it's smgrDoPendingSyncs() responsibility in any
172  * case.
173  *
174  * We cannot distinguish the two here, so conservatively assume it's
175  * an unlogged relation. A permanent relation with wal_level=minimal
176  * would require no actions, see above.
177  */
178  smgrregistersync(bulkstate->smgr, bulkstate->forknum);
179  }
180  else
181  {
182  /*
183  * Permanent relation, WAL-logged normally.
184  *
185  * We already WAL-logged all the pages, so they will be replayed from
186  * WAL on crash. However, when we wrote out the pages, we passed
187  * skipFsync=true to avoid the overhead of registering all the writes
188  * with the checkpointer. Register the whole relation now.
189  *
190  * There is one hole in that idea: If a checkpoint occurred while we
191  * were writing the pages, it already missed fsyncing the pages we had
192  * written before the checkpoint started. A crash later on would
193  * replay the WAL starting from the checkpoint, therefore it wouldn't
194  * replay our earlier WAL records. So if a checkpoint started after
195  * the bulk write started, fsync the files now.
196  */
197 
198  /*
199  * Prevent a checkpoint from starting between the GetRedoRecPtr() and
200  * smgrregistersync() calls.
201  */
202  Assert((MyProc->delayChkptFlags & DELAY_CHKPT_START) == 0);
203  MyProc->delayChkptFlags |= DELAY_CHKPT_START;
204 
205  if (bulkstate->start_RedoRecPtr != GetRedoRecPtr())
206  {
207  /*
208  * A checkpoint occurred and it didn't know about our writes, so
209  * fsync() the relation ourselves.
210  */
211  MyProc->delayChkptFlags &= ~DELAY_CHKPT_START;
212  smgrimmedsync(bulkstate->smgr, bulkstate->forknum);
213  elog(DEBUG1, "flushed relation because a checkpoint occurred concurrently");
214  }
215  else
216  {
217  smgrregistersync(bulkstate->smgr, bulkstate->forknum);
218  MyProc->delayChkptFlags &= ~DELAY_CHKPT_START;
219  }
220  }
221 }

References Assert, DEBUG1, DELAY_CHKPT_START, PGPROC::delayChkptFlags, elog, BulkWriteState::forknum, GetRedoRecPtr(), MyProc, BulkWriteState::smgr, smgr_bulk_flush(), smgrimmedsync(), SmgrIsTemp, smgrregistersync(), BulkWriteState::start_RedoRecPtr, and BulkWriteState::use_wal.

Referenced by _bt_load(), btbuildempty(), end_heap_rewrite(), gist_indexsortbuild(), RelationCopyStorage(), and spgbuildempty().
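
Taken together with smgr_bulk_start_rel(), smgr_bulk_get_buf() and smgr_bulk_write(), this is the whole caller-side lifecycle. The sketch below is a minimal illustration loosely modeled on the index-build callers listed above; the function name write_empty_pages, the relation argument and the empty page contents are assumptions, not code from bulk_write.c. The point is that the caller ends with a single smgr_bulk_finish() call and never fsyncs or registers syncs itself.

#include "postgres.h"

#include "storage/bufpage.h"
#include "storage/bulk_write.h"
#include "utils/rel.h"

/* Hypothetical caller: append nblocks freshly initialized pages to rel's main fork. */
static void
write_empty_pages(Relation rel, BlockNumber nblocks)
{
	BulkWriteState *bulkstate = smgr_bulk_start_rel(rel, MAIN_FORKNUM);

	for (BlockNumber blkno = 0; blkno < nblocks; blkno++)
	{
		/* a fresh, I/O-aligned, BLCKSZ-sized buffer for every page */
		BulkWriteBuffer buf = smgr_bulk_get_buf(bulkstate);

		PageInit((Page) buf->data, BLCKSZ, 0);
		/* ... fill the page here ... */

		/* queues the page; ownership of buf passes to bulkstate */
		smgr_bulk_write(bulkstate, blkno, buf, true);
	}

	/* WAL-logs and writes out any pending pages, then fsyncs or registers the sync */
	smgr_bulk_finish(bulkstate);
}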

◆ smgr_bulk_flush()

static void smgr_bulk_flush ( BulkWriteState *  bulkstate)
static

Definition at line 241 of file bulk_write.c.

242 {
243  int npending = bulkstate->npending;
244  PendingWrite *pending_writes = bulkstate->pending_writes;
245 
246  if (npending == 0)
247  return;
248 
249  if (npending > 1)
250  qsort(pending_writes, npending, sizeof(PendingWrite), buffer_cmp);
251 
252  if (bulkstate->use_wal)
253  {
254  BlockNumber blknos[MAX_PENDING_WRITES];
255  Page pages[MAX_PENDING_WRITES];
256  bool page_std = true;
257 
258  for (int i = 0; i < npending; i++)
259  {
260  blknos[i] = pending_writes[i].blkno;
261  pages[i] = pending_writes[i].buf->data;
262 
263  /*
264  * If any of the pages use !page_std, we log them all as such.
265  * That's a bit wasteful, but in practice, a mix of standard and
266  * non-standard page layout is rare. None of the built-in AMs do
267  * that.
268  */
269  if (!pending_writes[i].page_std)
270  page_std = false;
271  }
272  log_newpages(&bulkstate->smgr->smgr_rlocator.locator, bulkstate->forknum,
273  npending, blknos, pages, page_std);
274  }
275 
276  for (int i = 0; i < npending; i++)
277  {
278  BlockNumber blkno = pending_writes[i].blkno;
279  Page page = pending_writes[i].buf->data;
280 
281  PageSetChecksumInplace(page, blkno);
282 
283  if (blkno >= bulkstate->pages_written)
284  {
285  /*
286  * If we have to write pages nonsequentially, fill in the space
287  * with zeroes until we come back and overwrite. This is not
288  * logically necessary on standard Unix filesystems (unwritten
289  * space will read as zeroes anyway), but it should help to avoid
290  * fragmentation. The dummy pages aren't WAL-logged though.
291  */
292  while (blkno > bulkstate->pages_written)
293  {
294  /* don't set checksum for all-zero page */
295  smgrextend(bulkstate->smgr, bulkstate->forknum,
296  bulkstate->pages_written++,
297  &zero_buffer,
298  true);
299  }
300 
301  smgrextend(bulkstate->smgr, bulkstate->forknum, blkno, page, true);
302  bulkstate->pages_written = pending_writes[i].blkno + 1;
303  }
304  else
305  smgrwrite(bulkstate->smgr, bulkstate->forknum, blkno, page, true);
306  pfree(page);
307  }
308 
309  bulkstate->npending = 0;
310 }

References PendingWrite::blkno, PendingWrite::buf, buffer_cmp(), PGIOAlignedBlock::data, BulkWriteState::forknum, i, RelFileLocatorBackend::locator, log_newpages(), MAX_PENDING_WRITES, BulkWriteState::npending, BulkWriteState::pages_written, PageSetChecksumInplace(), BulkWriteState::pending_writes, pfree(), qsort, BulkWriteState::smgr, SMgrRelationData::smgr_rlocator, smgrextend(), smgrwrite(), BulkWriteState::use_wal, and zero_buffer.

Referenced by smgr_bulk_finish(), and smgr_bulk_write().
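
Because pending writes are sorted by block number and gaps are extended with zero pages, callers do not have to produce blocks in ascending order. A hedged sketch of that behavior, reusing the includes from the earlier example (the function name and block numbers are illustrative only, and the fork is assumed to start empty):

/* Hypothetical: pages may be queued out of order. */
static void
write_out_of_order(BulkWriteState *bulkstate)
{
	BulkWriteBuffer buf;

	buf = smgr_bulk_get_buf(bulkstate);
	PageInit((Page) buf->data, BLCKSZ, 0);
	smgr_bulk_write(bulkstate, 4, buf, true);	/* a later block first ... */

	buf = smgr_bulk_get_buf(bulkstate);
	PageInit((Page) buf->data, BLCKSZ, 0);
	smgr_bulk_write(bulkstate, 0, buf, true);	/* ... then an earlier one */

	/*
	 * On the next flush, block 0 is written first, blocks 1-3 are extended
	 * as zero pages to avoid fragmentation, and block 4 is written last.
	 * Gaps that are never written afterwards simply remain zero pages.
	 */
	smgr_bulk_finish(bulkstate);
}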

◆ smgr_bulk_get_buf()

BulkWriteBuffer smgr_bulk_get_buf ( BulkWriteState *  bulkstate)

Definition at line 345 of file bulk_write.c.

346 {
347  return MemoryContextAllocAligned(bulkstate->memcxt, BLCKSZ, PG_IO_ALIGN_SIZE, 0);
348 }

References BulkWriteState::memcxt, MemoryContextAllocAligned(), and PG_IO_ALIGN_SIZE.

Referenced by _bt_blnewpage(), _bt_uppershutdown(), btbuildempty(), gist_indexsortbuild(), gist_indexsortbuild_levelstate_flush(), raw_heap_insert(), RelationCopyStorage(), and spgbuildempty().
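
Each call returns a fresh BLCKSZ-sized, I/O-aligned buffer allocated in the memory context that was current when the bulk operation was started. Once the buffer has been handed to smgr_bulk_write() it belongs to the bulk write state, which pfree()s it after writing it out, so a new buffer is needed for every page. A minimal hedged fragment (bulkstate and blkno are assumed to exist):

/* Hypothetical fragment: one buffer per page; do not reuse buf afterwards. */
BulkWriteBuffer buf = smgr_bulk_get_buf(bulkstate);

PageInit((Page) buf->data, BLCKSZ, 0);
smgr_bulk_write(bulkstate, blkno, buf, true);
/* buf is now owned by bulkstate and will be pfree'd once it has been written out */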

◆ smgr_bulk_start_rel()

BulkWriteState* smgr_bulk_start_rel ( Relation  rel,
ForkNumber  forknum 
)

Definition at line 86 of file bulk_write.c.

87 {
88  return smgr_bulk_start_smgr(RelationGetSmgr(rel),
89  forknum,
90  RelationNeedsWAL(rel) || forknum == INIT_FORKNUM);
91 }

References INIT_FORKNUM, RelationGetSmgr(), RelationNeedsWAL, and smgr_bulk_start_smgr().

Referenced by _bt_load(), begin_heap_rewrite(), btbuildempty(), gist_indexsortbuild(), and spgbuildempty().

◆ smgr_bulk_start_smgr()

BulkWriteState* smgr_bulk_start_smgr ( SMgrRelation  smgr,
ForkNumber  forknum,
bool  use_wal 
)

Definition at line 99 of file bulk_write.c.

100 {
101  BulkWriteState *state;
102 
103  state = palloc(sizeof(BulkWriteState));
104  state->smgr = smgr;
105  state->forknum = forknum;
106  state->use_wal = use_wal;
107 
108  state->npending = 0;
109  state->pages_written = 0;
110 
111  state->start_RedoRecPtr = GetRedoRecPtr();
112 
113  /*
114  * Remember the memory context. We will use it to allocate all the
115  * buffers later.
116  */
117  state->memcxt = CurrentMemoryContext;
118 
119  return state;
120 }

References CurrentMemoryContext, GetRedoRecPtr(), and palloc().

Referenced by RelationCopyStorage(), and smgr_bulk_start_rel().
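
smgr_bulk_start_smgr() is the lower-level entry point for callers that only have an SMgrRelation and make the WAL-logging decision themselves, as RelationCopyStorage() does; smgr_bulk_start_rel() above derives use_wal from RelationNeedsWAL() and the fork number. A hedged sketch of the smgr-level variant (the helper name and the hard-coded use_wal value are assumptions):

/*
 * Hypothetical: a caller that has opened the destination storage at the smgr
 * level (no Relation at hand) and always WAL-logs, e.g. when populating a
 * fork during a copy.
 */
static void
bulk_fill_fork(SMgrRelation dst, ForkNumber forknum)
{
	BulkWriteState *bulkstate = smgr_bulk_start_smgr(dst, forknum, true);

	/* ... smgr_bulk_get_buf() + smgr_bulk_write() for each page ... */

	smgr_bulk_finish(bulkstate);
}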

◆ smgr_bulk_write()

void smgr_bulk_write ( BulkWriteState *  bulkstate,
BlockNumber  blocknum,
BulkWriteBuffer  buf,
bool  page_std 
)

Definition at line 321 of file bulk_write.c.

322 {
323  PendingWrite *w;
324 
325  w = &bulkstate->pending_writes[bulkstate->npending++];
326  w->buf = buf;
327  w->blkno = blocknum;
328  w->page_std = page_std;
329 
330  if (bulkstate->npending == MAX_PENDING_WRITES)
331  smgr_bulk_flush(bulkstate);
332 }

References PendingWrite::blkno, PendingWrite::buf, buf, MAX_PENDING_WRITES, BulkWriteState::npending, PendingWrite::page_std, BulkWriteState::pending_writes, and smgr_bulk_flush().

Referenced by _bt_blwritepage(), btbuildempty(), end_heap_rewrite(), gist_indexsortbuild(), gist_indexsortbuild_levelstate_flush(), raw_heap_insert(), RelationCopyStorage(), and spgbuildempty().
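
The page_std flag is forwarded to the WAL machinery (see smgr_bulk_flush() above): when true, the unused hole between pd_lower and pd_upper can be left out of the full-page image; when false, the whole page is logged. Callers that queue raw page images of unknown layout, such as RelationCopyStorage(), pass false. A hedged sketch (the helper name, src_page and blkno are assumptions):

/* Hypothetical: queue a raw page image whose layout is not known. */
static void
bulk_write_raw_page(BulkWriteState *bulkstate, BlockNumber blkno,
					const char *src_page)
{
	BulkWriteBuffer buf = smgr_bulk_get_buf(bulkstate);

	memcpy(buf->data, src_page, BLCKSZ);
	/* page_std=false: the full page, hole included, goes into the WAL record */
	smgr_bulk_write(bulkstate, blkno, buf, false);
}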

Variable Documentation

◆ zero_buffer

const PGIOAlignedBlock zero_buffer = {{0}}
static

Definition at line 48 of file bulk_write.c.

Referenced by smgr_bulk_flush().