/*
 * Extracted from the PostgreSQL "git master" Doxygen page for
 * pg_buffercache_pages.c (navigation chrome removed).
 */
/*-------------------------------------------------------------------------
 *
 * pg_buffercache_pages.c
 *	  display some contents of the buffer cache
 *
 * contrib/pg_buffercache/pg_buffercache_pages.c
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/htup_details.h"
#include "access/relation.h"
#include "catalog/pg_type.h"
#include "funcapi.h"
#include "port/pg_numa.h"
#include "storage/buf_internals.h"
#include "storage/bufmgr.h"
#include "utils/rel.h"
#include "utils/tuplestore.h"
20
21
/* Result-row widths (natts) expected by each SQL-callable function below. */
#define NUM_BUFFERCACHE_PAGES_MIN_ELEM	8
#define NUM_BUFFERCACHE_PAGES_ELEM	9
#define NUM_BUFFERCACHE_SUMMARY_ELEM 5
#define NUM_BUFFERCACHE_USAGE_COUNTS_ELEM 4
#define NUM_BUFFERCACHE_EVICT_ELEM	2
#define NUM_BUFFERCACHE_EVICT_RELATION_ELEM 3
#define NUM_BUFFERCACHE_EVICT_ALL_ELEM	3
#define NUM_BUFFERCACHE_MARK_DIRTY_ELEM	2
#define NUM_BUFFERCACHE_MARK_DIRTY_RELATION_ELEM 3
#define NUM_BUFFERCACHE_MARK_DIRTY_ALL_ELEM	3

#define NUM_BUFFERCACHE_OS_PAGES_ELEM	3
34
36 .name = "pg_buffercache",
37 .version = PG_VERSION
38);
39
40/*
41 * Record structure holding the to be exposed cache data for OS pages. This
42 * structure is used by pg_buffercache_os_pages(), where NUMA information may
43 * or may not be included.
44 */
51
52/*
53 * Function context for data persisting over repeated calls.
54 */
61
62
63/*
64 * Function returning data from the shared buffer cache - buffer number,
65 * relation node/tablespace/database/blocknum and dirty indicator.
66 */
78
79
80/* Only need to touch memory once per backend process lifetime */
81static bool firstNumaTouch = true;
82
83
86{
87 ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
89 int i;
90
91 /*
92 * To smoothly support upgrades from version 1.0 of this extension
93 * transparently handle the (non-)existence of the pinning_backends
94 * column. We unfortunately have to get the result type for that... - we
95 * can't use the result type determined by the function definition without
96 * potentially crashing when somebody uses the old (or even wrong)
97 * function definition though.
98 */
100 elog(ERROR, "return type must be a row type");
101
104 elog(ERROR, "incorrect number of output arguments");
105
106 InitMaterializedSRF(fcinfo, 0);
107
108 /*
109 * Scan through all the buffers, adding one row for each of the buffers to
110 * the tuplestore.
111 *
112 * We don't hold the partition locks, so we don't get a consistent
113 * snapshot across all buffers, but we do grab the buffer header locks, so
114 * the information of each buffer is self-consistent.
115 */
116 for (i = 0; i < NBuffers; i++)
117 {
120 uint32 bufferid;
121 RelFileNumber relfilenumber;
122 Oid reltablespace;
124 ForkNumber forknum;
125 BlockNumber blocknum;
126 bool isvalid;
127 bool isdirty;
131 bool nulls[NUM_BUFFERCACHE_PAGES_ELEM];
132
134
136 /* Lock each buffer header before inspecting. */
138
140 relfilenumber = BufTagGetRelNumber(&bufHdr->tag);
141 reltablespace = bufHdr->tag.spcOid;
142 reldatabase = bufHdr->tag.dbOid;
143 forknum = BufTagGetForkNum(&bufHdr->tag);
144 blocknum = bufHdr->tag.blockNum;
147
148 if (buf_state & BM_DIRTY)
149 isdirty = true;
150 else
151 isdirty = false;
152
153 /* Note if the buffer is valid, and has storage created */
155 isvalid = true;
156 else
157 isvalid = false;
158
160
161 /* Build the tuple and add it to tuplestore */
162 values[0] = Int32GetDatum(bufferid);
163 nulls[0] = false;
164
165 /*
166 * Set all fields except the bufferid to null if the buffer is unused
167 * or not valid.
168 */
169 if (blocknum == InvalidBlockNumber || isvalid == false)
170 {
171 nulls[1] = true;
172 nulls[2] = true;
173 nulls[3] = true;
174 nulls[4] = true;
175 nulls[5] = true;
176 nulls[6] = true;
177 nulls[7] = true;
178 /* unused for v1.0 callers, but the array is always long enough */
179 nulls[8] = true;
180 }
181 else
182 {
183 values[1] = ObjectIdGetDatum(relfilenumber);
184 nulls[1] = false;
185 values[2] = ObjectIdGetDatum(reltablespace);
186 nulls[2] = false;
188 nulls[3] = false;
189 values[4] = Int16GetDatum(forknum);
190 nulls[4] = false;
191 values[5] = Int64GetDatum((int64) blocknum);
192 nulls[5] = false;
194 nulls[6] = false;
196 nulls[7] = false;
197 /* unused for v1.0 callers, but the array is always long enough */
199 nulls[8] = false;
200 }
201
202 tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);
203 }
204
205 return (Datum) 0;
206}
207
208/*
209 * Inquire about OS pages mappings for shared buffers, with NUMA information,
210 * optionally.
211 *
212 * When "include_numa" is false, this routines ignores everything related
213 * to NUMA (returned as NULL values), returning mapping information between
214 * shared buffers and OS pages.
215 *
216 * When "include_numa" is true, NUMA is initialized and numa_node values
217 * are generated. In order to get reliable results we also need to touch
218 * memory pages, so that the inquiry about NUMA memory node does not return
219 * -2, indicating unmapped/unallocated pages.
220 *
221 * Buffers may be smaller or larger than OS memory pages. For each buffer we
222 * return one entry for each memory page used by the buffer (if the buffer is
223 * smaller, it only uses a part of one memory page).
224 *
225 * We expect both sizes (for buffers and memory pages) to be a power-of-2, so
226 * one is always a multiple of the other.
227 *
228 */
229static Datum
231{
233 MemoryContext oldcontext;
234 BufferCacheOsPagesContext *fctx; /* User function context. */
235 TupleDesc tupledesc;
237 HeapTuple tuple;
239
240 if (SRF_IS_FIRSTCALL())
241 {
242 int i,
243 idx;
246 int *os_page_status = NULL;
248 int max_entries;
249 char *startptr,
250 *endptr;
251
252 /* If NUMA information is requested, initialize NUMA support. */
253 if (include_numa && pg_numa_init() == -1)
254 elog(ERROR, "libnuma initialization failed or NUMA is not supported on this platform");
255
256 /*
257 * The database block size and OS memory page size are unlikely to be
258 * the same. The block size is 1-32KB, the memory page size depends on
259 * platform. On x86 it's usually 4KB, on ARM it's 4KB or 64KB, but
260 * there are also features like THP etc. Moreover, we don't quite know
261 * how the pages and buffers "align" in memory - the buffers may be
262 * shifted in some way, using more memory pages than necessary.
263 *
264 * So we need to be careful about mapping buffers to memory pages. We
265 * calculate the maximum number of pages a buffer might use, so that
266 * we allocate enough space for the entries. And then we count the
267 * actual number of entries as we scan the buffers.
268 *
269 * This information is needed before calling move_pages() for NUMA
270 * node id inquiry.
271 */
273
274 /*
275 * The pages and block size is expected to be 2^k, so one divides the
276 * other (we don't know in which direction). This does not say
277 * anything about relative alignment of pages/buffers.
278 */
279 Assert((os_page_size % BLCKSZ == 0) || (BLCKSZ % os_page_size == 0));
280
281 if (include_numa)
282 {
283 void **os_page_ptrs = NULL;
284
285 /*
286 * How many addresses we are going to query? Simply get the page
287 * for the first buffer, and first page after the last buffer, and
288 * count the pages from that.
289 */
290 startptr = (char *) TYPEALIGN_DOWN(os_page_size,
291 BufferGetBlock(1));
292 endptr = (char *) TYPEALIGN(os_page_size,
293 (char *) BufferGetBlock(NBuffers) + BLCKSZ);
294 os_page_count = (endptr - startptr) / os_page_size;
295
296 /* Used to determine the NUMA node for all OS pages at once */
299
300 /*
301 * Fill pointers for all the memory pages. This loop stores and
302 * touches (if needed) addresses into os_page_ptrs[] as input to
303 * one big move_pages(2) inquiry system call, as done in
304 * pg_numa_query_pages().
305 */
306 idx = 0;
307 for (char *ptr = startptr; ptr < endptr; ptr += os_page_size)
308 {
309 os_page_ptrs[idx++] = ptr;
310
311 /* Only need to touch memory once per backend process lifetime */
312 if (firstNumaTouch)
314 }
315
317
318 elog(DEBUG1, "NUMA: NBuffers=%d os_page_count=" UINT64_FORMAT " "
319 "os_page_size=%zu", NBuffers, os_page_count, os_page_size);
320
321 /*
322 * If we ever get 0xff back from kernel inquiry, then we probably
323 * have bug in our buffers to OS page mapping code here.
324 */
325 memset(os_page_status, 0xff, sizeof(int) * os_page_count);
326
327 /* Query NUMA status for all the pointers */
329 elog(ERROR, "failed NUMA pages inquiry: %m");
330 }
331
332 /* Initialize the multi-call context, load entries about buffers */
333
335
336 /* Switch context when allocating stuff to be used in later calls */
337 oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
338
339 /* Create a user function context for cross-call persistence */
341
343 elog(ERROR, "return type must be a row type");
344
346 elog(ERROR, "incorrect number of output arguments");
347
348 /* Construct a tuple descriptor for the result rows. */
350 TupleDescInitEntry(tupledesc, (AttrNumber) 1, "bufferid",
351 INT4OID, -1, 0);
352 TupleDescInitEntry(tupledesc, (AttrNumber) 2, "os_page_num",
353 INT8OID, -1, 0);
354 TupleDescInitEntry(tupledesc, (AttrNumber) 3, "numa_node",
355 INT4OID, -1, 0);
356
357 TupleDescFinalize(tupledesc);
358 fctx->tupdesc = BlessTupleDesc(tupledesc);
359 fctx->include_numa = include_numa;
360
361 /*
362 * Each buffer needs at least one entry, but it might be offset in
363 * some way, and use one extra entry. So we allocate space for the
364 * maximum number of entries we might need, and then count the exact
365 * number as we're walking buffers. That way we can do it in one pass,
366 * without reallocating memory.
367 */
370
371 /* Allocate entries for BufferCacheOsPagesRec records. */
372 fctx->record = (BufferCacheOsPagesRec *)
375
376 /* Return to original context when allocating transient memory */
377 MemoryContextSwitchTo(oldcontext);
378
379 if (include_numa && firstNumaTouch)
380 elog(DEBUG1, "NUMA: page-faulting the buffercache for proper NUMA readouts");
381
382 /*
383 * Scan through all the buffers, saving the relevant fields in the
384 * fctx->record structure.
385 *
386 * We don't hold the partition locks, so we don't get a consistent
387 * snapshot across all buffers, but we do grab the buffer header
388 * locks, so the information of each buffer is self-consistent.
389 */
390 startptr = (char *) TYPEALIGN_DOWN(os_page_size, (char *) BufferGetBlock(1));
391 idx = 0;
392 for (i = 0; i < NBuffers; i++)
393 {
394 char *buffptr = (char *) BufferGetBlock(i + 1);
396 uint32 bufferid;
397 int32 page_num;
398 char *startptr_buff,
400
402
404
405 /* Lock each buffer header before inspecting. */
409
410 /* start of the first page of this buffer */
412
413 /* end of the buffer (no need to align to memory page) */
415
417
418 /* calculate ID of the first page for this buffer */
419 page_num = (startptr_buff - startptr) / os_page_size;
420
421 /* Add an entry for each OS page overlapping with this buffer. */
422 for (char *ptr = startptr_buff; ptr < endptr_buff; ptr += os_page_size)
423 {
424 fctx->record[idx].bufferid = bufferid;
425 fctx->record[idx].page_num = page_num;
426 fctx->record[idx].numa_node = include_numa ? os_page_status[page_num] : -1;
427
428 /* advance to the next entry/page */
429 ++idx;
430 ++page_num;
431 }
432 }
433
435
436 if (include_numa)
438
439 /* Set max calls and remember the user function context. */
440 funcctx->max_calls = idx;
441 funcctx->user_fctx = fctx;
442
443 /* Remember this backend touched the pages (only relevant for NUMA) */
444 if (include_numa)
445 firstNumaTouch = false;
446 }
447
449
450 /* Get the saved state */
451 fctx = funcctx->user_fctx;
452
453 if (funcctx->call_cntr < funcctx->max_calls)
454 {
455 uint32 i = funcctx->call_cntr;
458
459 values[0] = Int32GetDatum(fctx->record[i].bufferid);
460 nulls[0] = false;
461
462 values[1] = Int64GetDatum(fctx->record[i].page_num);
463 nulls[1] = false;
464
465 if (fctx->include_numa)
466 {
467 /* status is valid node number */
468 if (fctx->record[i].numa_node >= 0)
469 {
470 values[2] = Int32GetDatum(fctx->record[i].numa_node);
471 nulls[2] = false;
472 }
473 else
474 {
475 /* some kind of error (e.g. pages moved to swap) */
476 values[2] = (Datum) 0;
477 nulls[2] = true;
478 }
479 }
480 else
481 {
482 values[2] = (Datum) 0;
483 nulls[2] = true;
484 }
485
486 /* Build and return the tuple. */
487 tuple = heap_form_tuple(fctx->tupdesc, values, nulls);
488 result = HeapTupleGetDatum(tuple);
489
491 }
492 else
494}
495
496/*
497 * pg_buffercache_os_pages
498 *
499 * Retrieve information about OS pages, with or without NUMA information.
500 */
501Datum
503{
504 bool include_numa;
505
506 /* Get the boolean parameter that controls the NUMA behavior. */
507 include_numa = PG_GETARG_BOOL(0);
508
509 return pg_buffercache_os_pages_internal(fcinfo, include_numa);
510}
511
512/* Backward-compatible wrapper for v1.6. */
513Datum
515{
516 /* Call internal function with include_numa=true */
517 return pg_buffercache_os_pages_internal(fcinfo, true);
518}
519
520Datum
522{
524 TupleDesc tupledesc;
525 HeapTuple tuple;
528
534
535 if (get_call_result_type(fcinfo, NULL, &tupledesc) != TYPEFUNC_COMPOSITE)
536 elog(ERROR, "return type must be a row type");
537
538 for (int i = 0; i < NBuffers; i++)
539 {
542
544
545 /*
546 * This function summarizes the state of all headers. Locking the
547 * buffer headers wouldn't provide an improved result as the state of
548 * the buffer can still change after we release the lock and it'd
549 * noticeably increase the cost of the function.
550 */
553
554 if (buf_state & BM_VALID)
555 {
556 buffers_used++;
558
559 if (buf_state & BM_DIRTY)
561 }
562 else
564
567 }
568
569 memset(nulls, 0, sizeof(nulls));
574
575 if (buffers_used != 0)
577 else
578 nulls[4] = true;
579
580 /* Build and return the tuple. */
581 tuple = heap_form_tuple(tupledesc, values, nulls);
582 result = HeapTupleGetDatum(tuple);
583
585}
586
587Datum
589{
590 ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
591 int usage_counts[BM_MAX_USAGE_COUNT + 1] = {0};
592 int dirty[BM_MAX_USAGE_COUNT + 1] = {0};
593 int pinned[BM_MAX_USAGE_COUNT + 1] = {0};
595 bool nulls[NUM_BUFFERCACHE_USAGE_COUNTS_ELEM] = {0};
596
597 InitMaterializedSRF(fcinfo, 0);
598
599 for (int i = 0; i < NBuffers; i++)
600 {
603 int usage_count;
604
606
609
610 if (buf_state & BM_DIRTY)
611 dirty[usage_count]++;
612
614 pinned[usage_count]++;
615 }
616
617 for (int i = 0; i < BM_MAX_USAGE_COUNT + 1; i++)
618 {
619 values[0] = Int32GetDatum(i);
621 values[2] = Int32GetDatum(dirty[i]);
622 values[3] = Int32GetDatum(pinned[i]);
623
624 tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);
625 }
626
627 return (Datum) 0;
628}
629
630/*
631 * Helper function to check if the user has superuser privileges.
632 */
633static void
635{
636 if (!superuser())
639 errmsg("must be superuser to use %s()",
640 func_name)));
641}
642
643/*
644 * Try to evict a shared buffer.
645 */
646Datum
648{
650 TupleDesc tupledesc;
651 HeapTuple tuple;
653 bool nulls[NUM_BUFFERCACHE_EVICT_ELEM] = {0};
654
656 bool buffer_flushed;
657
658 if (get_call_result_type(fcinfo, NULL, &tupledesc) != TYPEFUNC_COMPOSITE)
659 elog(ERROR, "return type must be a row type");
660
661 pg_buffercache_superuser_check("pg_buffercache_evict");
662
664 elog(ERROR, "bad buffer ID: %d", buf);
665
668
669 tuple = heap_form_tuple(tupledesc, values, nulls);
670 result = HeapTupleGetDatum(tuple);
671
673}
674
675/*
676 * Try to evict specified relation.
677 */
678Datum
680{
682 TupleDesc tupledesc;
683 HeapTuple tuple;
685 bool nulls[NUM_BUFFERCACHE_EVICT_RELATION_ELEM] = {0};
686
687 Oid relOid;
688 Relation rel;
689
693
694 if (get_call_result_type(fcinfo, NULL, &tupledesc) != TYPEFUNC_COMPOSITE)
695 elog(ERROR, "return type must be a row type");
696
697 pg_buffercache_superuser_check("pg_buffercache_evict_relation");
698
699 relOid = PG_GETARG_OID(0);
700
701 rel = relation_open(relOid, AccessShareLock);
702
706 errmsg("relation uses local buffers, %s() is intended to be used for shared buffers only",
707 "pg_buffercache_evict_relation")));
708
711
713
717
718 tuple = heap_form_tuple(tupledesc, values, nulls);
719 result = HeapTupleGetDatum(tuple);
720
722}
723
724
725/*
726 * Try to evict all shared buffers.
727 */
728Datum
730{
732 TupleDesc tupledesc;
733 HeapTuple tuple;
735 bool nulls[NUM_BUFFERCACHE_EVICT_ALL_ELEM] = {0};
736
740
741 if (get_call_result_type(fcinfo, NULL, &tupledesc) != TYPEFUNC_COMPOSITE)
742 elog(ERROR, "return type must be a row type");
743
744 pg_buffercache_superuser_check("pg_buffercache_evict_all");
745
748
752
753 tuple = heap_form_tuple(tupledesc, values, nulls);
754 result = HeapTupleGetDatum(tuple);
755
757}
758
759/*
760 * Try to mark a shared buffer as dirty.
761 */
762Datum
764{
765
767 TupleDesc tupledesc;
768 HeapTuple tuple;
770 bool nulls[NUM_BUFFERCACHE_MARK_DIRTY_ELEM] = {0};
771
774
775 if (get_call_result_type(fcinfo, NULL, &tupledesc) != TYPEFUNC_COMPOSITE)
776 elog(ERROR, "return type must be a row type");
777
778 pg_buffercache_superuser_check("pg_buffercache_mark_dirty");
779
781 elog(ERROR, "bad buffer ID: %d", buf);
782
785
786 tuple = heap_form_tuple(tupledesc, values, nulls);
787 result = HeapTupleGetDatum(tuple);
788
790}
791
792/*
793 * Try to mark all the shared buffers of a relation as dirty.
794 */
795Datum
797{
799 TupleDesc tupledesc;
800 HeapTuple tuple;
803
804 Oid relOid;
805 Relation rel;
806
810
811 if (get_call_result_type(fcinfo, NULL, &tupledesc) != TYPEFUNC_COMPOSITE)
812 elog(ERROR, "return type must be a row type");
813
814 pg_buffercache_superuser_check("pg_buffercache_mark_dirty_relation");
815
816 relOid = PG_GETARG_OID(0);
817
818 rel = relation_open(relOid, AccessShareLock);
819
823 errmsg("relation uses local buffers, %s() is intended to be used for shared buffers only",
824 "pg_buffercache_mark_dirty_relation")));
825
828
830
834
835 tuple = heap_form_tuple(tupledesc, values, nulls);
836 result = HeapTupleGetDatum(tuple);
837
839}
840
841/*
842 * Try to mark all the shared buffers as dirty.
843 */
844Datum
846{
848 TupleDesc tupledesc;
849 HeapTuple tuple;
851 bool nulls[NUM_BUFFERCACHE_MARK_DIRTY_ALL_ELEM] = {0};
852
856
857 if (get_call_result_type(fcinfo, NULL, &tupledesc) != TYPEFUNC_COMPOSITE)
858 elog(ERROR, "return type must be a row type");
859
860 pg_buffercache_superuser_check("pg_buffercache_mark_dirty_all");
861
864
868
869 tuple = heap_form_tuple(tupledesc, values, nulls);
870 result = HeapTupleGetDatum(tuple);
871
873}
Datum idx(PG_FUNCTION_ARGS)
Definition _int_op.c:262
static uint64 pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
Definition atomics.h:467
int16 AttrNumber
Definition attnum.h:21
uint32 BlockNumber
Definition block.h:31
#define InvalidBlockNumber
Definition block.h:33
static Datum values[MAXATTR]
Definition bootstrap.c:190
int Buffer
Definition buf.h:23
#define BM_MAX_USAGE_COUNT
#define BM_TAG_VALID
static ForkNumber BufTagGetForkNum(const BufferTag *tag)
static RelFileNumber BufTagGetRelNumber(const BufferTag *tag)
static void UnlockBufHdr(BufferDesc *desc)
#define BM_DIRTY
#define BUF_STATE_GET_USAGECOUNT(state)
#define BUF_STATE_GET_REFCOUNT(state)
#define BM_VALID
static BufferDesc * GetBufferDescriptor(uint32 id)
static Buffer BufferDescriptorGetBuffer(const BufferDesc *bdesc)
void EvictAllUnpinnedBuffers(int32 *buffers_evicted, int32 *buffers_flushed, int32 *buffers_skipped)
Definition bufmgr.c:7982
void EvictRelUnpinnedBuffers(Relation rel, int32 *buffers_evicted, int32 *buffers_flushed, int32 *buffers_skipped)
Definition bufmgr.c:8032
uint64 LockBufHdr(BufferDesc *desc)
Definition bufmgr.c:7518
void MarkDirtyAllUnpinnedBuffers(int32 *buffers_dirtied, int32 *buffers_already_dirty, int32 *buffers_skipped)
Definition bufmgr.c:8232
bool MarkDirtyUnpinnedBuffer(Buffer buf, bool *buffer_already_dirty)
Definition bufmgr.c:8139
bool EvictUnpinnedBuffer(Buffer buf, bool *buffer_flushed)
Definition bufmgr.c:7953
void MarkDirtyRelUnpinnedBuffers(Relation rel, int32 *buffers_dirtied, int32 *buffers_already_dirty, int32 *buffers_skipped)
Definition bufmgr.c:8175
static Block BufferGetBlock(Buffer buffer)
Definition bufmgr.h:435
#define TYPEALIGN(ALIGNVAL, LEN)
Definition c.h:889
#define Max(x, y)
Definition c.h:1085
#define Assert(condition)
Definition c.h:943
int64_t int64
Definition c.h:621
#define UINT64_FORMAT
Definition c.h:635
int32_t int32
Definition c.h:620
uint64_t uint64
Definition c.h:625
uint16_t uint16
Definition c.h:623
uint32_t uint32
Definition c.h:624
size_t Size
Definition c.h:689
#define TYPEALIGN_DOWN(ALIGNVAL, LEN)
Definition c.h:901
uint32 result
int errcode(int sqlerrcode)
Definition elog.c:874
#define DEBUG1
Definition elog.h:31
#define ERROR
Definition elog.h:40
#define elog(elevel,...)
Definition elog.h:228
#define ereport(elevel,...)
Definition elog.h:152
TupleDesc BlessTupleDesc(TupleDesc tupdesc)
#define palloc_object(type)
Definition fe_memutils.h:74
#define palloc_array(type, count)
Definition fe_memutils.h:76
#define palloc0_array(type, count)
Definition fe_memutils.h:77
#define PG_GETARG_OID(n)
Definition fmgr.h:275
#define PG_MODULE_MAGIC_EXT(...)
Definition fmgr.h:540
#define PG_FUNCTION_INFO_V1(funcname)
Definition fmgr.h:417
#define PG_GETARG_INT32(n)
Definition fmgr.h:269
#define PG_GETARG_BOOL(n)
Definition fmgr.h:274
#define PG_RETURN_DATUM(x)
Definition fmgr.h:354
#define PG_FUNCTION_ARGS
Definition fmgr.h:193
void InitMaterializedSRF(FunctionCallInfo fcinfo, uint32 flags)
Definition funcapi.c:76
TypeFuncClass get_call_result_type(FunctionCallInfo fcinfo, Oid *resultTypeId, TupleDesc *resultTupleDesc)
Definition funcapi.c:276
#define SRF_IS_FIRSTCALL()
Definition funcapi.h:304
#define SRF_PERCALL_SETUP()
Definition funcapi.h:308
@ TYPEFUNC_COMPOSITE
Definition funcapi.h:149
#define SRF_RETURN_NEXT(_funcctx, _result)
Definition funcapi.h:310
#define SRF_FIRSTCALL_INIT()
Definition funcapi.h:306
static Datum HeapTupleGetDatum(const HeapTupleData *tuple)
Definition funcapi.h:230
#define SRF_RETURN_DONE(_funcctx)
Definition funcapi.h:328
int NBuffers
Definition globals.c:144
HeapTuple heap_form_tuple(TupleDesc tupleDescriptor, const Datum *values, const bool *isnull)
Definition heaptuple.c:1025
int i
Definition isn.c:77
#define AccessShareLock
Definition lockdefs.h:36
MemoryContext CurrentMemoryContext
Definition mcxt.c:160
void * MemoryContextAllocHuge(MemoryContext context, Size size)
Definition mcxt.c:1725
#define CHECK_FOR_INTERRUPTS()
Definition miscadmin.h:125
static char * errmsg
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition palloc.h:124
Datum pg_buffercache_os_pages(PG_FUNCTION_ARGS)
Datum pg_buffercache_evict_relation(PG_FUNCTION_ARGS)
#define NUM_BUFFERCACHE_USAGE_COUNTS_ELEM
#define NUM_BUFFERCACHE_OS_PAGES_ELEM
Datum pg_buffercache_evict(PG_FUNCTION_ARGS)
Datum pg_buffercache_mark_dirty_relation(PG_FUNCTION_ARGS)
Datum pg_buffercache_summary(PG_FUNCTION_ARGS)
static void pg_buffercache_superuser_check(char *func_name)
Datum pg_buffercache_usage_counts(PG_FUNCTION_ARGS)
#define NUM_BUFFERCACHE_SUMMARY_ELEM
Datum pg_buffercache_pages(PG_FUNCTION_ARGS)
#define NUM_BUFFERCACHE_EVICT_ELEM
#define NUM_BUFFERCACHE_PAGES_MIN_ELEM
#define NUM_BUFFERCACHE_EVICT_ALL_ELEM
Datum pg_buffercache_evict_all(PG_FUNCTION_ARGS)
#define NUM_BUFFERCACHE_MARK_DIRTY_RELATION_ELEM
#define NUM_BUFFERCACHE_PAGES_ELEM
#define NUM_BUFFERCACHE_MARK_DIRTY_ELEM
#define NUM_BUFFERCACHE_MARK_DIRTY_ALL_ELEM
Datum pg_buffercache_mark_dirty_all(PG_FUNCTION_ARGS)
Datum pg_buffercache_mark_dirty(PG_FUNCTION_ARGS)
Datum pg_buffercache_numa_pages(PG_FUNCTION_ARGS)
static bool firstNumaTouch
static Datum pg_buffercache_os_pages_internal(FunctionCallInfo fcinfo, bool include_numa)
#define NUM_BUFFERCACHE_EVICT_RELATION_ELEM
#define pg_numa_touch_mem_if_required(ptr)
Definition pg_numa.h:37
PGDLLIMPORT int pg_numa_query_pages(int pid, unsigned long count, void **pages, int *status)
Definition pg_numa.c:132
PGDLLIMPORT int pg_numa_init(void)
Definition pg_numa.c:125
static char buf[DEFAULT_XLOG_SEG_SIZE]
static Datum Int64GetDatum(int64 X)
Definition postgres.h:413
static Datum Int16GetDatum(int16 X)
Definition postgres.h:172
static Datum UInt16GetDatum(uint16 X)
Definition postgres.h:192
static Datum BoolGetDatum(bool X)
Definition postgres.h:112
static Datum ObjectIdGetDatum(Oid X)
Definition postgres.h:252
uint64_t Datum
Definition postgres.h:70
static Datum Float8GetDatum(float8 X)
Definition postgres.h:502
static Datum Int32GetDatum(int32 X)
Definition postgres.h:212
unsigned int Oid
static int fb(int x)
#define RelationUsesLocalBuffers(relation)
Definition rel.h:648
Oid RelFileNumber
Definition relpath.h:25
ForkNumber
Definition relpath.h:56
Size pg_get_shmem_pagesize(void)
Definition shmem.c:1304
void relation_close(Relation relation, LOCKMODE lockmode)
Definition relation.c:206
Relation relation_open(Oid relationId, LOCKMODE lockmode)
Definition relation.c:48
BufferCacheOsPagesRec * record
bool superuser(void)
Definition superuser.c:47
TupleDesc CreateTemplateTupleDesc(int natts)
Definition tupdesc.c:165
void TupleDescFinalize(TupleDesc tupdesc)
Definition tupdesc.c:511
void TupleDescInitEntry(TupleDesc desc, AttrNumber attributeNumber, const char *attributeName, Oid oidtypeid, int32 typmod, int attdim)
Definition tupdesc.c:900
void tuplestore_putvalues(Tuplestorestate *state, TupleDesc tdesc, const Datum *values, const bool *isnull)
Definition tuplestore.c:785
const char * name