mcxt.c
1/*-------------------------------------------------------------------------
2 *
3 * mcxt.c
4 * POSTGRES memory context management code.
5 *
6 * This module handles context management operations that are independent
7 * of the particular kind of context being operated on. It calls
8 * context-type-specific operations via the function pointers in a
9 * context's MemoryContextMethods struct.
10 *
11 *
12 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
13 * Portions Copyright (c) 1994, Regents of the University of California
14 *
15 *
16 * IDENTIFICATION
17 * src/backend/utils/mmgr/mcxt.c
18 *
19 *-------------------------------------------------------------------------
20 */
21
22#include "postgres.h"
23
24#include "mb/pg_wchar.h"
25#include "miscadmin.h"
26#include "nodes/pg_list.h"
27#include "storage/lwlock.h"
28#include "storage/ipc.h"
29#include "utils/dsa.h"
30#include "utils/hsearch.h"
31#include "utils/memdebug.h"
32#include "utils/memutils.h"
33#include "utils/memutils_internal.h"
34#include "utils/memutils_memorychunk.h"
35
36
37static void BogusFree(void *pointer);
38static void *BogusRealloc(void *pointer, Size size, int flags);
39static MemoryContext BogusGetChunkContext(void *pointer);
40static Size BogusGetChunkSpace(void *pointer);
41
42/*****************************************************************************
43 * GLOBAL MEMORY *
44 *****************************************************************************/
45#define BOGUS_MCTX(id) \
46 [id].free_p = BogusFree, \
47 [id].realloc = BogusRealloc, \
48 [id].get_chunk_context = BogusGetChunkContext, \
49 [id].get_chunk_space = BogusGetChunkSpace
50
51static const MemoryContextMethods mcxt_methods[] = {
52 /* aset.c */
53 [MCTX_ASET_ID].alloc = AllocSetAlloc,
54 [MCTX_ASET_ID].free_p = AllocSetFree,
55 [MCTX_ASET_ID].realloc = AllocSetRealloc,
56 [MCTX_ASET_ID].reset = AllocSetReset,
57 [MCTX_ASET_ID].delete_context = AllocSetDelete,
58 [MCTX_ASET_ID].get_chunk_context = AllocSetGetChunkContext,
59 [MCTX_ASET_ID].get_chunk_space = AllocSetGetChunkSpace,
60 [MCTX_ASET_ID].is_empty = AllocSetIsEmpty,
61 [MCTX_ASET_ID].stats = AllocSetStats,
62#ifdef MEMORY_CONTEXT_CHECKING
63 [MCTX_ASET_ID].check = AllocSetCheck,
64#endif
65
66 /* generation.c */
67 [MCTX_GENERATION_ID].alloc = GenerationAlloc,
68 [MCTX_GENERATION_ID].free_p = GenerationFree,
69 [MCTX_GENERATION_ID].realloc = GenerationRealloc,
70 [MCTX_GENERATION_ID].reset = GenerationReset,
71 [MCTX_GENERATION_ID].delete_context = GenerationDelete,
72 [MCTX_GENERATION_ID].get_chunk_context = GenerationGetChunkContext,
73 [MCTX_GENERATION_ID].get_chunk_space = GenerationGetChunkSpace,
74 [MCTX_GENERATION_ID].is_empty = GenerationIsEmpty,
75 [MCTX_GENERATION_ID].stats = GenerationStats,
76#ifdef MEMORY_CONTEXT_CHECKING
77 [MCTX_GENERATION_ID].check = GenerationCheck,
78#endif
79
80 /* slab.c */
81 [MCTX_SLAB_ID].alloc = SlabAlloc,
82 [MCTX_SLAB_ID].free_p = SlabFree,
83 [MCTX_SLAB_ID].realloc = SlabRealloc,
84 [MCTX_SLAB_ID].reset = SlabReset,
85 [MCTX_SLAB_ID].delete_context = SlabDelete,
86 [MCTX_SLAB_ID].get_chunk_context = SlabGetChunkContext,
87 [MCTX_SLAB_ID].get_chunk_space = SlabGetChunkSpace,
88 [MCTX_SLAB_ID].is_empty = SlabIsEmpty,
89 [MCTX_SLAB_ID].stats = SlabStats,
90#ifdef MEMORY_CONTEXT_CHECKING
91 [MCTX_SLAB_ID].check = SlabCheck,
92#endif
93
94 /* alignedalloc.c */
95 [MCTX_ALIGNED_REDIRECT_ID].alloc = NULL, /* not required */
96 [MCTX_ALIGNED_REDIRECT_ID].free_p = AlignedAllocFree,
97 [MCTX_ALIGNED_REDIRECT_ID].realloc = AlignedAllocRealloc,
98 [MCTX_ALIGNED_REDIRECT_ID].reset = NULL, /* not required */
99 [MCTX_ALIGNED_REDIRECT_ID].delete_context = NULL, /* not required */
100 [MCTX_ALIGNED_REDIRECT_ID].get_chunk_context = AlignedAllocGetChunkContext,
101 [MCTX_ALIGNED_REDIRECT_ID].get_chunk_space = AlignedAllocGetChunkSpace,
102 [MCTX_ALIGNED_REDIRECT_ID].is_empty = NULL, /* not required */
103 [MCTX_ALIGNED_REDIRECT_ID].stats = NULL, /* not required */
104#ifdef MEMORY_CONTEXT_CHECKING
105 [MCTX_ALIGNED_REDIRECT_ID].check = NULL, /* not required */
106#endif
107
108 /* bump.c */
109 [MCTX_BUMP_ID].alloc = BumpAlloc,
110 [MCTX_BUMP_ID].free_p = BumpFree,
111 [MCTX_BUMP_ID].realloc = BumpRealloc,
112 [MCTX_BUMP_ID].reset = BumpReset,
113 [MCTX_BUMP_ID].delete_context = BumpDelete,
114 [MCTX_BUMP_ID].get_chunk_context = BumpGetChunkContext,
115 [MCTX_BUMP_ID].get_chunk_space = BumpGetChunkSpace,
116 [MCTX_BUMP_ID].is_empty = BumpIsEmpty,
117 [MCTX_BUMP_ID].stats = BumpStats,
118#ifdef MEMORY_CONTEXT_CHECKING
119 [MCTX_BUMP_ID].check = BumpCheck,
120#endif
121
122
123 /*
124 * Reserved and unused IDs should have dummy entries here. This allows us
125 * to fail cleanly if a bogus pointer is passed to pfree or the like. It
126 * seems sufficient to provide routines for the methods that might get
127 * invoked from inspection of a chunk (see MCXT_METHOD calls below).
128 */
140};
141
142#undef BOGUS_MCTX
143/*
144 * This is passed to MemoryContextStatsInternal to determine whether
145 * to print context statistics or not, and where to print them: logs or
146 * stderr.
147 */
148typedef enum PrintDestination
149{
150 PRINT_STATS_TO_STDERR = 0,
151 PRINT_STATS_TO_LOGS,
152 PRINT_STATS_NONE
153} PrintDestination;
154
155/*
156 * CurrentMemoryContext
157 * Default memory context for allocations.
158 */
159MemoryContext CurrentMemoryContext = NULL;
160
161/*
162 * Standard top-level contexts. For a description of the purpose of each
163 * of these contexts, refer to src/backend/utils/mmgr/README
164 */
165MemoryContext TopMemoryContext = NULL;
166MemoryContext ErrorContext = NULL;
167MemoryContext PostmasterContext = NULL;
168MemoryContext CacheMemoryContext = NULL;
169MemoryContext MessageContext = NULL;
170MemoryContext TopTransactionContext = NULL;
171MemoryContext CurTransactionContext = NULL;
172
173/* This is a transient link to the active portal's memory context: */
174MemoryContext PortalContext = NULL;
176
177static void MemoryContextDeleteOnly(MemoryContext context);
178static void MemoryContextCallResetCallbacks(MemoryContext context);
179static void MemoryContextStatsInternal(MemoryContext context, int level,
180 int max_level, int max_children,
181 MemoryContextCounters *totals,
182 PrintDestination print_location,
183 int *num_contexts);
184static void MemoryContextStatsPrint(MemoryContext context, void *passthru,
185 const char *stats_string,
186 bool print_to_stderr);
187static void PublishMemoryContext(MemoryStatsEntry *memcxt_infos,
188 int curr_id, MemoryContext context,
189 List *path,
190 MemoryContextCounters stat,
191 int num_contexts, dsa_area *area,
192 int max_levels);
193static void compute_contexts_count_and_ids(List *contexts, HTAB *context_id_lookup,
194 int *stats_count,
195 bool summary);
196static List *compute_context_path(MemoryContext c, HTAB *context_id_lookup);
197static void free_memorycontextstate_dsa(dsa_area *area, int total_stats,
198 dsa_pointer prev_dsa_pointer);
199static void end_memorycontext_reporting(void);
200
201/*
202 * You should not do memory allocations within a critical section, because
203 * an out-of-memory error will be escalated to a PANIC. To enforce that
204 * rule, the allocation functions Assert that.
205 */
206#define AssertNotInCriticalSection(context) \
207 Assert(CritSectionCount == 0 || (context)->allowInCritSection)
208
209/*
210 * Call the given function in the MemoryContextMethods for the memory context
211 * type that 'pointer' belongs to.
212 */
213#define MCXT_METHOD(pointer, method) \
214 mcxt_methods[GetMemoryChunkMethodID(pointer)].method
215
216/*
217 * GetMemoryChunkMethodID
218 * Return the MemoryContextMethodID from the uint64 chunk header which
219 * directly precedes 'pointer'.
220 */
221static inline MemoryContextMethodID
222GetMemoryChunkMethodID(const void *pointer)
223{
224 uint64 header;
225
226 /*
227 * Try to detect bogus pointers handed to us, poorly though we can.
228 * Presumably, a pointer that isn't MAXALIGNED isn't pointing at an
229 * allocated chunk.
230 */
231 Assert(pointer == (const void *) MAXALIGN(pointer));
232
233 /* Allow access to the uint64 header */
234 VALGRIND_MAKE_MEM_DEFINED((char *) pointer - sizeof(uint64), sizeof(uint64));
235
236 header = *((const uint64 *) ((const char *) pointer - sizeof(uint64)));
237
238 /* Disallow access to the uint64 header */
239 VALGRIND_MAKE_MEM_NOACCESS((char *) pointer - sizeof(uint64), sizeof(uint64));
240
241 return (MemoryContextMethodID) (header & MEMORY_CONTEXT_METHODID_MASK);
242}
243
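/*
 * [Editor's illustration, not part of the original file] Given the header
 * layout assumed above, a pointer-only operation such as pfree dispatches
 * roughly like this:
 *
 *		MemoryContextMethodID id = GetMemoryChunkMethodID(pointer);
 *
 *		mcxt_methods[id].free_p(pointer);
 *
 * which is exactly what MCXT_METHOD(pointer, free_p) expands to.
 */
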
244/*
245 * GetMemoryChunkHeader
246 * Return the uint64 chunk header which directly precedes 'pointer'.
247 *
248 * This is only used after GetMemoryChunkMethodID, so no need for error checks.
249 */
250static inline uint64
251GetMemoryChunkHeader(const void *pointer)
252{
253 uint64 header;
254
255 /* Allow access to the uint64 header */
256 VALGRIND_MAKE_MEM_DEFINED((char *) pointer - sizeof(uint64), sizeof(uint64));
257
258 header = *((const uint64 *) ((const char *) pointer - sizeof(uint64)));
259
260 /* Disallow access to the uint64 header */
261 VALGRIND_MAKE_MEM_NOACCESS((char *) pointer - sizeof(uint64), sizeof(uint64));
262
263 return header;
264}
265
266/*
267 * MemoryContextTraverseNext
268 * Helper function to traverse all descendants of a memory context
269 * without recursion.
270 *
271 * Recursion could lead to out-of-stack errors with deep context hierarchies,
272 * which would be unpleasant in error cleanup code paths.
273 *
274 * To process 'context' and all its descendants, use a loop like this:
275 *
276 * <process 'context'>
277 * for (MemoryContext curr = context->firstchild;
278 * curr != NULL;
279 * curr = MemoryContextTraverseNext(curr, context))
280 * {
281 * <process 'curr'>
282 * }
283 *
284 * This visits all the contexts in pre-order, that is a node is visited
285 * before its children.
286 */
287static MemoryContext
288MemoryContextTraverseNext(MemoryContext curr, MemoryContext top)
289{
290 /* After processing a node, traverse to its first child if any */
291 if (curr->firstchild != NULL)
292 return curr->firstchild;
293
294 /*
295 * After processing a childless node, traverse to its next sibling if
296 * there is one. If there isn't, traverse back up to the parent (which
297 * has already been visited, and now so have all its descendants). We're
298 * done if that is "top", otherwise traverse to its next sibling if any,
299 * otherwise repeat moving up.
300 */
301 while (curr->nextchild == NULL)
302 {
303 curr = curr->parent;
304 if (curr == top)
305 return NULL;
306 }
307 return curr->nextchild;
308}
309
310/*
311 * Support routines to trap use of invalid memory context method IDs
312 * (from calling pfree or the like on a bogus pointer). As a possible
313 * aid in debugging, we report the header word along with the pointer
314 * address (if we got here, there must be an accessible header word).
315 */
316static void
317BogusFree(void *pointer)
318{
319 elog(ERROR, "pfree called with invalid pointer %p (header 0x%016" PRIx64 ")",
320 pointer, GetMemoryChunkHeader(pointer));
321}
322
323static void *
324BogusRealloc(void *pointer, Size size, int flags)
325{
326 elog(ERROR, "repalloc called with invalid pointer %p (header 0x%016" PRIx64 ")",
327 pointer, GetMemoryChunkHeader(pointer));
328 return NULL; /* keep compiler quiet */
329}
330
331static MemoryContext
332BogusGetChunkContext(void *pointer)
333{
334 elog(ERROR, "GetMemoryChunkContext called with invalid pointer %p (header 0x%016" PRIx64 ")",
335 pointer, GetMemoryChunkHeader(pointer));
336 return NULL; /* keep compiler quiet */
337}
338
339static Size
340BogusGetChunkSpace(void *pointer)
341{
342 elog(ERROR, "GetMemoryChunkSpace called with invalid pointer %p (header 0x%016" PRIx64 ")",
343 pointer, GetMemoryChunkHeader(pointer));
344 return 0; /* keep compiler quiet */
345}
346
347
348/*****************************************************************************
349 * EXPORTED ROUTINES *
350 *****************************************************************************/
351
352
353/*
354 * MemoryContextInit
355 * Start up the memory-context subsystem.
356 *
357 * This must be called before creating contexts or allocating memory in
358 * contexts. TopMemoryContext and ErrorContext are initialized here;
359 * other contexts must be created afterwards.
360 *
361 * In normal multi-backend operation, this is called once during
362 * postmaster startup, and not at all by individual backend startup
363 * (since the backends inherit an already-initialized context subsystem
364 * by virtue of being forked off the postmaster). But in an EXEC_BACKEND
365 * build, each process must do this for itself.
366 *
367 * In a standalone backend this must be called during backend startup.
368 */
369void
370MemoryContextInit(void)
371{
372 Assert(TopMemoryContext == NULL);
373
374 /*
375 * First, initialize TopMemoryContext, which is the parent of all others.
376 */
377 TopMemoryContext = AllocSetContextCreate((MemoryContext) NULL,
378 "TopMemoryContext",
379 ALLOCSET_DEFAULT_SIZES);
380
381 /*
382 * Not having any other place to point CurrentMemoryContext, make it point
383 * to TopMemoryContext. Caller should change this soon!
384 */
385 CurrentMemoryContext = TopMemoryContext;
386
387 /*
388 * Initialize ErrorContext as an AllocSetContext with slow growth rate ---
389 * we don't really expect much to be allocated in it. More to the point,
390 * require it to contain at least 8K at all times. This is the only case
391 * where retained memory in a context is *essential* --- we want to be
392 * sure ErrorContext still has some memory even if we've run out
393 * elsewhere! Also, allow allocations in ErrorContext within a critical
394 * section. Otherwise a PANIC will cause an assertion failure in the error
395 * reporting code, before printing out the real cause of the failure.
396 *
397 * This should be the last step in this function, as elog.c assumes memory
398 * management works once ErrorContext is non-null.
399 */
400 ErrorContext = AllocSetContextCreate(TopMemoryContext,
401 "ErrorContext",
402 8 * 1024,
403 8 * 1024,
404 8 * 1024);
405 MemoryContextAllowInCriticalSection(ErrorContext, true);
406}
407
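/*
 * [Editor's illustration, not part of the original file] Once the subsystem
 * is initialized, typical backend code creates its own context under
 * TopMemoryContext (or some other parent) and allocates within it:
 *
 *		MemoryContext mycxt;
 *		MemoryContext oldcxt;
 *
 *		mycxt = AllocSetContextCreate(TopMemoryContext,
 *									  "my module context",
 *									  ALLOCSET_DEFAULT_SIZES);
 *		oldcxt = MemoryContextSwitchTo(mycxt);
 *		... palloc() here draws from mycxt ...
 *		MemoryContextSwitchTo(oldcxt);
 *		MemoryContextDelete(mycxt);
 */
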
408/*
409 * MemoryContextReset
410 * Release all space allocated within a context and delete all its
411 * descendant contexts (but not the named context itself).
412 */
413void
414MemoryContextReset(MemoryContext context)
415{
416 Assert(MemoryContextIsValid(context));
417
418 /* save a function call in common case where there are no children */
419 if (context->firstchild != NULL)
420 MemoryContextDeleteChildren(context);
421
422 /* save a function call if no pallocs since startup or last reset */
423 if (!context->isReset)
424 MemoryContextResetOnly(context);
425}
426
427/*
428 * MemoryContextResetOnly
429 * Release all space allocated within a context.
430 * Nothing is done to the context's descendant contexts.
431 */
432void
433MemoryContextResetOnly(MemoryContext context)
434{
435 Assert(MemoryContextIsValid(context));
436
437 /* Nothing to do if no pallocs since startup or last reset */
438 if (!context->isReset)
439 {
440 MemoryContextCallResetCallbacks(context);
441
442 /*
443 * If context->ident points into the context's memory, it will become
444 * a dangling pointer. We could prevent that by setting it to NULL
445 * here, but that would break valid coding patterns that keep the
446 * ident elsewhere, e.g. in a parent context. So for now we assume
447 * the programmer got it right.
448 */
449
450 context->methods->reset(context);
451 context->isReset = true;
452 VALGRIND_DESTROY_MEMPOOL(context);
453 VALGRIND_CREATE_MEMPOOL(context, 0, false);
454 }
455}
456
457/*
458 * MemoryContextResetChildren
459 * Release all space allocated within a context's descendants,
460 * but don't delete the contexts themselves. The named context
461 * itself is not touched.
462 */
463void
464MemoryContextResetChildren(MemoryContext context)
465{
466 Assert(MemoryContextIsValid(context));
467
468 for (MemoryContext curr = context->firstchild;
469 curr != NULL;
470 curr = MemoryContextTraverseNext(curr, context))
471 {
472 MemoryContextResetOnly(curr);
473 }
474}
475
476/*
477 * MemoryContextDelete
478 * Delete a context and its descendants, and release all space
479 * allocated therein.
480 *
481 * The type-specific delete routine removes all storage for the context,
482 * but we have to deal with descendant nodes here.
483 */
484void
485MemoryContextDelete(MemoryContext context)
486{
487 MemoryContext curr;
488
489 Assert(MemoryContextIsValid(context));
490
491 /*
492 * Delete subcontexts from the bottom up.
493 *
494 * Note: Do not use recursion here. A "stack depth limit exceeded" error
495 * would be unpleasant if we're already in the process of cleaning up from
496 * transaction abort. We also cannot use MemoryContextTraverseNext() here
497 * because we modify the tree as we go.
498 */
499 curr = context;
500 for (;;)
501 {
502 MemoryContext parent;
503
504 /* Descend down until we find a leaf context with no children */
505 while (curr->firstchild != NULL)
506 curr = curr->firstchild;
507
508 /*
509 * We're now at a leaf with no children. Free it and continue from the
510 * parent. Or if this was the original node, we're all done.
511 */
512 parent = curr->parent;
513 MemoryContextDeleteOnly(curr);
514
515 if (curr == context)
516 break;
517 curr = parent;
518 }
519}
520
521/*
522 * Subroutine of MemoryContextDelete,
523 * to delete a context that has no children.
524 * We must also delink the context from its parent, if it has one.
525 */
526static void
527MemoryContextDeleteOnly(MemoryContext context)
528{
529 Assert(MemoryContextIsValid(context));
530 /* We had better not be deleting TopMemoryContext ... */
531 Assert(context != TopMemoryContext);
532 /* And not CurrentMemoryContext, either */
533 Assert(context != CurrentMemoryContext);
534 /* All the children should've been deleted already */
535 Assert(context->firstchild == NULL);
536
537 /*
538 * It's not entirely clear whether 'tis better to do this before or after
539 * delinking the context; but an error in a callback will likely result in
540 * leaking the whole context (if it's not a root context) if we do it
541 * after, so let's do it before.
542 */
543 MemoryContextCallResetCallbacks(context);
544
545 /*
546 * We delink the context from its parent before deleting it, so that if
547 * there's an error we won't have deleted/busted contexts still attached
548 * to the context tree. Better a leak than a crash.
549 */
550 MemoryContextSetParent(context, NULL);
551
552 /*
553 * Also reset the context's ident pointer, in case it points into the
554 * context. This would only matter if someone tries to get stats on the
555 * (already unlinked) context, which is unlikely, but let's be safe.
556 */
557 context->ident = NULL;
558
559 context->methods->delete_context(context);
560
561 VALGRIND_DESTROY_MEMPOOL(context);
562}
563
564/*
565 * MemoryContextDeleteChildren
566 * Delete all the descendants of the named context and release all
567 * space allocated therein. The named context itself is not touched.
568 */
569void
570MemoryContextDeleteChildren(MemoryContext context)
571{
572 Assert(MemoryContextIsValid(context));
573
574 /*
575 * MemoryContextDelete will delink the child from me, so just iterate as
576 * long as there is a child.
577 */
578 while (context->firstchild != NULL)
579 MemoryContextDelete(context->firstchild);
580}
581
582/*
583 * MemoryContextRegisterResetCallback
584 * Register a function to be called before next context reset/delete.
585 * Such callbacks will be called in reverse order of registration.
586 *
587 * The caller is responsible for allocating a MemoryContextCallback struct
588 * to hold the info about this callback request, and for filling in the
589 * "func" and "arg" fields in the struct to show what function to call with
590 * what argument. Typically the callback struct should be allocated within
591 * the specified context, since that means it will automatically be freed
592 * when no longer needed.
593 *
594 * There is no API for deregistering a callback once registered. If you
595 * want it to not do anything anymore, adjust the state pointed to by its
596 * "arg" to indicate that.
597 */
598void
599MemoryContextRegisterResetCallback(MemoryContext context,
600 MemoryContextCallback *cb)
601{
602 Assert(MemoryContextIsValid(context));
603
604 /* Push onto head so this will be called before older registrants. */
605 cb->next = context->reset_cbs;
606 context->reset_cbs = cb;
607 /* Mark the context as non-reset (it probably is already). */
608 context->isReset = false;
609}
610
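/*
 * [Editor's illustration, not part of the original file] A typical
 * registration, assuming a caller-defined cleanup routine my_cleanup()
 * and state pointer my_state:
 *
 *		MemoryContextCallback *cb;
 *
 *		cb = MemoryContextAlloc(cxt, sizeof(MemoryContextCallback));
 *		cb->func = my_cleanup;
 *		cb->arg = my_state;
 *		MemoryContextRegisterResetCallback(cxt, cb);
 *
 * Because the callback struct lives inside 'cxt', it is released along with
 * the context once the callback has been run.
 */
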
611/*
612 * MemoryContextCallResetCallbacks
613 * Internal function to call all registered callbacks for context.
614 */
615static void
616MemoryContextCallResetCallbacks(MemoryContext context)
617{
618 MemoryContextCallback *cb;
619
620 /*
621 * We pop each callback from the list before calling. That way, if an
622 * error occurs inside the callback, we won't try to call it a second time
623 * in the likely event that we reset or delete the context later.
624 */
625 while ((cb = context->reset_cbs) != NULL)
626 {
627 context->reset_cbs = cb->next;
628 cb->func(cb->arg);
629 }
630}
631
632/*
633 * MemoryContextSetIdentifier
634 * Set the identifier string for a memory context.
635 *
636 * An identifier can be provided to help distinguish among different contexts
637 * of the same kind in memory context stats dumps. The identifier string
638 * must live at least as long as the context it is for; typically it is
639 * allocated inside that context, so that it automatically goes away on
640 * context deletion. Pass id = NULL to forget any old identifier.
641 */
642void
643MemoryContextSetIdentifier(MemoryContext context, const char *id)
644{
645 Assert(MemoryContextIsValid(context));
646 context->ident = id;
647}
648
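/*
 * [Editor's illustration, not part of the original file] For example, a
 * context holding a cached query might label itself with the query text,
 * keeping the copy inside the context so it vanishes with it ('querytext'
 * is a hypothetical variable here):
 *
 *		MemoryContextSetIdentifier(cxt,
 *								   MemoryContextStrdup(cxt, querytext));
 */
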
649/*
650 * MemoryContextSetParent
651 * Change a context to belong to a new parent (or no parent).
652 *
653 * We provide this as an API function because it is sometimes useful to
654 * change a context's lifespan after creation. For example, a context
655 * might be created underneath a transient context, filled with data,
656 * and then reparented underneath CacheMemoryContext to make it long-lived.
657 * In this way no special effort is needed to get rid of the context in case
658 * a failure occurs before its contents are completely set up.
659 *
660 * Callers often assume that this function cannot fail, so don't put any
661 * elog(ERROR) calls in it.
662 *
663 * A possible caller error is to reparent a context under itself, creating
664 * a loop in the context graph. We assert here that context != new_parent,
665 * but checking for multi-level loops seems more trouble than it's worth.
666 */
667void
668MemoryContextSetParent(MemoryContext context, MemoryContext new_parent)
669{
670 Assert(MemoryContextIsValid(context));
671 Assert(context != new_parent);
672
673 /* Fast path if it's got correct parent already */
674 if (new_parent == context->parent)
675 return;
676
677 /* Delink from existing parent, if any */
678 if (context->parent)
679 {
680 MemoryContext parent = context->parent;
681
682 if (context->prevchild != NULL)
683 context->prevchild->nextchild = context->nextchild;
684 else
685 {
686 Assert(parent->firstchild == context);
687 parent->firstchild = context->nextchild;
688 }
689
690 if (context->nextchild != NULL)
691 context->nextchild->prevchild = context->prevchild;
692 }
693
694 /* And relink */
695 if (new_parent)
696 {
697 Assert(MemoryContextIsValid(new_parent));
698 context->parent = new_parent;
699 context->prevchild = NULL;
700 context->nextchild = new_parent->firstchild;
701 if (new_parent->firstchild != NULL)
702 new_parent->firstchild->prevchild = context;
703 new_parent->firstchild = context;
704 }
705 else
706 {
707 context->parent = NULL;
708 context->prevchild = NULL;
709 context->nextchild = NULL;
710 }
711}
712
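/*
 * [Editor's illustration, not part of the original file] The reparenting
 * pattern described above, in sketch form:
 *
 *		MemoryContext tmp;
 *
 *		tmp = AllocSetContextCreate(CurrentMemoryContext,
 *									"scratch for building",
 *									ALLOCSET_DEFAULT_SIZES);
 *		... fill 'tmp' with data; an error before the next call simply
 *		discards it along with its transient parent ...
 *		MemoryContextSetParent(tmp, CacheMemoryContext);
 *
 * After the final call the data lives as long as CacheMemoryContext does.
 */
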
713/*
714 * MemoryContextAllowInCriticalSection
715 * Allow/disallow allocations in this memory context within a critical
716 * section.
717 *
718 * Normally, memory allocations are not allowed within a critical section,
719 * because a failure would lead to PANIC. There are a few exceptions to
720 * that, like allocations related to debugging code that is not supposed to
721 * be enabled in production. This function can be used to exempt specific
722 * memory contexts from the assertion in palloc().
723 */
724void
725MemoryContextAllowInCriticalSection(MemoryContext context, bool allow)
726{
727 Assert(MemoryContextIsValid(context));
728
729 context->allowInCritSection = allow;
730}
731
732/*
733 * GetMemoryChunkContext
734 * Given a currently-allocated chunk, determine the MemoryContext that
735 * the chunk belongs to.
736 */
737MemoryContext
738GetMemoryChunkContext(void *pointer)
739{
740 return MCXT_METHOD(pointer, get_chunk_context) (pointer);
741}
742
743/*
744 * GetMemoryChunkSpace
745 * Given a currently-allocated chunk, determine the total space
746 * it occupies (including all memory-allocation overhead).
747 *
748 * This is useful for measuring the total space occupied by a set of
749 * allocated chunks.
750 */
751Size
752GetMemoryChunkSpace(void *pointer)
753{
754 return MCXT_METHOD(pointer, get_chunk_space) (pointer);
755}
756
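/*
 * [Editor's illustration, not part of the original file] Accounting code
 * that wants the true footprint of its allocations (not just the requested
 * sizes) can accumulate the per-chunk value:
 *
 *		char	   *item = palloc(len);
 *
 *		total_bytes += GetMemoryChunkSpace(item);
 *
 * 'len' and 'total_bytes' are hypothetical variables in this sketch.
 */
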
757/*
758 * MemoryContextGetParent
759 * Get the parent context (if any) of the specified context
760 */
761MemoryContext
762MemoryContextGetParent(MemoryContext context)
763{
764 Assert(MemoryContextIsValid(context));
765
766 return context->parent;
767}
768
769/*
770 * MemoryContextIsEmpty
771 * Is a memory context empty of any allocated space?
772 */
773bool
774MemoryContextIsEmpty(MemoryContext context)
775{
776 Assert(MemoryContextIsValid(context));
777
778 /*
779 * For now, we consider a memory context nonempty if it has any children;
780 * perhaps this should be changed later.
781 */
782 if (context->firstchild != NULL)
783 return false;
784 /* Otherwise use the type-specific inquiry */
785 return context->methods->is_empty(context);
786}
787
788/*
789 * Find the memory allocated to blocks for this memory context. If recurse is
790 * true, also include children.
791 */
792Size
793MemoryContextMemAllocated(MemoryContext context, bool recurse)
794{
795 Size total = context->mem_allocated;
796
797 Assert(MemoryContextIsValid(context));
798
799 if (recurse)
800 {
801 for (MemoryContext curr = context->firstchild;
802 curr != NULL;
803 curr = MemoryContextTraverseNext(curr, context))
804 {
805 total += curr->mem_allocated;
806 }
807 }
808
809 return total;
810}
811
812/*
813 * Return the memory consumption statistics about the given context and its
814 * children.
815 */
816void
817MemoryContextMemConsumed(MemoryContext context,
818 MemoryContextCounters *consumed)
819{
820 Assert(MemoryContextIsValid(context));
821
822 memset(consumed, 0, sizeof(*consumed));
823
824 /* Examine the context itself */
825 context->methods->stats(context, NULL, NULL, consumed, false);
826
827 /* Examine children, using iteration not recursion */
828 for (MemoryContext curr = context->firstchild;
829 curr != NULL;
830 curr = MemoryContextTraverseNext(curr, context))
831 {
832 curr->methods->stats(curr, NULL, NULL, consumed, false);
833 }
834}
835
836/*
837 * MemoryContextStats
838 * Print statistics about the named context and all its descendants.
839 *
840 * This is just a debugging utility, so it's not very fancy. However, we do
841 * make some effort to summarize when the output would otherwise be very long.
842 * The statistics are sent to stderr.
843 */
844void
845MemoryContextStats(MemoryContext context)
846{
847 /* Hard-wired limits are usually good enough */
848 MemoryContextStatsDetail(context, 100, 100, true);
849}
850
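/*
 * [Editor's note, not part of the original file] A common way to invoke
 * this is from a debugger attached to a running backend, for example:
 *
 *		(gdb) call MemoryContextStats(TopMemoryContext)
 *
 * which writes the whole context tree summary to that backend's stderr.
 */
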
851/*
852 * MemoryContextStatsDetail
853 *
854 * Entry point for use if you want to vary the number of child contexts shown.
855 *
856 * If print_to_stderr is true, print statistics about the memory contexts
857 * with fprintf(stderr), otherwise use ereport().
858 */
859void
860MemoryContextStatsDetail(MemoryContext context,
861 int max_level, int max_children,
862 bool print_to_stderr)
863{
864 MemoryContextCounters grand_totals;
865 int num_contexts;
866 PrintDestination print_location;
867
868 memset(&grand_totals, 0, sizeof(grand_totals));
869
870 if (print_to_stderr)
871 print_location = PRINT_STATS_TO_STDERR;
872 else
873 print_location = PRINT_STATS_TO_LOGS;
874
875 /* num_contexts reports the number of contexts aggregated in the output */
876 MemoryContextStatsInternal(context, 0, max_level, max_children,
877 &grand_totals, print_location, &num_contexts);
878
879 if (print_to_stderr)
880 fprintf(stderr,
881 "Grand total: %zu bytes in %zu blocks; %zu free (%zu chunks); %zu used\n",
882 grand_totals.totalspace, grand_totals.nblocks,
883 grand_totals.freespace, grand_totals.freechunks,
884 grand_totals.totalspace - grand_totals.freespace);
885 else
886 {
887 /*
888 * Use LOG_SERVER_ONLY to prevent the memory contexts from being sent
889 * to the connected client.
890 *
891 * We don't buffer the information about all memory contexts in a
892 * backend into StringInfo and log it as one message. That would
893 * require the buffer to be enlarged, risking an OOM as there could be
894 * a large number of memory contexts in a backend. Instead, we log
895 * one message per memory context.
896 */
897 ereport(LOG_SERVER_ONLY,
898 (errhidestmt(true),
899 errhidecontext(true),
900 errmsg_internal("Grand total: %zu bytes in %zu blocks; %zu free (%zu chunks); %zu used",
901 grand_totals.totalspace, grand_totals.nblocks,
902 grand_totals.freespace, grand_totals.freechunks,
903 grand_totals.totalspace - grand_totals.freespace)));
904 }
905}
906
907/*
908 * MemoryContextStatsInternal
909 * One recursion level for MemoryContextStats
910 *
911 * Print stats for this context if possible, but in any case accumulate counts
912 * into *totals (if not NULL). The callers should make sure that print_location
913 * is set to PRINT_STATS_TO_STDERR or PRINT_STATS_TO_LOGS or PRINT_STATS_NONE.
914 */
915static void
916MemoryContextStatsInternal(MemoryContext context, int level,
917 int max_level, int max_children,
918 MemoryContextCounters *totals,
919 PrintDestination print_location, int *num_contexts)
920{
921 MemoryContext child;
922 int ichild;
923
924 Assert(MemoryContextIsValid(context));
925
926 /* Examine the context itself */
927 switch (print_location)
928 {
929 case PRINT_STATS_TO_STDERR:
930 context->methods->stats(context,
931 MemoryContextStatsPrint,
932 &level,
933 totals, true);
934 break;
935
936 case PRINT_STATS_TO_LOGS:
937 context->methods->stats(context,
938 MemoryContextStatsPrint,
939 &level,
940 totals, false);
941 break;
942
943 case PRINT_STATS_NONE:
944
945 /*
946 * Do not print the statistics if print_location is
947 * PRINT_STATS_NONE, only compute totals. This is used in
948 * reporting of memory context statistics via a SQL function. The last
949 * parameter is not relevant.
950 */
951 context->methods->stats(context,
952 NULL,
953 NULL,
954 totals, false);
955 break;
956 }
957
958 /* Increment the context count for each recursive call */
959 *num_contexts = *num_contexts + 1;
960
961 /*
962 * Examine children.
963 *
964 * If we are past the recursion depth limit or already running low on
965 * stack, do not print them explicitly but just summarize them. Similarly,
966 * if there are more than max_children of them, we do not print the rest
967 * explicitly, but just summarize them.
968 */
969 child = context->firstchild;
970 ichild = 0;
971 if (level < max_level && !stack_is_too_deep())
972 {
973 for (; child != NULL && ichild < max_children;
974 child = child->nextchild, ichild++)
975 {
976 MemoryContextStatsInternal(child, level + 1,
977 max_level, max_children,
978 totals,
979 print_location, num_contexts);
980 }
981 }
982
983 if (child != NULL)
984 {
985 /* Summarize the rest of the children, avoiding recursion. */
986 MemoryContextCounters local_totals;
987
988 memset(&local_totals, 0, sizeof(local_totals));
989
990 ichild = 0;
991 while (child != NULL)
992 {
993 child->methods->stats(child, NULL, NULL, &local_totals, false);
994 ichild++;
995 child = MemoryContextTraverseNext(child, context);
996 }
997
998 /*
999 * Add the count of children contexts which are traversed in the
1000 * non-recursive manner.
1001 */
1002 *num_contexts = *num_contexts + ichild;
1003
1004 if (print_location == PRINT_STATS_TO_STDERR)
1005 {
1006 for (int i = 0; i <= level; i++)
1007 fprintf(stderr, " ");
1008 fprintf(stderr,
1009 "%d more child contexts containing %zu total in %zu blocks; %zu free (%zu chunks); %zu used\n",
1010 ichild,
1011 local_totals.totalspace,
1012 local_totals.nblocks,
1013 local_totals.freespace,
1014 local_totals.freechunks,
1015 local_totals.totalspace - local_totals.freespace);
1016 }
1017 else if (print_location == PRINT_STATS_TO_LOGS)
1018 ereport(LOG_SERVER_ONLY,
1019 (errhidestmt(true),
1020 errhidecontext(true),
1021 errmsg_internal("level: %d; %d more child contexts containing %zu total in %zu blocks; %zu free (%zu chunks); %zu used",
1022 level,
1023 ichild,
1024 local_totals.totalspace,
1025 local_totals.nblocks,
1026 local_totals.freespace,
1027 local_totals.freechunks,
1028 local_totals.totalspace - local_totals.freespace)));
1029
1030 if (totals)
1031 {
1032 totals->nblocks += local_totals.nblocks;
1033 totals->freechunks += local_totals.freechunks;
1034 totals->totalspace += local_totals.totalspace;
1035 totals->freespace += local_totals.freespace;
1036 }
1037 }
1038}
1039
1040/*
1041 * MemoryContextStatsPrint
1042 * Print callback used by MemoryContextStatsInternal
1043 *
1044 * For now, the passthru pointer just points to "int level"; later we might
1045 * make that more complicated.
1046 */
1047static void
1048MemoryContextStatsPrint(MemoryContext context, void *passthru,
1049 const char *stats_string,
1050 bool print_to_stderr)
1051{
1052 int level = *(int *) passthru;
1053 const char *name = context->name;
1054 const char *ident = context->ident;
1055 char truncated_ident[110];
1056 int i;
1057
1058 /*
1059 * It seems preferable to label dynahash contexts with just the hash table
1060 * name. Those are already unique enough, so the "dynahash" part isn't
1061 * very helpful, and this way is more consistent with pre-v11 practice.
1062 */
1063 if (ident && strcmp(name, "dynahash") == 0)
1064 {
1065 name = ident;
1066 ident = NULL;
1067 }
1068
1069 truncated_ident[0] = '\0';
1070
1071 if (ident)
1072 {
1073 /*
1074 * Some contexts may have very long identifiers (e.g., SQL queries).
1075 * Arbitrarily truncate at 100 bytes, but be careful not to break
1076 * multibyte characters. Also, replace ASCII control characters, such
1077 * as newlines, with spaces.
1078 */
1079 int idlen = strlen(ident);
1080 bool truncated = false;
1081
1082 strcpy(truncated_ident, ": ");
1083 i = strlen(truncated_ident);
1084
1085 if (idlen > 100)
1086 {
1087 idlen = pg_mbcliplen(ident, idlen, 100);
1088 truncated = true;
1089 }
1090
1091 while (idlen-- > 0)
1092 {
1093 unsigned char c = *ident++;
1094
1095 if (c < ' ')
1096 c = ' ';
1097 truncated_ident[i++] = c;
1098 }
1099 truncated_ident[i] = '\0';
1100
1101 if (truncated)
1102 strcat(truncated_ident, "...");
1103 }
1104
1105 if (print_to_stderr)
1106 {
1107 for (i = 0; i < level; i++)
1108 fprintf(stderr, " ");
1109 fprintf(stderr, "%s: %s%s\n", name, stats_string, truncated_ident);
1110 }
1111 else
1112 ereport(LOG_SERVER_ONLY,
1113 (errhidestmt(true),
1114 errhidecontext(true),
1115 errmsg_internal("level: %d; %s: %s%s",
1116 level, name, stats_string, truncated_ident)));
1117}
1118
1119/*
1120 * MemoryContextCheck
1121 * Check all chunks in the named context and its children.
1122 *
1123 * This is just a debugging utility, so it's not fancy.
1124 */
1125#ifdef MEMORY_CONTEXT_CHECKING
1126void
1127MemoryContextCheck(MemoryContext context)
1128{
1129 Assert(MemoryContextIsValid(context));
1130 context->methods->check(context);
1131
1132 for (MemoryContext curr = context->firstchild;
1133 curr != NULL;
1134 curr = MemoryContextTraverseNext(curr, context))
1135 {
1137 curr->methods->check(curr);
1138 }
1139}
1140#endif
1141
1142/*
1143 * MemoryContextCreate
1144 * Context-type-independent part of context creation.
1145 *
1146 * This is only intended to be called by context-type-specific
1147 * context creation routines, not by the unwashed masses.
1148 *
1149 * The memory context creation procedure goes like this:
1150 * 1. Context-type-specific routine makes some initial space allocation,
1151 * including enough space for the context header. If it fails,
1152 * it can ereport() with no damage done.
1153 * 2. Context-type-specific routine sets up all type-specific fields of
1154 * the header (those beyond MemoryContextData proper), as well as any
1155 * other management fields it needs to have a fully valid context.
1156 * Usually, failure in this step is impossible, but if it's possible
1157 * the initial space allocation should be freed before ereport'ing.
1158 * 3. Context-type-specific routine calls MemoryContextCreate() to fill in
1159 * the generic header fields and link the context into the context tree.
1160 * 4. We return to the context-type-specific routine, which finishes
1161 * up type-specific initialization. This routine can now do things
1162 * that might fail (like allocate more memory), so long as it's
1163 * sure the node is left in a state that delete will handle.
1164 *
1165 * node: the as-yet-uninitialized common part of the context header node.
1166 * tag: NodeTag code identifying the memory context type.
1167 * method_id: MemoryContextMethodID of the context-type being created.
1168 * parent: parent context, or NULL if this will be a top-level context.
1169 * name: name of context (must be statically allocated).
1170 *
1171 * Context routines generally assume that MemoryContextCreate can't fail,
1172 * so this can contain Assert but not elog/ereport.
1173 */
1174void
1175MemoryContextCreate(MemoryContext node,
1176 NodeTag tag,
1177 MemoryContextMethodID method_id,
1178 MemoryContext parent,
1179 const char *name)
1180{
1181 /* Creating new memory contexts is not allowed in a critical section */
1182 Assert(CritSectionCount == 0);
1183
1184 /* Initialize all standard fields of memory context header */
1185 node->type = tag;
1186 node->isReset = true;
1187 node->methods = &mcxt_methods[method_id];
1188 node->parent = parent;
1189 node->firstchild = NULL;
1190 node->mem_allocated = 0;
1191 node->prevchild = NULL;
1192 node->name = name;
1193 node->ident = NULL;
1194 node->reset_cbs = NULL;
1195
1196 /* OK to link node into context tree */
1197 if (parent)
1198 {
1199 node->nextchild = parent->firstchild;
1200 if (parent->firstchild != NULL)
1201 parent->firstchild->prevchild = node;
1202 parent->firstchild = node;
1203 /* inherit allowInCritSection flag from parent */
1204 node->allowInCritSection = parent->allowInCritSection;
1205 }
1206 else
1207 {
1208 node->nextchild = NULL;
1209 node->allowInCritSection = false;
1210 }
1211
1212 VALGRIND_CREATE_MEMPOOL(node, 0, false);
1213}
1214
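/*
 * [Editor's illustration, not part of the original file] The four-step
 * procedure above, as a hypothetical context type "foo" might follow it
 * (the tag and method ID shown are placeholders borrowed from aset.c):
 *
 *		FooContext *foo = malloc(needed);			-- step 1
 *		if (foo == NULL)
 *			ereport(ERROR, ...);
 *		foo->foo_specific_field = ...;				-- step 2
 *		MemoryContextCreate((MemoryContext) foo,	-- step 3
 *							T_AllocSetContext,
 *							MCTX_ASET_ID,
 *							parent,
 *							name);
 *		... allocate more memory, etc. ...			-- step 4
 *		return (MemoryContext) foo;
 */
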
1215/*
1216 * MemoryContextAllocationFailure
1217 * For use by MemoryContextMethods implementations to handle when malloc
1218 * returns NULL. The behavior is specific to whether MCXT_ALLOC_NO_OOM
1219 * is in 'flags'.
1220 */
1221void *
1222MemoryContextAllocationFailure(MemoryContext context, Size size, int flags)
1223{
1224 if ((flags & MCXT_ALLOC_NO_OOM) == 0)
1225 {
1226 if (TopMemoryContext)
1227 MemoryContextStats(TopMemoryContext);
1228 ereport(ERROR,
1229 (errcode(ERRCODE_OUT_OF_MEMORY),
1230 errmsg("out of memory"),
1231 errdetail("Failed on request of size %zu in memory context \"%s\".",
1232 size, context->name)));
1233 }
1234 return NULL;
1235}
1236
1237/*
1238 * MemoryContextSizeFailure
1239 * For use by MemoryContextMethods implementations to handle invalid
1240 * memory allocation request sizes.
1241 */
1242void
1243MemoryContextSizeFailure(MemoryContext context, Size size, int flags)
1244{
1245 elog(ERROR, "invalid memory alloc request size %zu", size);
1246}
1247
1248/*
1249 * MemoryContextAlloc
1250 * Allocate space within the specified context.
1251 *
1252 * This could be turned into a macro, but we'd have to import
1253 * nodes/memnodes.h into postgres.h which seems a bad idea.
1254 */
1255void *
1256MemoryContextAlloc(MemoryContext context, Size size)
1257{
1258 void *ret;
1259
1260 Assert(MemoryContextIsValid(context));
1261 AssertNotInCriticalSection(context);
1262
1263 context->isReset = false;
1264
1265 /*
1266 * For efficiency reasons, we purposefully offload the handling of
1267 * allocation failures to the MemoryContextMethods implementation as this
1268 * allows these checks to be performed only when an actual malloc needs to
1269 * be done to request more memory from the OS. Additionally, not having
1270 * to execute any instructions after this call allows the compiler to use
1271 * the sibling call optimization. If you're considering adding code after
1272 * this call, consider making it the responsibility of the 'alloc'
1273 * function instead.
1274 */
1275 ret = context->methods->alloc(context, size, 0);
1276
1277 VALGRIND_MEMPOOL_ALLOC(context, ret, size);
1278
1279 return ret;
1280}
1281
1282/*
1283 * MemoryContextAllocZero
1284 * Like MemoryContextAlloc, but clears allocated memory
1285 *
1286 * We could just call MemoryContextAlloc then clear the memory, but this
1287 * is a very common combination, so we provide the combined operation.
1288 */
1289void *
1290MemoryContextAllocZero(MemoryContext context, Size size)
1291{
1292 void *ret;
1293
1294 Assert(MemoryContextIsValid(context));
1295 AssertNotInCriticalSection(context);
1296
1297 context->isReset = false;
1298
1299 ret = context->methods->alloc(context, size, 0);
1300
1301 VALGRIND_MEMPOOL_ALLOC(context, ret, size);
1302
1303 MemSetAligned(ret, 0, size);
1304
1305 return ret;
1306}
1307
1308/*
1309 * MemoryContextAllocExtended
1310 * Allocate space within the specified context using the given flags.
1311 */
1312void *
1313MemoryContextAllocExtended(MemoryContext context, Size size, int flags)
1314{
1315 void *ret;
1316
1317 Assert(MemoryContextIsValid(context));
1318 AssertNotInCriticalSection(context);
1319
1320 if (!((flags & MCXT_ALLOC_HUGE) != 0 ? AllocHugeSizeIsValid(size) :
1321 AllocSizeIsValid(size)))
1322 elog(ERROR, "invalid memory alloc request size %zu", size);
1323
1324 context->isReset = false;
1325
1326 ret = context->methods->alloc(context, size, flags);
1327 if (unlikely(ret == NULL))
1328 return NULL;
1329
1330 VALGRIND_MEMPOOL_ALLOC(context, ret, size);
1331
1332 if ((flags & MCXT_ALLOC_ZERO) != 0)
1333 MemSetAligned(ret, 0, size);
1334
1335 return ret;
1336}
1337
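/*
 * [Editor's illustration, not part of the original file] MCXT_ALLOC_HUGE
 * lifts the normal request-size limit enforced by AllocSizeIsValid(), which
 * is useful for very large workspaces ('cxt' and 'nbytes' are hypothetical
 * here):
 *
 *		void	   *space;
 *
 *		space = MemoryContextAllocExtended(cxt, nbytes,
 *										   MCXT_ALLOC_HUGE | MCXT_ALLOC_ZERO);
 */
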
1338/*
1339 * HandleLogMemoryContextInterrupt
1340 * Handle receipt of an interrupt indicating logging of memory
1341 * contexts.
1342 *
1343 * All the actual work is deferred to ProcessLogMemoryContextInterrupt(),
1344 * because we cannot safely emit a log message inside the signal handler.
1345 */
1346void
1347HandleLogMemoryContextInterrupt(void)
1348{
1349 InterruptPending = true;
1350 LogMemoryContextPending = true;
1351 /* latch will be set by procsignal_sigusr1_handler */
1352}
1353
1354/*
1355 * HandleGetMemoryContextInterrupt
1356 * Handle receipt of an interrupt indicating a request to publish memory
1357 * contexts statistics.
1358 *
1359 * All the actual work is deferred to ProcessGetMemoryContextInterrupt() as
1360 * this cannot be performed in a signal handler.
1361 */
1362void
1363HandleGetMemoryContextInterrupt(void)
1364{
1365 InterruptPending = true;
1366 PublishMemoryContextPending = true;
1367 /* latch will be set by procsignal_sigusr1_handler */
1368}
1369
1370/*
1371 * ProcessLogMemoryContextInterrupt
1372 * Perform logging of memory contexts of this backend process.
1373 *
1374 * Any backend that participates in ProcSignal signaling must arrange
1375 * to call this function if we see LogMemoryContextPending set.
1376 * It is called from CHECK_FOR_INTERRUPTS(), which is enough because
1377 * the target process for logging of memory contexts is a backend.
1378 */
1379void
1380ProcessLogMemoryContextInterrupt(void)
1381{
1382 LogMemoryContextPending = false;
1383
1384 /*
1385 * Use LOG_SERVER_ONLY to prevent this message from being sent to the
1386 * connected client.
1387 */
1388 ereport(LOG_SERVER_ONLY,
1389 (errhidestmt(true),
1390 errhidecontext(true),
1391 errmsg("logging memory contexts of PID %d", MyProcPid)));
1392
1393 /*
1394 * When a backend process is consuming huge memory, logging all its memory
1395 * contexts might overrun available disk space. To prevent this, we limit
1396 * the depth of the hierarchy, as well as the number of child contexts to
1397 * log per parent to 100.
1398 *
1399 * As with MemoryContextStats(), we suppose that practical cases where the
1400 * dump gets long will typically be huge numbers of siblings under the
1401 * same parent context; while the additional debugging value from seeing
1402 * details about individual siblings beyond 100 will not be large.
1403 */
1404 MemoryContextStatsDetail(TopMemoryContext, 100, 100, false);
1405}
1406
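/*
 * [Editor's note, not part of the original file] Users normally reach the
 * function above through the SQL-callable function
 * pg_log_backend_memory_contexts(), which sends the ProcSignal that sets
 * LogMemoryContextPending in the target backend.
 */
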
1407/*
1408 * ProcessGetMemoryContextInterrupt
1409 * Generate information about memory contexts used by the process.
1410 *
1411 * Performs a breadth-first search of the memory context tree, so parents'
1412 * statistics are reported before their children's in the monitoring function
1413 * output.
1414 *
1415 * Statistics for all the processes are shared via the same dynamic shared
1416 * area. Statistics written by each process are tracked independently in
1417 * per-process DSA pointers. These pointers are stored in static shared memory.
1418 *
1419 * We calculate the maximum number of contexts whose statistics can be
1420 * displayed using a pre-determined limit on the memory available per process
1421 * for this utility and the maximum size of statistics for each context. The
1422 * remaining contexts' statistics, if any, are captured as a cumulative total
1423 * at the end of the individual contexts' statistics.
1424 *
1425 * If summary is true, we capture the level 1 and level 2 contexts
1426 * statistics. For that we traverse the memory context tree recursively in
1427 * depth first search manner to cover all the children of a parent context, to
1428 * be able to display a cumulative total of memory consumption by a parent at
1429 * level 2 and all its children.
1430 */
1431void
1432ProcessGetMemoryContextInterrupt(void)
1433{
1434 List *contexts;
1435 HASHCTL ctl;
1436 HTAB *context_id_lookup;
1437 int context_id = 0;
1438 MemoryStatsEntry *meminfo;
1439 bool summary = false;
1440 int max_stats;
1441 int idx = MyProcNumber;
1442 int stats_count = 0;
1443 int stats_num = 0;
1444 MemoryContextCounters stat;
1445 int num_individual_stats = 0;
1446
1447 PublishMemoryContextPending = false;
1448
1449 /*
1450 * The hash table is used for constructing the "path" column of the view,
1451 * similar to its local backend counterpart.
1452 */
1453 ctl.keysize = sizeof(MemoryContext);
1454 ctl.entrysize = sizeof(MemoryStatsContextId);
1455 ctl.hcxt = CurrentMemoryContext;
1456
1457 context_id_lookup = hash_create("pg_get_remote_backend_memory_contexts",
1458 256,
1459 &ctl,
1460 HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
1461
1462 /* List of contexts to process in the next round - start at the top. */
1463 contexts = list_make1(TopMemoryContext);
1464
1465 /* Compute the number of stats that can fit in the defined limit */
1466 max_stats =
1469 summary = memCxtState[idx].summary;
1470 LWLockRelease(&memCxtState[idx].lw_lock);
1471
1472 /*
1473 * Traverse the memory context tree to find total number of contexts. If
1474 * summary is requested report the total number of contexts at level 1 and
1475 * 2 from the top. Also, populate the hash table of context ids.
1476 */
1477 compute_contexts_count_and_ids(contexts, context_id_lookup, &stats_count,
1478 summary);
1479
1480 /*
1481 * Allocate memory in this process's DSA for storing statistics of the
1482 * memory contexts up to max_stats; for contexts that don't fit within the
1483 * limit, a cumulative total is written as the last record in the DSA
1484 * segment.
1485 */
1486 stats_num = Min(stats_count, max_stats);
1487
1489
1490 /*
1491 * Create a DSA and send its handle to the client process after storing
1492 * the context statistics. If the number of contexts exceeds a predefined
1493 * limit (8MB), a cumulative total is stored for such contexts.
1494 */
1496 {
1498 dsa_handle handle;
1499
1501
1503
1505 MemoryContextSwitchTo(oldcontext);
1506
1508
1509 /*
1510 * Pin the DSA area, this is to make sure the area remains attachable
1511 * even if current backend exits. This is done so that the statistics
1512 * are published even if the process exits while a client is waiting.
1513 */
1515
1516 /* Set the handle in shared memory */
1518 }
1519
1520 /*
1521 * If DSA exists, created by another process publishing statistics, attach
1522 * to it.
1523 */
1524 else if (MemoryStatsDsaArea == NULL)
1525 {
1527
1530 MemoryContextSwitchTo(oldcontext);
1532 }
1534
1535 /*
1536 * Hold the process lock to protect writes to process specific memory. Two
1537 * processes publishing statistics do not block each other.
1538 */
1541
1542 if (DsaPointerIsValid(memCxtState[idx].memstats_dsa_pointer))
1543 {
1544 /*
1545 * Free any previous allocations, free the name, ident and path
1546 * pointers before freeing the pointer that contains them.
1547 */
1549 memCxtState[idx].memstats_dsa_pointer);
1550 }
1551
1552 /*
1553 * Assigning total stats before allocating memory so that memory cleanup
1554 * can run if any subsequent dsa_allocate call to allocate name/ident/path
1555 * fails.
1556 */
1557 memCxtState[idx].total_stats = stats_num;
1560
1561 meminfo = (MemoryStatsEntry *)
1562 dsa_get_address(MemoryStatsDsaArea, memCxtState[idx].memstats_dsa_pointer);
1563
1564 if (summary)
1565 {
1566 int cxt_id = 0;
1567 List *path = NIL;
1568
1569 /* Copy TopMemoryContext statistics to DSA */
1570 memset(&stat, 0, sizeof(stat));
1571 (*TopMemoryContext->methods->stats) (TopMemoryContext, NULL, NULL,
1572 &stat, true);
1573 path = lcons_int(1, path);
1574 PublishMemoryContext(meminfo, cxt_id, TopMemoryContext, path, stat,
1575 1, MemoryStatsDsaArea, 100);
1576 cxt_id = cxt_id + 1;
1577
1578 /*
1579 * Copy statistics for each of TopMemoryContexts children. This
1580 * includes statistics of at most 100 children per node, with each
1581 * child node limited to a depth of 100 in its subtree.
1582 */
1583 for (MemoryContext c = TopMemoryContext->firstchild; c != NULL;
1584 c = c->nextchild)
1585 {
1586 MemoryContextCounters grand_totals;
1587 int num_contexts = 0;
1588 int level = 0;
1589
1590 path = NIL;
1591 memset(&grand_totals, 0, sizeof(grand_totals));
1592
1593 MemoryContextStatsInternal(c, level, 100, 100, &grand_totals,
1594 PRINT_STATS_NONE, &num_contexts);
1595
1596 path = compute_context_path(c, context_id_lookup);
1597
1598 /*
1599 * Register the stats entry first, that way the cleanup handler
1600 * can reach it in case of allocation failures of one or more
1601 * members.
1602 */
1603 memCxtState[idx].total_stats = cxt_id++;
1604 PublishMemoryContext(meminfo, cxt_id, c, path,
1605 grand_totals, num_contexts, MemoryStatsDsaArea, 100);
1606 }
1607 memCxtState[idx].total_stats = cxt_id;
1608
1609 end_memorycontext_reporting();
1610
1611 /* Notify waiting backends and return */
1612 hash_destroy(context_id_lookup);
1613
1614 return;
1615 }
1616
1617 foreach_ptr(MemoryContextData, cur, contexts)
1618 {
1619 List *path = NIL;
1620
1621 /*
1622 * Figure out the transient context_id of this context and each of its
1623 * ancestors, to compute a path for this context.
1624 */
1625 path = compute_context_path(cur, context_id_lookup);
1626
1627 /* Examine the context stats */
1628 memset(&stat, 0, sizeof(stat));
1629 (*cur->methods->stats) (cur, NULL, NULL, &stat, true);
1630
1631 /* Account for saving one statistics slot for cumulative reporting */
1632 if (context_id < (max_stats - 1) || stats_count <= max_stats)
1633 {
1634 /* Copy statistics to DSA memory */
1635 PublishMemoryContext(meminfo, context_id, cur, path, stat, 1, MemoryStatsDsaArea, 100);
1636 }
1637 else
1638 {
1639 meminfo[max_stats - 1].totalspace += stat.totalspace;
1640 meminfo[max_stats - 1].nblocks += stat.nblocks;
1641 meminfo[max_stats - 1].freespace += stat.freespace;
1642 meminfo[max_stats - 1].freechunks += stat.freechunks;
1643 }
1644
1645 /*
1646 * DSA max limit per process is reached, write aggregate of the
1647 * remaining statistics.
1648 *
1649 * We can store contexts from 0 to max_stats - 1. When stats_count is
1650 * greater than max_stats, we stop reporting individual statistics
1651 * when context_id equals max_stats - 2. As we use max_stats - 1 array
1652 * slot for reporting cumulative statistics or "Remaining Totals".
1653 */
1654 if (stats_count > max_stats && context_id == (max_stats - 2))
1655 {
1656 char *nameptr;
1657 int namelen = strlen("Remaining Totals");
1658
1659 num_individual_stats = context_id + 1;
1660 meminfo[max_stats - 1].name = dsa_allocate(MemoryStatsDsaArea, namelen + 1);
1661 nameptr = dsa_get_address(MemoryStatsDsaArea, meminfo[max_stats - 1].name);
1662 strncpy(nameptr, "Remaining Totals", namelen);
1663 meminfo[max_stats - 1].ident = InvalidDsaPointer;
1664 meminfo[max_stats - 1].path = InvalidDsaPointer;
1665 meminfo[max_stats - 1].type = 0;
1666 }
1667 context_id++;
1668 }
1669
1670 /*
1671 * Statistics are not aggregated, i.e. individual statistics are reported when
1672 * stats_count <= max_stats.
1673 */
1674 if (stats_count <= max_stats)
1675 {
1676 memCxtState[idx].total_stats = context_id;
1677 }
1678 /* Report number of aggregated memory contexts */
1679 else
1680 {
1681 meminfo[max_stats - 1].num_agg_stats = context_id -
1682 num_individual_stats;
1683
1684 /*
1685 * Total stats equals num_individual_stats + 1 record for cumulative
1686 * statistics.
1687 */
1688 memCxtState[idx].total_stats = num_individual_stats + 1;
1689 }
1690
1691 /* Notify waiting backends and return */
1692 end_memorycontext_reporting();
1693
1694 hash_destroy(context_id_lookup);
1695}
1696
1697/*
1698 * Update timestamp and signal all the waiting client backends after copying
1699 * all the statistics.
1700 */
1701static void
1702end_memorycontext_reporting(void)
1703{
1707}
1708
1709/*
1710 * compute_context_path
1711 *
1712 * Append the transient context_id of this context and each of its ancestors
1713 * to a list, in order to compute a path.
1714 */
1715static List *
1716compute_context_path(MemoryContext c, HTAB *context_id_lookup)
1717{
1718 bool found;
1719 List *path = NIL;
1720 MemoryContext cur_context;
1721
1722 for (cur_context = c; cur_context != NULL; cur_context = cur_context->parent)
1723 {
1724 MemoryStatsContextId *cur_entry;
1725
1726 cur_entry = hash_search(context_id_lookup, &cur_context, HASH_FIND, &found);
1727
1728 if (!found)
1729 elog(ERROR, "hash table corrupted, can't construct path value");
1730
1731 path = lcons_int(cur_entry->context_id, path);
1732 }
1733
1734 return path;
1735}
1736
1737/*
1738 * Return the number of contexts currently allocated by the backend and
1739 * assign context ids to each of the contexts.
1740 */
1741static void
1742compute_contexts_count_and_ids(List *contexts, HTAB *context_id_lookup,
1743 int *stats_count, bool summary)
1744{
1745 foreach_ptr(MemoryContextData, cur, contexts)
1746 {
1747 MemoryStatsContextId *entry;
1748 bool found;
1749
1750 entry = (MemoryStatsContextId *) hash_search(context_id_lookup, &cur,
1751 HASH_ENTER, &found);
1752 Assert(!found);
1753
1754 /*
1755 * context id starts with 1 so increment the stats_count before
1756 * assigning.
1757 */
1758 entry->context_id = ++(*stats_count);
1759
1760 /* Append the children of the current context to the main list. */
1761 for (MemoryContext c = cur->firstchild; c != NULL; c = c->nextchild)
1762 {
1763 if (summary)
1764 {
1765 entry = (MemoryStatsContextId *) hash_search(context_id_lookup, &c,
1766 HASH_ENTER, &found);
1767 Assert(!found);
1768
1769 entry->context_id = ++(*stats_count);
1770 }
1771
1772 contexts = lappend(contexts, c);
1773 }
1774
1775 /*
1776 * In summary mode only the first two level (from top) contexts are
1777 * displayed.
1778 */
1779 if (summary)
1780 break;
1781 }
1782}
1783
1784/*
1785 * PublishMemoryContext
1786 *
1787 * Copy the memory context statistics of a single context to DSA memory.
1788 */
1789static void
1790PublishMemoryContext(MemoryStatsEntry *memcxt_info, int curr_id,
1791 MemoryContext context, List *path,
1792 MemoryContextCounters stat, int num_contexts,
1793 dsa_area *area, int max_levels)
1794{
1795 const char *ident = context->ident;
1796 const char *name = context->name;
1797 int *path_list;
1798
1799 /*
1800 * To be consistent with logging output, we label dynahash contexts with
1801 * just the hash table name as with MemoryContextStatsPrint().
1802 */
1803 if (context->ident && strncmp(context->name, "dynahash", 8) == 0)
1804 {
1805 name = context->ident;
1806 ident = NULL;
1807 }
1808
1809 if (name != NULL)
1810 {
1811 int namelen = strlen(name);
1812 char *nameptr;
1813
1815 namelen = pg_mbcliplen(name, namelen,
1817
1818 memcxt_info[curr_id].name = dsa_allocate(area, namelen + 1);
1819 nameptr = (char *) dsa_get_address(area, memcxt_info[curr_id].name);
1820 strlcpy(nameptr, name, namelen + 1);
1821 }
1822 else
1823 memcxt_info[curr_id].name = InvalidDsaPointer;
1824
1825 /* Trim and copy the identifier if it is not set to NULL */
1826 if (ident != NULL)
1827 {
1828 int idlen = strlen(context->ident);
1829 char *identptr;
1830
1831 /*
1832 * Some identifiers, such as SQL query strings, can be very long;
1833 * truncate oversize identifiers.
1834 */
1836 idlen = pg_mbcliplen(ident, idlen,
1838
1839 memcxt_info[curr_id].ident = dsa_allocate(area, idlen + 1);
1840 identptr = (char *) dsa_get_address(area, memcxt_info[curr_id].ident);
1841 strlcpy(identptr, ident, idlen + 1);
1842 }
1843 else
1844 memcxt_info[curr_id].ident = InvalidDsaPointer;
1845
1846 /* Allocate DSA memory for storing path information */
1847 if (path == NIL)
1848 memcxt_info[curr_id].path = InvalidDsaPointer;
1849 else
1850 {
1851 int levels = Min(list_length(path), max_levels);
1852
1853 memcxt_info[curr_id].path_length = levels;
1854 memcxt_info[curr_id].path = dsa_allocate0(area, levels * sizeof(int));
1855 memcxt_info[curr_id].levels = list_length(path);
1856 path_list = (int *) dsa_get_address(area, memcxt_info[curr_id].path);
1857
1858 foreach_int(i, path)
1859 {
1860 path_list[foreach_current_index(i)] = i;
1861 if (--levels == 0)
1862 break;
1863 }
1864 }
1865 memcxt_info[curr_id].type = context->type;
1866 memcxt_info[curr_id].totalspace = stat.totalspace;
1867 memcxt_info[curr_id].nblocks = stat.nblocks;
1868 memcxt_info[curr_id].freespace = stat.freespace;
1869 memcxt_info[curr_id].freechunks = stat.freechunks;
1870 memcxt_info[curr_id].num_agg_stats = num_contexts;
1871}
1872
1873/*
1874 * free_memorycontextstate_dsa
1875 *
1876 * Worker for freeing resources from a MemoryStatsEntry. Callers are
1877 * responsible for ensuring that the DSA pointer is valid.
1878 */
1879static void
1880free_memorycontextstate_dsa(dsa_area *area, int total_stats,
1881 dsa_pointer prev_dsa_pointer)
1882{
1883 MemoryStatsEntry *meminfo;
1884
1885 meminfo = (MemoryStatsEntry *) dsa_get_address(area, prev_dsa_pointer);
1886 Assert(meminfo != NULL);
1887 for (int i = 0; i < total_stats; i++)
1888 {
1889 if (DsaPointerIsValid(meminfo[i].name))
1890 dsa_free(area, meminfo[i].name);
1891
1892 if (DsaPointerIsValid(meminfo[i].ident))
1893 dsa_free(area, meminfo[i].ident);
1894
1895 if (DsaPointerIsValid(meminfo[i].path))
1896 dsa_free(area, meminfo[i].path);
1897 }
1898
1899 dsa_free(area, memCxtState[MyProcNumber].memstats_dsa_pointer);
1900 memCxtState[MyProcNumber].memstats_dsa_pointer = InvalidDsaPointer;
1901}
1902
1903/*
1904 * Free the memory context statistics stored by this process
1905 * in DSA area.
1906 */
1907void
1909{
1910 int idx = MyProcNumber;
1911
1913 return;
1914
1916
1917 if (!DsaPointerIsValid(memCxtState[idx].memstats_dsa_pointer))
1918 {
1919 LWLockRelease(&memCxtState[idx].lw_lock);
1920 return;
1921 }
1922
1923 /* If the dsa mapping could not be found, attach to the area */
1924 if (MemoryStatsDsaArea == NULL)
1926
1927 /*
1928 * Free the memory context statistics, free the name, ident and path
1929 * pointers before freeing the pointer that contains these pointers and
1930 * integer statistics.
1931 */
1933 memCxtState[idx].memstats_dsa_pointer);
1934
1936 LWLockRelease(&memCxtState[idx].lw_lock);
1937}
1938
1939void *
1940palloc(Size size)
1941{
1942 /* duplicates MemoryContextAlloc to avoid increased overhead */
1943 void *ret;
1944 MemoryContext context = CurrentMemoryContext;
1945
1946 Assert(MemoryContextIsValid(context));
1947 AssertNotInCriticalSection(context);
1948
1949 context->isReset = false;
1950
1951 /*
1952 * For efficiency reasons, we purposefully offload the handling of
1953 * allocation failures to the MemoryContextMethods implementation as this
1954 * allows these checks to be performed only when an actual malloc needs to
1955 * be done to request more memory from the OS. Additionally, not having
1956 * to execute any instructions after this call allows the compiler to use
1957 * the sibling call optimization. If you're considering adding code after
1958 * this call, consider making it the responsibility of the 'alloc'
1959 * function instead.
1960 */
1961 ret = context->methods->alloc(context, size, 0);
1962 /* We expect OOM to be handled by the alloc function */
1963 Assert(ret != NULL);
1964 VALGRIND_MEMPOOL_ALLOC(context, ret, size);
1965
1966 return ret;
1967}
1968
1969void *
1970palloc0(Size size)
1971{
1972 /* duplicates MemoryContextAllocZero to avoid increased overhead */
1973 void *ret;
1974 MemoryContext context = CurrentMemoryContext;
1975
1976 Assert(MemoryContextIsValid(context));
1977 AssertNotInCriticalSection(context);
1978
1979 context->isReset = false;
1980
1981 ret = context->methods->alloc(context, size, 0);
1982 /* We expect OOM to be handled by the alloc function */
1983 Assert(ret != NULL);
1984 VALGRIND_MEMPOOL_ALLOC(context, ret, size);
1985
1986 MemSetAligned(ret, 0, size);
1987
1988 return ret;
1989}
1990
1991void *
1992palloc_extended(Size size, int flags)
1993{
1994 /* duplicates MemoryContextAllocExtended to avoid increased overhead */
1995 void *ret;
1996 MemoryContext context = CurrentMemoryContext;
1997
1998 Assert(MemoryContextIsValid(context));
1999 AssertNotInCriticalSection(context);
2000
2001 context->isReset = false;
2002
2003 ret = context->methods->alloc(context, size, flags);
2004 if (unlikely(ret == NULL))
2005 {
2006 /* NULL can be returned only when using MCXT_ALLOC_NO_OOM */
2007 Assert(flags & MCXT_ALLOC_NO_OOM);
2008 return NULL;
2009 }
2010
2011 VALGRIND_MEMPOOL_ALLOC(context, ret, size);
2012
2013 if ((flags & MCXT_ALLOC_ZERO) != 0)
2014 MemSetAligned(ret, 0, size);
2015
2016 return ret;
2017}
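
For illustration only (not part of this file), the flags handled above combine as expected: MCXT_ALLOC_NO_OOM makes a NULL return possible, while MCXT_ALLOC_ZERO requests the MemSetAligned() pass shown above.

   /* Illustrative sketch only; the fallback handling is hypothetical */
   char       *buf = palloc_extended(1024, MCXT_ALLOC_NO_OOM | MCXT_ALLOC_ZERO);

   if (buf == NULL)
       elog(LOG, "could not allocate 1024 bytes, taking fallback path");
   else
       Assert(buf[0] == 0);       /* zeroed because of MCXT_ALLOC_ZERO */
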
2018
2019/*
2020 * MemoryContextAllocAligned
2021 * Allocate 'size' bytes of memory in 'context' aligned to 'alignto'
2022 * bytes.
2023 *
2024 * Currently, we align addresses by requesting additional bytes from the
2025 * MemoryContext's standard allocator function and then aligning the returned
2026 * address by the required alignment. This means that the given MemoryContext
2027 * must support providing us with a chunk of memory that's larger than 'size'.
2028 * For allocators such as Slab, that's not going to work, as slab only allows
2029 * chunks of the size that's specified when the context is created.
2030 *
2031 * 'alignto' must be a power of 2.
2032 * 'flags' may be 0 or set the same as MemoryContextAllocExtended().
2033 */
2034void *
2035MemoryContextAllocAligned(MemoryContext context,
2036 Size size, Size alignto, int flags)
2037{
2038 MemoryChunk *alignedchunk;
2039 Size alloc_size;
2040 void *unaligned;
2041 void *aligned;
2042
2043 /* wouldn't make much sense to waste that much space */
2044 Assert(alignto < (128 * 1024 * 1024));
2045
2046 /* ensure alignto is a power of 2 */
2047 Assert((alignto & (alignto - 1)) == 0);
2048
2049 /*
2050 * If the alignment requirements are less than what we already guarantee
2051 * then just use the standard allocation function.
2052 */
2053 if (unlikely(alignto <= MAXIMUM_ALIGNOF))
2054 return MemoryContextAllocExtended(context, size, flags);
2055
2056 /*
2057 * We implement aligned pointers by simply allocating enough memory for
2058 * the requested size plus the alignment and an additional "redirection"
2059 * MemoryChunk. This additional MemoryChunk is required for operations
2060 * such as pfree when used on the pointer returned by this function. We
2061 * use this redirection MemoryChunk in order to find the pointer to the
2062 * memory that was returned by the MemoryContextAllocExtended call below.
2063 * We do that by "borrowing" the block offset field and instead of using
2064 * that to find the offset into the owning block, we use it to find the
2065 * original allocated address.
2066 *
2067 * Here we must allocate enough extra memory so that we can still align
2068 * the pointer returned by MemoryContextAllocExtended and also have enough
2069 * space for the redirection MemoryChunk. Since allocations will already
2070 * be at least aligned by MAXIMUM_ALIGNOF, we can subtract that amount
2071 * from the allocation size to save a little memory.
2072 */
2073 alloc_size = size + PallocAlignedExtraBytes(alignto);
2074
2075#ifdef MEMORY_CONTEXT_CHECKING
2076 /* ensure there's space for a sentinel byte */
2077 alloc_size += 1;
2078#endif
2079
2080 /* perform the actual allocation */
2081 unaligned = MemoryContextAllocExtended(context, alloc_size, flags);
2082
2083 /* set the aligned pointer */
2084 aligned = (void *) TYPEALIGN(alignto, (char *) unaligned +
2085 sizeof(MemoryChunk));
2086
2087 alignedchunk = PointerGetMemoryChunk(aligned);
2088
2089 /*
2090 * We set the redirect MemoryChunk so that the block offset calculation is
2091 * used to point back to the 'unaligned' allocated chunk. This allows us
2092 * to use MemoryChunkGetBlock() to find the unaligned chunk when we need
2093 * to perform operations such as pfree() and repalloc().
2094 *
2095 * We store 'alignto' in the MemoryChunk's 'value' so that we know what
2096 * the alignment was set to should we ever be asked to realloc this
2097 * pointer.
2098 */
2099 MemoryChunkSetHdrMask(alignedchunk, unaligned, alignto,
2100 MCTX_ALIGNED_REDIRECT_ID);
2101
2102 /* double check we produced a correctly aligned pointer */
2103 Assert((void *) TYPEALIGN(alignto, aligned) == aligned);
2104
2105#ifdef MEMORY_CONTEXT_CHECKING
2106 alignedchunk->requested_size = size;
2107 /* set mark to catch clobber of "unused" space */
2108 set_sentinel(aligned, size);
2109#endif
2110
2111 /* Mark the bytes before the redirection header as noaccess */
2112 VALGRIND_MAKE_MEM_NOACCESS(unaligned,
2113 (char *) alignedchunk - (char *) unaligned);
2114
2115 /* Disallow access to the redirection chunk header. */
2116 VALGRIND_MAKE_MEM_NOACCESS(alignedchunk, sizeof(MemoryChunk));
2117
2118 return aligned;
2119}
2120
2121/*
2122 * palloc_aligned
2123 * Allocate 'size' bytes returning a pointer that's aligned to the
2124 * 'alignto' boundary.
2125 *
2126 * Currently, we align addresses by requesting additional bytes from the
2127 * MemoryContext's standard allocator function and then aligning the returned
2128 * address by the required alignment. This means that the given MemoryContext
2129 * must support providing us with a chunk of memory that's larger than 'size'.
2130 * For allocators such as Slab, that's not going to work, as slab only allows
2131 * chunks of the size that's specified when the context is created.
2132 *
2133 * 'alignto' must be a power of 2.
2134 * 'flags' may be 0 or set the same as MemoryContextAllocExtended().
2135 */
2136void *
2137palloc_aligned(Size size, Size alignto, int flags)
2138{
2139 return MemoryContextAllocAligned(CurrentMemoryContext, size, alignto, flags);
2140}
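
An illustrative caller (not part of mcxt.c): thanks to the redirection MemoryChunk described above, pointers returned by palloc_aligned() and MemoryContextAllocAligned() can be passed to pfree() and repalloc() like any other chunk; 'alignto' must be a power of 2.

   /* Illustrative sketch only; 4096 is an arbitrary power-of-2 alignment */
   char       *page = (char *) palloc_aligned(8192, 4096, MCXT_ALLOC_ZERO);

   Assert((uintptr_t) page % 4096 == 0);   /* guaranteed by the TYPEALIGN step above */
   pfree(page);                            /* dispatched via the aligned-redirect methods */
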
2141
2142/*
2143 * pfree
2144 * Release an allocated chunk.
2145 */
2146void
2147pfree(void *pointer)
2148{
2149#ifdef USE_VALGRIND
2150 MemoryContextMethodID method = GetMemoryChunkMethodID(pointer);
2151 MemoryContext context = GetMemoryChunkContext(pointer);
2152#endif
2153
2154 MCXT_METHOD(pointer, free_p) (pointer);
2155
2156#ifdef USE_VALGRIND
2157 if (method != MCTX_ALIGNED_REDIRECT_ID)
2158 VALGRIND_MEMPOOL_FREE(context, pointer);
2159#endif
2160}
2161
2162/*
2163 * repalloc
2164 * Adjust the size of a previously allocated chunk.
2165 */
2166void *
2167repalloc(void *pointer, Size size)
2168{
2169#ifdef USE_VALGRIND
2170 MemoryContextMethodID method = GetMemoryChunkMethodID(pointer);
2171#endif
2172#if defined(USE_ASSERT_CHECKING) || defined(USE_VALGRIND)
2173 MemoryContext context = GetMemoryChunkContext(pointer);
2174#endif
2175 void *ret;
2176
2177 AssertNotInCriticalSection(context);
2178
2179 /* isReset must be false already */
2180 Assert(!context->isReset);
2181
2182 /*
2183 * For efficiency reasons, we purposefully offload the handling of
2184 * allocation failures to the MemoryContextMethods implementation as this
2185 * allows these checks to be performed only when an actual malloc needs to
2186 * be done to request more memory from the OS. Additionally, not having
2187 * to execute any instructions after this call allows the compiler to use
2188 * the sibling call optimization. If you're considering adding code after
2189 * this call, consider making it the responsibility of the 'realloc'
2190 * function instead.
2191 */
2192 ret = MCXT_METHOD(pointer, realloc) (pointer, size, 0);
2193
2194#ifdef USE_VALGRIND
2195 if (method != MCTX_ALIGNED_REDIRECT_ID)
2196 VALGRIND_MEMPOOL_CHANGE(context, pointer, ret, size);
2197#endif
2198
2199 return ret;
2200}
2201
2202/*
2203 * repalloc_extended
2204 * Adjust the size of a previously allocated chunk,
2205 * with HUGE and NO_OOM options.
2206 */
2207void *
2208repalloc_extended(void *pointer, Size size, int flags)
2209{
2210#if defined(USE_ASSERT_CHECKING) || defined(USE_VALGRIND)
2211 MemoryContext context = GetMemoryChunkContext(pointer);
2212#endif
2213 void *ret;
2214
2215 AssertNotInCriticalSection(context);
2216
2217 /* isReset must be false already */
2218 Assert(!context->isReset);
2219
2220 /*
2221 * For efficiency reasons, we purposefully offload the handling of
2222 * allocation failures to the MemoryContextMethods implementation as this
2223 * allows these checks to be performed only when an actual malloc needs to
2224 * be done to request more memory from the OS. Additionally, not having
2225 * to execute any instructions after this call allows the compiler to use
2226 * the sibling call optimization. If you're considering adding code after
2227 * this call, consider making it the responsibility of the 'realloc'
2228 * function instead.
2229 */
2230 ret = MCXT_METHOD(pointer, realloc) (pointer, size, flags);
2231 if (unlikely(ret == NULL))
2232 return NULL;
2233
2234 VALGRIND_MEMPOOL_CHANGE(context, pointer, ret, size);
2235
2236 return ret;
2237}
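
Illustrative only: with MCXT_ALLOC_NO_OOM a failed repalloc_extended() returns NULL rather than raising an ERROR, so the caller still holds its original pointer (presumably left intact, as with plain realloc) and can take a fallback path.

   /* Illustrative sketch only; 'buf' and 'newsize' are hypothetical caller state */
   char       *tmp = repalloc_extended(buf, newsize, MCXT_ALLOC_NO_OOM);

   if (tmp == NULL)
       elog(LOG, "could not grow buffer to %zu bytes", newsize);
   else
       buf = tmp;
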
2238
2239/*
2240 * repalloc0
2241 * Adjust the size of a previously allocated chunk and zero out the added
2242 * space.
2243 */
2244void *
2245repalloc0(void *pointer, Size oldsize, Size size)
2246{
2247 void *ret;
2248
2249 /* catch wrong argument order */
2250 if (unlikely(oldsize > size))
2251 elog(ERROR, "invalid repalloc0 call: oldsize %zu, new size %zu",
2252 oldsize, size);
2253
2254 ret = repalloc(pointer, size);
2255 memset((char *) ret + oldsize, 0, (size - oldsize));
2256 return ret;
2257}
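
A short illustration of the argument order enforced above (not part of this file): the old size comes first, the new size second, and only the newly added tail is zeroed.

   /* Illustrative sketch only */
   Size        oldsz = 4 * sizeof(int);
   Size        newsz = 8 * sizeof(int);
   int        *arr = (int *) palloc(oldsz);

   arr = repalloc0(arr, oldsz, newsz);  /* arr[4] .. arr[7] are now zero-filled */
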
2258
2259/*
2260 * MemoryContextAllocHuge
2261 * Allocate (possibly-expansive) space within the specified context.
2262 *
2263 * See considerations in comment at MaxAllocHugeSize.
2264 */
2265void *
2266MemoryContextAllocHuge(MemoryContext context, Size size)
2267{
2268 void *ret;
2269
2270 Assert(MemoryContextIsValid(context));
2271 AssertNotInCriticalSection(context);
2272
2273 context->isReset = false;
2274
2275 /*
2276 * For efficiency reasons, we purposefully offload the handling of
2277 * allocation failures to the MemoryContextMethods implementation as this
2278 * allows these checks to be performed only when an actual malloc needs to
2279 * be done to request more memory from the OS. Additionally, not having
2280 * to execute any instructions after this call allows the compiler to use
2281 * the sibling call optimization. If you're considering adding code after
2282 * this call, consider making it the responsibility of the 'alloc'
2283 * function instead.
2284 */
2285 ret = context->methods->alloc(context, size, MCXT_ALLOC_HUGE);
2286
2287 VALGRIND_MEMPOOL_ALLOC(context, ret, size);
2288
2289 return ret;
2290}
2291
2292/*
2293 * repalloc_huge
2294 * Adjust the size of a previously allocated chunk, permitting a large
2295 * value. The previous allocation need not have been "huge".
2296 */
2297void *
2298repalloc_huge(void *pointer, Size size)
2299{
2300 /* this one seems not worth its own implementation */
2301 return repalloc_extended(pointer, size, MCXT_ALLOC_HUGE);
2302}
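
Illustrative only: requests larger than the regular limit of roughly 1 GB (MaxAllocSize) must go through the huge variants, which, as shown above, simply pass MCXT_ALLOC_HUGE down to the context's methods.

   /* Illustrative sketch only; assumes a 64-bit build with sufficient memory */
   Size        hugesz = (Size) 2 * 1024 * 1024 * 1024;    /* 2 GB */
   char       *big = (char *) MemoryContextAllocHuge(CurrentMemoryContext, hugesz);

   big = (char *) repalloc_huge(big, hugesz + 1024 * 1024);
   pfree(big);
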
2303
2304/*
2305 * MemoryContextStrdup
2306 * Like strdup(), but allocate from the specified context
2307 */
2308char *
2309MemoryContextStrdup(MemoryContext context, const char *string)
2310{
2311 char *nstr;
2312 Size len = strlen(string) + 1;
2313
2314 nstr = (char *) MemoryContextAlloc(context, len);
2315
2316 memcpy(nstr, string, len);
2317
2318 return nstr;
2319}
2320
2321char *
2322pstrdup(const char *in)
2323{
2324 return MemoryContextStrdup(CurrentMemoryContext, in);
2325}
2326
2327/*
2328 * pnstrdup
2329 * Like pstrdup(), but append null byte to a
2330 * not-necessarily-null-terminated input string.
2331 */
2332char *
2333pnstrdup(const char *in, Size len)
2334{
2335 char *out;
2336
2337 len = strnlen(in, len);
2338
2339 out = palloc(len + 1);
2340 memcpy(out, in, len);
2341 out[len] = '\0';
2342
2343 return out;
2344}
2345
2346/*
2347 * Make copy of string with all trailing newline characters removed.
2348 */
2349char *
2350pchomp(const char *in)
2351{
2352 size_t n;
2353
2354 n = strlen(in);
2355 while (n > 0 && in[n - 1] == '\n')
2356 n--;
2357 return pnstrdup(in, n);
2358}
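
To round off these string helpers, an illustrative comparison (not part of mcxt.c):

   /* Illustrative sketch only */
   char       *s1 = pstrdup("log line\n\n");      /* exact copy, newlines included */
   char       *s2 = pnstrdup("log line\n\n", 3);  /* "log": clipped and NUL-terminated */
   char       *s3 = pchomp("log line\n\n");       /* "log line": trailing newlines removed */

   pfree(s1);
   pfree(s2);
   pfree(s3);
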