/*-------------------------------------------------------------------------
 *
 * aset.c
 *	  Allocation set definitions.
 *
 * AllocSet is our standard implementation of the abstract MemoryContext
 * type.
 *
 *
 * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/utils/mmgr/aset.c
 *
 * NOTE:
 *	This is a new (Feb. 05, 1999) implementation of the allocation set
 *	routines. AllocSet...() does not use OrderedSet...() any more.
 *	Instead it manages allocations in a block pool by itself, combining
 *	many small allocations in a few bigger blocks. AllocSetFree() normally
 *	doesn't free() memory really. It just add's the free'd area to some
 *	list for later reuse by AllocSetAlloc(). All memory blocks are free()'d
 *	at once on AllocSetReset(), which happens when the memory context gets
 *	destroyed.
 *				Jan Wieck
 *
 *	Performance improvement from Tom Lane, 8/99: for extremely large request
 *	sizes, we do want to be able to give the memory back to free() as soon
 *	as it is pfree()'d.  Otherwise we risk tying up a lot of memory in
 *	freelist entries that might never be usable.  This is specially needed
 *	when the caller is repeatedly repalloc()'ing a block bigger and bigger;
 *	the previous instances of the block were guaranteed to be wasted until
 *	AllocSetReset() under the old way.
 *
 *	Further improvement 12/00: as the code stood, request sizes in the
 *	midrange between "small" and "large" were handled very inefficiently,
 *	because any sufficiently large free chunk would be used to satisfy a
 *	request, even if it was much larger than necessary.  This led to more
 *	and more wasted space in allocated chunks over time.  To fix, get rid
 *	of the midrange behavior: we now handle only "small" power-of-2-size
 *	chunks as chunks.  Anything "large" is passed off to malloc().  Change
 *	the number of freelists to change the small/large boundary.
 *
 *-------------------------------------------------------------------------
 */
46 
47 #include "postgres.h"
48 
49 #include "port/pg_bitutils.h"
50 #include "utils/memdebug.h"
51 #include "utils/memutils.h"
52 
53 /* Define this to detail debug alloc information */
54 /* #define HAVE_ALLOCINFO */
55 
/*--------------------
 * Chunk freelist k holds chunks of size 1 << (k + ALLOC_MINBITS),
 * for k = 0 .. ALLOCSET_NUM_FREELISTS-1.
 *
 * Note that all chunks in the freelists have power-of-2 sizes.  This
 * improves recyclability: we may waste some space, but the wasted space
 * should stay pretty constant as requests are made and released.
 *
 * A request too large for the last freelist is handled by allocating a
 * dedicated block from malloc().  The block still has a block header and
 * chunk header, but when the chunk is freed we'll return the whole block
 * to malloc(), not put it on our freelists.
 *
 * CAUTION: ALLOC_MINBITS must be large enough so that
 * 1<<ALLOC_MINBITS is at least MAXALIGN,
 * or we may fail to align the smallest chunks adequately.
 * 8-byte alignment is enough on all currently known machines.
 *
 * With the current parameters, request sizes up to 8K are treated as chunks,
 * larger requests go into dedicated blocks.  Change ALLOCSET_NUM_FREELISTS
 * to adjust the boundary point; and adjust ALLOCSET_SEPARATE_THRESHOLD in
 * memutils.h to agree.  (Note: in contexts with small maxBlockSize, we may
 * set the allocChunkLimit to less than 8K, so as to avoid space wastage.)
 *--------------------
 */

#define ALLOC_MINBITS		3	/* smallest chunk size is 8 bytes */
#define ALLOCSET_NUM_FREELISTS	11
#define ALLOC_CHUNK_LIMIT	(1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
/* Size of largest chunk that we use a fixed size for */
#define ALLOC_CHUNK_FRACTION	4
/* We allow chunks to be at most 1/4 of maxBlockSize (less overhead) */
88 
/*--------------------
 * The first block allocated for an allocset has size initBlockSize.
 * Each time we have to allocate another block, we double the block size
 * (if possible, and without exceeding maxBlockSize), so as to reduce
 * the bookkeeping load on malloc().
 *
 * Blocks allocated to hold oversize chunks do not follow this rule, however;
 * they are just however big they need to be to hold that single chunk.
 *
 * Also, if a minContextSize is specified, the first block has that size,
 * and then initBlockSize is used for the next one.
 *--------------------
 */

#define ALLOC_BLOCKHDRSZ	MAXALIGN(sizeof(AllocBlockData))
#define ALLOC_CHUNKHDRSZ	sizeof(struct AllocChunkData)

typedef struct AllocBlockData *AllocBlock;	/* forward reference */
typedef struct AllocChunkData *AllocChunk;
108 
/*
 * AllocPointer
 *		Aligned pointer which may be a member of an allocation set.
 */
typedef void *AllocPointer;
114 
115 /*
116  * AllocSetContext is our standard implementation of MemoryContext.
117  *
118  * Note: header.isReset means there is nothing for AllocSetReset to do.
119  * This is different from the aset being physically empty (empty blocks list)
120  * because we will still have a keeper block. It's also different from the set
121  * being logically empty, because we don't attempt to detect pfree'ing the
122  * last active chunk.
123  */
124 typedef struct AllocSetContext
125 {
126  MemoryContextData header; /* Standard memory-context fields */
127  /* Info about storage allocated in this context: */
128  AllocBlock blocks; /* head of list of blocks in this set */
129  AllocChunk freelist[ALLOCSET_NUM_FREELISTS]; /* free chunk lists */
130  /* Allocation parameters for this context: */
131  Size initBlockSize; /* initial block size */
132  Size maxBlockSize; /* maximum block size */
133  Size nextBlockSize; /* next block size to allocate */
134  Size allocChunkLimit; /* effective chunk size limit */
135  AllocBlock keeper; /* keep this block over resets */
136  /* freelist this context could be put in, or -1 if not a candidate: */
137  int freeListIndex; /* index in context_freelists[], or -1 */
139 
141 
142 /*
143  * AllocBlock
144  * An AllocBlock is the unit of memory that is obtained by aset.c
145  * from malloc(). It contains one or more AllocChunks, which are
146  * the units requested by palloc() and freed by pfree(). AllocChunks
147  * cannot be returned to malloc() individually, instead they are put
148  * on freelists by pfree() and re-used by the next palloc() that has
149  * a matching request size.
150  *
151  * AllocBlockData is the header data for a block --- the usable space
152  * within the block begins at the next alignment boundary.
153  */
154 typedef struct AllocBlockData
155 {
156  AllocSet aset; /* aset that owns this block */
157  AllocBlock prev; /* prev block in aset's blocks list, if any */
158  AllocBlock next; /* next block in aset's blocks list, if any */
159  char *freeptr; /* start of free space in this block */
160  char *endptr; /* end of space in this block */
162 
163 /*
164  * AllocChunk
165  * The prefix of each piece of memory in an AllocBlock
166  *
167  * Note: to meet the memory context APIs, the payload area of the chunk must
168  * be maxaligned, and the "aset" link must be immediately adjacent to the
169  * payload area (cf. GetMemoryChunkContext). We simplify matters for this
170  * module by requiring sizeof(AllocChunkData) to be maxaligned, and then
171  * we can ensure things work by adding any required alignment padding before
172  * the "aset" field. There is a static assertion below that the alignment
173  * is done correctly.
174  */
175 typedef struct AllocChunkData
176 {
177  /* size is always the size of the usable space in the chunk */
179 #ifdef MEMORY_CONTEXT_CHECKING
180  /* when debugging memory usage, also store actual requested size */
181  /* this is zero in a free chunk */
182  Size requested_size;
183 
184 #define ALLOCCHUNK_RAWSIZE (SIZEOF_SIZE_T * 2 + SIZEOF_VOID_P)
185 #else
186 #define ALLOCCHUNK_RAWSIZE (SIZEOF_SIZE_T + SIZEOF_VOID_P)
187 #endif /* MEMORY_CONTEXT_CHECKING */
188 
189  /* ensure proper alignment by adding padding if needed */
190 #if (ALLOCCHUNK_RAWSIZE % MAXIMUM_ALIGNOF) != 0
191  char padding[MAXIMUM_ALIGNOF - ALLOCCHUNK_RAWSIZE % MAXIMUM_ALIGNOF];
192 #endif
193 
194  /* aset is the owning aset if allocated, or the freelist link if free */
195  void *aset;
196  /* there must not be any padding to reach a MAXALIGN boundary here! */
198 
/*
 * Only the "aset" field should be accessed outside this module.
 * We keep the rest of an allocated chunk's header marked NOACCESS when using
 * valgrind.  But note that chunk headers that are in a freelist are kept
 * accessible, for simplicity.
 */
#define ALLOCCHUNK_PRIVATE_LEN	offsetof(AllocChunkData, aset)

/*
 * AllocPointerIsValid
 *		True iff pointer is valid allocation pointer.
 */
#define AllocPointerIsValid(pointer) PointerIsValid(pointer)

/*
 * AllocSetIsValid
 *		True iff set is valid allocation set.
 */
#define AllocSetIsValid(set) PointerIsValid(set)

#define AllocPointerGetChunk(ptr)	\
					((AllocChunk)(((char *)(ptr)) - ALLOC_CHUNKHDRSZ))
#define AllocChunkGetPointer(chk)	\
					((AllocPointer)(((char *)(chk)) + ALLOC_CHUNKHDRSZ))
223 
224 /*
225  * Rather than repeatedly creating and deleting memory contexts, we keep some
226  * freed contexts in freelists so that we can hand them out again with little
227  * work. Before putting a context in a freelist, we reset it so that it has
228  * only its initial malloc chunk and no others. To be a candidate for a
229  * freelist, a context must have the same minContextSize/initBlockSize as
230  * other contexts in the list; but its maxBlockSize is irrelevant since that
231  * doesn't affect the size of the initial chunk.
232  *
233  * We currently provide one freelist for ALLOCSET_DEFAULT_SIZES contexts
234  * and one for ALLOCSET_SMALL_SIZES contexts; the latter works for
235  * ALLOCSET_START_SMALL_SIZES too, since only the maxBlockSize differs.
236  *
237  * Ordinarily, we re-use freelist contexts in last-in-first-out order, in
238  * hopes of improving locality of reference. But if there get to be too
239  * many contexts in the list, we'd prefer to drop the most-recently-created
240  * contexts in hopes of keeping the process memory map compact.
241  * We approximate that by simply deleting all existing entries when the list
242  * overflows, on the assumption that queries that allocate a lot of contexts
243  * will probably free them in more or less reverse order of allocation.
244  *
245  * Contexts in a freelist are chained via their nextchild pointers.
246  */
247 #define MAX_FREE_CONTEXTS 100 /* arbitrary limit on freelist length */
248 
249 typedef struct AllocSetFreeList
250 {
251  int num_free; /* current list length */
252  AllocSetContext *first_free; /* list header */
254 
255 /* context_freelists[0] is for default params, [1] for small params */
257 {
258  {
259  0, NULL
260  },
261  {
262  0, NULL
263  }
264 };
265 
266 /*
267  * These functions implement the MemoryContext API for AllocSet contexts.
268  */
269 static void *AllocSetAlloc(MemoryContext context, Size size);
270 static void AllocSetFree(MemoryContext context, void *pointer);
271 static void *AllocSetRealloc(MemoryContext context, void *pointer, Size size);
272 static void AllocSetReset(MemoryContext context);
273 static void AllocSetDelete(MemoryContext context);
274 static Size AllocSetGetChunkSpace(MemoryContext context, void *pointer);
275 static bool AllocSetIsEmpty(MemoryContext context);
276 static void AllocSetStats(MemoryContext context,
277  MemoryStatsPrintFunc printfunc, void *passthru,
278  MemoryContextCounters *totals);
279 
280 #ifdef MEMORY_CONTEXT_CHECKING
281 static void AllocSetCheck(MemoryContext context);
282 #endif
283 
284 /*
285  * This is the virtual function table for AllocSet contexts.
286  */
289  AllocSetFree,
296 #ifdef MEMORY_CONTEXT_CHECKING
297  ,AllocSetCheck
298 #endif
299 };
300 
/* ----------
 * Debug macros
 * ----------
 */
#ifdef HAVE_ALLOCINFO
#define AllocFreeInfo(_cxt, _chunk) \
			fprintf(stderr, "AllocFree: %s: %p, %zu\n", \
				(_cxt)->header.name, (_chunk), (_chunk)->size)
#define AllocAllocInfo(_cxt, _chunk) \
			fprintf(stderr, "AllocAlloc: %s: %p, %zu\n", \
				(_cxt)->header.name, (_chunk), (_chunk)->size)
#else
#define AllocFreeInfo(_cxt, _chunk)
#define AllocAllocInfo(_cxt, _chunk)
#endif
316 
317 /* ----------
318  * AllocSetFreeIndex -
319  *
320  * Depending on the size of an allocation compute which freechunk
321  * list of the alloc set it belongs to. Caller must have verified
322  * that size <= ALLOC_CHUNK_LIMIT.
323  * ----------
324  */
325 static inline int
327 {
328  int idx;
329 
330  if (size > (1 << ALLOC_MINBITS))
331  {
332  /*----------
333  * At this point we must compute ceil(log2(size >> ALLOC_MINBITS)).
334  * This is the same as
335  * pg_leftmost_one_pos32((size - 1) >> ALLOC_MINBITS) + 1
336  * or equivalently
337  * pg_leftmost_one_pos32(size - 1) - ALLOC_MINBITS + 1
338  *
339  * However, rather than just calling that function, we duplicate the
340  * logic here, allowing an additional optimization. It's reasonable
341  * to assume that ALLOC_CHUNK_LIMIT fits in 16 bits, so we can unroll
342  * the byte-at-a-time loop in pg_leftmost_one_pos32 and just handle
343  * the last two bytes.
344  *
345  * Yes, this function is enough of a hot-spot to make it worth this
346  * much trouble.
347  *----------
348  */
349 #ifdef HAVE__BUILTIN_CLZ
350  idx = 31 - __builtin_clz((uint32) size - 1) - ALLOC_MINBITS + 1;
351 #else
352  uint32 t,
353  tsize;
354 
355  /* Statically assert that we only have a 16-bit input value. */
357  "ALLOC_CHUNK_LIMIT must be less than 64kB");
358 
359  tsize = size - 1;
360  t = tsize >> 8;
361  idx = t ? pg_leftmost_one_pos[t] + 8 : pg_leftmost_one_pos[tsize];
362  idx -= ALLOC_MINBITS - 1;
363 #endif
364 
366  }
367  else
368  idx = 0;
369 
370  return idx;
371 }
372 
373 
374 /*
375  * Public routines
376  */
377 
378 
379 /*
380  * AllocSetContextCreateInternal
381  * Create a new AllocSet context.
382  *
383  * parent: parent context, or NULL if top-level context
384  * name: name of context (must be statically allocated)
385  * minContextSize: minimum context size
386  * initBlockSize: initial allocation block size
387  * maxBlockSize: maximum allocation block size
388  *
389  * Most callers should abstract the context size parameters using a macro
390  * such as ALLOCSET_DEFAULT_SIZES.
391  *
392  * Note: don't call this directly; go through the wrapper macro
393  * AllocSetContextCreate.
394  */
397  const char *name,
398  Size minContextSize,
401 {
402  int freeListIndex;
403  Size firstBlockSize;
404  AllocSet set;
405  AllocBlock block;
406 
407  /* Assert we padded AllocChunkData properly */
409  "sizeof(AllocChunkData) is not maxaligned");
412  "padding calculation in AllocChunkData is wrong");
413 
414  /*
415  * First, validate allocation parameters. Once these were regular runtime
416  * test and elog's, but in practice Asserts seem sufficient because nobody
417  * varies their parameters at runtime. We somewhat arbitrarily enforce a
418  * minimum 1K block size.
419  */
420  Assert(initBlockSize == MAXALIGN(initBlockSize) &&
421  initBlockSize >= 1024);
422  Assert(maxBlockSize == MAXALIGN(maxBlockSize) &&
423  maxBlockSize >= initBlockSize &&
424  AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
425  Assert(minContextSize == 0 ||
426  (minContextSize == MAXALIGN(minContextSize) &&
427  minContextSize >= 1024 &&
428  minContextSize <= maxBlockSize));
429 
430  /*
431  * Check whether the parameters match either available freelist. We do
432  * not need to demand a match of maxBlockSize.
433  */
434  if (minContextSize == ALLOCSET_DEFAULT_MINSIZE &&
435  initBlockSize == ALLOCSET_DEFAULT_INITSIZE)
436  freeListIndex = 0;
437  else if (minContextSize == ALLOCSET_SMALL_MINSIZE &&
438  initBlockSize == ALLOCSET_SMALL_INITSIZE)
439  freeListIndex = 1;
440  else
441  freeListIndex = -1;
442 
443  /*
444  * If a suitable freelist entry exists, just recycle that context.
445  */
446  if (freeListIndex >= 0)
447  {
448  AllocSetFreeList *freelist = &context_freelists[freeListIndex];
449 
450  if (freelist->first_free != NULL)
451  {
452  /* Remove entry from freelist */
453  set = freelist->first_free;
454  freelist->first_free = (AllocSet) set->header.nextchild;
455  freelist->num_free--;
456 
457  /* Update its maxBlockSize; everything else should be OK */
458  set->maxBlockSize = maxBlockSize;
459 
460  /* Reinitialize its header, installing correct name and parent */
464  parent,
465  name);
466 
467  ((MemoryContext) set)->mem_allocated =
468  set->keeper->endptr - ((char *) set);
469 
470  return (MemoryContext) set;
471  }
472  }
473 
474  /* Determine size of initial block */
475  firstBlockSize = MAXALIGN(sizeof(AllocSetContext)) +
477  if (minContextSize != 0)
478  firstBlockSize = Max(firstBlockSize, minContextSize);
479  else
480  firstBlockSize = Max(firstBlockSize, initBlockSize);
481 
482  /*
483  * Allocate the initial block. Unlike other aset.c blocks, it starts with
484  * the context header and its block header follows that.
485  */
486  set = (AllocSet) malloc(firstBlockSize);
487  if (set == NULL)
488  {
489  if (TopMemoryContext)
491  ereport(ERROR,
492  (errcode(ERRCODE_OUT_OF_MEMORY),
493  errmsg("out of memory"),
494  errdetail("Failed while creating memory context \"%s\".",
495  name)));
496  }
497 
498  /*
499  * Avoid writing code that can fail between here and MemoryContextCreate;
500  * we'd leak the header/initial block if we ereport in this stretch.
501  */
502 
503  /* Fill in the initial block's block header */
504  block = (AllocBlock) (((char *) set) + MAXALIGN(sizeof(AllocSetContext)));
505  block->aset = set;
506  block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
507  block->endptr = ((char *) set) + firstBlockSize;
508  block->prev = NULL;
509  block->next = NULL;
510 
511  /* Mark unallocated space NOACCESS; leave the block header alone. */
512  VALGRIND_MAKE_MEM_NOACCESS(block->freeptr, block->endptr - block->freeptr);
513 
514  /* Remember block as part of block list */
515  set->blocks = block;
516  /* Mark block as not to be released at reset time */
517  set->keeper = block;
518 
519  /* Finish filling in aset-specific parts of the context header */
520  MemSetAligned(set->freelist, 0, sizeof(set->freelist));
521 
522  set->initBlockSize = initBlockSize;
523  set->maxBlockSize = maxBlockSize;
524  set->nextBlockSize = initBlockSize;
525  set->freeListIndex = freeListIndex;
526 
527  /*
528  * Compute the allocation chunk size limit for this context. It can't be
529  * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
530  * If maxBlockSize is small then requests exceeding the maxBlockSize, or
531  * even a significant fraction of it, should be treated as large chunks
532  * too. For the typical case of maxBlockSize a power of 2, the chunk size
533  * limit will be at most 1/8th maxBlockSize, so that given a stream of
534  * requests that are all the maximum chunk size we will waste at most
535  * 1/8th of the allocated space.
536  *
537  * We have to have allocChunkLimit a power of two, because the requested
538  * and actually-allocated sizes of any chunk must be on the same side of
539  * the limit, else we get confused about whether the chunk is "big".
540  *
541  * Also, allocChunkLimit must not exceed ALLOCSET_SEPARATE_THRESHOLD.
542  */
544  "ALLOC_CHUNK_LIMIT != ALLOCSET_SEPARATE_THRESHOLD");
545 
546  set->allocChunkLimit = ALLOC_CHUNK_LIMIT;
547  while ((Size) (set->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
548  (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
549  set->allocChunkLimit >>= 1;
550 
551  /* Finally, do the type-independent part of context creation */
554  &AllocSetMethods,
555  parent,
556  name);
557 
558  ((MemoryContext) set)->mem_allocated = firstBlockSize;
559 
560  return (MemoryContext) set;
561 }
562 
563 /*
564  * AllocSetReset
565  * Frees all memory which is allocated in the given set.
566  *
567  * Actually, this routine has some discretion about what to do.
568  * It should mark all allocated chunks freed, but it need not necessarily
569  * give back all the resources the set owns. Our actual implementation is
570  * that we give back all but the "keeper" block (which we must keep, since
571  * it shares a malloc chunk with the context header). In this way, we don't
572  * thrash malloc() when a context is repeatedly reset after small allocations,
573  * which is typical behavior for per-tuple contexts.
574  */
575 static void
577 {
578  AllocSet set = (AllocSet) context;
579  AllocBlock block;
580  Size keepersize PG_USED_FOR_ASSERTS_ONLY
581  = set->keeper->endptr - ((char *) set);
582 
584 
585 #ifdef MEMORY_CONTEXT_CHECKING
586  /* Check for corruption and leaks before freeing */
587  AllocSetCheck(context);
588 #endif
589 
590  /* Clear chunk freelists */
591  MemSetAligned(set->freelist, 0, sizeof(set->freelist));
592 
593  block = set->blocks;
594 
595  /* New blocks list will be just the keeper block */
596  set->blocks = set->keeper;
597 
598  while (block != NULL)
599  {
600  AllocBlock next = block->next;
601 
602  if (block == set->keeper)
603  {
604  /* Reset the block, but don't return it to malloc */
605  char *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;
606 
607 #ifdef CLOBBER_FREED_MEMORY
608  wipe_mem(datastart, block->freeptr - datastart);
609 #else
610  /* wipe_mem() would have done this */
611  VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
612 #endif
613  block->freeptr = datastart;
614  block->prev = NULL;
615  block->next = NULL;
616  }
617  else
618  {
619  /* Normal case, release the block */
620  context->mem_allocated -= block->endptr - ((char*) block);
621 
622 #ifdef CLOBBER_FREED_MEMORY
623  wipe_mem(block, block->freeptr - ((char *) block));
624 #endif
625  free(block);
626  }
627  block = next;
628  }
629 
630  Assert(context->mem_allocated == keepersize);
631 
632  /* Reset block size allocation sequence, too */
633  set->nextBlockSize = set->initBlockSize;
634 }
635 
636 /*
637  * AllocSetDelete
638  * Frees all memory which is allocated in the given set,
639  * in preparation for deletion of the set.
640  *
641  * Unlike AllocSetReset, this *must* free all resources of the set.
642  */
643 static void
645 {
646  AllocSet set = (AllocSet) context;
647  AllocBlock block = set->blocks;
648  Size keepersize PG_USED_FOR_ASSERTS_ONLY
649  = set->keeper->endptr - ((char *) set);
650 
652 
653 #ifdef MEMORY_CONTEXT_CHECKING
654  /* Check for corruption and leaks before freeing */
655  AllocSetCheck(context);
656 #endif
657 
658  /*
659  * If the context is a candidate for a freelist, put it into that freelist
660  * instead of destroying it.
661  */
662  if (set->freeListIndex >= 0)
663  {
664  AllocSetFreeList *freelist = &context_freelists[set->freeListIndex];
665 
666  /*
667  * Reset the context, if it needs it, so that we aren't hanging on to
668  * more than the initial malloc chunk.
669  */
670  if (!context->isReset)
671  MemoryContextResetOnly(context);
672 
673  /*
674  * If the freelist is full, just discard what's already in it. See
675  * comments with context_freelists[].
676  */
677  if (freelist->num_free >= MAX_FREE_CONTEXTS)
678  {
679  while (freelist->first_free != NULL)
680  {
681  AllocSetContext *oldset = freelist->first_free;
682 
683  freelist->first_free = (AllocSetContext *) oldset->header.nextchild;
684  freelist->num_free--;
685 
686  /* All that remains is to free the header/initial block */
687  free(oldset);
688  }
689  Assert(freelist->num_free == 0);
690  }
691 
692  /* Now add the just-deleted context to the freelist. */
693  set->header.nextchild = (MemoryContext) freelist->first_free;
694  freelist->first_free = set;
695  freelist->num_free++;
696 
697  return;
698  }
699 
700  /* Free all blocks, except the keeper which is part of context header */
701  while (block != NULL)
702  {
703  AllocBlock next = block->next;
704 
705  if (block != set->keeper)
706  context->mem_allocated -= block->endptr - ((char *) block);
707 
708 #ifdef CLOBBER_FREED_MEMORY
709  wipe_mem(block, block->freeptr - ((char *) block));
710 #endif
711 
712  if (block != set->keeper)
713  free(block);
714 
715  block = next;
716  }
717 
718  Assert(context->mem_allocated == keepersize);
719 
720  /* Finally, free the context header, including the keeper block */
721  free(set);
722 }
723 
724 /*
725  * AllocSetAlloc
726  * Returns pointer to allocated memory of given size or NULL if
727  * request could not be completed; memory is added to the set.
728  *
729  * No request may exceed:
730  * MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
731  * All callers use a much-lower limit.
732  *
733  * Note: when using valgrind, it doesn't matter how the returned allocation
734  * is marked, as mcxt.c will set it to UNDEFINED. In some paths we will
735  * return space that is marked NOACCESS - AllocSetRealloc has to beware!
736  */
737 static void *
739 {
740  AllocSet set = (AllocSet) context;
741  AllocBlock block;
742  AllocChunk chunk;
743  int fidx;
744  Size chunk_size;
745  Size blksize;
746 
748 
749  /*
750  * If requested size exceeds maximum for chunks, allocate an entire block
751  * for this request.
752  */
753  if (size > set->allocChunkLimit)
754  {
755  chunk_size = MAXALIGN(size);
756  blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
757  block = (AllocBlock) malloc(blksize);
758  if (block == NULL)
759  return NULL;
760 
761  context->mem_allocated += blksize;
762 
763  block->aset = set;
764  block->freeptr = block->endptr = ((char *) block) + blksize;
765 
766  chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
767  chunk->aset = set;
768  chunk->size = chunk_size;
769 #ifdef MEMORY_CONTEXT_CHECKING
770  chunk->requested_size = size;
771  /* set mark to catch clobber of "unused" space */
772  if (size < chunk_size)
773  set_sentinel(AllocChunkGetPointer(chunk), size);
774 #endif
775 #ifdef RANDOMIZE_ALLOCATED_MEMORY
776  /* fill the allocated space with junk */
777  randomize_mem((char *) AllocChunkGetPointer(chunk), size);
778 #endif
779 
780  /*
781  * Stick the new block underneath the active allocation block, if any,
782  * so that we don't lose the use of the space remaining therein.
783  */
784  if (set->blocks != NULL)
785  {
786  block->prev = set->blocks;
787  block->next = set->blocks->next;
788  if (block->next)
789  block->next->prev = block;
790  set->blocks->next = block;
791  }
792  else
793  {
794  block->prev = NULL;
795  block->next = NULL;
796  set->blocks = block;
797  }
798 
799  AllocAllocInfo(set, chunk);
800 
801  /* Ensure any padding bytes are marked NOACCESS. */
802  VALGRIND_MAKE_MEM_NOACCESS((char *) AllocChunkGetPointer(chunk) + size,
803  chunk_size - size);
804 
805  /* Disallow external access to private part of chunk header. */
807 
808  return AllocChunkGetPointer(chunk);
809  }
810 
811  /*
812  * Request is small enough to be treated as a chunk. Look in the
813  * corresponding free list to see if there is a free chunk we could reuse.
814  * If one is found, remove it from the free list, make it again a member
815  * of the alloc set and return its data address.
816  */
817  fidx = AllocSetFreeIndex(size);
818  chunk = set->freelist[fidx];
819  if (chunk != NULL)
820  {
821  Assert(chunk->size >= size);
822 
823  set->freelist[fidx] = (AllocChunk) chunk->aset;
824 
825  chunk->aset = (void *) set;
826 
827 #ifdef MEMORY_CONTEXT_CHECKING
828  chunk->requested_size = size;
829  /* set mark to catch clobber of "unused" space */
830  if (size < chunk->size)
831  set_sentinel(AllocChunkGetPointer(chunk), size);
832 #endif
833 #ifdef RANDOMIZE_ALLOCATED_MEMORY
834  /* fill the allocated space with junk */
835  randomize_mem((char *) AllocChunkGetPointer(chunk), size);
836 #endif
837 
838  AllocAllocInfo(set, chunk);
839 
840  /* Ensure any padding bytes are marked NOACCESS. */
841  VALGRIND_MAKE_MEM_NOACCESS((char *) AllocChunkGetPointer(chunk) + size,
842  chunk->size - size);
843 
844  /* Disallow external access to private part of chunk header. */
846 
847  return AllocChunkGetPointer(chunk);
848  }
849 
850  /*
851  * Choose the actual chunk size to allocate.
852  */
853  chunk_size = (1 << ALLOC_MINBITS) << fidx;
854  Assert(chunk_size >= size);
855 
856  /*
857  * If there is enough room in the active allocation block, we will put the
858  * chunk into that block. Else must start a new one.
859  */
860  if ((block = set->blocks) != NULL)
861  {
862  Size availspace = block->endptr - block->freeptr;
863 
864  if (availspace < (chunk_size + ALLOC_CHUNKHDRSZ))
865  {
866  /*
867  * The existing active (top) block does not have enough room for
868  * the requested allocation, but it might still have a useful
869  * amount of space in it. Once we push it down in the block list,
870  * we'll never try to allocate more space from it. So, before we
871  * do that, carve up its free space into chunks that we can put on
872  * the set's freelists.
873  *
874  * Because we can only get here when there's less than
875  * ALLOC_CHUNK_LIMIT left in the block, this loop cannot iterate
876  * more than ALLOCSET_NUM_FREELISTS-1 times.
877  */
878  while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
879  {
880  Size availchunk = availspace - ALLOC_CHUNKHDRSZ;
881  int a_fidx = AllocSetFreeIndex(availchunk);
882 
883  /*
884  * In most cases, we'll get back the index of the next larger
885  * freelist than the one we need to put this chunk on. The
886  * exception is when availchunk is exactly a power of 2.
887  */
888  if (availchunk != ((Size) 1 << (a_fidx + ALLOC_MINBITS)))
889  {
890  a_fidx--;
891  Assert(a_fidx >= 0);
892  availchunk = ((Size) 1 << (a_fidx + ALLOC_MINBITS));
893  }
894 
895  chunk = (AllocChunk) (block->freeptr);
896 
897  /* Prepare to initialize the chunk header. */
898  VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);
899 
900  block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
901  availspace -= (availchunk + ALLOC_CHUNKHDRSZ);
902 
903  chunk->size = availchunk;
904 #ifdef MEMORY_CONTEXT_CHECKING
905  chunk->requested_size = 0; /* mark it free */
906 #endif
907  chunk->aset = (void *) set->freelist[a_fidx];
908  set->freelist[a_fidx] = chunk;
909  }
910 
911  /* Mark that we need to create a new block */
912  block = NULL;
913  }
914  }
915 
916  /*
917  * Time to create a new regular (multi-chunk) block?
918  */
919  if (block == NULL)
920  {
921  Size required_size;
922 
923  /*
924  * The first such block has size initBlockSize, and we double the
925  * space in each succeeding block, but not more than maxBlockSize.
926  */
927  blksize = set->nextBlockSize;
928  set->nextBlockSize <<= 1;
929  if (set->nextBlockSize > set->maxBlockSize)
930  set->nextBlockSize = set->maxBlockSize;
931 
932  /*
933  * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
934  * space... but try to keep it a power of 2.
935  */
936  required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
937  while (blksize < required_size)
938  blksize <<= 1;
939 
940  /* Try to allocate it */
941  block = (AllocBlock) malloc(blksize);
942 
943  /*
944  * We could be asking for pretty big blocks here, so cope if malloc
945  * fails. But give up if there's less than 1 MB or so available...
946  */
947  while (block == NULL && blksize > 1024 * 1024)
948  {
949  blksize >>= 1;
950  if (blksize < required_size)
951  break;
952  block = (AllocBlock) malloc(blksize);
953  }
954 
955  if (block == NULL)
956  return NULL;
957 
958  context->mem_allocated += blksize;
959 
960  block->aset = set;
961  block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
962  block->endptr = ((char *) block) + blksize;
963 
964  /* Mark unallocated space NOACCESS. */
966  blksize - ALLOC_BLOCKHDRSZ);
967 
968  block->prev = NULL;
969  block->next = set->blocks;
970  if (block->next)
971  block->next->prev = block;
972  set->blocks = block;
973  }
974 
975  /*
976  * OK, do the allocation
977  */
978  chunk = (AllocChunk) (block->freeptr);
979 
980  /* Prepare to initialize the chunk header. */
982 
983  block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
984  Assert(block->freeptr <= block->endptr);
985 
986  chunk->aset = (void *) set;
987  chunk->size = chunk_size;
988 #ifdef MEMORY_CONTEXT_CHECKING
989  chunk->requested_size = size;
990  /* set mark to catch clobber of "unused" space */
991  if (size < chunk->size)
992  set_sentinel(AllocChunkGetPointer(chunk), size);
993 #endif
994 #ifdef RANDOMIZE_ALLOCATED_MEMORY
995  /* fill the allocated space with junk */
996  randomize_mem((char *) AllocChunkGetPointer(chunk), size);
997 #endif
998 
999  AllocAllocInfo(set, chunk);
1000 
1001  /* Ensure any padding bytes are marked NOACCESS. */
1002  VALGRIND_MAKE_MEM_NOACCESS((char *) AllocChunkGetPointer(chunk) + size,
1003  chunk_size - size);
1004 
1005  /* Disallow external access to private part of chunk header. */
1007 
1008  return AllocChunkGetPointer(chunk);
1009 }
1010 
1011 /*
1012  * AllocSetFree
1013  * Frees allocated memory; memory is removed from the set.
1014  */
1015 static void
1016 AllocSetFree(MemoryContext context, void *pointer)
1017 {
1018  AllocSet set = (AllocSet) context;
1019  AllocChunk chunk = AllocPointerGetChunk(pointer);
1020 
1021  /* Allow access to private part of chunk header. */
1023 
1024  AllocFreeInfo(set, chunk);
1025 
1026 #ifdef MEMORY_CONTEXT_CHECKING
1027  /* Test for someone scribbling on unused space in chunk */
1028  if (chunk->requested_size < chunk->size)
1029  if (!sentinel_ok(pointer, chunk->requested_size))
1030  elog(WARNING, "detected write past chunk end in %s %p",
1031  set->header.name, chunk);
1032 #endif
1033 
1034  if (chunk->size > set->allocChunkLimit)
1035  {
1036  /*
1037  * Big chunks are certain to have been allocated as single-chunk
1038  * blocks. Just unlink that block and return it to malloc().
1039  */
1040  AllocBlock block = (AllocBlock) (((char *) chunk) - ALLOC_BLOCKHDRSZ);
1041 
1042  /*
1043  * Try to verify that we have a sane block pointer: it should
1044  * reference the correct aset, and freeptr and endptr should point
1045  * just past the chunk.
1046  */
1047  if (block->aset != set ||
1048  block->freeptr != block->endptr ||
1049  block->freeptr != ((char *) block) +
1050  (chunk->size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
1051  elog(ERROR, "could not find block containing chunk %p", chunk);
1052 
1053  /* OK, remove block from aset's list and free it */
1054  if (block->prev)
1055  block->prev->next = block->next;
1056  else
1057  set->blocks = block->next;
1058  if (block->next)
1059  block->next->prev = block->prev;
1060 
1061  context->mem_allocated -= block->endptr - ((char*) block);
1062 
1063 #ifdef CLOBBER_FREED_MEMORY
1064  wipe_mem(block, block->freeptr - ((char *) block));
1065 #endif
1066  free(block);
1067  }
1068  else
1069  {
1070  /* Normal case, put the chunk into appropriate freelist */
1071  int fidx = AllocSetFreeIndex(chunk->size);
1072 
1073  chunk->aset = (void *) set->freelist[fidx];
1074 
1075 #ifdef CLOBBER_FREED_MEMORY
1076  wipe_mem(pointer, chunk->size);
1077 #endif
1078 
1079 #ifdef MEMORY_CONTEXT_CHECKING
1080  /* Reset requested_size to 0 in chunks that are on freelist */
1081  chunk->requested_size = 0;
1082 #endif
1083  set->freelist[fidx] = chunk;
1084  }
1085 }
1086 
1087 /*
1088  * AllocSetRealloc
1089  * Returns new pointer to allocated memory of given size or NULL if
1090  * request could not be completed; this memory is added to the set.
1091  * Memory associated with given pointer is copied into the new memory,
1092  * and the old memory is freed.
1093  *
1094  * Without MEMORY_CONTEXT_CHECKING, we don't know the old request size. This
1095  * makes our Valgrind client requests less-precise, hazarding false negatives.
1096  * (In principle, we could use VALGRIND_GET_VBITS() to rediscover the old
1097  * request size.)
1098  */
1099 static void *
1100 AllocSetRealloc(MemoryContext context, void *pointer, Size size)
1101 {
1102  AllocSet set = (AllocSet) context;
1103  AllocChunk chunk = AllocPointerGetChunk(pointer);
1104  Size oldsize;
1105 
1106  /* Allow access to private part of chunk header. */
1108 
1109  oldsize = chunk->size;
1110 
1111 #ifdef MEMORY_CONTEXT_CHECKING
1112  /* Test for someone scribbling on unused space in chunk */
1113  if (chunk->requested_size < oldsize)
1114  if (!sentinel_ok(pointer, chunk->requested_size))
1115  elog(WARNING, "detected write past chunk end in %s %p",
1116  set->header.name, chunk);
1117 #endif
1118 
1119  if (oldsize > set->allocChunkLimit)
1120  {
1121  /*
1122  * The chunk must have been allocated as a single-chunk block. Use
1123  * realloc() to make the containing block bigger, or smaller, with
1124  * minimum space wastage.
1125  */
1126  AllocBlock block = (AllocBlock) (((char *) chunk) - ALLOC_BLOCKHDRSZ);
1127  Size chksize;
1128  Size blksize;
1129  Size oldblksize;
1130 
1131  /*
1132  * Try to verify that we have a sane block pointer: it should
1133  * reference the correct aset, and freeptr and endptr should point
1134  * just past the chunk.
1135  */
1136  if (block->aset != set ||
1137  block->freeptr != block->endptr ||
1138  block->freeptr != ((char *) block) +
1139  (oldsize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
1140  elog(ERROR, "could not find block containing chunk %p", chunk);
1141 
1142  /*
1143  * Even if the new request is less than set->allocChunkLimit, we stick
1144  * with the single-chunk block approach. Therefore we need
1145  * chunk->size to be bigger than set->allocChunkLimit, so we don't get
1146  * confused about the chunk's status in future calls.
1147  */
1148  chksize = Max(size, set->allocChunkLimit + 1);
1149  chksize = MAXALIGN(chksize);
1150 
1151  /* Do the realloc */
1152  blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
1153  oldblksize = block->endptr - ((char *)block);
1154 
1155  block = (AllocBlock) realloc(block, blksize);
1156  if (block == NULL)
1157  {
1158  /* Disallow external access to private part of chunk header. */
1160  return NULL;
1161  }
1162 
1163  /* updated separately, not to underflow when (oldblksize > blksize) */
1164  context->mem_allocated -= oldblksize;
1165  context->mem_allocated += blksize;
1166 
1167  block->freeptr = block->endptr = ((char *) block) + blksize;
1168 
1169  /* Update pointers since block has likely been moved */
1170  chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
1171  pointer = AllocChunkGetPointer(chunk);
1172  if (block->prev)
1173  block->prev->next = block;
1174  else
1175  set->blocks = block;
1176  if (block->next)
1177  block->next->prev = block;
1178  chunk->size = chksize;
1179 
1180 #ifdef MEMORY_CONTEXT_CHECKING
1181 #ifdef RANDOMIZE_ALLOCATED_MEMORY
1182  /* We can only fill the extra space if we know the prior request */
1183  if (size > chunk->requested_size)
1184  randomize_mem((char *) pointer + chunk->requested_size,
1185  size - chunk->requested_size);
1186 #endif
1187 
1188  /*
1189  * realloc() (or randomize_mem()) will have left any newly-allocated
1190  * part UNDEFINED, but we may need to adjust trailing bytes from the
1191  * old allocation.
1192  */
1193 #ifdef USE_VALGRIND
1194  if (oldsize > chunk->requested_size)
1195  VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
1196  oldsize - chunk->requested_size);
1197 #endif
1198 
1199  chunk->requested_size = size;
1200 
1201  /* set mark to catch clobber of "unused" space */
1202  if (size < chunk->size)
1203  set_sentinel(pointer, size);
1204 #else /* !MEMORY_CONTEXT_CHECKING */
1205 
1206  /*
1207  * We don't know how much of the old chunk size was the actual
1208  * allocation; it could have been as small as one byte. We have to be
1209  * conservative and just mark the entire old portion DEFINED.
1210  */
1211  VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
1212 #endif
1213 
1214  /* Ensure any padding bytes are marked NOACCESS. */
1215  VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
1216 
1217  /* Disallow external access to private part of chunk header. */
1219 
1220  return pointer;
1221  }
1222 
1223  /*
1224  * Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
1225  * allocated area already is >= the new size. (In particular, we will
1226  * fall out here if the requested size is a decrease.)
1227  */
1228  else if (oldsize >= size)
1229  {
1230 #ifdef MEMORY_CONTEXT_CHECKING
1231  Size oldrequest = chunk->requested_size;
1232 
1233 #ifdef RANDOMIZE_ALLOCATED_MEMORY
1234  /* We can only fill the extra space if we know the prior request */
1235  if (size > oldrequest)
1236  randomize_mem((char *) pointer + oldrequest,
1237  size - oldrequest);
1238 #endif
1239 
1240  chunk->requested_size = size;
1241 
1242  /*
1243  * If this is an increase, mark any newly-available part UNDEFINED.
1244  * Otherwise, mark the obsolete part NOACCESS.
1245  */
1246  if (size > oldrequest)
1247  VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
1248  size - oldrequest);
1249  else
1250  VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
1251  oldsize - size);
1252 
1253  /* set mark to catch clobber of "unused" space */
1254  if (size < oldsize)
1255  set_sentinel(pointer, size);
1256 #else /* !MEMORY_CONTEXT_CHECKING */
1257 
1258  /*
1259  * We don't have the information to determine whether we're growing
1260  * the old request or shrinking it, so we conservatively mark the
1261  * entire new allocation DEFINED.
1262  */
1263  VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
1264  VALGRIND_MAKE_MEM_DEFINED(pointer, size);
1265 #endif
1266 
1267  /* Disallow external access to private part of chunk header. */
1269 
1270  return pointer;
1271  }
1272  else
1273  {
1274  /*
1275  * Enlarge-a-small-chunk case. We just do this by brute force, ie,
1276  * allocate a new chunk and copy the data. Since we know the existing
1277  * data isn't huge, this won't involve any great memcpy expense, so
1278  * it's not worth being smarter. (At one time we tried to avoid
1279  * memcpy when it was possible to enlarge the chunk in-place, but that
1280  * turns out to misbehave unpleasantly for repeated cycles of
1281  * palloc/repalloc/pfree: the eventually freed chunks go into the
1282  * wrong freelist for the next initial palloc request, and so we leak
1283  * memory indefinitely. See pgsql-hackers archives for 2007-08-11.)
1284  */
1285  AllocPointer newPointer;
1286 
1287  /* allocate new chunk */
1288  newPointer = AllocSetAlloc((MemoryContext) set, size);
1289 
1290  /* leave immediately if request was not completed */
1291  if (newPointer == NULL)
1292  {
1293  /* Disallow external access to private part of chunk header. */
1295  return NULL;
1296  }
1297 
1298  /*
1299  * AllocSetAlloc() may have returned a region that is still NOACCESS.
1300  * Change it to UNDEFINED for the moment; memcpy() will then transfer
1301  * definedness from the old allocation to the new. If we know the old
1302  * allocation, copy just that much. Otherwise, make the entire old
1303  * chunk defined to avoid errors as we copy the currently-NOACCESS
1304  * trailing bytes.
1305  */
1306  VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
1307 #ifdef MEMORY_CONTEXT_CHECKING
1308  oldsize = chunk->requested_size;
1309 #else
1310  VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
1311 #endif
1312 
1313  /* transfer existing data (certain to fit) */
1314  memcpy(newPointer, pointer, oldsize);
1315 
1316  /* free old chunk */
1317  AllocSetFree((MemoryContext) set, pointer);
1318 
1319  return newPointer;
1320  }
1321 }
1322 
1323 /*
1324  * AllocSetGetChunkSpace
1325  * Given a currently-allocated chunk, determine the total space
1326  * it occupies (including all memory-allocation overhead).
1327  */
1328 static Size
1329 AllocSetGetChunkSpace(MemoryContext context, void *pointer)
1330 {
1331  AllocChunk chunk = AllocPointerGetChunk(pointer);
1332  Size result;
1333 
1335  result = chunk->size + ALLOC_CHUNKHDRSZ;
1337  return result;
1338 }
1339 
1340 /*
1341  * AllocSetIsEmpty
1342  * Is an allocset empty of any allocated space?
1343  */
1344 static bool
1346 {
1347  /*
1348  * For now, we say "empty" only if the context is new or just reset. We
1349  * could examine the freelists to determine if all space has been freed,
1350  * but it's not really worth the trouble for present uses of this
1351  * functionality.
1352  */
1353  if (context->isReset)
1354  return true;
1355  return false;
1356 }
1357 
1358 /*
1359  * AllocSetStats
1360  * Compute stats about memory consumption of an allocset.
1361  *
1362  * printfunc: if not NULL, pass a human-readable stats string to this.
1363  * passthru: pass this pointer through to printfunc.
1364  * totals: if not NULL, add stats about this context into *totals.
1365  */
1366 static void
1368  MemoryStatsPrintFunc printfunc, void *passthru,
1369  MemoryContextCounters *totals)
1370 {
1371  AllocSet set = (AllocSet) context;
1372  Size nblocks = 0;
1373  Size freechunks = 0;
1374  Size totalspace;
1375  Size freespace = 0;
1376  AllocBlock block;
1377  int fidx;
1378 
1379  /* Include context header in totalspace */
1380  totalspace = MAXALIGN(sizeof(AllocSetContext));
1381 
1382  for (block = set->blocks; block != NULL; block = block->next)
1383  {
1384  nblocks++;
1385  totalspace += block->endptr - ((char *) block);
1386  freespace += block->endptr - block->freeptr;
1387  }
1388  for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
1389  {
1390  AllocChunk chunk;
1391 
1392  for (chunk = set->freelist[fidx]; chunk != NULL;
1393  chunk = (AllocChunk) chunk->aset)
1394  {
1395  freechunks++;
1396  freespace += chunk->size + ALLOC_CHUNKHDRSZ;
1397  }
1398  }
1399 
1400  if (printfunc)
1401  {
1402  char stats_string[200];
1403 
1404  snprintf(stats_string, sizeof(stats_string),
1405  "%zu total in %zd blocks; %zu free (%zd chunks); %zu used",
1406  totalspace, nblocks, freespace, freechunks,
1407  totalspace - freespace);
1408  printfunc(context, passthru, stats_string);
1409  }
1410 
1411  if (totals)
1412  {
1413  totals->nblocks += nblocks;
1414  totals->freechunks += freechunks;
1415  totals->totalspace += totalspace;
1416  totals->freespace += freespace;
1417  }
1418 }
1419 
1420 
1421 #ifdef MEMORY_CONTEXT_CHECKING
1422 
1423 /*
1424  * AllocSetCheck
1425  * Walk through chunks and check consistency of memory.
1426  *
1427  * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
1428  * find yourself in an infinite loop when trouble occurs, because this
1429  * routine will be entered again when elog cleanup tries to release memory!
1430  */
1431 static void
1432 AllocSetCheck(MemoryContext context)
1433 {
1434  AllocSet set = (AllocSet) context;
1435  const char *name = set->header.name;
1436  AllocBlock prevblock;
1437  AllocBlock block;
1438  Size total_allocated = 0;
1439 
1440  for (prevblock = NULL, block = set->blocks;
1441  block != NULL;
1442  prevblock = block, block = block->next)
1443  {
1444  char *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
1445  long blk_used = block->freeptr - bpoz;
1446  long blk_data = 0;
1447  long nchunks = 0;
1448 
1449  if (set->keeper == block)
1450  total_allocated += block->endptr - ((char *) set);
1451  else
1452  total_allocated += block->endptr - ((char *) block);
1453 
1454  /*
1455  * Empty block - empty can be keeper-block only
1456  */
1457  if (!blk_used)
1458  {
1459  if (set->keeper != block)
1460  elog(WARNING, "problem in alloc set %s: empty block %p",
1461  name, block);
1462  }
1463 
1464  /*
1465  * Check block header fields
1466  */
1467  if (block->aset != set ||
1468  block->prev != prevblock ||
1469  block->freeptr < bpoz ||
1470  block->freeptr > block->endptr)
1471  elog(WARNING, "problem in alloc set %s: corrupt header in block %p",
1472  name, block);
1473 
1474  /*
1475  * Chunk walker
1476  */
1477  while (bpoz < block->freeptr)
1478  {
1479  AllocChunk chunk = (AllocChunk) bpoz;
1480  Size chsize,
1481  dsize;
1482 
1483  /* Allow access to private part of chunk header. */
1485 
1486  chsize = chunk->size; /* aligned chunk size */
1487  dsize = chunk->requested_size; /* real data */
1488 
1489  /*
1490  * Check chunk size
1491  */
1492  if (dsize > chsize)
1493  elog(WARNING, "problem in alloc set %s: req size > alloc size for chunk %p in block %p",
1494  name, chunk, block);
1495  if (chsize < (1 << ALLOC_MINBITS))
1496  elog(WARNING, "problem in alloc set %s: bad size %zu for chunk %p in block %p",
1497  name, chsize, chunk, block);
1498 
1499  /* single-chunk block? */
1500  if (chsize > set->allocChunkLimit &&
1501  chsize + ALLOC_CHUNKHDRSZ != blk_used)
1502  elog(WARNING, "problem in alloc set %s: bad single-chunk %p in block %p",
1503  name, chunk, block);
1504 
1505  /*
1506  * If chunk is allocated, check for correct aset pointer. (If it's
1507  * free, the aset is the freelist pointer, which we can't check as
1508  * easily...) Note this is an incomplete test, since palloc(0)
1509  * produces an allocated chunk with requested_size == 0.
1510  */
1511  if (dsize > 0 && chunk->aset != (void *) set)
1512  elog(WARNING, "problem in alloc set %s: bogus aset link in block %p, chunk %p",
1513  name, block, chunk);
1514 
1515  /*
1516  * Check for overwrite of padding space in an allocated chunk.
1517  */
1518  if (chunk->aset == (void *) set && dsize < chsize &&
1519  !sentinel_ok(chunk, ALLOC_CHUNKHDRSZ + dsize))
1520  elog(WARNING, "problem in alloc set %s: detected write past chunk end in block %p, chunk %p",
1521  name, block, chunk);
1522 
1523  /*
1524  * If chunk is allocated, disallow external access to private part
1525  * of chunk header.
1526  */
1527  if (chunk->aset == (void *) set)
1529 
1530  blk_data += chsize;
1531  nchunks++;
1532 
1533  bpoz += ALLOC_CHUNKHDRSZ + chsize;
1534  }
1535 
1536  if ((blk_data + (nchunks * ALLOC_CHUNKHDRSZ)) != blk_used)
1537  elog(WARNING, "problem in alloc set %s: found inconsistent memory block %p",
1538  name, block);
1539  }
1540 
1541  Assert(total_allocated == context->mem_allocated);
1542 }
1543 
1544 #endif /* MEMORY_CONTEXT_CHECKING */
#define MemSetAligned(start, val, len)
Definition: c.h:995
Size initBlockSize
Definition: aset.c:131
#define AllocFreeInfo(_cxt, _chunk)
Definition: aset.c:313
#define VALGRIND_MAKE_MEM_DEFINED(addr, size)
Definition: memdebug.h:26
AllocBlock blocks
Definition: aset.c:128
static int32 next
Definition: blutils.c:217
void(* MemoryStatsPrintFunc)(MemoryContext context, void *passthru, const char *stats_string)
Definition: memnodes.h:54
struct AllocBlockData AllocBlockData
static Size AllocSetGetChunkSpace(MemoryContext context, void *pointer)
Definition: aset.c:1329
MemoryContextData header
Definition: aset.c:126
void * AllocPointer
Definition: aset.c:113
#define AllocSetIsValid(set)
Definition: aset.c:217
#define VALGRIND_MAKE_MEM_UNDEFINED(addr, size)
Definition: memdebug.h:28
struct AllocSetFreeList AllocSetFreeList
#define VALGRIND_MAKE_MEM_NOACCESS(addr, size)
Definition: memdebug.h:27
static int AllocSetFreeIndex(Size size)
Definition: aset.c:326
static void AllocSetReset(MemoryContext context)
Definition: aset.c:576
int num_free
Definition: aset.c:251
#define AllocChunkGetPointer(chk)
Definition: aset.c:221
#define ALLOCCHUNK_PRIVATE_LEN
Definition: aset.c:205
int errcode(int sqlerrcode)
Definition: elog.c:608
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:263
#define ALLOC_BLOCKHDRSZ
Definition: aset.c:103
AllocBlock keeper
Definition: aset.c:135
AllocSet aset
Definition: aset.c:156
void MemoryContextResetOnly(MemoryContext context)
Definition: mcxt.c:155
char * freeptr
Definition: aset.c:159
#define ALLOCSET_DEFAULT_MINSIZE
Definition: memutils.h:189
#define ALLOCSET_SMALL_MINSIZE
Definition: memutils.h:199
AllocSetContext * first_free
Definition: aset.c:252
#define malloc(a)
Definition: header.h:50
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:849
static AllocSetFreeList context_freelists[2]
Definition: aset.c:256
#define ERROR
Definition: elog.h:43
#define ALLOC_CHUNK_LIMIT
Definition: aset.c:84
void MemoryContextStats(MemoryContext context)
Definition: mcxt.c:498
PGDLLIMPORT const uint8 pg_leftmost_one_pos[256]
Definition: pg_bitutils.c:34
char * endptr
Definition: aset.c:160
#define ALLOCSET_NUM_FREELISTS
Definition: aset.c:83
static void * AllocSetRealloc(MemoryContext context, void *pointer, Size size)
Definition: aset.c:1100
static void AllocSetStats(MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals)
Definition: aset.c:1367
#define ALLOCSET_SEPARATE_THRESHOLD
Definition: memutils.h:219
int errdetail(const char *fmt,...)
Definition: elog.c:955
#define ALLOC_CHUNKHDRSZ
Definition: aset.c:104
unsigned int uint32
Definition: c.h:359
AllocBlock next
Definition: aset.c:158
void MemoryContextCreate(MemoryContext node, NodeTag tag, const MemoryContextMethods *methods, MemoryContext parent, const char *name)
Definition: mcxt.c:748
#define ereport(elevel, rest)
Definition: elog.h:141
#define AssertArg(condition)
Definition: c.h:741
MemoryContext TopMemoryContext
Definition: mcxt.c:44
struct MemoryContextData * MemoryContext
Definition: palloc.h:36
MemoryContext AllocSetContextCreateInternal(MemoryContext parent, const char *name, Size minContextSize, Size initBlockSize, Size maxBlockSize)
Definition: aset.c:396
static bool AllocSetIsEmpty(MemoryContext context)
Definition: aset.c:1345
#define WARNING
Definition: elog.h:40
#define ALLOCSET_SMALL_INITSIZE
Definition: memutils.h:200
int freeListIndex
Definition: aset.c:137
#define ALLOCCHUNK_RAWSIZE
Definition: aset.c:186
#define ALLOC_CHUNK_FRACTION
Definition: aset.c:86
struct AllocBlockData * AllocBlock
Definition: aset.c:106
#define MAX_FREE_CONTEXTS
Definition: aset.c:247
AllocChunk freelist[ALLOCSET_NUM_FREELISTS]
Definition: aset.c:129
Size nextBlockSize
Definition: aset.c:133
AllocBlock prev
Definition: aset.c:157
#define free(a)
Definition: header.h:65
Size allocChunkLimit
Definition: aset.c:134
#define Max(x, y)
Definition: c.h:905
struct AllocChunkData * AllocChunk
Definition: aset.c:107
#define Assert(condition)
Definition: c.h:739
static void AllocSetFree(MemoryContext context, void *pointer)
Definition: aset.c:1016
size_t Size
Definition: c.h:467
#define MAXALIGN(LEN)
Definition: c.h:692
struct AllocChunkData AllocChunkData
static void AllocSetDelete(MemoryContext context)
Definition: aset.c:644
#define realloc(a, b)
Definition: header.h:60
const char * name
Definition: encode.c:521
Size mem_allocated
Definition: memnodes.h:82
#define AllocHugeSizeIsValid(size)
Definition: memutils.h:46
void * aset
Definition: aset.c:195
int errmsg(const char *fmt,...)
Definition: elog.c:822
#define elog(elevel,...)
Definition: elog.h:228
#define ALLOCSET_DEFAULT_INITSIZE
Definition: memutils.h:190
static void * AllocSetAlloc(MemoryContext context, Size size)
Definition: aset.c:738
static const MemoryContextMethods AllocSetMethods
Definition: aset.c:287
AllocSetContext * AllocSet
Definition: aset.c:140
struct AllocSetContext AllocSetContext
#define ALLOC_MINBITS
Definition: aset.c:82
Size maxBlockSize
Definition: aset.c:132
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:123
#define snprintf
Definition: port.h:192
#define AllocAllocInfo(_cxt, _chunk)
Definition: aset.c:314
#define offsetof(type, field)
Definition: c.h:662
MemoryContext nextchild
Definition: memnodes.h:87
Size size
Definition: aset.c:178
#define AllocPointerGetChunk(ptr)
Definition: aset.c:219