/* PostgreSQL Source Code (git master): aset.c */
1 /*-------------------------------------------------------------------------
2  *
3  * aset.c
4  * Allocation set definitions.
5  *
6  * AllocSet is our standard implementation of the abstract MemoryContext
7  * type.
8  *
9  *
10  * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
11  * Portions Copyright (c) 1994, Regents of the University of California
12  *
13  * IDENTIFICATION
14  * src/backend/utils/mmgr/aset.c
15  *
16  * NOTE:
17  * This is a new (Feb. 05, 1999) implementation of the allocation set
18  * routines. AllocSet...() does not use OrderedSet...() any more.
19  * Instead it manages allocations in a block pool by itself, combining
20  * many small allocations in a few bigger blocks. AllocSetFree() normally
21  * doesn't free() memory really. It just add's the free'd area to some
22  * list for later reuse by AllocSetAlloc(). All memory blocks are free()'d
23  * at once on AllocSetReset(), which happens when the memory context gets
24  * destroyed.
25  * Jan Wieck
26  *
27  * Performance improvement from Tom Lane, 8/99: for extremely large request
28  * sizes, we do want to be able to give the memory back to free() as soon
29  * as it is pfree()'d. Otherwise we risk tying up a lot of memory in
30  * freelist entries that might never be usable. This is specially needed
31  * when the caller is repeatedly repalloc()'ing a block bigger and bigger;
32  * the previous instances of the block were guaranteed to be wasted until
33  * AllocSetReset() under the old way.
34  *
35  * Further improvement 12/00: as the code stood, request sizes in the
36  * midrange between "small" and "large" were handled very inefficiently,
37  * because any sufficiently large free chunk would be used to satisfy a
38  * request, even if it was much larger than necessary. This led to more
39  * and more wasted space in allocated chunks over time. To fix, get rid
40  * of the midrange behavior: we now handle only "small" power-of-2-size
41  * chunks as chunks. Anything "large" is passed off to malloc(). Change
42  * the number of freelists to change the small/large boundary.
43  *
44  *-------------------------------------------------------------------------
45  */
46 
47 #include "postgres.h"
48 
49 #include "utils/memdebug.h"
50 #include "utils/memutils.h"
51 
52 /* Define this to detail debug alloc information */
53 /* #define HAVE_ALLOCINFO */
54 
55 /*--------------------
56  * Chunk freelist k holds chunks of size 1 << (k + ALLOC_MINBITS),
57  * for k = 0 .. ALLOCSET_NUM_FREELISTS-1.
58  *
59  * Note that all chunks in the freelists have power-of-2 sizes. This
60  * improves recyclability: we may waste some space, but the wasted space
61  * should stay pretty constant as requests are made and released.
62  *
63  * A request too large for the last freelist is handled by allocating a
64  * dedicated block from malloc(). The block still has a block header and
65  * chunk header, but when the chunk is freed we'll return the whole block
66  * to malloc(), not put it on our freelists.
67  *
68  * CAUTION: ALLOC_MINBITS must be large enough so that
69  * 1<<ALLOC_MINBITS is at least MAXALIGN,
70  * or we may fail to align the smallest chunks adequately.
71  * 8-byte alignment is enough on all currently known machines.
72  *
73  * With the current parameters, request sizes up to 8K are treated as chunks,
74  * larger requests go into dedicated blocks. Change ALLOCSET_NUM_FREELISTS
75  * to adjust the boundary point; and adjust ALLOCSET_SEPARATE_THRESHOLD in
76  * memutils.h to agree. (Note: in contexts with small maxBlockSize, we may
77  * set the allocChunkLimit to less than 8K, so as to avoid space wastage.)
78  *--------------------
79  */
80 
#define ALLOC_MINBITS		3	/* smallest chunk size is 8 bytes */
#define ALLOCSET_NUM_FREELISTS	11
#define ALLOC_CHUNK_LIMIT	(1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
/* Size of largest chunk that we use a fixed size for */
#define ALLOC_CHUNK_FRACTION	4
/* We allow chunks to be at most 1/4 of maxBlockSize (less overhead) */
87 
88 /*--------------------
89  * The first block allocated for an allocset has size initBlockSize.
90  * Each time we have to allocate another block, we double the block size
91  * (if possible, and without exceeding maxBlockSize), so as to reduce
92  * the bookkeeping load on malloc().
93  *
94  * Blocks allocated to hold oversize chunks do not follow this rule, however;
95  * they are just however big they need to be to hold that single chunk.
96  *
97  * Also, if a minContextSize is specified, the first block has that size,
98  * and then initBlockSize is used for the next one.
99  *--------------------
100  */
101 
#define ALLOC_BLOCKHDRSZ	MAXALIGN(sizeof(AllocBlockData))
#define ALLOC_CHUNKHDRSZ	sizeof(struct AllocChunkData)

typedef struct AllocBlockData *AllocBlock;	/* forward reference */
typedef struct AllocChunkData *AllocChunk;

/*
 * AllocPointer
 *		Aligned pointer which may be a member of an allocation set.
 */
typedef void *AllocPointer;
113 
114 /*
115  * AllocSetContext is our standard implementation of MemoryContext.
116  *
117  * Note: header.isReset means there is nothing for AllocSetReset to do.
118  * This is different from the aset being physically empty (empty blocks list)
119  * because we will still have a keeper block. It's also different from the set
120  * being logically empty, because we don't attempt to detect pfree'ing the
121  * last active chunk.
122  */
123 typedef struct AllocSetContext
124 {
125  MemoryContextData header; /* Standard memory-context fields */
126  /* Info about storage allocated in this context: */
127  AllocBlock blocks; /* head of list of blocks in this set */
128  AllocChunk freelist[ALLOCSET_NUM_FREELISTS]; /* free chunk lists */
129  /* Allocation parameters for this context: */
130  Size initBlockSize; /* initial block size */
131  Size maxBlockSize; /* maximum block size */
132  Size nextBlockSize; /* next block size to allocate */
133  Size allocChunkLimit; /* effective chunk size limit */
134  AllocBlock keeper; /* keep this block over resets */
135  /* freelist this context could be put in, or -1 if not a candidate: */
136  int freeListIndex; /* index in context_freelists[], or -1 */
138 
140 
141 /*
142  * AllocBlock
143  * An AllocBlock is the unit of memory that is obtained by aset.c
144  * from malloc(). It contains one or more AllocChunks, which are
145  * the units requested by palloc() and freed by pfree(). AllocChunks
146  * cannot be returned to malloc() individually, instead they are put
147  * on freelists by pfree() and re-used by the next palloc() that has
148  * a matching request size.
149  *
150  * AllocBlockData is the header data for a block --- the usable space
151  * within the block begins at the next alignment boundary.
152  */
153 typedef struct AllocBlockData
154 {
155  AllocSet aset; /* aset that owns this block */
156  AllocBlock prev; /* prev block in aset's blocks list, if any */
157  AllocBlock next; /* next block in aset's blocks list, if any */
158  char *freeptr; /* start of free space in this block */
159  char *endptr; /* end of space in this block */
161 
162 /*
163  * AllocChunk
164  * The prefix of each piece of memory in an AllocBlock
165  *
166  * Note: to meet the memory context APIs, the payload area of the chunk must
167  * be maxaligned, and the "aset" link must be immediately adjacent to the
168  * payload area (cf. GetMemoryChunkContext). We simplify matters for this
169  * module by requiring sizeof(AllocChunkData) to be maxaligned, and then
170  * we can ensure things work by adding any required alignment padding before
171  * the "aset" field. There is a static assertion below that the alignment
172  * is done correctly.
173  */
174 typedef struct AllocChunkData
175 {
176  /* size is always the size of the usable space in the chunk */
178 #ifdef MEMORY_CONTEXT_CHECKING
179  /* when debugging memory usage, also store actual requested size */
180  /* this is zero in a free chunk */
181  Size requested_size;
182 
183 #define ALLOCCHUNK_RAWSIZE (SIZEOF_SIZE_T * 2 + SIZEOF_VOID_P)
184 #else
185 #define ALLOCCHUNK_RAWSIZE (SIZEOF_SIZE_T + SIZEOF_VOID_P)
186 #endif /* MEMORY_CONTEXT_CHECKING */
187 
188  /* ensure proper alignment by adding padding if needed */
189 #if (ALLOCCHUNK_RAWSIZE % MAXIMUM_ALIGNOF) != 0
190  char padding[MAXIMUM_ALIGNOF - ALLOCCHUNK_RAWSIZE % MAXIMUM_ALIGNOF];
191 #endif
192 
193  /* aset is the owning aset if allocated, or the freelist link if free */
194  void *aset;
195  /* there must not be any padding to reach a MAXALIGN boundary here! */
197 
198 /*
199  * Only the "aset" field should be accessed outside this module.
200  * We keep the rest of an allocated chunk's header marked NOACCESS when using
201  * valgrind. But note that chunk headers that are in a freelist are kept
202  * accessible, for simplicity.
203  */
#define ALLOCCHUNK_PRIVATE_LEN	offsetof(AllocChunkData, aset)

/*
 * AllocPointerIsValid
 *		True iff pointer is valid allocation pointer.
 */
#define AllocPointerIsValid(pointer) PointerIsValid(pointer)

/*
 * AllocSetIsValid
 *		True iff set is valid allocation set.
 */
#define AllocSetIsValid(set) PointerIsValid(set)

#define AllocPointerGetChunk(ptr)	\
					((AllocChunk)(((char *)(ptr)) - ALLOC_CHUNKHDRSZ))
#define AllocChunkGetPointer(chk)	\
					((AllocPointer)(((char *)(chk)) + ALLOC_CHUNKHDRSZ))
222 
223 /*
224  * Rather than repeatedly creating and deleting memory contexts, we keep some
225  * freed contexts in freelists so that we can hand them out again with little
226  * work. Before putting a context in a freelist, we reset it so that it has
227  * only its initial malloc chunk and no others. To be a candidate for a
228  * freelist, a context must have the same minContextSize/initBlockSize as
229  * other contexts in the list; but its maxBlockSize is irrelevant since that
230  * doesn't affect the size of the initial chunk.
231  *
232  * We currently provide one freelist for ALLOCSET_DEFAULT_SIZES contexts
233  * and one for ALLOCSET_SMALL_SIZES contexts; the latter works for
234  * ALLOCSET_START_SMALL_SIZES too, since only the maxBlockSize differs.
235  *
236  * Ordinarily, we re-use freelist contexts in last-in-first-out order, in
237  * hopes of improving locality of reference. But if there get to be too
238  * many contexts in the list, we'd prefer to drop the most-recently-created
239  * contexts in hopes of keeping the process memory map compact.
240  * We approximate that by simply deleting all existing entries when the list
241  * overflows, on the assumption that queries that allocate a lot of contexts
242  * will probably free them in more or less reverse order of allocation.
243  *
244  * Contexts in a freelist are chained via their nextchild pointers.
245  */
246 #define MAX_FREE_CONTEXTS 100 /* arbitrary limit on freelist length */
247 
248 typedef struct AllocSetFreeList
249 {
250  int num_free; /* current list length */
251  AllocSetContext *first_free; /* list header */
253 
254 /* context_freelists[0] is for default params, [1] for small params */
256 {
257  {
258  0, NULL
259  },
260  {
261  0, NULL
262  }
263 };
264 
265 /*
266  * These functions implement the MemoryContext API for AllocSet contexts.
267  */
268 static void *AllocSetAlloc(MemoryContext context, Size size);
269 static void AllocSetFree(MemoryContext context, void *pointer);
270 static void *AllocSetRealloc(MemoryContext context, void *pointer, Size size);
271 static void AllocSetReset(MemoryContext context);
272 static void AllocSetDelete(MemoryContext context);
273 static Size AllocSetGetChunkSpace(MemoryContext context, void *pointer);
274 static bool AllocSetIsEmpty(MemoryContext context);
275 static void AllocSetStats(MemoryContext context,
276  MemoryStatsPrintFunc printfunc, void *passthru,
277  MemoryContextCounters *totals);
278 
279 #ifdef MEMORY_CONTEXT_CHECKING
280 static void AllocSetCheck(MemoryContext context);
281 #endif
282 
283 /*
284  * This is the virtual function table for AllocSet contexts.
285  */
288  AllocSetFree,
295 #ifdef MEMORY_CONTEXT_CHECKING
296  ,AllocSetCheck
297 #endif
298 };
299 
300 /*
301  * Table for AllocSetFreeIndex
302  */
#define LT16(n) n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n

static const unsigned char LogTable256[256] =
{
	0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
	LT16(5), LT16(6), LT16(6), LT16(7), LT16(7), LT16(7), LT16(7),
	LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8)
};
311 
312 /* ----------
313  * Debug macros
314  * ----------
315  */
#ifdef HAVE_ALLOCINFO
#define AllocFreeInfo(_cxt, _chunk) \
			fprintf(stderr, "AllocFree: %s: %p, %zu\n", \
				(_cxt)->header.name, (_chunk), (_chunk)->size)
#define AllocAllocInfo(_cxt, _chunk) \
			fprintf(stderr, "AllocAlloc: %s: %p, %zu\n", \
				(_cxt)->header.name, (_chunk), (_chunk)->size)
#else
#define AllocFreeInfo(_cxt, _chunk)
#define AllocAllocInfo(_cxt, _chunk)
#endif
327 
328 /* ----------
329  * AllocSetFreeIndex -
330  *
331  * Depending on the size of an allocation compute which freechunk
332  * list of the alloc set it belongs to. Caller must have verified
333  * that size <= ALLOC_CHUNK_LIMIT.
334  * ----------
335  */
336 static inline int
338 {
339  int idx;
340  unsigned int t,
341  tsize;
342 
343  if (size > (1 << ALLOC_MINBITS))
344  {
345  tsize = (size - 1) >> ALLOC_MINBITS;
346 
347  /*
348  * At this point we need to obtain log2(tsize)+1, ie, the number of
349  * not-all-zero bits at the right. We used to do this with a
350  * shift-and-count loop, but this function is enough of a hotspot to
351  * justify micro-optimization effort. The best approach seems to be
352  * to use a lookup table. Note that this code assumes that
353  * ALLOCSET_NUM_FREELISTS <= 17, since we only cope with two bytes of
354  * the tsize value.
355  */
356  t = tsize >> 8;
357  idx = t ? LogTable256[t] + 8 : LogTable256[tsize];
358 
360  }
361  else
362  idx = 0;
363 
364  return idx;
365 }
366 
367 
368 /*
369  * Public routines
370  */
371 
372 
373 /*
374  * AllocSetContextCreateInternal
375  * Create a new AllocSet context.
376  *
377  * parent: parent context, or NULL if top-level context
378  * name: name of context (must be statically allocated)
379  * minContextSize: minimum context size
380  * initBlockSize: initial allocation block size
381  * maxBlockSize: maximum allocation block size
382  *
383  * Most callers should abstract the context size parameters using a macro
384  * such as ALLOCSET_DEFAULT_SIZES.
385  *
386  * Note: don't call this directly; go through the wrapper macro
387  * AllocSetContextCreate.
388  */
391  const char *name,
392  Size minContextSize,
395 {
396  int freeListIndex;
397  Size firstBlockSize;
398  AllocSet set;
399  AllocBlock block;
400 
401  /* Assert we padded AllocChunkData properly */
403  "sizeof(AllocChunkData) is not maxaligned");
406  "padding calculation in AllocChunkData is wrong");
407 
408  /*
409  * First, validate allocation parameters. Once these were regular runtime
410  * test and elog's, but in practice Asserts seem sufficient because nobody
411  * varies their parameters at runtime. We somewhat arbitrarily enforce a
412  * minimum 1K block size.
413  */
414  Assert(initBlockSize == MAXALIGN(initBlockSize) &&
415  initBlockSize >= 1024);
416  Assert(maxBlockSize == MAXALIGN(maxBlockSize) &&
417  maxBlockSize >= initBlockSize &&
418  AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
419  Assert(minContextSize == 0 ||
420  (minContextSize == MAXALIGN(minContextSize) &&
421  minContextSize >= 1024 &&
422  minContextSize <= maxBlockSize));
423 
424  /*
425  * Check whether the parameters match either available freelist. We do
426  * not need to demand a match of maxBlockSize.
427  */
428  if (minContextSize == ALLOCSET_DEFAULT_MINSIZE &&
429  initBlockSize == ALLOCSET_DEFAULT_INITSIZE)
430  freeListIndex = 0;
431  else if (minContextSize == ALLOCSET_SMALL_MINSIZE &&
432  initBlockSize == ALLOCSET_SMALL_INITSIZE)
433  freeListIndex = 1;
434  else
435  freeListIndex = -1;
436 
437  /*
438  * If a suitable freelist entry exists, just recycle that context.
439  */
440  if (freeListIndex >= 0)
441  {
442  AllocSetFreeList *freelist = &context_freelists[freeListIndex];
443 
444  if (freelist->first_free != NULL)
445  {
446  /* Remove entry from freelist */
447  set = freelist->first_free;
448  freelist->first_free = (AllocSet) set->header.nextchild;
449  freelist->num_free--;
450 
451  /* Update its maxBlockSize; everything else should be OK */
452  set->maxBlockSize = maxBlockSize;
453 
454  /* Reinitialize its header, installing correct name and parent */
458  parent,
459  name);
460 
461  ((MemoryContext) set)->mem_allocated =
462  set->keeper->endptr - ((char *) set);
463 
464  return (MemoryContext) set;
465  }
466  }
467 
468  /* Determine size of initial block */
469  firstBlockSize = MAXALIGN(sizeof(AllocSetContext)) +
471  if (minContextSize != 0)
472  firstBlockSize = Max(firstBlockSize, minContextSize);
473  else
474  firstBlockSize = Max(firstBlockSize, initBlockSize);
475 
476  /*
477  * Allocate the initial block. Unlike other aset.c blocks, it starts with
478  * the context header and its block header follows that.
479  */
480  set = (AllocSet) malloc(firstBlockSize);
481  if (set == NULL)
482  {
483  if (TopMemoryContext)
485  ereport(ERROR,
486  (errcode(ERRCODE_OUT_OF_MEMORY),
487  errmsg("out of memory"),
488  errdetail("Failed while creating memory context \"%s\".",
489  name)));
490  }
491 
492  /*
493  * Avoid writing code that can fail between here and MemoryContextCreate;
494  * we'd leak the header/initial block if we ereport in this stretch.
495  */
496 
497  /* Fill in the initial block's block header */
498  block = (AllocBlock) (((char *) set) + MAXALIGN(sizeof(AllocSetContext)));
499  block->aset = set;
500  block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
501  block->endptr = ((char *) set) + firstBlockSize;
502  block->prev = NULL;
503  block->next = NULL;
504 
505  /* Mark unallocated space NOACCESS; leave the block header alone. */
506  VALGRIND_MAKE_MEM_NOACCESS(block->freeptr, block->endptr - block->freeptr);
507 
508  /* Remember block as part of block list */
509  set->blocks = block;
510  /* Mark block as not to be released at reset time */
511  set->keeper = block;
512 
513  /* Finish filling in aset-specific parts of the context header */
514  MemSetAligned(set->freelist, 0, sizeof(set->freelist));
515 
516  set->initBlockSize = initBlockSize;
517  set->maxBlockSize = maxBlockSize;
518  set->nextBlockSize = initBlockSize;
519  set->freeListIndex = freeListIndex;
520 
521  /*
522  * Compute the allocation chunk size limit for this context. It can't be
523  * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
524  * If maxBlockSize is small then requests exceeding the maxBlockSize, or
525  * even a significant fraction of it, should be treated as large chunks
526  * too. For the typical case of maxBlockSize a power of 2, the chunk size
527  * limit will be at most 1/8th maxBlockSize, so that given a stream of
528  * requests that are all the maximum chunk size we will waste at most
529  * 1/8th of the allocated space.
530  *
531  * We have to have allocChunkLimit a power of two, because the requested
532  * and actually-allocated sizes of any chunk must be on the same side of
533  * the limit, else we get confused about whether the chunk is "big".
534  *
535  * Also, allocChunkLimit must not exceed ALLOCSET_SEPARATE_THRESHOLD.
536  */
538  "ALLOC_CHUNK_LIMIT != ALLOCSET_SEPARATE_THRESHOLD");
539 
540  set->allocChunkLimit = ALLOC_CHUNK_LIMIT;
541  while ((Size) (set->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
542  (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
543  set->allocChunkLimit >>= 1;
544 
545  /* Finally, do the type-independent part of context creation */
548  &AllocSetMethods,
549  parent,
550  name);
551 
552  ((MemoryContext) set)->mem_allocated = firstBlockSize;
553 
554  return (MemoryContext) set;
555 }
556 
557 /*
558  * AllocSetReset
559  * Frees all memory which is allocated in the given set.
560  *
561  * Actually, this routine has some discretion about what to do.
562  * It should mark all allocated chunks freed, but it need not necessarily
563  * give back all the resources the set owns. Our actual implementation is
564  * that we give back all but the "keeper" block (which we must keep, since
565  * it shares a malloc chunk with the context header). In this way, we don't
566  * thrash malloc() when a context is repeatedly reset after small allocations,
567  * which is typical behavior for per-tuple contexts.
568  */
569 static void
571 {
572  AllocSet set = (AllocSet) context;
573  AllocBlock block;
574  Size keepersize PG_USED_FOR_ASSERTS_ONLY
575  = set->keeper->endptr - ((char *) set);
576 
578 
579 #ifdef MEMORY_CONTEXT_CHECKING
580  /* Check for corruption and leaks before freeing */
581  AllocSetCheck(context);
582 #endif
583 
584  /* Clear chunk freelists */
585  MemSetAligned(set->freelist, 0, sizeof(set->freelist));
586 
587  block = set->blocks;
588 
589  /* New blocks list will be just the keeper block */
590  set->blocks = set->keeper;
591 
592  while (block != NULL)
593  {
594  AllocBlock next = block->next;
595 
596  if (block == set->keeper)
597  {
598  /* Reset the block, but don't return it to malloc */
599  char *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;
600 
601 #ifdef CLOBBER_FREED_MEMORY
602  wipe_mem(datastart, block->freeptr - datastart);
603 #else
604  /* wipe_mem() would have done this */
605  VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
606 #endif
607  block->freeptr = datastart;
608  block->prev = NULL;
609  block->next = NULL;
610  }
611  else
612  {
613  /* Normal case, release the block */
614  context->mem_allocated -= block->endptr - ((char*) block);
615 
616 #ifdef CLOBBER_FREED_MEMORY
617  wipe_mem(block, block->freeptr - ((char *) block));
618 #endif
619  free(block);
620  }
621  block = next;
622  }
623 
624  Assert(context->mem_allocated == keepersize);
625 
626  /* Reset block size allocation sequence, too */
627  set->nextBlockSize = set->initBlockSize;
628 }
629 
630 /*
631  * AllocSetDelete
632  * Frees all memory which is allocated in the given set,
633  * in preparation for deletion of the set.
634  *
635  * Unlike AllocSetReset, this *must* free all resources of the set.
636  */
637 static void
639 {
640  AllocSet set = (AllocSet) context;
641  AllocBlock block = set->blocks;
642  Size keepersize PG_USED_FOR_ASSERTS_ONLY
643  = set->keeper->endptr - ((char *) set);
644 
646 
647 #ifdef MEMORY_CONTEXT_CHECKING
648  /* Check for corruption and leaks before freeing */
649  AllocSetCheck(context);
650 #endif
651 
652  /*
653  * If the context is a candidate for a freelist, put it into that freelist
654  * instead of destroying it.
655  */
656  if (set->freeListIndex >= 0)
657  {
658  AllocSetFreeList *freelist = &context_freelists[set->freeListIndex];
659 
660  /*
661  * Reset the context, if it needs it, so that we aren't hanging on to
662  * more than the initial malloc chunk.
663  */
664  if (!context->isReset)
665  MemoryContextResetOnly(context);
666 
667  /*
668  * If the freelist is full, just discard what's already in it. See
669  * comments with context_freelists[].
670  */
671  if (freelist->num_free >= MAX_FREE_CONTEXTS)
672  {
673  while (freelist->first_free != NULL)
674  {
675  AllocSetContext *oldset = freelist->first_free;
676 
677  freelist->first_free = (AllocSetContext *) oldset->header.nextchild;
678  freelist->num_free--;
679 
680  /* All that remains is to free the header/initial block */
681  free(oldset);
682  }
683  Assert(freelist->num_free == 0);
684  }
685 
686  /* Now add the just-deleted context to the freelist. */
687  set->header.nextchild = (MemoryContext) freelist->first_free;
688  freelist->first_free = set;
689  freelist->num_free++;
690 
691  return;
692  }
693 
694  /* Free all blocks, except the keeper which is part of context header */
695  while (block != NULL)
696  {
697  AllocBlock next = block->next;
698 
699  if (block != set->keeper)
700  context->mem_allocated -= block->endptr - ((char *) block);
701 
702 #ifdef CLOBBER_FREED_MEMORY
703  wipe_mem(block, block->freeptr - ((char *) block));
704 #endif
705 
706  if (block != set->keeper)
707  free(block);
708 
709  block = next;
710  }
711 
712  Assert(context->mem_allocated == keepersize);
713 
714  /* Finally, free the context header, including the keeper block */
715  free(set);
716 }
717 
718 /*
719  * AllocSetAlloc
720  * Returns pointer to allocated memory of given size or NULL if
721  * request could not be completed; memory is added to the set.
722  *
723  * No request may exceed:
724  * MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
725  * All callers use a much-lower limit.
726  *
727  * Note: when using valgrind, it doesn't matter how the returned allocation
728  * is marked, as mcxt.c will set it to UNDEFINED. In some paths we will
729  * return space that is marked NOACCESS - AllocSetRealloc has to beware!
730  */
731 static void *
733 {
734  AllocSet set = (AllocSet) context;
735  AllocBlock block;
736  AllocChunk chunk;
737  int fidx;
738  Size chunk_size;
739  Size blksize;
740 
742 
743  /*
744  * If requested size exceeds maximum for chunks, allocate an entire block
745  * for this request.
746  */
747  if (size > set->allocChunkLimit)
748  {
749  chunk_size = MAXALIGN(size);
750  blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
751  block = (AllocBlock) malloc(blksize);
752  if (block == NULL)
753  return NULL;
754 
755  context->mem_allocated += blksize;
756 
757  block->aset = set;
758  block->freeptr = block->endptr = ((char *) block) + blksize;
759 
760  chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
761  chunk->aset = set;
762  chunk->size = chunk_size;
763 #ifdef MEMORY_CONTEXT_CHECKING
764  chunk->requested_size = size;
765  /* set mark to catch clobber of "unused" space */
766  if (size < chunk_size)
767  set_sentinel(AllocChunkGetPointer(chunk), size);
768 #endif
769 #ifdef RANDOMIZE_ALLOCATED_MEMORY
770  /* fill the allocated space with junk */
771  randomize_mem((char *) AllocChunkGetPointer(chunk), size);
772 #endif
773 
774  /*
775  * Stick the new block underneath the active allocation block, if any,
776  * so that we don't lose the use of the space remaining therein.
777  */
778  if (set->blocks != NULL)
779  {
780  block->prev = set->blocks;
781  block->next = set->blocks->next;
782  if (block->next)
783  block->next->prev = block;
784  set->blocks->next = block;
785  }
786  else
787  {
788  block->prev = NULL;
789  block->next = NULL;
790  set->blocks = block;
791  }
792 
793  AllocAllocInfo(set, chunk);
794 
795  /* Ensure any padding bytes are marked NOACCESS. */
796  VALGRIND_MAKE_MEM_NOACCESS((char *) AllocChunkGetPointer(chunk) + size,
797  chunk_size - size);
798 
799  /* Disallow external access to private part of chunk header. */
801 
802  return AllocChunkGetPointer(chunk);
803  }
804 
805  /*
806  * Request is small enough to be treated as a chunk. Look in the
807  * corresponding free list to see if there is a free chunk we could reuse.
808  * If one is found, remove it from the free list, make it again a member
809  * of the alloc set and return its data address.
810  */
811  fidx = AllocSetFreeIndex(size);
812  chunk = set->freelist[fidx];
813  if (chunk != NULL)
814  {
815  Assert(chunk->size >= size);
816 
817  set->freelist[fidx] = (AllocChunk) chunk->aset;
818 
819  chunk->aset = (void *) set;
820 
821 #ifdef MEMORY_CONTEXT_CHECKING
822  chunk->requested_size = size;
823  /* set mark to catch clobber of "unused" space */
824  if (size < chunk->size)
825  set_sentinel(AllocChunkGetPointer(chunk), size);
826 #endif
827 #ifdef RANDOMIZE_ALLOCATED_MEMORY
828  /* fill the allocated space with junk */
829  randomize_mem((char *) AllocChunkGetPointer(chunk), size);
830 #endif
831 
832  AllocAllocInfo(set, chunk);
833 
834  /* Ensure any padding bytes are marked NOACCESS. */
835  VALGRIND_MAKE_MEM_NOACCESS((char *) AllocChunkGetPointer(chunk) + size,
836  chunk->size - size);
837 
838  /* Disallow external access to private part of chunk header. */
840 
841  return AllocChunkGetPointer(chunk);
842  }
843 
844  /*
845  * Choose the actual chunk size to allocate.
846  */
847  chunk_size = (1 << ALLOC_MINBITS) << fidx;
848  Assert(chunk_size >= size);
849 
850  /*
851  * If there is enough room in the active allocation block, we will put the
852  * chunk into that block. Else must start a new one.
853  */
854  if ((block = set->blocks) != NULL)
855  {
856  Size availspace = block->endptr - block->freeptr;
857 
858  if (availspace < (chunk_size + ALLOC_CHUNKHDRSZ))
859  {
860  /*
861  * The existing active (top) block does not have enough room for
862  * the requested allocation, but it might still have a useful
863  * amount of space in it. Once we push it down in the block list,
864  * we'll never try to allocate more space from it. So, before we
865  * do that, carve up its free space into chunks that we can put on
866  * the set's freelists.
867  *
868  * Because we can only get here when there's less than
869  * ALLOC_CHUNK_LIMIT left in the block, this loop cannot iterate
870  * more than ALLOCSET_NUM_FREELISTS-1 times.
871  */
872  while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
873  {
874  Size availchunk = availspace - ALLOC_CHUNKHDRSZ;
875  int a_fidx = AllocSetFreeIndex(availchunk);
876 
877  /*
878  * In most cases, we'll get back the index of the next larger
879  * freelist than the one we need to put this chunk on. The
880  * exception is when availchunk is exactly a power of 2.
881  */
882  if (availchunk != ((Size) 1 << (a_fidx + ALLOC_MINBITS)))
883  {
884  a_fidx--;
885  Assert(a_fidx >= 0);
886  availchunk = ((Size) 1 << (a_fidx + ALLOC_MINBITS));
887  }
888 
889  chunk = (AllocChunk) (block->freeptr);
890 
891  /* Prepare to initialize the chunk header. */
892  VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);
893 
894  block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
895  availspace -= (availchunk + ALLOC_CHUNKHDRSZ);
896 
897  chunk->size = availchunk;
898 #ifdef MEMORY_CONTEXT_CHECKING
899  chunk->requested_size = 0; /* mark it free */
900 #endif
901  chunk->aset = (void *) set->freelist[a_fidx];
902  set->freelist[a_fidx] = chunk;
903  }
904 
905  /* Mark that we need to create a new block */
906  block = NULL;
907  }
908  }
909 
910  /*
911  * Time to create a new regular (multi-chunk) block?
912  */
913  if (block == NULL)
914  {
915  Size required_size;
916 
917  /*
918  * The first such block has size initBlockSize, and we double the
919  * space in each succeeding block, but not more than maxBlockSize.
920  */
921  blksize = set->nextBlockSize;
922  set->nextBlockSize <<= 1;
923  if (set->nextBlockSize > set->maxBlockSize)
924  set->nextBlockSize = set->maxBlockSize;
925 
926  /*
927  * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
928  * space... but try to keep it a power of 2.
929  */
930  required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
931  while (blksize < required_size)
932  blksize <<= 1;
933 
934  /* Try to allocate it */
935  block = (AllocBlock) malloc(blksize);
936 
937  /*
938  * We could be asking for pretty big blocks here, so cope if malloc
939  * fails. But give up if there's less than 1 MB or so available...
940  */
941  while (block == NULL && blksize > 1024 * 1024)
942  {
943  blksize >>= 1;
944  if (blksize < required_size)
945  break;
946  block = (AllocBlock) malloc(blksize);
947  }
948 
949  if (block == NULL)
950  return NULL;
951 
952  context->mem_allocated += blksize;
953 
954  block->aset = set;
955  block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
956  block->endptr = ((char *) block) + blksize;
957 
958  /* Mark unallocated space NOACCESS. */
960  blksize - ALLOC_BLOCKHDRSZ);
961 
962  block->prev = NULL;
963  block->next = set->blocks;
964  if (block->next)
965  block->next->prev = block;
966  set->blocks = block;
967  }
968 
969  /*
970  * OK, do the allocation
971  */
972  chunk = (AllocChunk) (block->freeptr);
973 
974  /* Prepare to initialize the chunk header. */
976 
977  block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
978  Assert(block->freeptr <= block->endptr);
979 
980  chunk->aset = (void *) set;
981  chunk->size = chunk_size;
982 #ifdef MEMORY_CONTEXT_CHECKING
983  chunk->requested_size = size;
984  /* set mark to catch clobber of "unused" space */
985  if (size < chunk->size)
986  set_sentinel(AllocChunkGetPointer(chunk), size);
987 #endif
988 #ifdef RANDOMIZE_ALLOCATED_MEMORY
989  /* fill the allocated space with junk */
990  randomize_mem((char *) AllocChunkGetPointer(chunk), size);
991 #endif
992 
993  AllocAllocInfo(set, chunk);
994 
995  /* Ensure any padding bytes are marked NOACCESS. */
996  VALGRIND_MAKE_MEM_NOACCESS((char *) AllocChunkGetPointer(chunk) + size,
997  chunk_size - size);
998 
999  /* Disallow external access to private part of chunk header. */
1001 
1002  return AllocChunkGetPointer(chunk);
1003 }
1004 
1005 /*
1006  * AllocSetFree
1007  * Frees allocated memory; memory is removed from the set.
1008  */
1009 static void
1010 AllocSetFree(MemoryContext context, void *pointer)
1011 {
1012  AllocSet set = (AllocSet) context;
1013  AllocChunk chunk = AllocPointerGetChunk(pointer);
1014 
1015  /* Allow access to private part of chunk header. */
1017 
1018  AllocFreeInfo(set, chunk);
1019 
1020 #ifdef MEMORY_CONTEXT_CHECKING
1021  /* Test for someone scribbling on unused space in chunk */
1022  if (chunk->requested_size < chunk->size)
1023  if (!sentinel_ok(pointer, chunk->requested_size))
1024  elog(WARNING, "detected write past chunk end in %s %p",
1025  set->header.name, chunk);
1026 #endif
1027 
1028  if (chunk->size > set->allocChunkLimit)
1029  {
1030  /*
1031  * Big chunks are certain to have been allocated as single-chunk
1032  * blocks. Just unlink that block and return it to malloc().
1033  */
1034  AllocBlock block = (AllocBlock) (((char *) chunk) - ALLOC_BLOCKHDRSZ);
1035 
1036  /*
1037  * Try to verify that we have a sane block pointer: it should
1038  * reference the correct aset, and freeptr and endptr should point
1039  * just past the chunk.
1040  */
1041  if (block->aset != set ||
1042  block->freeptr != block->endptr ||
1043  block->freeptr != ((char *) block) +
1044  (chunk->size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
1045  elog(ERROR, "could not find block containing chunk %p", chunk);
1046 
1047  /* OK, remove block from aset's list and free it */
1048  if (block->prev)
1049  block->prev->next = block->next;
1050  else
1051  set->blocks = block->next;
1052  if (block->next)
1053  block->next->prev = block->prev;
1054 
1055  context->mem_allocated -= block->endptr - ((char*) block);
1056 
1057 #ifdef CLOBBER_FREED_MEMORY
1058  wipe_mem(block, block->freeptr - ((char *) block));
1059 #endif
1060  free(block);
1061  }
1062  else
1063  {
1064  /* Normal case, put the chunk into appropriate freelist */
1065  int fidx = AllocSetFreeIndex(chunk->size);
1066 
1067  chunk->aset = (void *) set->freelist[fidx];
1068 
1069 #ifdef CLOBBER_FREED_MEMORY
1070  wipe_mem(pointer, chunk->size);
1071 #endif
1072 
1073 #ifdef MEMORY_CONTEXT_CHECKING
1074  /* Reset requested_size to 0 in chunks that are on freelist */
1075  chunk->requested_size = 0;
1076 #endif
1077  set->freelist[fidx] = chunk;
1078  }
1079 }
1080 
/*
 * AllocSetRealloc
 *		Returns new pointer to allocated memory of given size or NULL if
 *		request could not be completed; this memory is added to the set.
 *		Memory associated with given pointer is copied into the new memory,
 *		and the old memory is freed.
 *
 * Three cases, chosen by the old chunk size:
 *	 1. oldsize > allocChunkLimit: the chunk is a single-chunk block, so we
 *		can realloc() the whole block in place.
 *	 2. oldsize >= size: the existing power-of-2 chunk already fits; no move.
 *	 3. otherwise: allocate a new chunk, memcpy the data, free the old one.
 *
 * Without MEMORY_CONTEXT_CHECKING, we don't know the old request size.  This
 * makes our Valgrind client requests less-precise, hazarding false negatives.
 * (In principle, we could use VALGRIND_GET_VBITS() to rediscover the old
 * request size.)
 */
static void *
AllocSetRealloc(MemoryContext context, void *pointer, Size size)
{
	AllocSet	set = (AllocSet) context;
	AllocChunk	chunk = AllocPointerGetChunk(pointer);
	Size		oldsize;

	/* Allow access to private part of chunk header. */
	/*
	 * NOTE(review): a VALGRIND_MAKE_MEM_DEFINED(chunk, ...) client request
	 * appears to have been lost in extraction here — restore from upstream.
	 */

	oldsize = chunk->size;

#ifdef MEMORY_CONTEXT_CHECKING
	/* Test for someone scribbling on unused space in chunk */
	if (chunk->requested_size < oldsize)
		if (!sentinel_ok(pointer, chunk->requested_size))
			elog(WARNING, "detected write past chunk end in %s %p",
				 set->header.name, chunk);
#endif

	if (oldsize > set->allocChunkLimit)
	{
		/*
		 * The chunk must have been allocated as a single-chunk block.  Use
		 * realloc() to make the containing block bigger, or smaller, with
		 * minimum space wastage.
		 */
		AllocBlock	block = (AllocBlock) (((char *) chunk) - ALLOC_BLOCKHDRSZ);
		Size		chksize;
		Size		blksize;
		Size		oldblksize;

		/*
		 * Try to verify that we have a sane block pointer: it should
		 * reference the correct aset, and freeptr and endptr should point
		 * just past the chunk.
		 */
		if (block->aset != set ||
			block->freeptr != block->endptr ||
			block->freeptr != ((char *) block) +
			(oldsize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
			elog(ERROR, "could not find block containing chunk %p", chunk);

		/*
		 * Even if the new request is less than set->allocChunkLimit, we stick
		 * with the single-chunk block approach.  Therefore we need
		 * chunk->size to be bigger than set->allocChunkLimit, so we don't get
		 * confused about the chunk's status in future calls.
		 */
		chksize = Max(size, set->allocChunkLimit + 1);
		chksize = MAXALIGN(chksize);

		/* Do the realloc */
		blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
		oldblksize = block->endptr - ((char *)block);

		block = (AllocBlock) realloc(block, blksize);
		if (block == NULL)
		{
			/* Disallow external access to private part of chunk header. */
			/*
			 * NOTE(review): a VALGRIND_MAKE_MEM_NOACCESS(chunk, ...) request
			 * appears to have been lost in extraction here.
			 */
			return NULL;
		}

		/* updated separately, not to underflow when (oldblksize > blksize) */
		context->mem_allocated -= oldblksize;
		context->mem_allocated += blksize;

		block->freeptr = block->endptr = ((char *) block) + blksize;

		/* Update pointers since block has likely been moved */
		chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
		pointer = AllocChunkGetPointer(chunk);
		if (block->prev)
			block->prev->next = block;
		else
			set->blocks = block;
		if (block->next)
			block->next->prev = block;
		chunk->size = chksize;

#ifdef MEMORY_CONTEXT_CHECKING
#ifdef RANDOMIZE_ALLOCATED_MEMORY
		/* We can only fill the extra space if we know the prior request */
		if (size > chunk->requested_size)
			randomize_mem((char *) pointer + chunk->requested_size,
						  size - chunk->requested_size);
#endif

		/*
		 * realloc() (or randomize_mem()) will have left any newly-allocated
		 * part UNDEFINED, but we may need to adjust trailing bytes from the
		 * old allocation.
		 */
#ifdef USE_VALGRIND
		if (oldsize > chunk->requested_size)
			VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
										oldsize - chunk->requested_size);
#endif

		chunk->requested_size = size;

		/* set mark to catch clobber of "unused" space */
		if (size < chunk->size)
			set_sentinel(pointer, size);
#else							/* !MEMORY_CONTEXT_CHECKING */

		/*
		 * We don't know how much of the old chunk size was the actual
		 * allocation; it could have been as small as one byte.  We have to be
		 * conservative and just mark the entire old portion DEFINED.
		 */
		VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
#endif

		/* Ensure any padding bytes are marked NOACCESS. */
		VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);

		/* Disallow external access to private part of chunk header. */
		/* NOTE(review): VALGRIND_MAKE_MEM_NOACCESS lost in extraction here. */

		return pointer;
	}

	/*
	 * Chunk sizes are aligned to power of 2 in AllocSetAlloc().  Maybe the
	 * allocated area already is >= the new size.  (In particular, we will
	 * fall out here if the requested size is a decrease.)
	 */
	else if (oldsize >= size)
	{
#ifdef MEMORY_CONTEXT_CHECKING
		Size		oldrequest = chunk->requested_size;

#ifdef RANDOMIZE_ALLOCATED_MEMORY
		/* We can only fill the extra space if we know the prior request */
		if (size > oldrequest)
			randomize_mem((char *) pointer + oldrequest,
						  size - oldrequest);
#endif

		chunk->requested_size = size;

		/*
		 * If this is an increase, mark any newly-available part UNDEFINED.
		 * Otherwise, mark the obsolete part NOACCESS.
		 */
		if (size > oldrequest)
			VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
										size - oldrequest);
		else
			VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
									   oldsize - size);

		/* set mark to catch clobber of "unused" space */
		if (size < oldsize)
			set_sentinel(pointer, size);
#else							/* !MEMORY_CONTEXT_CHECKING */

		/*
		 * We don't have the information to determine whether we're growing
		 * the old request or shrinking it, so we conservatively mark the
		 * entire new allocation DEFINED.
		 */
		VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
		VALGRIND_MAKE_MEM_DEFINED(pointer, size);
#endif

		/* Disallow external access to private part of chunk header. */
		/* NOTE(review): VALGRIND_MAKE_MEM_NOACCESS lost in extraction here. */

		return pointer;
	}
	else
	{
		/*
		 * Enlarge-a-small-chunk case.  We just do this by brute force, ie,
		 * allocate a new chunk and copy the data.  Since we know the existing
		 * data isn't huge, this won't involve any great memcpy expense, so
		 * it's not worth being smarter.  (At one time we tried to avoid
		 * memcpy when it was possible to enlarge the chunk in-place, but that
		 * turns out to misbehave unpleasantly for repeated cycles of
		 * palloc/repalloc/pfree: the eventually freed chunks go into the
		 * wrong freelist for the next initial palloc request, and so we leak
		 * memory indefinitely.  See pgsql-hackers archives for 2007-08-11.)
		 */
		AllocPointer newPointer;

		/* allocate new chunk */
		newPointer = AllocSetAlloc((MemoryContext) set, size);

		/* leave immediately if request was not completed */
		if (newPointer == NULL)
		{
			/* Disallow external access to private part of chunk header. */
			/* NOTE(review): VALGRIND_MAKE_MEM_NOACCESS lost in extraction. */
			return NULL;
		}

		/*
		 * AllocSetAlloc() may have returned a region that is still NOACCESS.
		 * Change it to UNDEFINED for the moment; memcpy() will then transfer
		 * definedness from the old allocation to the new.  If we know the old
		 * allocation, copy just that much.  Otherwise, make the entire old
		 * chunk defined to avoid errors as we copy the currently-NOACCESS
		 * trailing bytes.
		 */
		VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
#ifdef MEMORY_CONTEXT_CHECKING
		oldsize = chunk->requested_size;
#else
		VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
#endif

		/* transfer existing data (certain to fit) */
		memcpy(newPointer, pointer, oldsize);

		/* free old chunk */
		AllocSetFree((MemoryContext) set, pointer);

		return newPointer;
	}
}
1316 
1317 /*
1318  * AllocSetGetChunkSpace
1319  * Given a currently-allocated chunk, determine the total space
1320  * it occupies (including all memory-allocation overhead).
1321  */
1322 static Size
1323 AllocSetGetChunkSpace(MemoryContext context, void *pointer)
1324 {
1325  AllocChunk chunk = AllocPointerGetChunk(pointer);
1326  Size result;
1327 
1329  result = chunk->size + ALLOC_CHUNKHDRSZ;
1331  return result;
1332 }
1333 
1334 /*
1335  * AllocSetIsEmpty
1336  * Is an allocset empty of any allocated space?
1337  */
1338 static bool
1340 {
1341  /*
1342  * For now, we say "empty" only if the context is new or just reset. We
1343  * could examine the freelists to determine if all space has been freed,
1344  * but it's not really worth the trouble for present uses of this
1345  * functionality.
1346  */
1347  if (context->isReset)
1348  return true;
1349  return false;
1350 }
1351 
1352 /*
1353  * AllocSetStats
1354  * Compute stats about memory consumption of an allocset.
1355  *
1356  * printfunc: if not NULL, pass a human-readable stats string to this.
1357  * passthru: pass this pointer through to printfunc.
1358  * totals: if not NULL, add stats about this context into *totals.
1359  */
1360 static void
1362  MemoryStatsPrintFunc printfunc, void *passthru,
1363  MemoryContextCounters *totals)
1364 {
1365  AllocSet set = (AllocSet) context;
1366  Size nblocks = 0;
1367  Size freechunks = 0;
1368  Size totalspace;
1369  Size freespace = 0;
1370  AllocBlock block;
1371  int fidx;
1372 
1373  /* Include context header in totalspace */
1374  totalspace = MAXALIGN(sizeof(AllocSetContext));
1375 
1376  for (block = set->blocks; block != NULL; block = block->next)
1377  {
1378  nblocks++;
1379  totalspace += block->endptr - ((char *) block);
1380  freespace += block->endptr - block->freeptr;
1381  }
1382  for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
1383  {
1384  AllocChunk chunk;
1385 
1386  for (chunk = set->freelist[fidx]; chunk != NULL;
1387  chunk = (AllocChunk) chunk->aset)
1388  {
1389  freechunks++;
1390  freespace += chunk->size + ALLOC_CHUNKHDRSZ;
1391  }
1392  }
1393 
1394  if (printfunc)
1395  {
1396  char stats_string[200];
1397 
1398  snprintf(stats_string, sizeof(stats_string),
1399  "%zu total in %zd blocks; %zu free (%zd chunks); %zu used",
1400  totalspace, nblocks, freespace, freechunks,
1401  totalspace - freespace);
1402  printfunc(context, passthru, stats_string);
1403  }
1404 
1405  if (totals)
1406  {
1407  totals->nblocks += nblocks;
1408  totals->freechunks += freechunks;
1409  totals->totalspace += totalspace;
1410  totals->freespace += freespace;
1411  }
1412 }
1413 
1414 
1415 #ifdef MEMORY_CONTEXT_CHECKING
1416 
1417 /*
1418  * AllocSetCheck
1419  * Walk through chunks and check consistency of memory.
1420  *
1421  * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
1422  * find yourself in an infinite loop when trouble occurs, because this
1423  * routine will be entered again when elog cleanup tries to release memory!
1424  */
1425 static void
1426 AllocSetCheck(MemoryContext context)
1427 {
1428  AllocSet set = (AllocSet) context;
1429  const char *name = set->header.name;
1430  AllocBlock prevblock;
1431  AllocBlock block;
1432  Size total_allocated = 0;
1433 
1434  for (prevblock = NULL, block = set->blocks;
1435  block != NULL;
1436  prevblock = block, block = block->next)
1437  {
1438  char *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
1439  long blk_used = block->freeptr - bpoz;
1440  long blk_data = 0;
1441  long nchunks = 0;
1442 
1443  if (set->keeper == block)
1444  total_allocated += block->endptr - ((char *) set);
1445  else
1446  total_allocated += block->endptr - ((char *) block);
1447 
1448  /*
1449  * Empty block - empty can be keeper-block only
1450  */
1451  if (!blk_used)
1452  {
1453  if (set->keeper != block)
1454  elog(WARNING, "problem in alloc set %s: empty block %p",
1455  name, block);
1456  }
1457 
1458  /*
1459  * Check block header fields
1460  */
1461  if (block->aset != set ||
1462  block->prev != prevblock ||
1463  block->freeptr < bpoz ||
1464  block->freeptr > block->endptr)
1465  elog(WARNING, "problem in alloc set %s: corrupt header in block %p",
1466  name, block);
1467 
1468  /*
1469  * Chunk walker
1470  */
1471  while (bpoz < block->freeptr)
1472  {
1473  AllocChunk chunk = (AllocChunk) bpoz;
1474  Size chsize,
1475  dsize;
1476 
1477  /* Allow access to private part of chunk header. */
1479 
1480  chsize = chunk->size; /* aligned chunk size */
1481  dsize = chunk->requested_size; /* real data */
1482 
1483  /*
1484  * Check chunk size
1485  */
1486  if (dsize > chsize)
1487  elog(WARNING, "problem in alloc set %s: req size > alloc size for chunk %p in block %p",
1488  name, chunk, block);
1489  if (chsize < (1 << ALLOC_MINBITS))
1490  elog(WARNING, "problem in alloc set %s: bad size %zu for chunk %p in block %p",
1491  name, chsize, chunk, block);
1492 
1493  /* single-chunk block? */
1494  if (chsize > set->allocChunkLimit &&
1495  chsize + ALLOC_CHUNKHDRSZ != blk_used)
1496  elog(WARNING, "problem in alloc set %s: bad single-chunk %p in block %p",
1497  name, chunk, block);
1498 
1499  /*
1500  * If chunk is allocated, check for correct aset pointer. (If it's
1501  * free, the aset is the freelist pointer, which we can't check as
1502  * easily...) Note this is an incomplete test, since palloc(0)
1503  * produces an allocated chunk with requested_size == 0.
1504  */
1505  if (dsize > 0 && chunk->aset != (void *) set)
1506  elog(WARNING, "problem in alloc set %s: bogus aset link in block %p, chunk %p",
1507  name, block, chunk);
1508 
1509  /*
1510  * Check for overwrite of padding space in an allocated chunk.
1511  */
1512  if (chunk->aset == (void *) set && dsize < chsize &&
1513  !sentinel_ok(chunk, ALLOC_CHUNKHDRSZ + dsize))
1514  elog(WARNING, "problem in alloc set %s: detected write past chunk end in block %p, chunk %p",
1515  name, block, chunk);
1516 
1517  /*
1518  * If chunk is allocated, disallow external access to private part
1519  * of chunk header.
1520  */
1521  if (chunk->aset == (void *) set)
1523 
1524  blk_data += chsize;
1525  nchunks++;
1526 
1527  bpoz += ALLOC_CHUNKHDRSZ + chsize;
1528  }
1529 
1530  if ((blk_data + (nchunks * ALLOC_CHUNKHDRSZ)) != blk_used)
1531  elog(WARNING, "problem in alloc set %s: found inconsistent memory block %p",
1532  name, block);
1533  }
1534 
1535  Assert(total_allocated == context->mem_allocated);
1536 }
1537 
1538 #endif /* MEMORY_CONTEXT_CHECKING */
#define MemSetAligned(start, val, len)
Definition: c.h:988
Size initBlockSize
Definition: aset.c:130
#define AllocFreeInfo(_cxt, _chunk)
Definition: aset.c:324
#define VALGRIND_MAKE_MEM_DEFINED(addr, size)
Definition: memdebug.h:26
AllocBlock blocks
Definition: aset.c:127
static int32 next
Definition: blutils.c:215
void(* MemoryStatsPrintFunc)(MemoryContext context, void *passthru, const char *stats_string)
Definition: memnodes.h:54
struct AllocBlockData AllocBlockData
static Size AllocSetGetChunkSpace(MemoryContext context, void *pointer)
Definition: aset.c:1323
MemoryContextData header
Definition: aset.c:125
void * AllocPointer
Definition: aset.c:112
#define AllocSetIsValid(set)
Definition: aset.c:216
#define VALGRIND_MAKE_MEM_UNDEFINED(addr, size)
Definition: memdebug.h:28
struct AllocSetFreeList AllocSetFreeList
#define VALGRIND_MAKE_MEM_NOACCESS(addr, size)
Definition: memdebug.h:27
static int AllocSetFreeIndex(Size size)
Definition: aset.c:337
static void AllocSetReset(MemoryContext context)
Definition: aset.c:570
int num_free
Definition: aset.c:250
#define AllocChunkGetPointer(chk)
Definition: aset.c:220
#define ALLOCCHUNK_PRIVATE_LEN
Definition: aset.c:204
#define LT16(n)
Definition: aset.c:303
int errcode(int sqlerrcode)
Definition: elog.c:570
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:264
#define ALLOC_BLOCKHDRSZ
Definition: aset.c:102
AllocBlock keeper
Definition: aset.c:134
AllocSet aset
Definition: aset.c:155
void MemoryContextResetOnly(MemoryContext context)
Definition: mcxt.c:155
char * freeptr
Definition: aset.c:158
#define ALLOCSET_DEFAULT_MINSIZE
Definition: memutils.h:189
#define ALLOCSET_SMALL_MINSIZE
Definition: memutils.h:199
AllocSetContext * first_free
Definition: aset.c:251
#define malloc(a)
Definition: header.h:50
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:842
static AllocSetFreeList context_freelists[2]
Definition: aset.c:255
#define ERROR
Definition: elog.h:43
#define ALLOC_CHUNK_LIMIT
Definition: aset.c:83
void MemoryContextStats(MemoryContext context)
Definition: mcxt.c:498
char * endptr
Definition: aset.c:159
#define ALLOCSET_NUM_FREELISTS
Definition: aset.c:82
static void * AllocSetRealloc(MemoryContext context, void *pointer, Size size)
Definition: aset.c:1094
static void AllocSetStats(MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals)
Definition: aset.c:1361
#define ALLOCSET_SEPARATE_THRESHOLD
Definition: memutils.h:219
int errdetail(const char *fmt,...)
Definition: elog.c:860
#define ALLOC_CHUNKHDRSZ
Definition: aset.c:103
AllocBlock next
Definition: aset.c:157
void MemoryContextCreate(MemoryContext node, NodeTag tag, const MemoryContextMethods *methods, MemoryContext parent, const char *name)
Definition: mcxt.c:748
#define ereport(elevel, rest)
Definition: elog.h:141
#define AssertArg(condition)
Definition: c.h:734
MemoryContext TopMemoryContext
Definition: mcxt.c:44
struct MemoryContextData * MemoryContext
Definition: palloc.h:36
MemoryContext AllocSetContextCreateInternal(MemoryContext parent, const char *name, Size minContextSize, Size initBlockSize, Size maxBlockSize)
Definition: aset.c:390
static bool AllocSetIsEmpty(MemoryContext context)
Definition: aset.c:1339
#define WARNING
Definition: elog.h:40
#define ALLOCSET_SMALL_INITSIZE
Definition: memutils.h:200
int freeListIndex
Definition: aset.c:136
static const unsigned char LogTable256[256]
Definition: aset.c:305
#define ALLOCCHUNK_RAWSIZE
Definition: aset.c:185
#define ALLOC_CHUNK_FRACTION
Definition: aset.c:85
struct AllocBlockData * AllocBlock
Definition: aset.c:105
#define MAX_FREE_CONTEXTS
Definition: aset.c:246
AllocChunk freelist[ALLOCSET_NUM_FREELISTS]
Definition: aset.c:128
Size nextBlockSize
Definition: aset.c:132
AllocBlock prev
Definition: aset.c:156
#define free(a)
Definition: header.h:65
Size allocChunkLimit
Definition: aset.c:133
#define Max(x, y)
Definition: c.h:898
struct AllocChunkData * AllocChunk
Definition: aset.c:106
#define Assert(condition)
Definition: c.h:732
static void AllocSetFree(MemoryContext context, void *pointer)
Definition: aset.c:1010
size_t Size
Definition: c.h:466
#define MAXALIGN(LEN)
Definition: c.h:685
struct AllocChunkData AllocChunkData
static void AllocSetDelete(MemoryContext context)
Definition: aset.c:638
#define realloc(a, b)
Definition: header.h:60
const char * name
Definition: encode.c:521
Size mem_allocated
Definition: memnodes.h:82
#define AllocHugeSizeIsValid(size)
Definition: memutils.h:46
void * aset
Definition: aset.c:194
int errmsg(const char *fmt,...)
Definition: elog.c:784
#define elog(elevel,...)
Definition: elog.h:226
#define ALLOCSET_DEFAULT_INITSIZE
Definition: memutils.h:190
static void * AllocSetAlloc(MemoryContext context, Size size)
Definition: aset.c:732
static const MemoryContextMethods AllocSetMethods
Definition: aset.c:286
AllocSetContext * AllocSet
Definition: aset.c:139
struct AllocSetContext AllocSetContext
#define ALLOC_MINBITS
Definition: aset.c:81
Size maxBlockSize
Definition: aset.c:131
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:123
#define snprintf
Definition: port.h:192
#define AllocAllocInfo(_cxt, _chunk)
Definition: aset.c:325
#define offsetof(type, field)
Definition: c.h:655
MemoryContext nextchild
Definition: memnodes.h:87
Size size
Definition: aset.c:177
#define AllocPointerGetChunk(ptr)
Definition: aset.c:218