/*-------------------------------------------------------------------------
 *
 * aset.c
 *	  Allocation set definitions.
 *
 * AllocSet is our standard implementation of the abstract MemoryContext
 * type.
 *
 *
 * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/utils/mmgr/aset.c
 *
 * NOTE:
 *	This is a new (Feb. 05, 1999) implementation of the allocation set
 *	routines. AllocSet...() does not use OrderedSet...() any more.
 *	Instead it manages allocations in a block pool by itself, combining
 *	many small allocations in a few bigger blocks. AllocSetFree() normally
 *	doesn't free() memory really. It just adds the free'd area to some
 *	list for later reuse by AllocSetAlloc(). All memory blocks are free()'d
 *	at once on AllocSetReset(), which happens when the memory context gets
 *	destroyed.
 *				Jan Wieck
 *
 *	Performance improvement from Tom Lane, 8/99: for extremely large request
 *	sizes, we do want to be able to give the memory back to free() as soon
 *	as it is pfree()'d.  Otherwise we risk tying up a lot of memory in
 *	freelist entries that might never be usable.  This is especially needed
 *	when the caller is repeatedly repalloc()'ing a block bigger and bigger;
 *	the previous instances of the block were guaranteed to be wasted until
 *	AllocSetReset() under the old way.
 *
 *	Further improvement 12/00: as the code stood, request sizes in the
 *	midrange between "small" and "large" were handled very inefficiently,
 *	because any sufficiently large free chunk would be used to satisfy a
 *	request, even if it was much larger than necessary.  This led to more
 *	and more wasted space in allocated chunks over time.  To fix, get rid
 *	of the midrange behavior: we now handle only "small" power-of-2-size
 *	chunks as chunks.  Anything "large" is passed off to malloc().  Change
 *	the number of freelists to change the small/large boundary.
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "utils/memdebug.h"
#include "utils/memutils.h"

/* Define this to detail debug alloc information */
/* #define HAVE_ALLOCINFO */

/*--------------------
 * Chunk freelist k holds chunks of size 1 << (k + ALLOC_MINBITS),
 * for k = 0 .. ALLOCSET_NUM_FREELISTS-1.
 *
 * Note that all chunks in the freelists have power-of-2 sizes.  This
 * improves recyclability: we may waste some space, but the wasted space
 * should stay pretty constant as requests are made and released.
 *
 * A request too large for the last freelist is handled by allocating a
 * dedicated block from malloc().  The block still has a block header and
 * chunk header, but when the chunk is freed we'll return the whole block
 * to malloc(), not put it on our freelists.
 *
 * CAUTION: ALLOC_MINBITS must be large enough so that
 * 1<<ALLOC_MINBITS is at least MAXALIGN,
 * or we may fail to align the smallest chunks adequately.
 * 8-byte alignment is enough on all currently known machines.
 *
 * With the current parameters, request sizes up to 8K are treated as chunks,
 * larger requests go into dedicated blocks.  Change ALLOCSET_NUM_FREELISTS
 * to adjust the boundary point; and adjust ALLOCSET_SEPARATE_THRESHOLD in
 * memutils.h to agree.  (Note: in contexts with small maxBlockSize, we may
 * set the allocChunkLimit to less than 8K, so as to avoid space wastage.)
 *--------------------
 */

#define ALLOC_MINBITS		3	/* smallest chunk size is 8 bytes */
#define ALLOCSET_NUM_FREELISTS	11
#define ALLOC_CHUNK_LIMIT	(1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
/* Size of largest chunk that we use a fixed size for */
#define ALLOC_CHUNK_FRACTION	4
/* We allow chunks to be at most 1/4 of maxBlockSize (less overhead) */

/*--------------------
 * The first block allocated for an allocset has size initBlockSize.
 * Each time we have to allocate another block, we double the block size
 * (if possible, and without exceeding maxBlockSize), so as to reduce
 * the bookkeeping load on malloc().
 *
 * Blocks allocated to hold oversize chunks do not follow this rule, however;
 * they are just however big they need to be to hold that single chunk.
 *
 * Also, if a minContextSize is specified, the first block has that size,
 * and then initBlockSize is used for the next one.
 *--------------------
 */

#define ALLOC_BLOCKHDRSZ	MAXALIGN(sizeof(AllocBlockData))
#define ALLOC_CHUNKHDRSZ	sizeof(struct AllocChunkData)

typedef struct AllocBlockData *AllocBlock;	/* forward reference */
typedef struct AllocChunkData *AllocChunk;

/*
 * AllocPointer
 *		Aligned pointer which may be a member of an allocation set.
 */
typedef void *AllocPointer;

114 /*
115  * AllocSetContext is our standard implementation of MemoryContext.
116  *
117  * Note: header.isReset means there is nothing for AllocSetReset to do.
118  * This is different from the aset being physically empty (empty blocks list)
119  * because we will still have a keeper block. It's also different from the set
120  * being logically empty, because we don't attempt to detect pfree'ing the
121  * last active chunk.
122  */
123 typedef struct AllocSetContext
124 {
125  MemoryContextData header; /* Standard memory-context fields */
126  /* Info about storage allocated in this context: */
127  AllocBlock blocks; /* head of list of blocks in this set */
128  AllocChunk freelist[ALLOCSET_NUM_FREELISTS]; /* free chunk lists */
129  /* Allocation parameters for this context: */
130  Size initBlockSize; /* initial block size */
131  Size maxBlockSize; /* maximum block size */
132  Size nextBlockSize; /* next block size to allocate */
133  Size headerSize; /* allocated size of context header */
134  Size allocChunkLimit; /* effective chunk size limit */
135  AllocBlock keeper; /* keep this block over resets */
136  /* freelist this context could be put in, or -1 if not a candidate: */
137  int freeListIndex; /* index in context_freelists[], or -1 */
139 
141 
142 /*
143  * AllocBlock
144  * An AllocBlock is the unit of memory that is obtained by aset.c
145  * from malloc(). It contains one or more AllocChunks, which are
146  * the units requested by palloc() and freed by pfree(). AllocChunks
147  * cannot be returned to malloc() individually, instead they are put
148  * on freelists by pfree() and re-used by the next palloc() that has
149  * a matching request size.
150  *
151  * AllocBlockData is the header data for a block --- the usable space
152  * within the block begins at the next alignment boundary.
153  */
154 typedef struct AllocBlockData
155 {
156  AllocSet aset; /* aset that owns this block */
157  AllocBlock prev; /* prev block in aset's blocks list, if any */
158  AllocBlock next; /* next block in aset's blocks list, if any */
159  char *freeptr; /* start of free space in this block */
160  char *endptr; /* end of space in this block */
162 
163 /*
164  * AllocChunk
165  * The prefix of each piece of memory in an AllocBlock
166  *
167  * Note: to meet the memory context APIs, the payload area of the chunk must
168  * be maxaligned, and the "aset" link must be immediately adjacent to the
169  * payload area (cf. GetMemoryChunkContext). We simplify matters for this
170  * module by requiring sizeof(AllocChunkData) to be maxaligned, and then
171  * we can ensure things work by adding any required alignment padding before
172  * the "aset" field. There is a static assertion below that the alignment
173  * is done correctly.
174  */
175 typedef struct AllocChunkData
176 {
177  /* size is always the size of the usable space in the chunk */
179 #ifdef MEMORY_CONTEXT_CHECKING
180  /* when debugging memory usage, also store actual requested size */
181  /* this is zero in a free chunk */
182  Size requested_size;
183 
184 #define ALLOCCHUNK_RAWSIZE (SIZEOF_SIZE_T * 2 + SIZEOF_VOID_P)
185 #else
186 #define ALLOCCHUNK_RAWSIZE (SIZEOF_SIZE_T + SIZEOF_VOID_P)
187 #endif /* MEMORY_CONTEXT_CHECKING */
188 
189  /* ensure proper alignment by adding padding if needed */
190 #if (ALLOCCHUNK_RAWSIZE % MAXIMUM_ALIGNOF) != 0
191  char padding[MAXIMUM_ALIGNOF - ALLOCCHUNK_RAWSIZE % MAXIMUM_ALIGNOF];
192 #endif
193 
194  /* aset is the owning aset if allocated, or the freelist link if free */
195  void *aset;
196  /* there must not be any padding to reach a MAXALIGN boundary here! */
198 
/*
 * Only the "aset" field should be accessed outside this module.
 * We keep the rest of an allocated chunk's header marked NOACCESS when using
 * valgrind.  But note that chunk headers that are in a freelist are kept
 * accessible, for simplicity.
 */
#define ALLOCCHUNK_PRIVATE_LEN	offsetof(AllocChunkData, aset)

/*
 * AllocPointerIsValid
 *		True iff pointer is valid allocation pointer.
 */
#define AllocPointerIsValid(pointer) PointerIsValid(pointer)

/*
 * AllocSetIsValid
 *		True iff set is valid allocation set.
 */
#define AllocSetIsValid(set) PointerIsValid(set)

/* Convert between a chunk's payload address and its header address. */
#define AllocPointerGetChunk(ptr)	\
					((AllocChunk)(((char *)(ptr)) - ALLOC_CHUNKHDRSZ))
#define AllocChunkGetPointer(chk)	\
					((AllocPointer)(((char *)(chk)) + ALLOC_CHUNKHDRSZ))

224 /*
225  * Rather than repeatedly creating and deleting memory contexts, we keep some
226  * freed contexts in freelists so that we can hand them out again with little
227  * work. Before putting a context in a freelist, we reset it so that it has
228  * only its initial malloc chunk and no others. To be a candidate for a
229  * freelist, a context must have the same minContextSize/initBlockSize as
230  * other contexts in the list; but its maxBlockSize is irrelevant since that
231  * doesn't affect the size of the initial chunk. Also, candidate contexts
232  * *must not* use MEMCONTEXT_COPY_NAME since that would make their header size
233  * variable. (We currently insist that all flags be zero, since other flags
234  * would likely make the contexts less interchangeable, too.)
235  *
236  * We currently provide one freelist for ALLOCSET_DEFAULT_SIZES contexts
237  * and one for ALLOCSET_SMALL_SIZES contexts; the latter works for
238  * ALLOCSET_START_SMALL_SIZES too, since only the maxBlockSize differs.
239  *
240  * Ordinarily, we re-use freelist contexts in last-in-first-out order, in
241  * hopes of improving locality of reference. But if there get to be too
242  * many contexts in the list, we'd prefer to drop the most-recently-created
243  * contexts in hopes of keeping the process memory map compact.
244  * We approximate that by simply deleting all existing entries when the list
245  * overflows, on the assumption that queries that allocate a lot of contexts
246  * will probably free them in more or less reverse order of allocation.
247  *
248  * Contexts in a freelist are chained via their nextchild pointers.
249  */
250 #define MAX_FREE_CONTEXTS 100 /* arbitrary limit on freelist length */
251 
252 typedef struct AllocSetFreeList
253 {
254  int num_free; /* current list length */
255  AllocSetContext *first_free; /* list header */
257 
258 /* context_freelists[0] is for default params, [1] for small params */
260 {
261  {
262  0, NULL
263  },
264  {
265  0, NULL
266  }
267 };
268 
269 /*
270  * These functions implement the MemoryContext API for AllocSet contexts.
271  */
272 static void *AllocSetAlloc(MemoryContext context, Size size);
273 static void AllocSetFree(MemoryContext context, void *pointer);
274 static void *AllocSetRealloc(MemoryContext context, void *pointer, Size size);
275 static void AllocSetReset(MemoryContext context);
276 static void AllocSetDelete(MemoryContext context);
277 static Size AllocSetGetChunkSpace(MemoryContext context, void *pointer);
278 static bool AllocSetIsEmpty(MemoryContext context);
279 static void AllocSetStats(MemoryContext context, int level, bool print,
280  MemoryContextCounters *totals);
281 
282 #ifdef MEMORY_CONTEXT_CHECKING
283 static void AllocSetCheck(MemoryContext context);
284 #endif
285 
286 /*
287  * This is the virtual function table for AllocSet contexts.
288  */
291  AllocSetFree,
298 #ifdef MEMORY_CONTEXT_CHECKING
299  ,AllocSetCheck
300 #endif
301 };
302 
/*
 * Table for AllocSetFreeIndex: LogTable256[n] is the number of significant
 * bits in n (i.e. floor(log2(n)) + 1 for n >= 1, and 0 for n == 0).
 */
#define LT16(n) n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n

static const unsigned char LogTable256[256] =
{
	0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
	LT16(5), LT16(6), LT16(6), LT16(7), LT16(7), LT16(7), LT16(7),
	LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8)
};

/* ----------
 * Debug macros
 * ----------
 */
#ifdef HAVE_ALLOCINFO
#define AllocFreeInfo(_cxt, _chunk) \
			fprintf(stderr, "AllocFree: %s: %p, %zu\n", \
				(_cxt)->header.name, (_chunk), (_chunk)->size)
#define AllocAllocInfo(_cxt, _chunk) \
			fprintf(stderr, "AllocAlloc: %s: %p, %zu\n", \
				(_cxt)->header.name, (_chunk), (_chunk)->size)
#else
#define AllocFreeInfo(_cxt, _chunk)
#define AllocAllocInfo(_cxt, _chunk)
#endif

331 /* ----------
332  * AllocSetFreeIndex -
333  *
334  * Depending on the size of an allocation compute which freechunk
335  * list of the alloc set it belongs to. Caller must have verified
336  * that size <= ALLOC_CHUNK_LIMIT.
337  * ----------
338  */
339 static inline int
341 {
342  int idx;
343  unsigned int t,
344  tsize;
345 
346  if (size > (1 << ALLOC_MINBITS))
347  {
348  tsize = (size - 1) >> ALLOC_MINBITS;
349 
350  /*
351  * At this point we need to obtain log2(tsize)+1, ie, the number of
352  * not-all-zero bits at the right. We used to do this with a
353  * shift-and-count loop, but this function is enough of a hotspot to
354  * justify micro-optimization effort. The best approach seems to be
355  * to use a lookup table. Note that this code assumes that
356  * ALLOCSET_NUM_FREELISTS <= 17, since we only cope with two bytes of
357  * the tsize value.
358  */
359  t = tsize >> 8;
360  idx = t ? LogTable256[t] + 8 : LogTable256[tsize];
361 
363  }
364  else
365  idx = 0;
366 
367  return idx;
368 }
369 
370 
371 /*
372  * Public routines
373  */
374 
375 
376 /*
377  * AllocSetContextCreateExtended
378  * Create a new AllocSet context.
379  *
380  * parent: parent context, or NULL if top-level context
381  * name: name of context (for debugging only, need not be unique)
382  * flags: bitmask of MEMCONTEXT_XXX option flags
383  * minContextSize: minimum context size
384  * initBlockSize: initial allocation block size
385  * maxBlockSize: maximum allocation block size
386  *
387  * Notes: if flags & MEMCONTEXT_COPY_NAME, the name string will be copied into
388  * context-lifespan storage; otherwise, it had better be statically allocated.
389  * Most callers should abstract the context size parameters using a macro
390  * such as ALLOCSET_DEFAULT_SIZES. (This is now *required* when going
391  * through the AllocSetContextCreate macro.)
392  */
395  const char *name,
396  int flags,
397  Size minContextSize,
400 {
401  int freeListIndex;
403  Size firstBlockSize;
404  AllocSet set;
405  AllocBlock block;
406 
407  /* Assert we padded AllocChunkData properly */
409  "sizeof(AllocChunkData) is not maxaligned");
412  "padding calculation in AllocChunkData is wrong");
413 
414  /*
415  * First, validate allocation parameters. Once these were regular runtime
416  * test and elog's, but in practice Asserts seem sufficient because nobody
417  * varies their parameters at runtime. We somewhat arbitrarily enforce a
418  * minimum 1K block size.
419  */
420  Assert(initBlockSize == MAXALIGN(initBlockSize) &&
421  initBlockSize >= 1024);
422  Assert(maxBlockSize == MAXALIGN(maxBlockSize) &&
423  maxBlockSize >= initBlockSize &&
424  AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
425  Assert(minContextSize == 0 ||
426  (minContextSize == MAXALIGN(minContextSize) &&
427  minContextSize >= 1024 &&
428  minContextSize <= maxBlockSize));
429 
430  /*
431  * Check whether the parameters match either available freelist. We do
432  * not need to demand a match of maxBlockSize.
433  */
434  if (flags == 0 &&
435  minContextSize == ALLOCSET_DEFAULT_MINSIZE &&
436  initBlockSize == ALLOCSET_DEFAULT_INITSIZE)
437  freeListIndex = 0;
438  else if (flags == 0 &&
439  minContextSize == ALLOCSET_SMALL_MINSIZE &&
440  initBlockSize == ALLOCSET_SMALL_INITSIZE)
441  freeListIndex = 1;
442  else
443  freeListIndex = -1;
444 
445  /*
446  * If a suitable freelist entry exists, just recycle that context.
447  */
448  if (freeListIndex >= 0)
449  {
450  AllocSetFreeList *freelist = &context_freelists[freeListIndex];
451 
452  if (freelist->first_free != NULL)
453  {
454  /* Remove entry from freelist */
455  set = freelist->first_free;
456  freelist->first_free = (AllocSet) set->header.nextchild;
457  freelist->num_free--;
458 
459  /* Update its maxBlockSize; everything else should be OK */
460  set->maxBlockSize = maxBlockSize;
461 
462  /* Reinitialize its header, installing correct name and parent */
465  set->headerSize,
466  sizeof(AllocSetContext),
467  &AllocSetMethods,
468  parent,
469  name,
470  flags);
471 
472  return (MemoryContext) set;
473  }
474  }
475 
476  /* Size of the memory context header, including name storage if needed */
477  if (flags & MEMCONTEXT_COPY_NAME)
478  headerSize = MAXALIGN(sizeof(AllocSetContext) + strlen(name) + 1);
479  else
480  headerSize = MAXALIGN(sizeof(AllocSetContext));
481 
482  /* Determine size of initial block */
483  firstBlockSize = headerSize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
484  if (minContextSize != 0)
485  firstBlockSize = Max(firstBlockSize, minContextSize);
486  else
487  firstBlockSize = Max(firstBlockSize, initBlockSize);
488 
489  /*
490  * Allocate the initial block. Unlike other aset.c blocks, it starts with
491  * the context header and its block header follows that.
492  */
493  set = (AllocSet) malloc(firstBlockSize);
494  if (set == NULL)
495  {
496  if (TopMemoryContext)
498  ereport(ERROR,
499  (errcode(ERRCODE_OUT_OF_MEMORY),
500  errmsg("out of memory"),
501  errdetail("Failed while creating memory context \"%s\".",
502  name)));
503  }
504 
505  /*
506  * Avoid writing code that can fail between here and MemoryContextCreate;
507  * we'd leak the header/initial block if we ereport in this stretch.
508  */
509 
510  /* Fill in the initial block's block header */
511  block = (AllocBlock) (((char *) set) + headerSize);
512  block->aset = set;
513  block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
514  block->endptr = ((char *) set) + firstBlockSize;
515  block->prev = NULL;
516  block->next = NULL;
517 
518  /* Mark unallocated space NOACCESS; leave the block header alone. */
519  VALGRIND_MAKE_MEM_NOACCESS(block->freeptr, block->endptr - block->freeptr);
520 
521  /* Remember block as part of block list */
522  set->blocks = block;
523  /* Mark block as not to be released at reset time */
524  set->keeper = block;
525 
526  /* Finish filling in aset-specific parts of the context header */
527  MemSetAligned(set->freelist, 0, sizeof(set->freelist));
528 
529  set->initBlockSize = initBlockSize;
530  set->maxBlockSize = maxBlockSize;
531  set->nextBlockSize = initBlockSize;
532  set->headerSize = headerSize;
533  set->freeListIndex = freeListIndex;
534 
535  /*
536  * Compute the allocation chunk size limit for this context. It can't be
537  * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
538  * If maxBlockSize is small then requests exceeding the maxBlockSize, or
539  * even a significant fraction of it, should be treated as large chunks
540  * too. For the typical case of maxBlockSize a power of 2, the chunk size
541  * limit will be at most 1/8th maxBlockSize, so that given a stream of
542  * requests that are all the maximum chunk size we will waste at most
543  * 1/8th of the allocated space.
544  *
545  * We have to have allocChunkLimit a power of two, because the requested
546  * and actually-allocated sizes of any chunk must be on the same side of
547  * the limit, else we get confused about whether the chunk is "big".
548  *
549  * Also, allocChunkLimit must not exceed ALLOCSET_SEPARATE_THRESHOLD.
550  */
552  "ALLOC_CHUNK_LIMIT != ALLOCSET_SEPARATE_THRESHOLD");
553 
554  set->allocChunkLimit = ALLOC_CHUNK_LIMIT;
555  while ((Size) (set->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
556  (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
557  set->allocChunkLimit >>= 1;
558 
559  /* Finally, do the type-independent part of context creation */
562  headerSize,
563  sizeof(AllocSetContext),
564  &AllocSetMethods,
565  parent,
566  name,
567  flags);
568 
569  return (MemoryContext) set;
570 }
571 
572 /*
573  * AllocSetReset
574  * Frees all memory which is allocated in the given set.
575  *
576  * Actually, this routine has some discretion about what to do.
577  * It should mark all allocated chunks freed, but it need not necessarily
578  * give back all the resources the set owns. Our actual implementation is
579  * that we give back all but the "keeper" block (which we must keep, since
580  * it shares a malloc chunk with the context header). In this way, we don't
581  * thrash malloc() when a context is repeatedly reset after small allocations,
582  * which is typical behavior for per-tuple contexts.
583  */
584 static void
586 {
587  AllocSet set = (AllocSet) context;
588  AllocBlock block;
589 
591 
592 #ifdef MEMORY_CONTEXT_CHECKING
593  /* Check for corruption and leaks before freeing */
594  AllocSetCheck(context);
595 #endif
596 
597  /* Clear chunk freelists */
598  MemSetAligned(set->freelist, 0, sizeof(set->freelist));
599 
600  block = set->blocks;
601 
602  /* New blocks list will be just the keeper block */
603  set->blocks = set->keeper;
604 
605  while (block != NULL)
606  {
607  AllocBlock next = block->next;
608 
609  if (block == set->keeper)
610  {
611  /* Reset the block, but don't return it to malloc */
612  char *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;
613 
614 #ifdef CLOBBER_FREED_MEMORY
615  wipe_mem(datastart, block->freeptr - datastart);
616 #else
617  /* wipe_mem() would have done this */
618  VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
619 #endif
620  block->freeptr = datastart;
621  block->prev = NULL;
622  block->next = NULL;
623  }
624  else
625  {
626  /* Normal case, release the block */
627 #ifdef CLOBBER_FREED_MEMORY
628  wipe_mem(block, block->freeptr - ((char *) block));
629 #endif
630  free(block);
631  }
632  block = next;
633  }
634 
635  /* Reset block size allocation sequence, too */
636  set->nextBlockSize = set->initBlockSize;
637 }
638 
639 /*
640  * AllocSetDelete
641  * Frees all memory which is allocated in the given set,
642  * in preparation for deletion of the set.
643  *
644  * Unlike AllocSetReset, this *must* free all resources of the set.
645  */
646 static void
648 {
649  AllocSet set = (AllocSet) context;
650  AllocBlock block = set->blocks;
651 
653 
654 #ifdef MEMORY_CONTEXT_CHECKING
655  /* Check for corruption and leaks before freeing */
656  AllocSetCheck(context);
657 #endif
658 
659  /*
660  * If the context is a candidate for a freelist, put it into that freelist
661  * instead of destroying it.
662  */
663  if (set->freeListIndex >= 0)
664  {
665  AllocSetFreeList *freelist = &context_freelists[set->freeListIndex];
666 
667  /*
668  * Reset the context, if it needs it, so that we aren't hanging on to
669  * more than the initial malloc chunk.
670  */
671  if (!context->isReset)
672  MemoryContextResetOnly(context);
673 
674  /*
675  * If the freelist is full, just discard what's already in it. See
676  * comments with context_freelists[].
677  */
678  if (freelist->num_free >= MAX_FREE_CONTEXTS)
679  {
680  while (freelist->first_free != NULL)
681  {
682  AllocSetContext *oldset = freelist->first_free;
683 
684  freelist->first_free = (AllocSetContext *) oldset->header.nextchild;
685  freelist->num_free--;
686 
687  /* All that remains is to free the header/initial block */
688  free(oldset);
689  }
690  Assert(freelist->num_free == 0);
691  }
692 
693  /* Now add the just-deleted context to the freelist. */
694  set->header.nextchild = (MemoryContext) freelist->first_free;
695  freelist->first_free = set;
696  freelist->num_free++;
697 
698  return;
699  }
700 
701  /* Free all blocks, except the keeper which is part of context header */
702  while (block != NULL)
703  {
704  AllocBlock next = block->next;
705 
706 #ifdef CLOBBER_FREED_MEMORY
707  wipe_mem(block, block->freeptr - ((char *) block));
708 #endif
709 
710  if (block != set->keeper)
711  free(block);
712 
713  block = next;
714  }
715 
716  /* Finally, free the context header, including the keeper block */
717  free(set);
718 }
719 
720 /*
721  * AllocSetAlloc
722  * Returns pointer to allocated memory of given size or NULL if
723  * request could not be completed; memory is added to the set.
724  *
725  * No request may exceed:
726  * MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
727  * All callers use a much-lower limit.
728  *
729  * Note: when using valgrind, it doesn't matter how the returned allocation
730  * is marked, as mcxt.c will set it to UNDEFINED. In some paths we will
731  * return space that is marked NOACCESS - AllocSetRealloc has to beware!
732  */
733 static void *
735 {
736  AllocSet set = (AllocSet) context;
737  AllocBlock block;
738  AllocChunk chunk;
739  int fidx;
740  Size chunk_size;
741  Size blksize;
742 
744 
745  /*
746  * If requested size exceeds maximum for chunks, allocate an entire block
747  * for this request.
748  */
749  if (size > set->allocChunkLimit)
750  {
751  chunk_size = MAXALIGN(size);
752  blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
753  block = (AllocBlock) malloc(blksize);
754  if (block == NULL)
755  return NULL;
756  block->aset = set;
757  block->freeptr = block->endptr = ((char *) block) + blksize;
758 
759  chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
760  chunk->aset = set;
761  chunk->size = chunk_size;
762 #ifdef MEMORY_CONTEXT_CHECKING
763  chunk->requested_size = size;
764  /* set mark to catch clobber of "unused" space */
765  if (size < chunk_size)
766  set_sentinel(AllocChunkGetPointer(chunk), size);
767 #endif
768 #ifdef RANDOMIZE_ALLOCATED_MEMORY
769  /* fill the allocated space with junk */
770  randomize_mem((char *) AllocChunkGetPointer(chunk), size);
771 #endif
772 
773  /*
774  * Stick the new block underneath the active allocation block, if any,
775  * so that we don't lose the use of the space remaining therein.
776  */
777  if (set->blocks != NULL)
778  {
779  block->prev = set->blocks;
780  block->next = set->blocks->next;
781  if (block->next)
782  block->next->prev = block;
783  set->blocks->next = block;
784  }
785  else
786  {
787  block->prev = NULL;
788  block->next = NULL;
789  set->blocks = block;
790  }
791 
792  AllocAllocInfo(set, chunk);
793 
794  /* Ensure any padding bytes are marked NOACCESS. */
795  VALGRIND_MAKE_MEM_NOACCESS((char *) AllocChunkGetPointer(chunk) + size,
796  chunk_size - size);
797 
798  /* Disallow external access to private part of chunk header. */
800 
801  return AllocChunkGetPointer(chunk);
802  }
803 
804  /*
805  * Request is small enough to be treated as a chunk. Look in the
806  * corresponding free list to see if there is a free chunk we could reuse.
807  * If one is found, remove it from the free list, make it again a member
808  * of the alloc set and return its data address.
809  */
810  fidx = AllocSetFreeIndex(size);
811  chunk = set->freelist[fidx];
812  if (chunk != NULL)
813  {
814  Assert(chunk->size >= size);
815 
816  set->freelist[fidx] = (AllocChunk) chunk->aset;
817 
818  chunk->aset = (void *) set;
819 
820 #ifdef MEMORY_CONTEXT_CHECKING
821  chunk->requested_size = size;
822  /* set mark to catch clobber of "unused" space */
823  if (size < chunk->size)
824  set_sentinel(AllocChunkGetPointer(chunk), size);
825 #endif
826 #ifdef RANDOMIZE_ALLOCATED_MEMORY
827  /* fill the allocated space with junk */
828  randomize_mem((char *) AllocChunkGetPointer(chunk), size);
829 #endif
830 
831  AllocAllocInfo(set, chunk);
832 
833  /* Ensure any padding bytes are marked NOACCESS. */
834  VALGRIND_MAKE_MEM_NOACCESS((char *) AllocChunkGetPointer(chunk) + size,
835  chunk->size - size);
836 
837  /* Disallow external access to private part of chunk header. */
839 
840  return AllocChunkGetPointer(chunk);
841  }
842 
843  /*
844  * Choose the actual chunk size to allocate.
845  */
846  chunk_size = (1 << ALLOC_MINBITS) << fidx;
847  Assert(chunk_size >= size);
848 
849  /*
850  * If there is enough room in the active allocation block, we will put the
851  * chunk into that block. Else must start a new one.
852  */
853  if ((block = set->blocks) != NULL)
854  {
855  Size availspace = block->endptr - block->freeptr;
856 
857  if (availspace < (chunk_size + ALLOC_CHUNKHDRSZ))
858  {
859  /*
860  * The existing active (top) block does not have enough room for
861  * the requested allocation, but it might still have a useful
862  * amount of space in it. Once we push it down in the block list,
863  * we'll never try to allocate more space from it. So, before we
864  * do that, carve up its free space into chunks that we can put on
865  * the set's freelists.
866  *
867  * Because we can only get here when there's less than
868  * ALLOC_CHUNK_LIMIT left in the block, this loop cannot iterate
869  * more than ALLOCSET_NUM_FREELISTS-1 times.
870  */
871  while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
872  {
873  Size availchunk = availspace - ALLOC_CHUNKHDRSZ;
874  int a_fidx = AllocSetFreeIndex(availchunk);
875 
876  /*
877  * In most cases, we'll get back the index of the next larger
878  * freelist than the one we need to put this chunk on. The
879  * exception is when availchunk is exactly a power of 2.
880  */
881  if (availchunk != ((Size) 1 << (a_fidx + ALLOC_MINBITS)))
882  {
883  a_fidx--;
884  Assert(a_fidx >= 0);
885  availchunk = ((Size) 1 << (a_fidx + ALLOC_MINBITS));
886  }
887 
888  chunk = (AllocChunk) (block->freeptr);
889 
890  /* Prepare to initialize the chunk header. */
891  VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);
892 
893  block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
894  availspace -= (availchunk + ALLOC_CHUNKHDRSZ);
895 
896  chunk->size = availchunk;
897 #ifdef MEMORY_CONTEXT_CHECKING
898  chunk->requested_size = 0; /* mark it free */
899 #endif
900  chunk->aset = (void *) set->freelist[a_fidx];
901  set->freelist[a_fidx] = chunk;
902  }
903 
904  /* Mark that we need to create a new block */
905  block = NULL;
906  }
907  }
908 
909  /*
910  * Time to create a new regular (multi-chunk) block?
911  */
912  if (block == NULL)
913  {
914  Size required_size;
915 
916  /*
917  * The first such block has size initBlockSize, and we double the
918  * space in each succeeding block, but not more than maxBlockSize.
919  */
920  blksize = set->nextBlockSize;
921  set->nextBlockSize <<= 1;
922  if (set->nextBlockSize > set->maxBlockSize)
923  set->nextBlockSize = set->maxBlockSize;
924 
925  /*
926  * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
927  * space... but try to keep it a power of 2.
928  */
929  required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
930  while (blksize < required_size)
931  blksize <<= 1;
932 
933  /* Try to allocate it */
934  block = (AllocBlock) malloc(blksize);
935 
936  /*
937  * We could be asking for pretty big blocks here, so cope if malloc
938  * fails. But give up if there's less than a meg or so available...
939  */
940  while (block == NULL && blksize > 1024 * 1024)
941  {
942  blksize >>= 1;
943  if (blksize < required_size)
944  break;
945  block = (AllocBlock) malloc(blksize);
946  }
947 
948  if (block == NULL)
949  return NULL;
950 
951  block->aset = set;
952  block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
953  block->endptr = ((char *) block) + blksize;
954 
955  /* Mark unallocated space NOACCESS. */
957  blksize - ALLOC_BLOCKHDRSZ);
958 
959  block->prev = NULL;
960  block->next = set->blocks;
961  if (block->next)
962  block->next->prev = block;
963  set->blocks = block;
964  }
965 
966  /*
967  * OK, do the allocation
968  */
969  chunk = (AllocChunk) (block->freeptr);
970 
971  /* Prepare to initialize the chunk header. */
973 
974  block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
975  Assert(block->freeptr <= block->endptr);
976 
977  chunk->aset = (void *) set;
978  chunk->size = chunk_size;
979 #ifdef MEMORY_CONTEXT_CHECKING
980  chunk->requested_size = size;
981  /* set mark to catch clobber of "unused" space */
982  if (size < chunk->size)
983  set_sentinel(AllocChunkGetPointer(chunk), size);
984 #endif
985 #ifdef RANDOMIZE_ALLOCATED_MEMORY
986  /* fill the allocated space with junk */
987  randomize_mem((char *) AllocChunkGetPointer(chunk), size);
988 #endif
989 
990  AllocAllocInfo(set, chunk);
991 
992  /* Ensure any padding bytes are marked NOACCESS. */
993  VALGRIND_MAKE_MEM_NOACCESS((char *) AllocChunkGetPointer(chunk) + size,
994  chunk_size - size);
995 
996  /* Disallow external access to private part of chunk header. */
998 
999  return AllocChunkGetPointer(chunk);
1000 }
1001 
1002 /*
1003  * AllocSetFree
1004  * Frees allocated memory; memory is removed from the set.
1005  */
1006 static void
1007 AllocSetFree(MemoryContext context, void *pointer)
1008 {
1009  AllocSet set = (AllocSet) context;
1010  AllocChunk chunk = AllocPointerGetChunk(pointer);
1011 
1012  /* Allow access to private part of chunk header. */
1014 
1015  AllocFreeInfo(set, chunk);
1016 
1017 #ifdef MEMORY_CONTEXT_CHECKING
1018  /* Test for someone scribbling on unused space in chunk */
1019  if (chunk->requested_size < chunk->size)
1020  if (!sentinel_ok(pointer, chunk->requested_size))
1021  elog(WARNING, "detected write past chunk end in %s %p",
1022  set->header.name, chunk);
1023 #endif
1024 
1025  if (chunk->size > set->allocChunkLimit)
1026  {
1027  /*
1028  * Big chunks are certain to have been allocated as single-chunk
1029  * blocks. Just unlink that block and return it to malloc().
1030  */
1031  AllocBlock block = (AllocBlock) (((char *) chunk) - ALLOC_BLOCKHDRSZ);
1032 
1033  /*
1034  * Try to verify that we have a sane block pointer: it should
1035  * reference the correct aset, and freeptr and endptr should point
1036  * just past the chunk.
1037  */
1038  if (block->aset != set ||
1039  block->freeptr != block->endptr ||
1040  block->freeptr != ((char *) block) +
1041  (chunk->size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
1042  elog(ERROR, "could not find block containing chunk %p", chunk);
1043 
1044  /* OK, remove block from aset's list and free it */
1045  if (block->prev)
1046  block->prev->next = block->next;
1047  else
1048  set->blocks = block->next;
1049  if (block->next)
1050  block->next->prev = block->prev;
1051 #ifdef CLOBBER_FREED_MEMORY
1052  wipe_mem(block, block->freeptr - ((char *) block));
1053 #endif
1054  free(block);
1055  }
1056  else
1057  {
1058  /* Normal case, put the chunk into appropriate freelist */
1059  int fidx = AllocSetFreeIndex(chunk->size);
1060 
1061  chunk->aset = (void *) set->freelist[fidx];
1062 
1063 #ifdef CLOBBER_FREED_MEMORY
1064  wipe_mem(pointer, chunk->size);
1065 #endif
1066 
1067 #ifdef MEMORY_CONTEXT_CHECKING
1068  /* Reset requested_size to 0 in chunks that are on freelist */
1069  chunk->requested_size = 0;
1070 #endif
1071  set->freelist[fidx] = chunk;
1072  }
1073 }
1074 
1075 /*
1076  * AllocSetRealloc
1077  * Returns new pointer to allocated memory of given size or NULL if
1078  * request could not be completed; this memory is added to the set.
1079  * Memory associated with given pointer is copied into the new memory,
1080  * and the old memory is freed.
1081  *
1082  * Without MEMORY_CONTEXT_CHECKING, we don't know the old request size. This
1083  * makes our Valgrind client requests less-precise, hazarding false negatives.
1084  * (In principle, we could use VALGRIND_GET_VBITS() to rediscover the old
1085  * request size.)
1086  */
1087 static void *
1088 AllocSetRealloc(MemoryContext context, void *pointer, Size size)
1089 {
1090  AllocSet set = (AllocSet) context;
1091  AllocChunk chunk = AllocPointerGetChunk(pointer);
1092  Size oldsize;
1093 
1094  /* Allow access to private part of chunk header. */
1096 
1097  oldsize = chunk->size;
1098 
1099 #ifdef MEMORY_CONTEXT_CHECKING
1100  /* Test for someone scribbling on unused space in chunk */
1101  if (chunk->requested_size < oldsize)
1102  if (!sentinel_ok(pointer, chunk->requested_size))
1103  elog(WARNING, "detected write past chunk end in %s %p",
1104  set->header.name, chunk);
1105 #endif
1106 
1107  /*
1108  * Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
1109  * allocated area already is >= the new size. (In particular, we always
1110  * fall out here if the requested size is a decrease.)
1111  */
1112  if (oldsize >= size)
1113  {
1114 #ifdef MEMORY_CONTEXT_CHECKING
1115  Size oldrequest = chunk->requested_size;
1116 
1117 #ifdef RANDOMIZE_ALLOCATED_MEMORY
1118  /* We can only fill the extra space if we know the prior request */
1119  if (size > oldrequest)
1120  randomize_mem((char *) pointer + oldrequest,
1121  size - oldrequest);
1122 #endif
1123 
1124  chunk->requested_size = size;
1125 
1126  /*
1127  * If this is an increase, mark any newly-available part UNDEFINED.
1128  * Otherwise, mark the obsolete part NOACCESS.
1129  */
1130  if (size > oldrequest)
1131  VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
1132  size - oldrequest);
1133  else
1134  VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
1135  oldsize - size);
1136 
1137  /* set mark to catch clobber of "unused" space */
1138  if (size < oldsize)
1139  set_sentinel(pointer, size);
1140 #else /* !MEMORY_CONTEXT_CHECKING */
1141 
1142  /*
1143  * We don't have the information to determine whether we're growing
1144  * the old request or shrinking it, so we conservatively mark the
1145  * entire new allocation DEFINED.
1146  */
1147  VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
1148  VALGRIND_MAKE_MEM_DEFINED(pointer, size);
1149 #endif
1150 
1151  /* Disallow external access to private part of chunk header. */
1153 
1154  return pointer;
1155  }
1156 
1157  if (oldsize > set->allocChunkLimit)
1158  {
1159  /*
1160  * The chunk must have been allocated as a single-chunk block. Use
1161  * realloc() to make the containing block bigger with minimum space
1162  * wastage.
1163  */
1164  AllocBlock block = (AllocBlock) (((char *) chunk) - ALLOC_BLOCKHDRSZ);
1165  Size chksize;
1166  Size blksize;
1167 
1168  /*
1169  * Try to verify that we have a sane block pointer: it should
1170  * reference the correct aset, and freeptr and endptr should point
1171  * just past the chunk.
1172  */
1173  if (block->aset != set ||
1174  block->freeptr != block->endptr ||
1175  block->freeptr != ((char *) block) +
1176  (chunk->size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
1177  elog(ERROR, "could not find block containing chunk %p", chunk);
1178 
1179  /* Do the realloc */
1180  chksize = MAXALIGN(size);
1181  blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
1182  block = (AllocBlock) realloc(block, blksize);
1183  if (block == NULL)
1184  {
1185  /* Disallow external access to private part of chunk header. */
1187  return NULL;
1188  }
1189  block->freeptr = block->endptr = ((char *) block) + blksize;
1190 
1191  /* Update pointers since block has likely been moved */
1192  chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
1193  pointer = AllocChunkGetPointer(chunk);
1194  if (block->prev)
1195  block->prev->next = block;
1196  else
1197  set->blocks = block;
1198  if (block->next)
1199  block->next->prev = block;
1200  chunk->size = chksize;
1201 
1202 #ifdef MEMORY_CONTEXT_CHECKING
1203 #ifdef RANDOMIZE_ALLOCATED_MEMORY
1204  /* We can only fill the extra space if we know the prior request */
1205  randomize_mem((char *) pointer + chunk->requested_size,
1206  size - chunk->requested_size);
1207 #endif
1208 
1209  /*
1210  * realloc() (or randomize_mem()) will have left the newly-allocated
1211  * part UNDEFINED, but we may need to adjust trailing bytes from the
1212  * old allocation.
1213  */
1214  VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
1215  oldsize - chunk->requested_size);
1216 
1217  chunk->requested_size = size;
1218 
1219  /* set mark to catch clobber of "unused" space */
1220  if (size < chunk->size)
1221  set_sentinel(pointer, size);
1222 #else /* !MEMORY_CONTEXT_CHECKING */
1223 
1224  /*
1225  * We don't know how much of the old chunk size was the actual
1226  * allocation; it could have been as small as one byte. We have to be
1227  * conservative and just mark the entire old portion DEFINED.
1228  */
1229  VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
1230 #endif
1231 
1232  /* Ensure any padding bytes are marked NOACCESS. */
1233  VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
1234 
1235  /* Disallow external access to private part of chunk header. */
1237 
1238  return pointer;
1239  }
1240  else
1241  {
1242  /*
1243  * Small-chunk case. We just do this by brute force, ie, allocate a
1244  * new chunk and copy the data. Since we know the existing data isn't
1245  * huge, this won't involve any great memcpy expense, so it's not
1246  * worth being smarter. (At one time we tried to avoid memcpy when it
1247  * was possible to enlarge the chunk in-place, but that turns out to
1248  * misbehave unpleasantly for repeated cycles of
1249  * palloc/repalloc/pfree: the eventually freed chunks go into the
1250  * wrong freelist for the next initial palloc request, and so we leak
1251  * memory indefinitely. See pgsql-hackers archives for 2007-08-11.)
1252  */
1253  AllocPointer newPointer;
1254 
1255  /* allocate new chunk */
1256  newPointer = AllocSetAlloc((MemoryContext) set, size);
1257 
1258  /* leave immediately if request was not completed */
1259  if (newPointer == NULL)
1260  {
1261  /* Disallow external access to private part of chunk header. */
1263  return NULL;
1264  }
1265 
1266  /*
1267  * AllocSetAlloc() may have returned a region that is still NOACCESS.
1268  * Change it to UNDEFINED for the moment; memcpy() will then transfer
1269  * definedness from the old allocation to the new. If we know the old
1270  * allocation, copy just that much. Otherwise, make the entire old
1271  * chunk defined to avoid errors as we copy the currently-NOACCESS
1272  * trailing bytes.
1273  */
1274  VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
1275 #ifdef MEMORY_CONTEXT_CHECKING
1276  oldsize = chunk->requested_size;
1277 #else
1278  VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
1279 #endif
1280 
1281  /* transfer existing data (certain to fit) */
1282  memcpy(newPointer, pointer, oldsize);
1283 
1284  /* free old chunk */
1285  AllocSetFree((MemoryContext) set, pointer);
1286 
1287  return newPointer;
1288  }
1289 }
1290 
1291 /*
1292  * AllocSetGetChunkSpace
1293  * Given a currently-allocated chunk, determine the total space
1294  * it occupies (including all memory-allocation overhead).
1295  */
1296 static Size
1297 AllocSetGetChunkSpace(MemoryContext context, void *pointer)
1298 {
1299  AllocChunk chunk = AllocPointerGetChunk(pointer);
1300  Size result;
1301 
1303  result = chunk->size + ALLOC_CHUNKHDRSZ;
1305  return result;
1306 }
1307 
1308 /*
1309  * AllocSetIsEmpty
1310  * Is an allocset empty of any allocated space?
1311  */
1312 static bool
1314 {
1315  /*
1316  * For now, we say "empty" only if the context is new or just reset. We
1317  * could examine the freelists to determine if all space has been freed,
1318  * but it's not really worth the trouble for present uses of this
1319  * functionality.
1320  */
1321  if (context->isReset)
1322  return true;
1323  return false;
1324 }
1325 
1326 /*
1327  * AllocSetStats
1328  * Compute stats about memory consumption of an allocset.
1329  *
1330  * level: recursion level (0 at top level); used for print indentation.
1331  * print: true to print stats to stderr.
1332  * totals: if not NULL, add stats about this allocset into *totals.
1333  */
1334 static void
1335 AllocSetStats(MemoryContext context, int level, bool print,
1336  MemoryContextCounters *totals)
1337 {
1338  AllocSet set = (AllocSet) context;
1339  Size nblocks = 0;
1340  Size freechunks = 0;
1341  Size totalspace;
1342  Size freespace = 0;
1343  AllocBlock block;
1344  int fidx;
1345 
1346  /* Include context header in totalspace */
1347  totalspace = set->headerSize;
1348 
1349  for (block = set->blocks; block != NULL; block = block->next)
1350  {
1351  nblocks++;
1352  totalspace += block->endptr - ((char *) block);
1353  freespace += block->endptr - block->freeptr;
1354  }
1355  for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
1356  {
1357  AllocChunk chunk;
1358 
1359  for (chunk = set->freelist[fidx]; chunk != NULL;
1360  chunk = (AllocChunk) chunk->aset)
1361  {
1362  freechunks++;
1363  freespace += chunk->size + ALLOC_CHUNKHDRSZ;
1364  }
1365  }
1366 
1367  if (print)
1368  {
1369  int i;
1370 
1371  for (i = 0; i < level; i++)
1372  fprintf(stderr, " ");
1373  fprintf(stderr,
1374  "%s: %zu total in %zd blocks; %zu free (%zd chunks); %zu used\n",
1375  set->header.name, totalspace, nblocks, freespace, freechunks,
1376  totalspace - freespace);
1377  }
1378 
1379  if (totals)
1380  {
1381  totals->nblocks += nblocks;
1382  totals->freechunks += freechunks;
1383  totals->totalspace += totalspace;
1384  totals->freespace += freespace;
1385  }
1386 }
1387 
1388 
1389 #ifdef MEMORY_CONTEXT_CHECKING
1390 
1391 /*
1392  * AllocSetCheck
1393  * Walk through chunks and check consistency of memory.
1394  *
1395  * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
1396  * find yourself in an infinite loop when trouble occurs, because this
1397  * routine will be entered again when elog cleanup tries to release memory!
1398  */
1399 static void
1400 AllocSetCheck(MemoryContext context)
1401 {
1402  AllocSet set = (AllocSet) context;
1403  const char *name = set->header.name;
1404  AllocBlock prevblock;
1405  AllocBlock block;
1406 
1407  for (prevblock = NULL, block = set->blocks;
1408  block != NULL;
1409  prevblock = block, block = block->next)
1410  {
1411  char *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
1412  long blk_used = block->freeptr - bpoz;
1413  long blk_data = 0;
1414  long nchunks = 0;
1415 
1416  /*
1417  * Empty block - empty can be keeper-block only
1418  */
1419  if (!blk_used)
1420  {
1421  if (set->keeper != block)
1422  elog(WARNING, "problem in alloc set %s: empty block %p",
1423  name, block);
1424  }
1425 
1426  /*
1427  * Check block header fields
1428  */
1429  if (block->aset != set ||
1430  block->prev != prevblock ||
1431  block->freeptr < bpoz ||
1432  block->freeptr > block->endptr)
1433  elog(WARNING, "problem in alloc set %s: corrupt header in block %p",
1434  name, block);
1435 
1436  /*
1437  * Chunk walker
1438  */
1439  while (bpoz < block->freeptr)
1440  {
1441  AllocChunk chunk = (AllocChunk) bpoz;
1442  Size chsize,
1443  dsize;
1444 
1445  /* Allow access to private part of chunk header. */
1447 
1448  chsize = chunk->size; /* aligned chunk size */
1449  dsize = chunk->requested_size; /* real data */
1450 
1451  /*
1452  * Check chunk size
1453  */
1454  if (dsize > chsize)
1455  elog(WARNING, "problem in alloc set %s: req size > alloc size for chunk %p in block %p",
1456  name, chunk, block);
1457  if (chsize < (1 << ALLOC_MINBITS))
1458  elog(WARNING, "problem in alloc set %s: bad size %zu for chunk %p in block %p",
1459  name, chsize, chunk, block);
1460 
1461  /* single-chunk block? */
1462  if (chsize > set->allocChunkLimit &&
1463  chsize + ALLOC_CHUNKHDRSZ != blk_used)
1464  elog(WARNING, "problem in alloc set %s: bad single-chunk %p in block %p",
1465  name, chunk, block);
1466 
1467  /*
1468  * If chunk is allocated, check for correct aset pointer. (If it's
1469  * free, the aset is the freelist pointer, which we can't check as
1470  * easily...) Note this is an incomplete test, since palloc(0)
1471  * produces an allocated chunk with requested_size == 0.
1472  */
1473  if (dsize > 0 && chunk->aset != (void *) set)
1474  elog(WARNING, "problem in alloc set %s: bogus aset link in block %p, chunk %p",
1475  name, block, chunk);
1476 
1477  /*
1478  * Check for overwrite of padding space in an allocated chunk.
1479  */
1480  if (chunk->aset == (void *) set && dsize < chsize &&
1481  !sentinel_ok(chunk, ALLOC_CHUNKHDRSZ + dsize))
1482  elog(WARNING, "problem in alloc set %s: detected write past chunk end in block %p, chunk %p",
1483  name, block, chunk);
1484 
1485  /*
1486  * If chunk is allocated, disallow external access to private part
1487  * of chunk header.
1488  */
1489  if (chunk->aset == (void *) set)
1491 
1492  blk_data += chsize;
1493  nchunks++;
1494 
1495  bpoz += ALLOC_CHUNKHDRSZ + chsize;
1496  }
1497 
1498  if ((blk_data + (nchunks * ALLOC_CHUNKHDRSZ)) != blk_used)
1499  elog(WARNING, "problem in alloc set %s: found inconsistent memory block %p",
1500  name, block);
1501  }
1502 }
1503 
1504 #endif /* MEMORY_CONTEXT_CHECKING */
#define MemSetAligned(start, val, len)
Definition: c.h:910
Size initBlockSize
Definition: aset.c:130
#define AllocFreeInfo(_cxt, _chunk)
Definition: aset.c:327
#define VALGRIND_MAKE_MEM_DEFINED(addr, size)
Definition: memdebug.h:26
AllocBlock blocks
Definition: aset.c:127
static int32 next
Definition: blutils.c:210
void print(const void *obj)
Definition: print.c:35
struct AllocBlockData AllocBlockData
static Size AllocSetGetChunkSpace(MemoryContext context, void *pointer)
Definition: aset.c:1297
MemoryContextData header
Definition: aset.c:125
void * AllocPointer
Definition: aset.c:112
#define MEMCONTEXT_COPY_NAME
Definition: memutils.h:188
#define AllocSetIsValid(set)
Definition: aset.c:217
#define VALGRIND_MAKE_MEM_UNDEFINED(addr, size)
Definition: memdebug.h:28
struct AllocSetFreeList AllocSetFreeList
#define VALGRIND_MAKE_MEM_NOACCESS(addr, size)
Definition: memdebug.h:27
static int AllocSetFreeIndex(Size size)
Definition: aset.c:340
static void AllocSetReset(MemoryContext context)
Definition: aset.c:585
int num_free
Definition: aset.c:254
#define AllocChunkGetPointer(chk)
Definition: aset.c:221
#define ALLOCCHUNK_PRIVATE_LEN
Definition: aset.c:205
#define LT16(n)
Definition: aset.c:306
int errcode(int sqlerrcode)
Definition: elog.c:575
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:264
#define ALLOC_BLOCKHDRSZ
Definition: aset.c:102
AllocBlock keeper
Definition: aset.c:135
AllocSet aset
Definition: aset.c:156
void MemoryContextResetOnly(MemoryContext context)
Definition: mcxt.c:153
char * freeptr
Definition: aset.c:159
#define ALLOCSET_DEFAULT_MINSIZE
Definition: memutils.h:194
#define ALLOCSET_SMALL_MINSIZE
Definition: memutils.h:204
AllocSetContext * first_free
Definition: aset.c:255
#define malloc(a)
Definition: header.h:50
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:764
static AllocSetFreeList context_freelists[2]
Definition: aset.c:259
#define ERROR
Definition: elog.h:43
#define ALLOC_CHUNK_LIMIT
Definition: aset.c:83
MemoryContext AllocSetContextCreateExtended(MemoryContext parent, const char *name, int flags, Size minContextSize, Size initBlockSize, Size maxBlockSize)
Definition: aset.c:394
void MemoryContextStats(MemoryContext context)
Definition: mcxt.c:437
void MemoryContextCreate(MemoryContext node, NodeTag tag, Size size, Size nameoffset, const MemoryContextMethods *methods, MemoryContext parent, const char *name, int flags)
Definition: mcxt.c:627
char * endptr
Definition: aset.c:160
#define ALLOCSET_NUM_FREELISTS
Definition: aset.c:82
static void * AllocSetRealloc(MemoryContext context, void *pointer, Size size)
Definition: aset.c:1088
#define ALLOCSET_SEPARATE_THRESHOLD
Definition: memutils.h:224
int errdetail(const char *fmt,...)
Definition: elog.c:873
#define ALLOC_CHUNKHDRSZ
Definition: aset.c:103
AllocBlock next
Definition: aset.c:158
#define ereport(elevel, rest)
Definition: elog.h:122
#define AssertArg(condition)
Definition: c.h:682
MemoryContext TopMemoryContext
Definition: mcxt.c:43
struct MemoryContextData * MemoryContext
Definition: palloc.h:36
static bool AllocSetIsEmpty(MemoryContext context)
Definition: aset.c:1313
#define WARNING
Definition: elog.h:40
#define ALLOCSET_SMALL_INITSIZE
Definition: memutils.h:205
int freeListIndex
Definition: aset.c:137
static const unsigned char LogTable256[256]
Definition: aset.c:308
Size headerSize
Definition: aset.c:133
#define ALLOCCHUNK_RAWSIZE
Definition: aset.c:186
#define ALLOC_CHUNK_FRACTION
Definition: aset.c:85
struct AllocBlockData * AllocBlock
Definition: aset.c:105
#define MAX_FREE_CONTEXTS
Definition: aset.c:250
AllocChunk freelist[ALLOCSET_NUM_FREELISTS]
Definition: aset.c:128
Size nextBlockSize
Definition: aset.c:132
AllocBlock prev
Definition: aset.c:157
#define free(a)
Definition: header.h:65
Size allocChunkLimit
Definition: aset.c:134
#define Max(x, y)
Definition: c.h:820
struct AllocChunkData * AllocChunk
Definition: aset.c:106
#define Assert(condition)
Definition: c.h:680
static void AllocSetFree(MemoryContext context, void *pointer)
Definition: aset.c:1007
size_t Size
Definition: c.h:414
#define MAXALIGN(LEN)
Definition: c.h:633
static void AllocSetStats(MemoryContext context, int level, bool print, MemoryContextCounters *totals)
Definition: aset.c:1335
struct AllocChunkData AllocChunkData
static void AllocSetDelete(MemoryContext context)
Definition: aset.c:647
#define realloc(a, b)
Definition: header.h:60
const char * name
Definition: encode.c:521
#define AllocHugeSizeIsValid(size)
Definition: memutils.h:46
void * aset
Definition: aset.c:195
int errmsg(const char *fmt,...)
Definition: elog.c:797
#define ALLOCSET_DEFAULT_INITSIZE
Definition: memutils.h:195
int i
static void * AllocSetAlloc(MemoryContext context, Size size)
Definition: aset.c:734
static const MemoryContextMethods AllocSetMethods
Definition: aset.c:289
AllocSetContext * AllocSet
Definition: aset.c:140
struct AllocSetContext AllocSetContext
#define ALLOC_MINBITS
Definition: aset.c:81
#define elog
Definition: elog.h:219
Size maxBlockSize
Definition: aset.c:131
#define AllocAllocInfo(_cxt, _chunk)
Definition: aset.c:328
#define offsetof(type, field)
Definition: c.h:603
MemoryContext nextchild
Definition: memnodes.h:82
Size size
Definition: aset.c:178
#define AllocPointerGetChunk(ptr)
Definition: aset.c:219