PostgreSQL Source Code  git master
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros
aset.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * aset.c
4  * Allocation set definitions.
5  *
6  * AllocSet is our standard implementation of the abstract MemoryContext
7  * type.
8  *
9  *
10  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
11  * Portions Copyright (c) 1994, Regents of the University of California
12  *
13  * IDENTIFICATION
14  * src/backend/utils/mmgr/aset.c
15  *
16  * NOTE:
17  * This is a new (Feb. 05, 1999) implementation of the allocation set
18  * routines. AllocSet...() does not use OrderedSet...() any more.
19  * Instead it manages allocations in a block pool by itself, combining
20  * many small allocations in a few bigger blocks. AllocSetFree() normally
21  * doesn't free() memory really. It just add's the free'd area to some
22  * list for later reuse by AllocSetAlloc(). All memory blocks are free()'d
23  * at once on AllocSetReset(), which happens when the memory context gets
24  * destroyed.
25  * Jan Wieck
26  *
27  * Performance improvement from Tom Lane, 8/99: for extremely large request
28  * sizes, we do want to be able to give the memory back to free() as soon
29  * as it is pfree()'d. Otherwise we risk tying up a lot of memory in
30  * freelist entries that might never be usable. This is specially needed
31  * when the caller is repeatedly repalloc()'ing a block bigger and bigger;
32  * the previous instances of the block were guaranteed to be wasted until
33  * AllocSetReset() under the old way.
34  *
35  * Further improvement 12/00: as the code stood, request sizes in the
36  * midrange between "small" and "large" were handled very inefficiently,
37  * because any sufficiently large free chunk would be used to satisfy a
38  * request, even if it was much larger than necessary. This led to more
39  * and more wasted space in allocated chunks over time. To fix, get rid
40  * of the midrange behavior: we now handle only "small" power-of-2-size
41  * chunks as chunks. Anything "large" is passed off to malloc(). Change
42  * the number of freelists to change the small/large boundary.
43  *
44  *-------------------------------------------------------------------------
45  */
46 
47 #include "postgres.h"
48 
49 #include "utils/memdebug.h"
50 #include "utils/memutils.h"
51 
52 /* Define this to detail debug alloc information */
53 /* #define HAVE_ALLOCINFO */
54 
55 /*--------------------
56  * Chunk freelist k holds chunks of size 1 << (k + ALLOC_MINBITS),
57  * for k = 0 .. ALLOCSET_NUM_FREELISTS-1.
58  *
59  * Note that all chunks in the freelists have power-of-2 sizes. This
60  * improves recyclability: we may waste some space, but the wasted space
61  * should stay pretty constant as requests are made and released.
62  *
63  * A request too large for the last freelist is handled by allocating a
64  * dedicated block from malloc(). The block still has a block header and
65  * chunk header, but when the chunk is freed we'll return the whole block
66  * to malloc(), not put it on our freelists.
67  *
68  * CAUTION: ALLOC_MINBITS must be large enough so that
69  * 1<<ALLOC_MINBITS is at least MAXALIGN,
70  * or we may fail to align the smallest chunks adequately.
71  * 8-byte alignment is enough on all currently known machines.
72  *
73  * With the current parameters, request sizes up to 8K are treated as chunks,
74  * larger requests go into dedicated blocks. Change ALLOCSET_NUM_FREELISTS
75  * to adjust the boundary point; and adjust ALLOCSET_SEPARATE_THRESHOLD in
76  * memutils.h to agree. (Note: in contexts with small maxBlockSize, we may
77  * set the allocChunkLimit to less than 8K, so as to avoid space wastage.)
78  *--------------------
79  */
80 
#define ALLOC_MINBITS		3	/* smallest chunk size is 8 bytes */
#define ALLOCSET_NUM_FREELISTS	11
#define ALLOC_CHUNK_LIMIT	(1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
/* Size of largest chunk that we use a fixed size for */
#define ALLOC_CHUNK_FRACTION	4
/* We allow chunks to be at most 1/4 of maxBlockSize (less overhead) */
87 
88 /*--------------------
89  * The first block allocated for an allocset has size initBlockSize.
90  * Each time we have to allocate another block, we double the block size
91  * (if possible, and without exceeding maxBlockSize), so as to reduce
92  * the bookkeeping load on malloc().
93  *
94  * Blocks allocated to hold oversize chunks do not follow this rule, however;
95  * they are just however big they need to be to hold that single chunk.
96  *--------------------
97  */
98 
#define ALLOC_BLOCKHDRSZ	MAXALIGN(sizeof(AllocBlockData))
#define ALLOC_CHUNKHDRSZ	sizeof(struct AllocChunkData)

typedef struct AllocBlockData *AllocBlock;	/* forward reference */
typedef struct AllocChunkData *AllocChunk;
104 
/*
 * AllocPointer
 *		Aligned pointer which may be a member of an allocation set.
 */
typedef void *AllocPointer;
110 
111 /*
112  * AllocSetContext is our standard implementation of MemoryContext.
113  *
114  * Note: header.isReset means there is nothing for AllocSetReset to do.
115  * This is different from the aset being physically empty (empty blocks list)
116  * because we may still have a keeper block. It's also different from the set
117  * being logically empty, because we don't attempt to detect pfree'ing the
118  * last active chunk.
119  */
120 typedef struct AllocSetContext
121 {
122  MemoryContextData header; /* Standard memory-context fields */
123  /* Info about storage allocated in this context: */
124  AllocBlock blocks; /* head of list of blocks in this set */
125  AllocChunk freelist[ALLOCSET_NUM_FREELISTS]; /* free chunk lists */
126  /* Allocation parameters for this context: */
127  Size initBlockSize; /* initial block size */
128  Size maxBlockSize; /* maximum block size */
129  Size nextBlockSize; /* next block size to allocate */
130  Size allocChunkLimit; /* effective chunk size limit */
131  AllocBlock keeper; /* if not NULL, keep this block over resets */
133 
135 
136 /*
137  * AllocBlock
138  * An AllocBlock is the unit of memory that is obtained by aset.c
139  * from malloc(). It contains one or more AllocChunks, which are
140  * the units requested by palloc() and freed by pfree(). AllocChunks
141  * cannot be returned to malloc() individually, instead they are put
142  * on freelists by pfree() and re-used by the next palloc() that has
143  * a matching request size.
144  *
145  * AllocBlockData is the header data for a block --- the usable space
146  * within the block begins at the next alignment boundary.
147  */
148 typedef struct AllocBlockData
149 {
150  AllocSet aset; /* aset that owns this block */
151  AllocBlock prev; /* prev block in aset's blocks list, if any */
152  AllocBlock next; /* next block in aset's blocks list, if any */
153  char *freeptr; /* start of free space in this block */
154  char *endptr; /* end of space in this block */
156 
157 /*
158  * AllocChunk
159  * The prefix of each piece of memory in an AllocBlock
160  */
161 typedef struct AllocChunkData
162 {
163  /* size is always the size of the usable space in the chunk */
165 #ifdef MEMORY_CONTEXT_CHECKING
166  /* when debugging memory usage, also store actual requested size */
167  /* this is zero in a free chunk */
168  Size requested_size;
169 #if MAXIMUM_ALIGNOF > 4 && SIZEOF_VOID_P == 4
170  Size padding;
171 #endif
172 
173 #endif /* MEMORY_CONTEXT_CHECKING */
174 
175  /* aset is the owning aset if allocated, or the freelist link if free */
176  void *aset;
177 
178  /* there must not be any padding to reach a MAXALIGN boundary here! */
180 
/*
 * AllocPointerIsValid
 *		True iff pointer is valid allocation pointer.
 */
#define AllocPointerIsValid(pointer) PointerIsValid(pointer)

/*
 * AllocSetIsValid
 *		True iff set is valid allocation set.
 */
#define AllocSetIsValid(set) PointerIsValid(set)

/* Map between a chunk's payload address and its chunk header, and back. */
#define AllocPointerGetChunk(ptr)	\
					((AllocChunk)(((char *)(ptr)) - ALLOC_CHUNKHDRSZ))
#define AllocChunkGetPointer(chk)	\
					((AllocPointer)(((char *)(chk)) + ALLOC_CHUNKHDRSZ))
197 
198 /*
199  * These functions implement the MemoryContext API for AllocSet contexts.
200  */
201 static void *AllocSetAlloc(MemoryContext context, Size size);
202 static void AllocSetFree(MemoryContext context, void *pointer);
203 static void *AllocSetRealloc(MemoryContext context, void *pointer, Size size);
204 static void AllocSetInit(MemoryContext context);
205 static void AllocSetReset(MemoryContext context);
206 static void AllocSetDelete(MemoryContext context);
207 static Size AllocSetGetChunkSpace(MemoryContext context, void *pointer);
208 static bool AllocSetIsEmpty(MemoryContext context);
209 static void AllocSetStats(MemoryContext context, int level, bool print,
210  MemoryContextCounters *totals);
211 
212 #ifdef MEMORY_CONTEXT_CHECKING
213 static void AllocSetCheck(MemoryContext context);
214 #endif
215 
216 /*
217  * This is the virtual function table for AllocSet contexts.
218  */
221  AllocSetFree,
223  AllocSetInit,
229 #ifdef MEMORY_CONTEXT_CHECKING
230  ,AllocSetCheck
231 #endif
232 };
233 
/*
 * Table for AllocSetFreeIndex: LogTable256[i] = floor(log2(i)) + 1 for
 * i in 1..255, and 0 for i == 0.  LT16 expands one value 16 times to keep
 * the initializer compact.
 */
#define LT16(n) n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n

static const unsigned char LogTable256[256] =
{
	0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
	LT16(5), LT16(6), LT16(6), LT16(7), LT16(7), LT16(7), LT16(7),
	LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8)
};
245 
/* ----------
 * Debug macros
 * ----------
 */
#ifdef HAVE_ALLOCINFO
#define AllocFreeInfo(_cxt, _chunk) \
			fprintf(stderr, "AllocFree: %s: %p, %zu\n", \
				(_cxt)->header.name, (_chunk), (_chunk)->size)
#define AllocAllocInfo(_cxt, _chunk) \
			fprintf(stderr, "AllocAlloc: %s: %p, %zu\n", \
				(_cxt)->header.name, (_chunk), (_chunk)->size)
#else
#define AllocFreeInfo(_cxt, _chunk)
#define AllocAllocInfo(_cxt, _chunk)
#endif
261 
262 /* ----------
263  * AllocSetFreeIndex -
264  *
265  * Depending on the size of an allocation compute which freechunk
266  * list of the alloc set it belongs to. Caller must have verified
267  * that size <= ALLOC_CHUNK_LIMIT.
268  * ----------
269  */
270 static inline int
272 {
273  int idx;
274  unsigned int t,
275  tsize;
276 
277  if (size > (1 << ALLOC_MINBITS))
278  {
279  tsize = (size - 1) >> ALLOC_MINBITS;
280 
281  /*
282  * At this point we need to obtain log2(tsize)+1, ie, the number of
283  * not-all-zero bits at the right. We used to do this with a
284  * shift-and-count loop, but this function is enough of a hotspot to
285  * justify micro-optimization effort. The best approach seems to be
286  * to use a lookup table. Note that this code assumes that
287  * ALLOCSET_NUM_FREELISTS <= 17, since we only cope with two bytes of
288  * the tsize value.
289  */
290  t = tsize >> 8;
291  idx = t ? LogTable256[t] + 8 : LogTable256[tsize];
292 
294  }
295  else
296  idx = 0;
297 
298  return idx;
299 }
300 
301 
302 /*
303  * Public routines
304  */
305 
306 
307 /*
308  * AllocSetContextCreate
309  * Create a new AllocSet context.
310  *
311  * parent: parent context, or NULL if top-level context
312  * name: name of context (for debugging only, need not be unique)
313  * minContextSize: minimum context size
314  * initBlockSize: initial allocation block size
315  * maxBlockSize: maximum allocation block size
316  *
317  * Notes: the name string will be copied into context-lifespan storage.
318  * Most callers should abstract the context size parameters using a macro
319  * such as ALLOCSET_DEFAULT_SIZES.
320  */
323  const char *name,
324  Size minContextSize,
325  Size initBlockSize,
326  Size maxBlockSize)
327 {
328  AllocSet set;
329 
331  MAXALIGN(sizeof(AllocChunkData)),
332  "padding calculation in AllocChunkData is wrong");
333 
334  /*
335  * First, validate allocation parameters. (If we're going to throw an
336  * error, we should do so before the context is created, not after.) We
337  * somewhat arbitrarily enforce a minimum 1K block size.
338  */
339  if (initBlockSize != MAXALIGN(initBlockSize) ||
340  initBlockSize < 1024)
341  elog(ERROR, "invalid initBlockSize for memory context: %zu",
342  initBlockSize);
343  if (maxBlockSize != MAXALIGN(maxBlockSize) ||
344  maxBlockSize < initBlockSize ||
345  !AllocHugeSizeIsValid(maxBlockSize)) /* must be safe to double */
346  elog(ERROR, "invalid maxBlockSize for memory context: %zu",
347  maxBlockSize);
348  if (minContextSize != 0 &&
349  (minContextSize != MAXALIGN(minContextSize) ||
350  minContextSize <= ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
351  elog(ERROR, "invalid minContextSize for memory context: %zu",
352  minContextSize);
353 
354  /* Do the type-independent part of context creation */
356  sizeof(AllocSetContext),
358  parent,
359  name);
360 
361  /* Save allocation parameters */
362  set->initBlockSize = initBlockSize;
363  set->maxBlockSize = maxBlockSize;
364  set->nextBlockSize = initBlockSize;
365 
366  /*
367  * Compute the allocation chunk size limit for this context. It can't be
368  * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
369  * If maxBlockSize is small then requests exceeding the maxBlockSize, or
370  * even a significant fraction of it, should be treated as large chunks
371  * too. For the typical case of maxBlockSize a power of 2, the chunk size
372  * limit will be at most 1/8th maxBlockSize, so that given a stream of
373  * requests that are all the maximum chunk size we will waste at most
374  * 1/8th of the allocated space.
375  *
376  * We have to have allocChunkLimit a power of two, because the requested
377  * and actually-allocated sizes of any chunk must be on the same side of
378  * the limit, else we get confused about whether the chunk is "big".
379  *
380  * Also, allocChunkLimit must not exceed ALLOCSET_SEPARATE_THRESHOLD.
381  */
383  "ALLOC_CHUNK_LIMIT != ALLOCSET_SEPARATE_THRESHOLD");
384 
386  while ((Size) (set->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
387  (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
388  set->allocChunkLimit >>= 1;
389 
390  /*
391  * Grab always-allocated space, if requested
392  */
393  if (minContextSize > 0)
394  {
395  Size blksize = minContextSize;
396  AllocBlock block;
397 
398  block = (AllocBlock) malloc(blksize);
399  if (block == NULL)
400  {
402  ereport(ERROR,
403  (errcode(ERRCODE_OUT_OF_MEMORY),
404  errmsg("out of memory"),
405  errdetail("Failed while creating memory context \"%s\".",
406  name)));
407  }
408  block->aset = set;
409  block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
410  block->endptr = ((char *) block) + blksize;
411  block->prev = NULL;
412  block->next = set->blocks;
413  if (block->next)
414  block->next->prev = block;
415  set->blocks = block;
416  /* Mark block as not to be released at reset time */
417  set->keeper = block;
418 
419  /* Mark unallocated space NOACCESS; leave the block header alone. */
421  blksize - ALLOC_BLOCKHDRSZ);
422  }
423 
424  return (MemoryContext) set;
425 }
426 
427 /*
428  * AllocSetInit
429  * Context-type-specific initialization routine.
430  *
431  * This is called by MemoryContextCreate() after setting up the
432  * generic MemoryContext fields and before linking the new context
433  * into the context tree. We must do whatever is needed to make the
434  * new context minimally valid for deletion. We must *not* risk
435  * failure --- thus, for example, allocating more memory is not cool.
436  * (AllocSetContextCreate can allocate memory when it gets control
437  * back, however.)
438  */
439 static void
441 {
442  /*
443  * Since MemoryContextCreate already zeroed the context node, we don't
444  * have to do anything here: it's already OK.
445  */
446 }
447 
448 /*
449  * AllocSetReset
450  * Frees all memory which is allocated in the given set.
451  *
452  * Actually, this routine has some discretion about what to do.
453  * It should mark all allocated chunks freed, but it need not necessarily
454  * give back all the resources the set owns. Our actual implementation is
455  * that we hang onto any "keeper" block specified for the set. In this way,
456  * we don't thrash malloc() when a context is repeatedly reset after small
457  * allocations, which is typical behavior for per-tuple contexts.
458  */
459 static void
461 {
462  AllocSet set = (AllocSet) context;
463  AllocBlock block;
464 
466 
467 #ifdef MEMORY_CONTEXT_CHECKING
468  /* Check for corruption and leaks before freeing */
469  AllocSetCheck(context);
470 #endif
471 
472  /* Clear chunk freelists */
473  MemSetAligned(set->freelist, 0, sizeof(set->freelist));
474 
475  block = set->blocks;
476 
477  /* New blocks list is either empty or just the keeper block */
478  set->blocks = set->keeper;
479 
480  while (block != NULL)
481  {
482  AllocBlock next = block->next;
483 
484  if (block == set->keeper)
485  {
486  /* Reset the block, but don't return it to malloc */
487  char *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;
488 
489 #ifdef CLOBBER_FREED_MEMORY
490  wipe_mem(datastart, block->freeptr - datastart);
491 #else
492  /* wipe_mem() would have done this */
493  VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
494 #endif
495  block->freeptr = datastart;
496  block->prev = NULL;
497  block->next = NULL;
498  }
499  else
500  {
501  /* Normal case, release the block */
502 #ifdef CLOBBER_FREED_MEMORY
503  wipe_mem(block, block->freeptr - ((char *) block));
504 #endif
505  free(block);
506  }
507  block = next;
508  }
509 
510  /* Reset block size allocation sequence, too */
511  set->nextBlockSize = set->initBlockSize;
512 }
513 
514 /*
515  * AllocSetDelete
516  * Frees all memory which is allocated in the given set,
517  * in preparation for deletion of the set.
518  *
519  * Unlike AllocSetReset, this *must* free all resources of the set.
520  * But note we are not responsible for deleting the context node itself.
521  */
522 static void
524 {
525  AllocSet set = (AllocSet) context;
526  AllocBlock block = set->blocks;
527 
529 
530 #ifdef MEMORY_CONTEXT_CHECKING
531  /* Check for corruption and leaks before freeing */
532  AllocSetCheck(context);
533 #endif
534 
535  /* Make it look empty, just in case... */
536  MemSetAligned(set->freelist, 0, sizeof(set->freelist));
537  set->blocks = NULL;
538  set->keeper = NULL;
539 
540  while (block != NULL)
541  {
542  AllocBlock next = block->next;
543 
544 #ifdef CLOBBER_FREED_MEMORY
545  wipe_mem(block, block->freeptr - ((char *) block));
546 #endif
547  free(block);
548  block = next;
549  }
550 }
551 
552 /*
553  * AllocSetAlloc
554  * Returns pointer to allocated memory of given size or NULL if
555  * request could not be completed; memory is added to the set.
556  *
557  * No request may exceed:
558  * MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
559  * All callers use a much-lower limit.
560  */
561 static void *
563 {
564  AllocSet set = (AllocSet) context;
565  AllocBlock block;
566  AllocChunk chunk;
567  int fidx;
568  Size chunk_size;
569  Size blksize;
570 
572 
573  /*
574  * If requested size exceeds maximum for chunks, allocate an entire block
575  * for this request.
576  */
577  if (size > set->allocChunkLimit)
578  {
579  chunk_size = MAXALIGN(size);
580  blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
581  block = (AllocBlock) malloc(blksize);
582  if (block == NULL)
583  return NULL;
584  block->aset = set;
585  block->freeptr = block->endptr = ((char *) block) + blksize;
586 
587  chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
588  chunk->aset = set;
589  chunk->size = chunk_size;
590 #ifdef MEMORY_CONTEXT_CHECKING
591  /* Valgrind: Will be made NOACCESS below. */
592  chunk->requested_size = size;
593  /* set mark to catch clobber of "unused" space */
594  if (size < chunk_size)
595  set_sentinel(AllocChunkGetPointer(chunk), size);
596 #endif
597 #ifdef RANDOMIZE_ALLOCATED_MEMORY
598  /* fill the allocated space with junk */
599  randomize_mem((char *) AllocChunkGetPointer(chunk), size);
600 #endif
601 
602  /*
603  * Stick the new block underneath the active allocation block, if any,
604  * so that we don't lose the use of the space remaining therein.
605  */
606  if (set->blocks != NULL)
607  {
608  block->prev = set->blocks;
609  block->next = set->blocks->next;
610  if (block->next)
611  block->next->prev = block;
612  set->blocks->next = block;
613  }
614  else
615  {
616  block->prev = NULL;
617  block->next = NULL;
618  set->blocks = block;
619  }
620 
621  AllocAllocInfo(set, chunk);
622 
623  /*
624  * Chunk's metadata fields remain DEFINED. The requested allocation
625  * itself can be NOACCESS or UNDEFINED; our caller will soon make it
626  * UNDEFINED. Make extra space at the end of the chunk, if any,
627  * NOACCESS.
628  */
629  VALGRIND_MAKE_MEM_NOACCESS((char *) chunk + ALLOC_CHUNKHDRSZ,
630  chunk_size - ALLOC_CHUNKHDRSZ);
631 
632  return AllocChunkGetPointer(chunk);
633  }
634 
635  /*
636  * Request is small enough to be treated as a chunk. Look in the
637  * corresponding free list to see if there is a free chunk we could reuse.
638  * If one is found, remove it from the free list, make it again a member
639  * of the alloc set and return its data address.
640  */
641  fidx = AllocSetFreeIndex(size);
642  chunk = set->freelist[fidx];
643  if (chunk != NULL)
644  {
645  Assert(chunk->size >= size);
646 
647  set->freelist[fidx] = (AllocChunk) chunk->aset;
648 
649  chunk->aset = (void *) set;
650 
651 #ifdef MEMORY_CONTEXT_CHECKING
652  /* Valgrind: Free list requested_size should be DEFINED. */
653  chunk->requested_size = size;
654  VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
655  sizeof(chunk->requested_size));
656  /* set mark to catch clobber of "unused" space */
657  if (size < chunk->size)
658  set_sentinel(AllocChunkGetPointer(chunk), size);
659 #endif
660 #ifdef RANDOMIZE_ALLOCATED_MEMORY
661  /* fill the allocated space with junk */
662  randomize_mem((char *) AllocChunkGetPointer(chunk), size);
663 #endif
664 
665  AllocAllocInfo(set, chunk);
666  return AllocChunkGetPointer(chunk);
667  }
668 
669  /*
670  * Choose the actual chunk size to allocate.
671  */
672  chunk_size = (1 << ALLOC_MINBITS) << fidx;
673  Assert(chunk_size >= size);
674 
675  /*
676  * If there is enough room in the active allocation block, we will put the
677  * chunk into that block. Else must start a new one.
678  */
679  if ((block = set->blocks) != NULL)
680  {
681  Size availspace = block->endptr - block->freeptr;
682 
683  if (availspace < (chunk_size + ALLOC_CHUNKHDRSZ))
684  {
685  /*
686  * The existing active (top) block does not have enough room for
687  * the requested allocation, but it might still have a useful
688  * amount of space in it. Once we push it down in the block list,
689  * we'll never try to allocate more space from it. So, before we
690  * do that, carve up its free space into chunks that we can put on
691  * the set's freelists.
692  *
693  * Because we can only get here when there's less than
694  * ALLOC_CHUNK_LIMIT left in the block, this loop cannot iterate
695  * more than ALLOCSET_NUM_FREELISTS-1 times.
696  */
697  while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
698  {
699  Size availchunk = availspace - ALLOC_CHUNKHDRSZ;
700  int a_fidx = AllocSetFreeIndex(availchunk);
701 
702  /*
703  * In most cases, we'll get back the index of the next larger
704  * freelist than the one we need to put this chunk on. The
705  * exception is when availchunk is exactly a power of 2.
706  */
707  if (availchunk != ((Size) 1 << (a_fidx + ALLOC_MINBITS)))
708  {
709  a_fidx--;
710  Assert(a_fidx >= 0);
711  availchunk = ((Size) 1 << (a_fidx + ALLOC_MINBITS));
712  }
713 
714  chunk = (AllocChunk) (block->freeptr);
715 
716  /* Prepare to initialize the chunk header. */
717  VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);
718 
719  block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
720  availspace -= (availchunk + ALLOC_CHUNKHDRSZ);
721 
722  chunk->size = availchunk;
723 #ifdef MEMORY_CONTEXT_CHECKING
724  chunk->requested_size = 0; /* mark it free */
725 #endif
726  chunk->aset = (void *) set->freelist[a_fidx];
727  set->freelist[a_fidx] = chunk;
728  }
729 
730  /* Mark that we need to create a new block */
731  block = NULL;
732  }
733  }
734 
735  /*
736  * Time to create a new regular (multi-chunk) block?
737  */
738  if (block == NULL)
739  {
740  Size required_size;
741 
742  /*
743  * The first such block has size initBlockSize, and we double the
744  * space in each succeeding block, but not more than maxBlockSize.
745  */
746  blksize = set->nextBlockSize;
747  set->nextBlockSize <<= 1;
748  if (set->nextBlockSize > set->maxBlockSize)
749  set->nextBlockSize = set->maxBlockSize;
750 
751  /*
752  * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
753  * space... but try to keep it a power of 2.
754  */
755  required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
756  while (blksize < required_size)
757  blksize <<= 1;
758 
759  /* Try to allocate it */
760  block = (AllocBlock) malloc(blksize);
761 
762  /*
763  * We could be asking for pretty big blocks here, so cope if malloc
764  * fails. But give up if there's less than a meg or so available...
765  */
766  while (block == NULL && blksize > 1024 * 1024)
767  {
768  blksize >>= 1;
769  if (blksize < required_size)
770  break;
771  block = (AllocBlock) malloc(blksize);
772  }
773 
774  if (block == NULL)
775  return NULL;
776 
777  block->aset = set;
778  block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
779  block->endptr = ((char *) block) + blksize;
780 
781  /*
782  * If this is the first block of the set, make it the "keeper" block.
783  * Formerly, a keeper block could only be created during context
784  * creation, but allowing it to happen here lets us have fast reset
785  * cycling even for contexts created with minContextSize = 0; that way
786  * we don't have to force space to be allocated in contexts that might
787  * never need any space. Don't mark an oversize block as a keeper,
788  * however.
789  */
790  if (set->keeper == NULL && blksize == set->initBlockSize)
791  set->keeper = block;
792 
793  /* Mark unallocated space NOACCESS. */
795  blksize - ALLOC_BLOCKHDRSZ);
796 
797  block->prev = NULL;
798  block->next = set->blocks;
799  if (block->next)
800  block->next->prev = block;
801  set->blocks = block;
802  }
803 
804  /*
805  * OK, do the allocation
806  */
807  chunk = (AllocChunk) (block->freeptr);
808 
809  /* Prepare to initialize the chunk header. */
811 
812  block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
813  Assert(block->freeptr <= block->endptr);
814 
815  chunk->aset = (void *) set;
816  chunk->size = chunk_size;
817 #ifdef MEMORY_CONTEXT_CHECKING
818  chunk->requested_size = size;
819  VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
820  sizeof(chunk->requested_size));
821  /* set mark to catch clobber of "unused" space */
822  if (size < chunk->size)
823  set_sentinel(AllocChunkGetPointer(chunk), size);
824 #endif
825 #ifdef RANDOMIZE_ALLOCATED_MEMORY
826  /* fill the allocated space with junk */
827  randomize_mem((char *) AllocChunkGetPointer(chunk), size);
828 #endif
829 
830  AllocAllocInfo(set, chunk);
831  return AllocChunkGetPointer(chunk);
832 }
833 
834 /*
835  * AllocSetFree
836  * Frees allocated memory; memory is removed from the set.
837  */
838 static void
839 AllocSetFree(MemoryContext context, void *pointer)
840 {
841  AllocSet set = (AllocSet) context;
842  AllocChunk chunk = AllocPointerGetChunk(pointer);
843 
844  AllocFreeInfo(set, chunk);
845 
846 #ifdef MEMORY_CONTEXT_CHECKING
847  VALGRIND_MAKE_MEM_DEFINED(&chunk->requested_size,
848  sizeof(chunk->requested_size));
849  /* Test for someone scribbling on unused space in chunk */
850  if (chunk->requested_size < chunk->size)
851  if (!sentinel_ok(pointer, chunk->requested_size))
852  elog(WARNING, "detected write past chunk end in %s %p",
853  set->header.name, chunk);
854 #endif
855 
856  if (chunk->size > set->allocChunkLimit)
857  {
858  /*
859  * Big chunks are certain to have been allocated as single-chunk
860  * blocks. Just unlink that block and return it to malloc().
861  */
862  AllocBlock block = (AllocBlock) (((char *) chunk) - ALLOC_BLOCKHDRSZ);
863 
864  /*
865  * Try to verify that we have a sane block pointer: it should
866  * reference the correct aset, and freeptr and endptr should point
867  * just past the chunk.
868  */
869  if (block->aset != set ||
870  block->freeptr != block->endptr ||
871  block->freeptr != ((char *) block) +
873  elog(ERROR, "could not find block containing chunk %p", chunk);
874 
875  /* OK, remove block from aset's list and free it */
876  if (block->prev)
877  block->prev->next = block->next;
878  else
879  set->blocks = block->next;
880  if (block->next)
881  block->next->prev = block->prev;
882 #ifdef CLOBBER_FREED_MEMORY
883  wipe_mem(block, block->freeptr - ((char *) block));
884 #endif
885  free(block);
886  }
887  else
888  {
889  /* Normal case, put the chunk into appropriate freelist */
890  int fidx = AllocSetFreeIndex(chunk->size);
891 
892  chunk->aset = (void *) set->freelist[fidx];
893 
894 #ifdef CLOBBER_FREED_MEMORY
895  wipe_mem(pointer, chunk->size);
896 #endif
897 
898 #ifdef MEMORY_CONTEXT_CHECKING
899  /* Reset requested_size to 0 in chunks that are on freelist */
900  chunk->requested_size = 0;
901 #endif
902  set->freelist[fidx] = chunk;
903  }
904 }
905 
906 /*
907  * AllocSetRealloc
908  * Returns new pointer to allocated memory of given size or NULL if
909  * request could not be completed; this memory is added to the set.
910  * Memory associated with given pointer is copied into the new memory,
911  * and the old memory is freed.
912  *
913  * Without MEMORY_CONTEXT_CHECKING, we don't know the old request size. This
914  * makes our Valgrind client requests less-precise, hazarding false negatives.
915  * (In principle, we could use VALGRIND_GET_VBITS() to rediscover the old
916  * request size.)
917  */
static void *
AllocSetRealloc(MemoryContext context, void *pointer, Size size)
{
	AllocSet	set = (AllocSet) context;
	AllocChunk	chunk = AllocPointerGetChunk(pointer);
	Size		oldsize = chunk->size;	/* allocated (rounded-up) size, not
										 * the caller's original request */

#ifdef MEMORY_CONTEXT_CHECKING
	/* requested_size is kept NOACCESS under Valgrind; open it for reading */
	VALGRIND_MAKE_MEM_DEFINED(&chunk->requested_size,
							  sizeof(chunk->requested_size));
	/* Test for someone scribbling on unused space in chunk */
	if (chunk->requested_size < oldsize)
		if (!sentinel_ok(pointer, chunk->requested_size))
			elog(WARNING, "detected write past chunk end in %s %p",
				 set->header.name, chunk);
#endif

	/*
	 * Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
	 * allocated area already is >= the new size.  (In particular, we always
	 * fall out here if the requested size is a decrease.)
	 */
	if (oldsize >= size)
	{
#ifdef MEMORY_CONTEXT_CHECKING
		Size		oldrequest = chunk->requested_size;

#ifdef RANDOMIZE_ALLOCATED_MEMORY
		/* We can only fill the extra space if we know the prior request */
		if (size > oldrequest)
			randomize_mem((char *) pointer + oldrequest,
						  size - oldrequest);
#endif

		chunk->requested_size = size;
		/* hide requested_size from Valgrind again */
		VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
								   sizeof(chunk->requested_size));

		/*
		 * If this is an increase, mark any newly-available part UNDEFINED.
		 * Otherwise, mark the obsolete part NOACCESS.
		 */
		if (size > oldrequest)
			VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
										size - oldrequest);
		else
			VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
									   oldsize - size);

		/* set mark to catch clobber of "unused" space */
		if (size < oldsize)
			set_sentinel(pointer, size);
#else							/* !MEMORY_CONTEXT_CHECKING */

		/*
		 * We don't have the information to determine whether we're growing
		 * the old request or shrinking it, so we conservatively mark the
		 * entire new allocation DEFINED.
		 */
		VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
		VALGRIND_MAKE_MEM_DEFINED(pointer, size);
#endif

		return pointer;
	}

	if (oldsize > set->allocChunkLimit)
	{
		/*
		 * The chunk must have been allocated as a single-chunk block.  Use
		 * realloc() to make the containing block bigger with minimum space
		 * wastage.
		 */
		AllocBlock	block = (AllocBlock) (((char *) chunk) - ALLOC_BLOCKHDRSZ);
		Size		chksize;
		Size		blksize;

		/*
		 * Try to verify that we have a sane block pointer: it should
		 * reference the correct aset, and freeptr and endptr should point
		 * just past the chunk.
		 */
		if (block->aset != set ||
			block->freeptr != block->endptr ||
			block->freeptr != ((char *) block) +
			(chunk->size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
			elog(ERROR, "could not find block containing chunk %p", chunk);

		/* Do the realloc */
		chksize = MAXALIGN(size);
		blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
		block = (AllocBlock) realloc(block, blksize);
		if (block == NULL)
			return NULL;		/* old block remains valid per realloc() */
		block->freeptr = block->endptr = ((char *) block) + blksize;

		/* Update pointers since block has likely been moved */
		chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
		pointer = AllocChunkGetPointer(chunk);
		if (block->prev)
			block->prev->next = block;
		else
			set->blocks = block;	/* it was the list head */
		if (block->next)
			block->next->prev = block;
		chunk->size = chksize;

#ifdef MEMORY_CONTEXT_CHECKING
#ifdef RANDOMIZE_ALLOCATED_MEMORY
		/* We can only fill the extra space if we know the prior request */
		randomize_mem((char *) pointer + chunk->requested_size,
					  size - chunk->requested_size);
#endif

		/*
		 * realloc() (or randomize_mem()) will have left the newly-allocated
		 * part UNDEFINED, but we may need to adjust trailing bytes from the
		 * old allocation.
		 */
		VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
									oldsize - chunk->requested_size);

		chunk->requested_size = size;
		/* hide requested_size from Valgrind again */
		VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
								   sizeof(chunk->requested_size));

		/* set mark to catch clobber of "unused" space */
		if (size < chunk->size)
			set_sentinel(pointer, size);
#else							/* !MEMORY_CONTEXT_CHECKING */

		/*
		 * We don't know how much of the old chunk size was the actual
		 * allocation; it could have been as small as one byte.  We have to
		 * be conservative and just mark the entire old portion DEFINED.
		 */
		VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
#endif

		/* Make any trailing alignment padding NOACCESS. */
		VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);

		return pointer;
	}
	else
	{
		/*
		 * Small-chunk case.  We just do this by brute force, ie, allocate a
		 * new chunk and copy the data.  Since we know the existing data
		 * isn't huge, this won't involve any great memcpy expense, so it's
		 * not worth being smarter.  (At one time we tried to avoid memcpy
		 * when it was possible to enlarge the chunk in-place, but that
		 * turns out to misbehave unpleasantly for repeated cycles of
		 * palloc/repalloc/pfree: the eventually freed chunks go into the
		 * wrong freelist for the next initial palloc request, and so we
		 * leak memory indefinitely.  See pgsql-hackers archives for
		 * 2007-08-11.)
		 */
		AllocPointer newPointer;

		/* allocate new chunk */
		newPointer = AllocSetAlloc((MemoryContext) set, size);

		/* leave immediately if request was not completed */
		if (newPointer == NULL)
			return NULL;

		/*
		 * AllocSetAlloc() just made the region NOACCESS.  Change it to
		 * UNDEFINED for the moment; memcpy() will then transfer definedness
		 * from the old allocation to the new.  If we know the old
		 * allocation, copy just that much.  Otherwise, make the entire old
		 * chunk defined to avoid errors as we copy the currently-NOACCESS
		 * trailing bytes.
		 */
		VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
#ifdef MEMORY_CONTEXT_CHECKING
		oldsize = chunk->requested_size;
#else
		VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
#endif

		/* transfer existing data (certain to fit) */
		memcpy(newPointer, pointer, oldsize);

		/* free old chunk */
		AllocSetFree((MemoryContext) set, pointer);

		return newPointer;
	}
}
1107 
1108 /*
1109  * AllocSetGetChunkSpace
1110  * Given a currently-allocated chunk, determine the total space
1111  * it occupies (including all memory-allocation overhead).
1112  */
1113 static Size
1114 AllocSetGetChunkSpace(MemoryContext context, void *pointer)
1115 {
1116  AllocChunk chunk = AllocPointerGetChunk(pointer);
1117 
1118  return chunk->size + ALLOC_CHUNKHDRSZ;
1119 }
1120 
1121 /*
1122  * AllocSetIsEmpty
1123  * Is an allocset empty of any allocated space?
1124  */
1125 static bool
1127 {
1128  /*
1129  * For now, we say "empty" only if the context is new or just reset. We
1130  * could examine the freelists to determine if all space has been freed,
1131  * but it's not really worth the trouble for present uses of this
1132  * functionality.
1133  */
1134  if (context->isReset)
1135  return true;
1136  return false;
1137 }
1138 
1139 /*
1140  * AllocSetStats
1141  * Compute stats about memory consumption of an allocset.
1142  *
1143  * level: recursion level (0 at top level); used for print indentation.
1144  * print: true to print stats to stderr.
1145  * totals: if not NULL, add stats about this allocset into *totals.
1146  */
1147 static void
1148 AllocSetStats(MemoryContext context, int level, bool print,
1149  MemoryContextCounters *totals)
1150 {
1151  AllocSet set = (AllocSet) context;
1152  Size nblocks = 0;
1153  Size freechunks = 0;
1154  Size totalspace = 0;
1155  Size freespace = 0;
1156  AllocBlock block;
1157  int fidx;
1158 
1159  for (block = set->blocks; block != NULL; block = block->next)
1160  {
1161  nblocks++;
1162  totalspace += block->endptr - ((char *) block);
1163  freespace += block->endptr - block->freeptr;
1164  }
1165  for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
1166  {
1167  AllocChunk chunk;
1168 
1169  for (chunk = set->freelist[fidx]; chunk != NULL;
1170  chunk = (AllocChunk) chunk->aset)
1171  {
1172  freechunks++;
1173  freespace += chunk->size + ALLOC_CHUNKHDRSZ;
1174  }
1175  }
1176 
1177  if (print)
1178  {
1179  int i;
1180 
1181  for (i = 0; i < level; i++)
1182  fprintf(stderr, " ");
1183  fprintf(stderr,
1184  "%s: %zu total in %zd blocks; %zu free (%zd chunks); %zu used\n",
1185  set->header.name, totalspace, nblocks, freespace, freechunks,
1186  totalspace - freespace);
1187  }
1188 
1189  if (totals)
1190  {
1191  totals->nblocks += nblocks;
1192  totals->freechunks += freechunks;
1193  totals->totalspace += totalspace;
1194  totals->freespace += freespace;
1195  }
1196 }
1197 
1198 
#ifdef MEMORY_CONTEXT_CHECKING

/*
 * AllocSetCheck
 *		Walk through chunks and check consistency of memory.
 *
 * NOTE: report errors as WARNING, *not* ERROR or FATAL.  Otherwise you'll
 * find yourself in an infinite loop when trouble occurs, because this
 * routine will be entered again when elog cleanup tries to release memory!
 */
static void
AllocSetCheck(MemoryContext context)
{
	AllocSet	set = (AllocSet) context;
	char	   *name = set->header.name;
	AllocBlock	prevblock;
	AllocBlock	block;

	for (prevblock = NULL, block = set->blocks;
		 block != NULL;
		 prevblock = block, block = block->next)
	{
		char	   *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
		long		blk_used = block->freeptr - bpoz;
		long		blk_data = 0;	/* running sum of chunk payload sizes */
		long		nchunks = 0;	/* chunks seen in this block */

		/*
		 * Empty block - empty can be keeper-block only
		 */
		if (!blk_used)
		{
			if (set->keeper != block)
				elog(WARNING, "problem in alloc set %s: empty block %p",
					 name, block);
		}

		/*
		 * Check block header fields: aset back-link, prev-link against the
		 * block we just came from, and freeptr within [data start, endptr].
		 */
		if (block->aset != set ||
			block->prev != prevblock ||
			block->freeptr < bpoz ||
			block->freeptr > block->endptr)
			elog(WARNING, "problem in alloc set %s: corrupt header in block %p",
				 name, block);

		/*
		 * Chunk walker: step through each chunk header in the block's used
		 * region, advancing by header size plus the chunk's aligned size.
		 */
		while (bpoz < block->freeptr)
		{
			AllocChunk	chunk = (AllocChunk) bpoz;
			Size		chsize,
						dsize;

			chsize = chunk->size;	/* aligned chunk size */
			/* requested_size is NOACCESS under Valgrind; open it to read */
			VALGRIND_MAKE_MEM_DEFINED(&chunk->requested_size,
									  sizeof(chunk->requested_size));
			dsize = chunk->requested_size;	/* real data */
			if (dsize > 0)		/* not on a free list */
				VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
										   sizeof(chunk->requested_size));

			/*
			 * Check chunk size
			 */
			if (dsize > chsize)
				elog(WARNING, "problem in alloc set %s: req size > alloc size for chunk %p in block %p",
					 name, chunk, block);
			if (chsize < (1 << ALLOC_MINBITS))
				elog(WARNING, "problem in alloc set %s: bad size %zu for chunk %p in block %p",
					 name, chsize, chunk, block);

			/* single-chunk block?  then it must be the block's only chunk */
			if (chsize > set->allocChunkLimit &&
				chsize + ALLOC_CHUNKHDRSZ != blk_used)
				elog(WARNING, "problem in alloc set %s: bad single-chunk %p in block %p",
					 name, chunk, block);

			/*
			 * If chunk is allocated, check for correct aset pointer. (If
			 * it's free, the aset is the freelist pointer, which we can't
			 * check as easily...)
			 */
			if (dsize > 0 && chunk->aset != (void *) set)
				elog(WARNING, "problem in alloc set %s: bogus aset link in block %p, chunk %p",
					 name, block, chunk);

			/*
			 * Check for overwrite of "unallocated" space in chunk
			 */
			if (dsize > 0 && dsize < chsize &&
				!sentinel_ok(chunk, ALLOC_CHUNKHDRSZ + dsize))
				elog(WARNING, "problem in alloc set %s: detected write past chunk end in block %p, chunk %p",
					 name, block, chunk);

			blk_data += chsize;
			nchunks++;

			bpoz += ALLOC_CHUNKHDRSZ + chsize;
		}

		/* chunks plus their headers must exactly account for used space */
		if ((blk_data + (nchunks * ALLOC_CHUNKHDRSZ)) != blk_used)
			elog(WARNING, "problem in alloc set %s: found inconsistent memory block %p",
				 name, block);
	}
}

#endif							/* MEMORY_CONTEXT_CHECKING */
#define MemSetAligned(start, val, len)
Definition: c.h:890
Size initBlockSize
Definition: aset.c:127
static void AllocSetInit(MemoryContext context)
Definition: aset.c:440
#define AllocFreeInfo(_cxt, _chunk)
Definition: aset.c:258
#define VALGRIND_MAKE_MEM_DEFINED(addr, size)
Definition: memdebug.h:26
AllocBlock blocks
Definition: aset.c:124
static int32 next
Definition: blutils.c:210
void print(const void *obj)
Definition: print.c:35
struct AllocBlockData AllocBlockData
static Size AllocSetGetChunkSpace(MemoryContext context, void *pointer)
Definition: aset.c:1114
MemoryContextData header
Definition: aset.c:122
void * AllocPointer
Definition: aset.c:109
#define AllocSetIsValid(set)
Definition: aset.c:191
#define VALGRIND_MAKE_MEM_UNDEFINED(addr, size)
Definition: memdebug.h:28
#define VALGRIND_MAKE_MEM_NOACCESS(addr, size)
Definition: memdebug.h:27
static int AllocSetFreeIndex(Size size)
Definition: aset.c:271
static void AllocSetReset(MemoryContext context)
Definition: aset.c:460
#define AllocChunkGetPointer(chk)
Definition: aset.c:195
#define LT16(n)
Definition: aset.c:237
int errcode(int sqlerrcode)
Definition: elog.c:575
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:264
#define ALLOC_BLOCKHDRSZ
Definition: aset.c:99
AllocBlock keeper
Definition: aset.c:131
AllocSet aset
Definition: aset.c:150
char * freeptr
Definition: aset.c:153
#define malloc(a)
Definition: header.h:50
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:757
#define ERROR
Definition: elog.h:43
#define ALLOC_CHUNK_LIMIT
Definition: aset.c:83
void MemoryContextStats(MemoryContext context)
Definition: mcxt.c:438
char * endptr
Definition: aset.c:154
#define ALLOCSET_NUM_FREELISTS
Definition: aset.c:82
static void * AllocSetRealloc(MemoryContext context, void *pointer, Size size)
Definition: aset.c:919
#define ALLOCSET_SEPARATE_THRESHOLD
Definition: memutils.h:192
int errdetail(const char *fmt,...)
Definition: elog.c:873
MemoryContext MemoryContextCreate(NodeTag tag, Size size, MemoryContextMethods *methods, MemoryContext parent, const char *name)
Definition: mcxt.c:640
#define ALLOC_CHUNKHDRSZ
Definition: aset.c:100
static MemoryContextMethods AllocSetMethods
Definition: aset.c:219
AllocBlock next
Definition: aset.c:152
#define ereport(elevel, rest)
Definition: elog.h:122
#define AssertArg(condition)
Definition: c.h:677
MemoryContext TopMemoryContext
Definition: mcxt.c:43
static bool AllocSetIsEmpty(MemoryContext context)
Definition: aset.c:1126
#define WARNING
Definition: elog.h:40
static const unsigned char LogTable256[256]
Definition: aset.c:239
MemoryContext AllocSetContextCreate(MemoryContext parent, const char *name, Size minContextSize, Size initBlockSize, Size maxBlockSize)
Definition: aset.c:322
#define ALLOC_CHUNK_FRACTION
Definition: aset.c:85
struct AllocBlockData * AllocBlock
Definition: aset.c:102
AllocChunk freelist[ALLOCSET_NUM_FREELISTS]
Definition: aset.c:125
Size nextBlockSize
Definition: aset.c:129
AllocBlock prev
Definition: aset.c:151
#define free(a)
Definition: header.h:65
Size allocChunkLimit
Definition: aset.c:130
struct AllocChunkData * AllocChunk
Definition: aset.c:103
#define NULL
Definition: c.h:229
#define Assert(condition)
Definition: c.h:675
static void AllocSetFree(MemoryContext context, void *pointer)
Definition: aset.c:839
size_t Size
Definition: c.h:356
#define MAXALIGN(LEN)
Definition: c.h:588
static void AllocSetStats(MemoryContext context, int level, bool print, MemoryContextCounters *totals)
Definition: aset.c:1148
struct AllocChunkData AllocChunkData
static void AllocSetDelete(MemoryContext context)
Definition: aset.c:523
#define realloc(a, b)
Definition: header.h:60
const char * name
Definition: encode.c:521
#define AllocHugeSizeIsValid(size)
Definition: memutils.h:46
void * aset
Definition: aset.c:176
int errmsg(const char *fmt,...)
Definition: elog.c:797
int i
static void * AllocSetAlloc(MemoryContext context, Size size)
Definition: aset.c:562
AllocSetContext * AllocSet
Definition: aset.c:134
struct AllocSetContext AllocSetContext
#define ALLOC_MINBITS
Definition: aset.c:81
#define elog
Definition: elog.h:219
Size maxBlockSize
Definition: aset.c:128
#define AllocAllocInfo(_cxt, _chunk)
Definition: aset.c:259
#define offsetof(type, field)
Definition: c.h:555
Size size
Definition: aset.c:164
#define AllocPointerGetChunk(ptr)
Definition: aset.c:193