PostgreSQL Source Code  git master
dsa.c File Reference
#include "postgres.h"
#include "port/atomics.h"
#include "port/pg_bitutils.h"
#include "storage/dsm.h"
#include "storage/ipc.h"
#include "storage/lwlock.h"
#include "storage/shmem.h"
#include "utils/dsa.h"
#include "utils/freepage.h"
#include "utils/memutils.h"
Include dependency graph for dsa.c:

Go to the source code of this file.

Data Structures

struct  dsa_segment_header
 
struct  dsa_area_span
 
struct  dsa_area_pool
 
struct  dsa_area_control
 
struct  dsa_segment_map
 
struct  dsa_area
 

Macros

#define DSA_INITIAL_SEGMENT_SIZE   ((size_t) (1 * 1024 * 1024))
 
#define DSA_NUM_SEGMENTS_AT_EACH_SIZE   2
 
#define DSA_OFFSET_WIDTH   40 /* 1024 segments of size up to 1TB */
 
#define DSA_MAX_SEGMENTS    Min(1024, (1 << ((SIZEOF_DSA_POINTER * 8) - DSA_OFFSET_WIDTH)))
 
#define DSA_OFFSET_BITMASK   (((dsa_pointer) 1 << DSA_OFFSET_WIDTH) - 1)
 
#define DSA_MAX_SEGMENT_SIZE   ((size_t) 1 << DSA_OFFSET_WIDTH)
 
#define DSA_PAGES_PER_SUPERBLOCK   16
 
#define DSA_SEGMENT_HEADER_MAGIC   0x0ce26608
 
#define DSA_MAKE_POINTER(segment_number, offset)    (((dsa_pointer) (segment_number) << DSA_OFFSET_WIDTH) | (offset))
 
#define DSA_EXTRACT_SEGMENT_NUMBER(dp)   ((dp) >> DSA_OFFSET_WIDTH)
 
#define DSA_EXTRACT_OFFSET(dp)   ((dp) & DSA_OFFSET_BITMASK)
 
#define DSA_SEGMENT_INDEX_NONE   (~(dsa_segment_index)0)
 
#define DSA_NUM_SEGMENT_BINS   16
 
#define DSA_AREA_LOCK(area)   (&area->control->lock)
 
#define DSA_SCLASS_LOCK(area, sclass)   (&area->control->pools[sclass].lock)
 
#define NextFreeObjectIndex(object)   (* (uint16 *) (object))
 
#define DSA_NUM_SIZE_CLASSES   lengthof(dsa_size_classes)
 
#define DSA_SCLASS_BLOCK_OF_SPANS   0
 
#define DSA_SCLASS_SPAN_LARGE   1
 
#define DSA_SIZE_CLASS_MAP_QUANTUM   8
 
#define DSA_FULLNESS_CLASSES   4
 
#define DsaAreaPoolToDsaPointer(area, p)    DSA_MAKE_POINTER(0, (char *) p - (char *) area->control)
 
#define DSA_SPAN_NOTHING_FREE   ((uint16) -1)
 
#define DSA_SUPERBLOCK_SIZE   (DSA_PAGES_PER_SUPERBLOCK * FPM_PAGE_SIZE)
 
#define get_segment_index(area, segment_map_ptr)    (segment_map_ptr - &area->segment_maps[0])
 

Typedefs

typedef size_t dsa_segment_index
 

Functions

static size_t contiguous_pages_to_segment_bin (size_t n)
 
static void init_span (dsa_area *area, dsa_pointer span_pointer, dsa_area_pool *pool, dsa_pointer start, size_t npages, uint16 size_class)
 
static bool transfer_first_span (dsa_area *area, dsa_area_pool *pool, int fromclass, int toclass)
 
static dsa_pointer alloc_object (dsa_area *area, int size_class)
 
static bool ensure_active_superblock (dsa_area *area, dsa_area_pool *pool, int size_class)
 
static dsa_segment_map * get_segment_by_index (dsa_area *area, dsa_segment_index index)
 
static void destroy_superblock (dsa_area *area, dsa_pointer span_pointer)
 
static void unlink_span (dsa_area *area, dsa_area_span *span)
 
static void add_span_to_fullness_class (dsa_area *area, dsa_area_span *span, dsa_pointer span_pointer, int fclass)
 
static void unlink_segment (dsa_area *area, dsa_segment_map *segment_map)
 
static dsa_segment_map * get_best_segment (dsa_area *area, size_t npages)
 
static dsa_segment_map * make_new_segment (dsa_area *area, size_t requested_pages)
 
static dsa_area * create_internal (void *place, size_t size, int tranche_id, dsm_handle control_handle, dsm_segment *control_segment)
 
static dsa_area * attach_internal (void *place, dsm_segment *segment, dsa_handle handle)
 
static void check_for_freed_segments (dsa_area *area)
 
static void check_for_freed_segments_locked (dsa_area *area)
 
dsa_area * dsa_create (int tranche_id)
 
dsa_area * dsa_create_in_place (void *place, size_t size, int tranche_id, dsm_segment *segment)
 
dsa_handle dsa_get_handle (dsa_area *area)
 
dsa_area * dsa_attach (dsa_handle handle)
 
dsa_area * dsa_attach_in_place (void *place, dsm_segment *segment)
 
void dsa_on_dsm_detach_release_in_place (dsm_segment *segment, Datum place)
 
void dsa_on_shmem_exit_release_in_place (int code, Datum place)
 
void dsa_release_in_place (void *place)
 
void dsa_pin_mapping (dsa_area *area)
 
dsa_pointer dsa_allocate_extended (dsa_area *area, size_t size, int flags)
 
void dsa_free (dsa_area *area, dsa_pointer dp)
 
void * dsa_get_address (dsa_area *area, dsa_pointer dp)
 
void dsa_pin (dsa_area *area)
 
void dsa_unpin (dsa_area *area)
 
void dsa_set_size_limit (dsa_area *area, size_t limit)
 
void dsa_trim (dsa_area *area)
 
void dsa_dump (dsa_area *area)
 
size_t dsa_minimum_size (void)
 
void dsa_detach (dsa_area *area)
 

Variables

static const uint16 dsa_size_classes []
 
static const uint8 dsa_size_class_map []
 

Macro Definition Documentation

◆ DSA_AREA_LOCK

#define DSA_AREA_LOCK (   area)    (&area->control->lock)

Definition at line 155 of file dsa.c.

◆ DSA_EXTRACT_OFFSET

#define DSA_EXTRACT_OFFSET (   dp)    ((dp) & DSA_OFFSET_BITMASK)

Definition at line 122 of file dsa.c.

◆ DSA_EXTRACT_SEGMENT_NUMBER

#define DSA_EXTRACT_SEGMENT_NUMBER (   dp)    ((dp) >> DSA_OFFSET_WIDTH)

Definition at line 119 of file dsa.c.

◆ DSA_FULLNESS_CLASSES

#define DSA_FULLNESS_CLASSES   4

Definition at line 289 of file dsa.c.

◆ DSA_INITIAL_SEGMENT_SIZE

#define DSA_INITIAL_SEGMENT_SIZE   ((size_t) (1 * 1024 * 1024))

Definition at line 69 of file dsa.c.

◆ DSA_MAKE_POINTER

#define DSA_MAKE_POINTER (   segment_number,
  offset 
)     (((dsa_pointer) (segment_number) << DSA_OFFSET_WIDTH) | (offset))

Definition at line 115 of file dsa.c.

◆ DSA_MAX_SEGMENT_SIZE

#define DSA_MAX_SEGMENT_SIZE   ((size_t) 1 << DSA_OFFSET_WIDTH)

Definition at line 102 of file dsa.c.

◆ DSA_MAX_SEGMENTS

#define DSA_MAX_SEGMENTS    Min(1024, (1 << ((SIZEOF_DSA_POINTER * 8) - DSA_OFFSET_WIDTH)))

Definition at line 95 of file dsa.c.

◆ DSA_NUM_SEGMENT_BINS

#define DSA_NUM_SEGMENT_BINS   16

Definition at line 134 of file dsa.c.

◆ DSA_NUM_SEGMENTS_AT_EACH_SIZE

#define DSA_NUM_SEGMENTS_AT_EACH_SIZE   2

Definition at line 78 of file dsa.c.

◆ DSA_NUM_SIZE_CLASSES

#define DSA_NUM_SIZE_CLASSES   lengthof(dsa_size_classes)

Definition at line 259 of file dsa.c.

◆ DSA_OFFSET_BITMASK

#define DSA_OFFSET_BITMASK   (((dsa_pointer) 1 << DSA_OFFSET_WIDTH) - 1)

Definition at line 99 of file dsa.c.

◆ DSA_OFFSET_WIDTH

#define DSA_OFFSET_WIDTH   40 /* 1024 segments of size up to 1TB */

Definition at line 88 of file dsa.c.

◆ DSA_PAGES_PER_SUPERBLOCK

#define DSA_PAGES_PER_SUPERBLOCK   16

Definition at line 105 of file dsa.c.

◆ DSA_SCLASS_BLOCK_OF_SPANS

#define DSA_SCLASS_BLOCK_OF_SPANS   0

Definition at line 262 of file dsa.c.

◆ DSA_SCLASS_LOCK

#define DSA_SCLASS_LOCK (   area,
  sclass 
)    (&area->control->pools[sclass].lock)

Definition at line 156 of file dsa.c.

◆ DSA_SCLASS_SPAN_LARGE

#define DSA_SCLASS_SPAN_LARGE   1

Definition at line 263 of file dsa.c.

◆ DSA_SEGMENT_HEADER_MAGIC

#define DSA_SEGMENT_HEADER_MAGIC   0x0ce26608

Definition at line 112 of file dsa.c.

◆ DSA_SEGMENT_INDEX_NONE

#define DSA_SEGMENT_INDEX_NONE   (~(dsa_segment_index)0)

Definition at line 128 of file dsa.c.

◆ DSA_SIZE_CLASS_MAP_QUANTUM

#define DSA_SIZE_CLASS_MAP_QUANTUM   8

Definition at line 281 of file dsa.c.

◆ DSA_SPAN_NOTHING_FREE

#define DSA_SPAN_NOTHING_FREE   ((uint16) -1)

Definition at line 389 of file dsa.c.

◆ DSA_SUPERBLOCK_SIZE

#define DSA_SUPERBLOCK_SIZE   (DSA_PAGES_PER_SUPERBLOCK * FPM_PAGE_SIZE)

Definition at line 390 of file dsa.c.

◆ DsaAreaPoolToDsaPointer

#define DsaAreaPoolToDsaPointer (   area,
  p 
)     DSA_MAKE_POINTER(0, (char *) p - (char *) area->control)

Definition at line 341 of file dsa.c.

◆ get_segment_index

#define get_segment_index (   area,
  segment_map_ptr 
)     (segment_map_ptr - &area->segment_maps[0])

Definition at line 393 of file dsa.c.

◆ NextFreeObjectIndex

#define NextFreeObjectIndex (   object)    (* (uint16 *) (object))

Definition at line 225 of file dsa.c.

Typedef Documentation

◆ dsa_segment_index

typedef size_t dsa_segment_index

Definition at line 125 of file dsa.c.

Function Documentation

◆ add_span_to_fullness_class()

static void add_span_to_fullness_class ( dsa_area *  area,
dsa_area_span *  span,
dsa_pointer  span_pointer,
int  fclass 
)
static

Definition at line 1899 of file dsa.c.

1902 {
1903  dsa_area_pool *pool = dsa_get_address(area, span->pool);
1904 
1905  if (DsaPointerIsValid(pool->spans[fclass]))
1906  {
1907  dsa_area_span *head = dsa_get_address(area,
1908  pool->spans[fclass]);
1909 
1910  head->prevspan = span_pointer;
1911  }
1912  span->prevspan = InvalidDsaPointer;
1913  span->nextspan = pool->spans[fclass];
1914  pool->spans[fclass] = span_pointer;
1915  span->fclass = fclass;
1916 }
void * dsa_get_address(dsa_area *area, dsa_pointer dp)
Definition: dsa.c:944
#define InvalidDsaPointer
Definition: dsa.h:78
#define DsaPointerIsValid(x)
Definition: dsa.h:81
dsa_pointer spans[DSA_FULLNESS_CLASSES]
Definition: dsa.c:302
dsa_pointer nextspan
Definition: dsa.c:210
uint16 fclass
Definition: dsa.c:218
dsa_pointer prevspan
Definition: dsa.c:209
dsa_pointer pool
Definition: dsa.c:208

References dsa_get_address(), DsaPointerIsValid, dsa_area_span::fclass, InvalidDsaPointer, dsa_area_span::nextspan, dsa_area_span::pool, dsa_area_span::prevspan, and dsa_area_pool::spans.

Referenced by dsa_free().

◆ alloc_object()

static dsa_pointer alloc_object ( dsa_area *  area,
int  size_class 
)
inlinestatic

Definition at line 1449 of file dsa.c.

1450 {
1451  dsa_area_pool *pool = &area->control->pools[size_class];
1452  dsa_area_span *span;
1453  dsa_pointer block;
1454  dsa_pointer result;
1455  char *object;
1456  size_t size;
1457 
1458  /*
1459  * Even though ensure_active_superblock can in turn call alloc_object if
1460  * it needs to allocate a new span, that's always from a different pool,
1461  * and the order of lock acquisition is always the same, so it's OK that
1462  * we hold this lock for the duration of this function.
1463  */
1464  Assert(!LWLockHeldByMe(DSA_SCLASS_LOCK(area, size_class)));
1465  LWLockAcquire(DSA_SCLASS_LOCK(area, size_class), LW_EXCLUSIVE);
1466 
1467  /*
1468  * If there's no active superblock, we must successfully obtain one or
1469  * fail the request.
1470  */
1471  if (!DsaPointerIsValid(pool->spans[1]) &&
1472  !ensure_active_superblock(area, pool, size_class))
1473  {
1474  result = InvalidDsaPointer;
1475  }
1476  else
1477  {
1478  /*
1479  * There should be a block in fullness class 1 at this point, and it
1480  * should never be completely full. Thus we can either pop an object
1481  * from the free list or, failing that, initialize a new object.
1482  */
1483  Assert(DsaPointerIsValid(pool->spans[1]));
1484  span = (dsa_area_span *)
1485  dsa_get_address(area, pool->spans[1]);
1486  Assert(span->nallocatable > 0);
1487  block = span->start;
1488  Assert(size_class < DSA_NUM_SIZE_CLASSES);
1489  size = dsa_size_classes[size_class];
1490  if (span->firstfree != DSA_SPAN_NOTHING_FREE)
1491  {
1492  result = block + span->firstfree * size;
1493  object = dsa_get_address(area, result);
1494  span->firstfree = NextFreeObjectIndex(object);
1495  }
1496  else
1497  {
1498  result = block + span->ninitialized * size;
1499  ++span->ninitialized;
1500  }
1501  --span->nallocatable;
1502 
1503  /* If it's now full, move it to the highest-numbered fullness class. */
1504  if (span->nallocatable == 0)
1505  transfer_first_span(area, pool, 1, DSA_FULLNESS_CLASSES - 1);
1506  }
1507 
1508  Assert(LWLockHeldByMe(DSA_SCLASS_LOCK(area, size_class)));
1509  LWLockRelease(DSA_SCLASS_LOCK(area, size_class));
1510 
1511  return result;
1512 }
static const uint16 dsa_size_classes[]
Definition: dsa.c:248
static bool ensure_active_superblock(dsa_area *area, dsa_area_pool *pool, int size_class)
Definition: dsa.c:1537
#define DSA_SPAN_NOTHING_FREE
Definition: dsa.c:389
#define DSA_SCLASS_LOCK(area, sclass)
Definition: dsa.c:156
static bool transfer_first_span(dsa_area *area, dsa_area_pool *pool, int fromclass, int toclass)
Definition: dsa.c:1409
#define DSA_NUM_SIZE_CLASSES
Definition: dsa.c:259
#define NextFreeObjectIndex(object)
Definition: dsa.c:225
#define DSA_FULLNESS_CLASSES
Definition: dsa.c:289
uint64 dsa_pointer
Definition: dsa.h:62
Assert(fmt[strlen(fmt) - 1] !='\n')
bool LWLockHeldByMe(LWLock *lock)
Definition: lwlock.c:1919
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1195
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1803
@ LW_EXCLUSIVE
Definition: lwlock.h:115
dsa_area_pool pools[DSA_NUM_SIZE_CLASSES]
Definition: dsa.c:321
dsa_pointer start
Definition: dsa.c:211
uint16 nallocatable
Definition: dsa.c:215
uint16 ninitialized
Definition: dsa.c:214
uint16 firstfree
Definition: dsa.c:216
dsa_area_control * control
Definition: dsa.c:369

References Assert(), dsa_area::control, DSA_FULLNESS_CLASSES, dsa_get_address(), DSA_NUM_SIZE_CLASSES, DSA_SCLASS_LOCK, dsa_size_classes, DSA_SPAN_NOTHING_FREE, DsaPointerIsValid, ensure_active_superblock(), dsa_area_span::firstfree, InvalidDsaPointer, LW_EXCLUSIVE, LWLockAcquire(), LWLockHeldByMe(), LWLockRelease(), dsa_area_span::nallocatable, NextFreeObjectIndex, dsa_area_span::ninitialized, dsa_area_control::pools, dsa_area_pool::spans, dsa_area_span::start, and transfer_first_span().

Referenced by dsa_allocate_extended(), and ensure_active_superblock().

◆ attach_internal()

static dsa_area * attach_internal ( void *  place,
dsm_segment *  segment,
dsa_handle  handle 
)
static

Definition at line 1303 of file dsa.c.

1304 {
1305  dsa_area_control *control;
1306  dsa_area *area;
1307  dsa_segment_map *segment_map;
1308 
1309  control = (dsa_area_control *) place;
1310  Assert(control->handle == handle);
1311  Assert(control->segment_handles[0] == handle);
1312  Assert(control->segment_header.magic ==
1313  (DSA_SEGMENT_HEADER_MAGIC ^ handle ^ 0));
1314 
1315  /* Build the backend-local area object. */
1316  area = palloc(sizeof(dsa_area));
1317  area->control = control;
1318  area->mapping_pinned = false;
1319  memset(&area->segment_maps[0], 0,
1320  sizeof(dsa_segment_map) * DSA_MAX_SEGMENTS);
1321  area->high_segment_index = 0;
1322 
1323  /* Set up the segment map for this process's mapping. */
1324  segment_map = &area->segment_maps[0];
1325  segment_map->segment = segment; /* NULL for in-place */
1326  segment_map->mapped_address = place;
1327  segment_map->header = (dsa_segment_header *) segment_map->mapped_address;
1328  segment_map->fpm = (FreePageManager *)
1329  (segment_map->mapped_address + MAXALIGN(sizeof(dsa_area_control)));
1330  segment_map->pagemap = (dsa_pointer *)
1331  (segment_map->mapped_address + MAXALIGN(sizeof(dsa_area_control)) +
1332  MAXALIGN(sizeof(FreePageManager)));
1333 
1334  /* Bump the reference count. */
1336  if (control->refcnt == 0)
1337  {
1338  /* We can't attach to a DSA area that has already been destroyed. */
1339  ereport(ERROR,
1340  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1341  errmsg("could not attach to dynamic shared area")));
1342  }
1343  ++control->refcnt;
1346 
1347  return area;
1348 }
#define MAXALIGN(LEN)
Definition: c.h:795
#define DSA_AREA_LOCK(area)
Definition: dsa.c:155
#define DSA_SEGMENT_HEADER_MAGIC
Definition: dsa.c:112
#define DSA_MAX_SEGMENTS
Definition: dsa.c:95
int errcode(int sqlerrcode)
Definition: elog.c:858
int errmsg(const char *fmt,...)
Definition: elog.c:1069
#define ERROR
Definition: elog.h:39
#define ereport(elevel,...)
Definition: elog.h:149
void * palloc(Size size)
Definition: mcxt.c:1210
dsa_segment_header segment_header
Definition: dsa.c:313
size_t freed_segment_counter
Definition: dsa.c:333
int refcnt
Definition: dsa.c:329
dsa_handle handle
Definition: dsa.c:315
dsm_handle segment_handles[DSA_MAX_SEGMENTS]
Definition: dsa.c:317
Definition: dsa.c:367
dsa_segment_map segment_maps[DSA_MAX_SEGMENTS]
Definition: dsa.c:380
dsa_segment_index high_segment_index
Definition: dsa.c:383
bool mapping_pinned
Definition: dsa.c:372
size_t freed_segment_counter
Definition: dsa.c:386
uint32 magic
Definition: dsa.c:166
dsa_segment_header * header
Definition: dsa.c:355
FreePageManager * fpm
Definition: dsa.c:356
dsm_segment * segment
Definition: dsa.c:353
dsa_pointer * pagemap
Definition: dsa.c:357
char * mapped_address
Definition: dsa.c:354

References Assert(), dsa_area::control, DSA_AREA_LOCK, DSA_MAX_SEGMENTS, DSA_SEGMENT_HEADER_MAGIC, ereport, errcode(), errmsg(), ERROR, dsa_segment_map::fpm, dsa_area_control::freed_segment_counter, dsa_area::freed_segment_counter, dsa_area_control::handle, dsa_segment_map::header, dsa_area::high_segment_index, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), dsa_segment_header::magic, dsa_segment_map::mapped_address, dsa_area::mapping_pinned, MAXALIGN, dsa_segment_map::pagemap, palloc(), dsa_area_control::refcnt, dsa_segment_map::segment, dsa_area_control::segment_handles, dsa_area_control::segment_header, and dsa_area::segment_maps.

Referenced by dsa_attach(), and dsa_attach_in_place().

◆ check_for_freed_segments()

static void check_for_freed_segments ( dsa_area *  area)
static

Definition at line 2241 of file dsa.c.

2242 {
2243  size_t freed_segment_counter;
2244 
2245  /*
2246  * Any other process that has freed a segment has incremented
2247  * freed_segment_counter while holding an LWLock, and that must precede
2248  * any backend creating a new segment in the same slot while holding an
2249  * LWLock, and that must precede the creation of any dsa_pointer pointing
2250  * into the new segment which might reach us here, and the caller must
2251  * have sent the dsa_pointer to this process using appropriate memory
2252  * synchronization (some kind of locking or atomic primitive or system
2253  * call). So all we need to do on the reading side is ask for the load of
2254  * freed_segment_counter to follow the caller's load of the dsa_pointer it
2255  * has, and we can be sure to detect any segments that had been freed as
2256  * of the time that the dsa_pointer reached this process.
2257  */
2258  pg_read_barrier();
2259  freed_segment_counter = area->control->freed_segment_counter;
2260  if (unlikely(area->freed_segment_counter != freed_segment_counter))
2261  {
2262  /* Check all currently mapped segments to find what's been freed. */
2266  }
2267 }
#define pg_read_barrier()
Definition: atomics.h:153
#define unlikely(x)
Definition: c.h:295
static void check_for_freed_segments_locked(dsa_area *area)
Definition: dsa.c:2277

References check_for_freed_segments_locked(), dsa_area::control, DSA_AREA_LOCK, dsa_area_control::freed_segment_counter, dsa_area::freed_segment_counter, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), pg_read_barrier, and unlikely.

Referenced by dsa_free(), and dsa_get_address().

◆ check_for_freed_segments_locked()

static void check_for_freed_segments_locked ( dsa_area *  area)
static

Definition at line 2277 of file dsa.c.

2278 {
2279  size_t freed_segment_counter;
2280  int i;
2281 
2283  freed_segment_counter = area->control->freed_segment_counter;
2284  if (unlikely(area->freed_segment_counter != freed_segment_counter))
2285  {
2286  for (i = 0; i <= area->high_segment_index; ++i)
2287  {
2288  if (area->segment_maps[i].header != NULL &&
2289  area->segment_maps[i].header->freed)
2290  {
2291  dsm_detach(area->segment_maps[i].segment);
2292  area->segment_maps[i].segment = NULL;
2293  area->segment_maps[i].header = NULL;
2294  area->segment_maps[i].mapped_address = NULL;
2295  }
2296  }
2297  area->freed_segment_counter = freed_segment_counter;
2298  }
2299 }
void dsm_detach(dsm_segment *seg)
Definition: dsm.c:776
int i
Definition: isn.c:73

References Assert(), dsa_area::control, DSA_AREA_LOCK, dsm_detach(), dsa_segment_header::freed, dsa_area_control::freed_segment_counter, dsa_area::freed_segment_counter, dsa_segment_map::header, dsa_area::high_segment_index, i, LWLockHeldByMe(), dsa_segment_map::mapped_address, dsa_segment_map::segment, dsa_area::segment_maps, and unlikely.

Referenced by check_for_freed_segments(), destroy_superblock(), dsa_dump(), and get_best_segment().

◆ contiguous_pages_to_segment_bin()

static size_t contiguous_pages_to_segment_bin ( size_t  n)
inlinestatic

Definition at line 142 of file dsa.c.

143 {
144  size_t bin;
145 
146  if (n == 0)
147  bin = 0;
148  else
149  bin = pg_leftmost_one_pos_size_t(n) + 1;
150 
151  return Min(bin, DSA_NUM_SEGMENT_BINS - 1);
152 }
#define Min(x, y)
Definition: c.h:988
#define DSA_NUM_SEGMENT_BINS
Definition: dsa.c:134
#define pg_leftmost_one_pos_size_t
Definition: pg_bitutils.h:289

References DSA_NUM_SEGMENT_BINS, Min, and pg_leftmost_one_pos_size_t.

Referenced by create_internal(), get_best_segment(), and make_new_segment().

◆ create_internal()

static dsa_area * create_internal ( void *  place,
size_t  size,
int  tranche_id,
dsm_handle  control_handle,
dsm_segment *  control_segment 
)
static

Definition at line 1203 of file dsa.c.

1207 {
1208  dsa_area_control *control;
1209  dsa_area *area;
1210  dsa_segment_map *segment_map;
1211  size_t usable_pages;
1212  size_t total_pages;
1213  size_t metadata_bytes;
1214  int i;
1215 
1216  /* Sanity check on the space we have to work in. */
1217  if (size < dsa_minimum_size())
1218  elog(ERROR, "dsa_area space must be at least %zu, but %zu provided",
1219  dsa_minimum_size(), size);
1220 
1221  /* Now figure out how much space is usable */
1222  total_pages = size / FPM_PAGE_SIZE;
1223  metadata_bytes =
1224  MAXALIGN(sizeof(dsa_area_control)) +
1225  MAXALIGN(sizeof(FreePageManager)) +
1226  total_pages * sizeof(dsa_pointer);
1227  /* Add padding up to next page boundary. */
1228  if (metadata_bytes % FPM_PAGE_SIZE != 0)
1229  metadata_bytes += FPM_PAGE_SIZE - (metadata_bytes % FPM_PAGE_SIZE);
1230  Assert(metadata_bytes <= size);
1231  usable_pages = (size - metadata_bytes) / FPM_PAGE_SIZE;
1232 
1233  /*
1234  * Initialize the dsa_area_control object located at the start of the
1235  * space.
1236  */
1237  control = (dsa_area_control *) place;
1238  memset(place, 0, sizeof(*control));
1239  control->segment_header.magic =
1240  DSA_SEGMENT_HEADER_MAGIC ^ control_handle ^ 0;
1243  control->segment_header.usable_pages = usable_pages;
1244  control->segment_header.freed = false;
1246  control->handle = control_handle;
1247  control->max_total_segment_size = (size_t) -1;
1248  control->total_segment_size = size;
1249  control->segment_handles[0] = control_handle;
1250  for (i = 0; i < DSA_NUM_SEGMENT_BINS; ++i)
1252  control->refcnt = 1;
1253  control->lwlock_tranche_id = tranche_id;
1254 
1255  /*
1256  * Create the dsa_area object that this backend will use to access the
1257  * area. Other backends will need to obtain their own dsa_area object by
1258  * attaching.
1259  */
1260  area = palloc(sizeof(dsa_area));
1261  area->control = control;
1262  area->mapping_pinned = false;
1263  memset(area->segment_maps, 0, sizeof(dsa_segment_map) * DSA_MAX_SEGMENTS);
1264  area->high_segment_index = 0;
1265  area->freed_segment_counter = 0;
1266  LWLockInitialize(&control->lock, control->lwlock_tranche_id);
1267  for (i = 0; i < DSA_NUM_SIZE_CLASSES; ++i)
1269  control->lwlock_tranche_id);
1270 
1271  /* Set up the segment map for this process's mapping. */
1272  segment_map = &area->segment_maps[0];
1273  segment_map->segment = control_segment;
1274  segment_map->mapped_address = place;
1275  segment_map->header = (dsa_segment_header *) place;
1276  segment_map->fpm = (FreePageManager *)
1277  (segment_map->mapped_address +
1278  MAXALIGN(sizeof(dsa_area_control)));
1279  segment_map->pagemap = (dsa_pointer *)
1280  (segment_map->mapped_address +
1281  MAXALIGN(sizeof(dsa_area_control)) +
1282  MAXALIGN(sizeof(FreePageManager)));
1283 
1284  /* Set up the free page map. */
1285  FreePageManagerInitialize(segment_map->fpm, segment_map->mapped_address);
1286  /* There can be 0 usable pages if size is dsa_minimum_size(). */
1287 
1288  if (usable_pages > 0)
1289  FreePageManagerPut(segment_map->fpm, metadata_bytes / FPM_PAGE_SIZE,
1290  usable_pages);
1291 
1292  /* Put this segment into the appropriate bin. */
1293  control->segment_bins[contiguous_pages_to_segment_bin(usable_pages)] = 0;
1294  segment_map->header->bin = contiguous_pages_to_segment_bin(usable_pages);
1295 
1296  return area;
1297 }
#define DSA_SEGMENT_INDEX_NONE
Definition: dsa.c:128
#define DSA_INITIAL_SEGMENT_SIZE
Definition: dsa.c:69
static size_t contiguous_pages_to_segment_bin(size_t n)
Definition: dsa.c:142
size_t dsa_minimum_size(void)
Definition: dsa.c:1181
void FreePageManagerPut(FreePageManager *fpm, Size first_page, Size npages)
Definition: freepage.c:379
void FreePageManagerInitialize(FreePageManager *fpm, char *base)
Definition: freepage.c:183
#define FPM_PAGE_SIZE
Definition: freepage.h:30
void LWLockInitialize(LWLock *lock, int tranche_id)
Definition: lwlock.c:730
size_t total_segment_size
Definition: dsa.c:323
int lwlock_tranche_id
Definition: dsa.c:335
size_t max_total_segment_size
Definition: dsa.c:325
dsa_segment_index segment_bins[DSA_NUM_SEGMENT_BINS]
Definition: dsa.c:319
LWLock lock
Definition: dsa.c:337
size_t size
Definition: dsa.c:170
dsa_segment_index next
Definition: dsa.c:182
dsa_segment_index prev
Definition: dsa.c:176
size_t usable_pages
Definition: dsa.c:168
size_t bin
Definition: dsa.c:184

References Assert(), dsa_segment_header::bin, contiguous_pages_to_segment_bin(), dsa_area::control, DSA_INITIAL_SEGMENT_SIZE, DSA_MAX_SEGMENTS, dsa_minimum_size(), DSA_NUM_SEGMENT_BINS, DSA_NUM_SIZE_CLASSES, DSA_SCLASS_LOCK, DSA_SEGMENT_HEADER_MAGIC, DSA_SEGMENT_INDEX_NONE, elog(), ERROR, dsa_segment_map::fpm, FPM_PAGE_SIZE, dsa_segment_header::freed, dsa_area::freed_segment_counter, FreePageManagerInitialize(), FreePageManagerPut(), dsa_area_control::handle, dsa_segment_map::header, dsa_area::high_segment_index, i, dsa_area_control::lock, dsa_area_control::lwlock_tranche_id, LWLockInitialize(), dsa_segment_header::magic, dsa_segment_map::mapped_address, dsa_area::mapping_pinned, dsa_area_control::max_total_segment_size, MAXALIGN, dsa_segment_header::next, dsa_segment_map::pagemap, palloc(), dsa_segment_header::prev, dsa_area_control::refcnt, dsa_segment_map::segment, dsa_area_control::segment_bins, dsa_area_control::segment_handles, dsa_area_control::segment_header, dsa_area::segment_maps, dsa_segment_header::size, dsa_area_control::total_segment_size, and dsa_segment_header::usable_pages.

Referenced by dsa_create(), and dsa_create_in_place().

◆ destroy_superblock()

static void destroy_superblock ( dsa_area *  area,
dsa_pointer  span_pointer 
)
static

Definition at line 1812 of file dsa.c.

1813 {
1814  dsa_area_span *span = dsa_get_address(area, span_pointer);
1815  int size_class = span->size_class;
1816  dsa_segment_map *segment_map;
1817 
1818 
1819  /* Remove it from its fullness class list. */
1820  unlink_span(area, span);
1821 
1822  /*
1823  * Note: Here we acquire the area lock while we already hold a per-pool
1824  * lock. We never hold the area lock and then take a pool lock, or we
1825  * could deadlock.
1826  */
1829  segment_map =
1831  FreePageManagerPut(segment_map->fpm,
1833  span->npages);
1834  /* Check if the segment is now entirely free. */
1835  if (fpm_largest(segment_map->fpm) == segment_map->header->usable_pages)
1836  {
1837  dsa_segment_index index = get_segment_index(area, segment_map);
1838 
1839  /* If it's not the segment with extra control data, free it. */
1840  if (index != 0)
1841  {
1842  /*
1843  * Give it back to the OS, and allow other backends to detect that
1844  * they need to detach.
1845  */
1846  unlink_segment(area, segment_map);
1847  segment_map->header->freed = true;
1849  segment_map->header->size);
1850  area->control->total_segment_size -=
1851  segment_map->header->size;
1853  dsm_detach(segment_map->segment);
1855  ++area->control->freed_segment_counter;
1856  segment_map->segment = NULL;
1857  segment_map->header = NULL;
1858  segment_map->mapped_address = NULL;
1859  }
1860  }
1862 
1863  /*
1864  * Span-of-spans blocks store the span which describes them within the
1865  * block itself, so freeing the storage implicitly frees the descriptor
1866  * also. If this is a block of any other type, we need to separately free
1867  * the span object also. This recursive call to dsa_free will acquire the
1868  * span pool's lock. We can't deadlock because the acquisition order is
1869  * always some other pool and then the span pool.
1870  */
1871  if (size_class != DSA_SCLASS_BLOCK_OF_SPANS)
1872  dsa_free(area, span_pointer);
1873 }
static void unlink_segment(dsa_area *area, dsa_segment_map *segment_map)
Definition: dsa.c:1948
#define DSA_EXTRACT_SEGMENT_NUMBER(dp)
Definition: dsa.c:119
#define get_segment_index(area, segment_map_ptr)
Definition: dsa.c:393
#define DSA_EXTRACT_OFFSET(dp)
Definition: dsa.c:122
size_t dsa_segment_index
Definition: dsa.c:125
static dsa_segment_map * get_segment_by_index(dsa_area *area, dsa_segment_index index)
Definition: dsa.c:1734
#define DSA_SCLASS_BLOCK_OF_SPANS
Definition: dsa.c:262
static void unlink_span(dsa_area *area, dsa_area_span *span)
Definition: dsa.c:1876
void dsa_free(dsa_area *area, dsa_pointer dp)
Definition: dsa.c:832
dsm_handle dsm_segment_handle(dsm_segment *seg)
Definition: dsm.c:1094
void dsm_unpin_segment(dsm_handle handle)
Definition: dsm.c:960
#define DSM_HANDLE_INVALID
Definition: dsm_impl.h:58
#define fpm_largest(fpm)
Definition: freepage.h:88
uint16 size_class
Definition: dsa.c:213
size_t npages
Definition: dsa.c:212
Definition: type.h:95

References Assert(), check_for_freed_segments_locked(), dsa_area::control, DSA_AREA_LOCK, DSA_EXTRACT_OFFSET, DSA_EXTRACT_SEGMENT_NUMBER, dsa_free(), dsa_get_address(), DSA_SCLASS_BLOCK_OF_SPANS, dsm_detach(), DSM_HANDLE_INVALID, dsm_segment_handle(), dsm_unpin_segment(), dsa_segment_map::fpm, fpm_largest, FPM_PAGE_SIZE, dsa_segment_header::freed, dsa_area_control::freed_segment_counter, FreePageManagerPut(), get_segment_by_index(), get_segment_index, dsa_segment_map::header, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), dsa_segment_map::mapped_address, dsa_area_span::npages, dsa_segment_map::segment, dsa_area_control::segment_handles, dsa_segment_header::size, dsa_area_span::size_class, dsa_area_span::start, dsa_area_control::total_segment_size, unlink_segment(), unlink_span(), and dsa_segment_header::usable_pages.

Referenced by dsa_free(), and dsa_trim().

◆ dsa_allocate_extended()

dsa_pointer dsa_allocate_extended ( dsa_area area,
size_t  size,
int  flags 
)

Definition at line 677 of file dsa.c.

678 {
679  uint16 size_class;
680  dsa_pointer start_pointer;
681  dsa_segment_map *segment_map;
682  dsa_pointer result;
683 
684  Assert(size > 0);
685 
686  /* Sanity check on huge individual allocation size. */
687  if (((flags & DSA_ALLOC_HUGE) != 0 && !AllocHugeSizeIsValid(size)) ||
688  ((flags & DSA_ALLOC_HUGE) == 0 && !AllocSizeIsValid(size)))
689  elog(ERROR, "invalid DSA memory alloc request size %zu", size);
690 
691  /*
692  * If bigger than the largest size class, just grab a run of pages from
693  * the free page manager, instead of allocating an object from a pool.
694  * There will still be a span, but it's a special class of span that
695  * manages this whole allocation and simply gives all pages back to the
696  * free page manager when dsa_free is called.
697  */
698  if (size > dsa_size_classes[lengthof(dsa_size_classes) - 1])
699  {
700  size_t npages = fpm_size_to_pages(size);
701  size_t first_page;
702  dsa_pointer span_pointer;
704 
705  /* Obtain a span object. */
706  span_pointer = alloc_object(area, DSA_SCLASS_BLOCK_OF_SPANS);
707  if (!DsaPointerIsValid(span_pointer))
708  {
709  /* Raise error unless asked not to. */
710  if ((flags & DSA_ALLOC_NO_OOM) == 0)
711  ereport(ERROR,
712  (errcode(ERRCODE_OUT_OF_MEMORY),
713  errmsg("out of memory"),
714  errdetail("Failed on DSA request of size %zu.",
715  size)));
716  return InvalidDsaPointer;
717  }
718 
720 
721  /* Find a segment from which to allocate. */
722  segment_map = get_best_segment(area, npages);
723  if (segment_map == NULL)
724  segment_map = make_new_segment(area, npages);
725  if (segment_map == NULL)
726  {
727  /* Can't make any more segments: game over. */
729  dsa_free(area, span_pointer);
730 
731  /* Raise error unless asked not to. */
732  if ((flags & DSA_ALLOC_NO_OOM) == 0)
733  ereport(ERROR,
734  (errcode(ERRCODE_OUT_OF_MEMORY),
735  errmsg("out of memory"),
736  errdetail("Failed on DSA request of size %zu.",
737  size)));
738  return InvalidDsaPointer;
739  }
740 
741  /*
742  * Ask the free page manager for a run of pages. This should always
743  * succeed, since both get_best_segment and make_new_segment should
744  * only return a non-NULL pointer if it actually contains enough
745  * contiguous freespace. If it does fail, something in our backend
746  * private state is out of whack, so use FATAL to kill the process.
747  */
748  if (!FreePageManagerGet(segment_map->fpm, npages, &first_page))
749  elog(FATAL,
750  "dsa_allocate could not find %zu free pages", npages);
752 
753  start_pointer = DSA_MAKE_POINTER(get_segment_index(area, segment_map),
754  first_page * FPM_PAGE_SIZE);
755 
756  /* Initialize span and pagemap. */
758  LW_EXCLUSIVE);
759  init_span(area, span_pointer, pool, start_pointer, npages,
761  segment_map->pagemap[first_page] = span_pointer;
763 
764  /* Zero-initialize the memory if requested. */
765  if ((flags & DSA_ALLOC_ZERO) != 0)
766  memset(dsa_get_address(area, start_pointer), 0, size);
767 
768  return start_pointer;
769  }
770 
771  /* Map allocation to a size class. */
773  {
774  int mapidx;
775 
776  /* For smaller sizes we have a lookup table... */
777  mapidx = ((size + DSA_SIZE_CLASS_MAP_QUANTUM - 1) /
779  size_class = dsa_size_class_map[mapidx];
780  }
781  else
782  {
783  uint16 min;
784  uint16 max;
785 
786  /* ... and for the rest we search by binary chop. */
788  max = lengthof(dsa_size_classes) - 1;
789 
790  while (min < max)
791  {
792  uint16 mid = (min + max) / 2;
793  uint16 class_size = dsa_size_classes[mid];
794 
795  if (class_size < size)
796  min = mid + 1;
797  else
798  max = mid;
799  }
800 
801  size_class = min;
802  }
803  Assert(size <= dsa_size_classes[size_class]);
804  Assert(size_class == 0 || size > dsa_size_classes[size_class - 1]);
805 
806  /* Attempt to allocate an object from the appropriate pool. */
807  result = alloc_object(area, size_class);
808 
809  /* Check for failure to allocate. */
810  if (!DsaPointerIsValid(result))
811  {
812  /* Raise error unless asked not to. */
813  if ((flags & DSA_ALLOC_NO_OOM) == 0)
814  ereport(ERROR,
815  (errcode(ERRCODE_OUT_OF_MEMORY),
816  errmsg("out of memory"),
817  errdetail("Failed on DSA request of size %zu.", size)));
818  return InvalidDsaPointer;
819  }
820 
821  /* Zero-initialize the memory if requested. */
822  if ((flags & DSA_ALLOC_ZERO) != 0)
823  memset(dsa_get_address(area, result), 0, size);
824 
825  return result;
826 }
unsigned short uint16
Definition: c.h:489
#define lengthof(array)
Definition: c.h:772
#define DSA_MAKE_POINTER(segment_number, offset)
Definition: dsa.c:115
static dsa_pointer alloc_object(dsa_area *area, int size_class)
Definition: dsa.c:1449
#define DSA_SIZE_CLASS_MAP_QUANTUM
Definition: dsa.c:281
static const uint8 dsa_size_class_map[]
Definition: dsa.c:271
static dsa_segment_map * make_new_segment(dsa_area *area, size_t requested_pages)
Definition: dsa.c:2072
#define DSA_SCLASS_SPAN_LARGE
Definition: dsa.c:263
static void init_span(dsa_area *area, dsa_pointer span_pointer, dsa_area_pool *pool, dsa_pointer start, size_t npages, uint16 size_class)
Definition: dsa.c:1354
static dsa_segment_map * get_best_segment(dsa_area *area, size_t npages)
Definition: dsa.c:1980
#define DSA_ALLOC_NO_OOM
Definition: dsa.h:74
#define DSA_ALLOC_HUGE
Definition: dsa.h:73
#define DSA_ALLOC_ZERO
Definition: dsa.h:75
int errdetail(const char *fmt,...)
Definition: elog.c:1202
#define FATAL
Definition: elog.h:41
bool FreePageManagerGet(FreePageManager *fpm, Size npages, Size *first_page)
Definition: freepage.c:210
#define fpm_size_to_pages(sz)
Definition: freepage.h:74
#define AllocHugeSizeIsValid(size)
Definition: memutils.h:49
#define AllocSizeIsValid(size)
Definition: memutils.h:42

References alloc_object(), AllocHugeSizeIsValid, AllocSizeIsValid, Assert(), dsa_area::control, DSA_ALLOC_HUGE, DSA_ALLOC_NO_OOM, DSA_ALLOC_ZERO, DSA_AREA_LOCK, dsa_free(), dsa_get_address(), DSA_MAKE_POINTER, DSA_SCLASS_BLOCK_OF_SPANS, DSA_SCLASS_LOCK, DSA_SCLASS_SPAN_LARGE, dsa_size_class_map, DSA_SIZE_CLASS_MAP_QUANTUM, dsa_size_classes, DsaPointerIsValid, elog(), ereport, errcode(), errdetail(), errmsg(), ERROR, FATAL, dsa_segment_map::fpm, FPM_PAGE_SIZE, fpm_size_to_pages, FreePageManagerGet(), get_best_segment(), get_segment_index, init_span(), InvalidDsaPointer, lengthof, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), make_new_segment(), dsa_segment_map::pagemap, and dsa_area_control::pools.

Referenced by dshash_create(), and pagetable_allocate().

◆ dsa_attach()

dsa_area* dsa_attach ( dsa_handle  handle)

Definition at line 518 of file dsa.c.

519 {
520  dsm_segment *segment;
521  dsa_area *area;
522 
523  /*
524  * An area handle is really a DSM segment handle for the first segment, so
525  * we go ahead and attach to that.
526  */
527  segment = dsm_attach(handle);
528  if (segment == NULL)
529  ereport(ERROR,
530  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
531  errmsg("could not attach to dynamic shared area")));
532 
533  area = attach_internal(dsm_segment_address(segment), segment, handle);
534 
535  /* Clean up when the control segment detaches. */
538 
539  return area;
540 }
void dsa_on_dsm_detach_release_in_place(dsm_segment *segment, Datum place)
Definition: dsa.c:584
static dsa_area * attach_internal(void *place, dsm_segment *segment, dsa_handle handle)
Definition: dsa.c:1303
void * dsm_segment_address(dsm_segment *seg)
Definition: dsm.c:1066
void on_dsm_detach(dsm_segment *seg, on_dsm_detach_callback function, Datum arg)
Definition: dsm.c:1103
dsm_segment * dsm_attach(dsm_handle h)
Definition: dsm.c:638
static Datum PointerGetDatum(const void *X)
Definition: postgres.h:322

References attach_internal(), dsa_on_dsm_detach_release_in_place(), dsm_attach(), dsm_segment_address(), ereport, errcode(), errmsg(), ERROR, on_dsm_detach(), and PointerGetDatum().

Referenced by logicalrep_launcher_attach_dshmem().

◆ dsa_attach_in_place()

dsa_area* dsa_attach_in_place ( void *  place,
dsm_segment segment 
)

Definition at line 553 of file dsa.c.

554 {
555  dsa_area *area;
556 
557  area = attach_internal(place, NULL, DSA_HANDLE_INVALID);
558 
559  /*
560  * Clean up when the control segment detaches, if a containing DSM segment
561  * was provided.
562  */
563  if (segment != NULL)
565  PointerGetDatum(place));
566 
567  return area;
568 }
#define DSA_HANDLE_INVALID
Definition: dsa.h:103

References attach_internal(), DSA_HANDLE_INVALID, dsa_on_dsm_detach_release_in_place(), on_dsm_detach(), and PointerGetDatum().

Referenced by AttachSession(), ParallelQueryMain(), and pgstat_attach_shmem().

◆ dsa_create()

dsa_area* dsa_create ( int  tranche_id)

Definition at line 432 of file dsa.c.

433 {
434  dsm_segment *segment;
435  dsa_area *area;
436 
437  /*
438  * Create the DSM segment that will hold the shared control object and the
439  * first segment of usable space.
440  */
441  segment = dsm_create(DSA_INITIAL_SEGMENT_SIZE, 0);
442 
443  /*
444  * All segments backing this area are pinned, so that DSA can explicitly
445  * control their lifetime (otherwise a newly created segment belonging to
446  * this area might be freed when the only backend that happens to have it
447  * mapped in ends, corrupting the area).
448  */
449  dsm_pin_segment(segment);
450 
451  /* Create a new DSA area with the control object in this segment. */
452  area = create_internal(dsm_segment_address(segment),
454  tranche_id,
455  dsm_segment_handle(segment), segment);
456 
457  /* Clean up when the control segment detaches. */
460 
461  return area;
462 }
static dsa_area * create_internal(void *place, size_t size, int tranche_id, dsm_handle control_handle, dsm_segment *control_segment)
Definition: dsa.c:1203
dsm_segment * dsm_create(Size size, int flags)
Definition: dsm.c:489
void dsm_pin_segment(dsm_segment *seg)
Definition: dsm.c:928

References create_internal(), DSA_INITIAL_SEGMENT_SIZE, dsa_on_dsm_detach_release_in_place(), dsm_create(), dsm_pin_segment(), dsm_segment_address(), dsm_segment_handle(), on_dsm_detach(), and PointerGetDatum().

Referenced by logicalrep_launcher_attach_dshmem().

◆ dsa_create_in_place()

dsa_area* dsa_create_in_place ( void *  place,
size_t  size,
int  tranche_id,
dsm_segment segment 
)

Definition at line 481 of file dsa.c.

483 {
484  dsa_area *area;
485 
486  area = create_internal(place, size, tranche_id,
487  DSM_HANDLE_INVALID, NULL);
488 
489  /*
490  * Clean up when the control segment detaches, if a containing DSM segment
491  * was provided.
492  */
493  if (segment != NULL)
495  PointerGetDatum(place));
496 
497  return area;
498 }

References create_internal(), dsa_on_dsm_detach_release_in_place(), DSM_HANDLE_INVALID, on_dsm_detach(), and PointerGetDatum().

Referenced by ExecInitParallelPlan(), GetSessionDsmHandle(), and StatsShmemInit().

◆ dsa_detach()

void dsa_detach ( dsa_area * area)

Definition at line 1922 of file dsa.c.

/*
 * dsa_detach: unmap, in this backend only, every DSM segment backing 'area'
 * and free the backend-local dsa_area object.  This does NOT decrement the
 * area's shared reference count; see the note below for why.
 */
1923 {
1924  int i;
1925 
1926  /* Detach from all segments. */
1927  for (i = 0; i <= area->high_segment_index; ++i)
1928  if (area->segment_maps[i].segment != NULL)
1929  dsm_detach(area->segment_maps[i].segment);
1930 
1931  /*
1932  * Note that 'detaching' (= detaching from DSM segments) doesn't include
1933  * 'releasing' (= adjusting the reference count). It would be nice to
1934  * combine these operations, but client code might never get around to
1935  * calling dsa_detach because of an error path, and a detach hook on any
1936  * particular segment is too late to detach other segments in the area
1937  * without risking a 'leak' warning in the non-error path.
1938  */
1939 
1940  /* Free the backend-local area object. */
1941  pfree(area);
1942 }
void pfree(void *pointer)
Definition: mcxt.c:1436

References dsm_detach(), dsa_area::high_segment_index, i, pfree(), dsa_segment_map::segment, and dsa_area::segment_maps.

Referenced by DetachSession(), ExecParallelCleanup(), ParallelQueryMain(), pgstat_detach_shmem(), and StatsShmemInit().

◆ dsa_dump()

void dsa_dump ( dsa_area area)

Definition at line 1077 of file dsa.c.

1078 {
1079  size_t i,
1080  j;
1081 
1082  /*
1083  * Note: This gives an inconsistent snapshot as it acquires and releases
1084  * individual locks as it goes...
1085  */
1086 
1089  fprintf(stderr, "dsa_area handle %x:\n", area->control->handle);
1090  fprintf(stderr, " max_total_segment_size: %zu\n",
1092  fprintf(stderr, " total_segment_size: %zu\n",
1093  area->control->total_segment_size);
1094  fprintf(stderr, " refcnt: %d\n", area->control->refcnt);
1095  fprintf(stderr, " pinned: %c\n", area->control->pinned ? 't' : 'f');
1096  fprintf(stderr, " segment bins:\n");
1097  for (i = 0; i < DSA_NUM_SEGMENT_BINS; ++i)
1098  {
1100  {
1101  dsa_segment_index segment_index;
1102 
1103  fprintf(stderr,
1104  " segment bin %zu (at least %d contiguous pages free):\n",
1105  i, 1 << (i - 1));
1106  segment_index = area->control->segment_bins[i];
1107  while (segment_index != DSA_SEGMENT_INDEX_NONE)
1108  {
1109  dsa_segment_map *segment_map;
1110 
1111  segment_map =
1112  get_segment_by_index(area, segment_index);
1113 
1114  fprintf(stderr,
1115  " segment index %zu, usable_pages = %zu, "
1116  "contiguous_pages = %zu, mapped at %p\n",
1117  segment_index,
1118  segment_map->header->usable_pages,
1119  fpm_largest(segment_map->fpm),
1120  segment_map->mapped_address);
1121  segment_index = segment_map->header->next;
1122  }
1123  }
1124  }
1126 
1127  fprintf(stderr, " pools:\n");
1128  for (i = 0; i < DSA_NUM_SIZE_CLASSES; ++i)
1129  {
1130  bool found = false;
1131 
1133  for (j = 0; j < DSA_FULLNESS_CLASSES; ++j)
1134  if (DsaPointerIsValid(area->control->pools[i].spans[j]))
1135  found = true;
1136  if (found)
1137  {
1139  fprintf(stderr, " pool for blocks of span objects:\n");
1140  else if (i == DSA_SCLASS_SPAN_LARGE)
1141  fprintf(stderr, " pool for large object spans:\n");
1142  else
1143  fprintf(stderr,
1144  " pool for size class %zu (object size %hu bytes):\n",
1145  i, dsa_size_classes[i]);
1146  for (j = 0; j < DSA_FULLNESS_CLASSES; ++j)
1147  {
1148  if (!DsaPointerIsValid(area->control->pools[i].spans[j]))
1149  fprintf(stderr, " fullness class %zu is empty\n", j);
1150  else
1151  {
1152  dsa_pointer span_pointer = area->control->pools[i].spans[j];
1153 
1154  fprintf(stderr, " fullness class %zu:\n", j);
1155  while (DsaPointerIsValid(span_pointer))
1156  {
1157  dsa_area_span *span;
1158 
1159  span = dsa_get_address(area, span_pointer);
1160  fprintf(stderr,
1161  " span descriptor at "
1162  DSA_POINTER_FORMAT ", superblock at "
1164  ", pages = %zu, objects free = %hu/%hu\n",
1165  span_pointer, span->start, span->npages,
1166  span->nallocatable, span->nmax);
1167  span_pointer = span->nextspan;
1168  }
1169  }
1170  }
1171  }
1173  }
1174 }
#define DSA_POINTER_FORMAT
Definition: dsa.h:69
int j
Definition: isn.c:74
#define fprintf
Definition: port.h:242
bool pinned
Definition: dsa.c:331
uint16 nmax
Definition: dsa.c:217

References check_for_freed_segments_locked(), dsa_area::control, DSA_AREA_LOCK, DSA_FULLNESS_CLASSES, dsa_get_address(), DSA_NUM_SEGMENT_BINS, DSA_NUM_SIZE_CLASSES, DSA_POINTER_FORMAT, DSA_SCLASS_BLOCK_OF_SPANS, DSA_SCLASS_LOCK, DSA_SCLASS_SPAN_LARGE, DSA_SEGMENT_INDEX_NONE, dsa_size_classes, DsaPointerIsValid, dsa_segment_map::fpm, fpm_largest, fprintf, get_segment_by_index(), dsa_area_control::handle, dsa_segment_map::header, i, j, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), dsa_segment_map::mapped_address, dsa_area_control::max_total_segment_size, dsa_area_span::nallocatable, dsa_segment_header::next, dsa_area_span::nextspan, dsa_area_span::nmax, dsa_area_span::npages, dsa_area_control::pinned, dsa_area_control::pools, dsa_area_control::refcnt, dsa_area_control::segment_bins, dsa_area_pool::spans, dsa_area_span::start, dsa_area_control::total_segment_size, and dsa_segment_header::usable_pages.

◆ dsa_free()

void dsa_free ( dsa_area * area,
dsa_pointer  dp 
)

Definition at line 832 of file dsa.c.

833 {
834  dsa_segment_map *segment_map;
835  int pageno;
836  dsa_pointer span_pointer;
837  dsa_area_span *span;
838  char *superblock;
839  char *object;
840  size_t size;
841  int size_class;
842 
843  /* Make sure we don't have a stale segment in the slot 'dp' refers to. */
845 
846  /* Locate the object, span and pool. */
847  segment_map = get_segment_by_index(area, DSA_EXTRACT_SEGMENT_NUMBER(dp));
848  pageno = DSA_EXTRACT_OFFSET(dp) / FPM_PAGE_SIZE;
849  span_pointer = segment_map->pagemap[pageno];
850  span = dsa_get_address(area, span_pointer);
851  superblock = dsa_get_address(area, span->start);
852  object = dsa_get_address(area, dp);
853  size_class = span->size_class;
854  size = dsa_size_classes[size_class];
855 
856  /*
857  * Special case for large objects that live in a special span: we return
858  * those pages directly to the free page manager and free the span.
859  */
860  if (span->size_class == DSA_SCLASS_SPAN_LARGE)
861  {
862 
863 #ifdef CLOBBER_FREED_MEMORY
864  memset(object, 0x7f, span->npages * FPM_PAGE_SIZE);
865 #endif
866 
867  /* Give pages back to free page manager. */
869  FreePageManagerPut(segment_map->fpm,
871  span->npages);
873  /* Unlink span. */
875  LW_EXCLUSIVE);
876  unlink_span(area, span);
878  /* Free the span object so it can be reused. */
879  dsa_free(area, span_pointer);
880  return;
881  }
882 
883 #ifdef CLOBBER_FREED_MEMORY
884  memset(object, 0x7f, size);
885 #endif
886 
887  LWLockAcquire(DSA_SCLASS_LOCK(area, size_class), LW_EXCLUSIVE);
888 
889  /* Put the object on the span's freelist. */
890  Assert(object >= superblock);
891  Assert(object < superblock + DSA_SUPERBLOCK_SIZE);
892  Assert((object - superblock) % size == 0);
893  NextFreeObjectIndex(object) = span->firstfree;
894  span->firstfree = (object - superblock) / size;
895  ++span->nallocatable;
896 
897  /*
898  * See if the span needs to be moved to a different fullness class, or be
899  * freed so its pages can be given back to the segment.
900  */
901  if (span->nallocatable == 1 && span->fclass == DSA_FULLNESS_CLASSES - 1)
902  {
903  /*
904  * The block was completely full and is located in the
905  * highest-numbered fullness class, which is never scanned for free
906  * chunks. We must move it to the next-lower fullness class.
907  */
908  unlink_span(area, span);
909  add_span_to_fullness_class(area, span, span_pointer,
911 
912  /*
913  * If this is the only span, and there is no active span, then we
914  * should probably move this span to fullness class 1. (Otherwise if
915  * you allocate exactly all the objects in the only span, it moves to
916  * class 3, then you free them all, it moves to 2, and then is given
917  * back, leaving no active span).
918  */
919  }
920  else if (span->nallocatable == span->nmax &&
921  (span->fclass != 1 || span->prevspan != InvalidDsaPointer))
922  {
923  /*
924  * This entire block is free, and it's not the active block for this
925  * size class. Return the memory to the free page manager. We don't
926  * do this for the active block to prevent hysteresis: if we
927  * repeatedly allocate and free the only chunk in the active block, it
928  * will be very inefficient if we deallocate and reallocate the block
929  * every time.
930  */
931  destroy_superblock(area, span_pointer);
932  }
933 
934  LWLockRelease(DSA_SCLASS_LOCK(area, size_class));
935 }
static void check_for_freed_segments(dsa_area *area)
Definition: dsa.c:2241
static void add_span_to_fullness_class(dsa_area *area, dsa_area_span *span, dsa_pointer span_pointer, int fclass)
Definition: dsa.c:1899
#define DSA_SUPERBLOCK_SIZE
Definition: dsa.c:390
static void destroy_superblock(dsa_area *area, dsa_pointer span_pointer)
Definition: dsa.c:1812

References add_span_to_fullness_class(), Assert(), check_for_freed_segments(), destroy_superblock(), DSA_AREA_LOCK, DSA_EXTRACT_OFFSET, DSA_EXTRACT_SEGMENT_NUMBER, DSA_FULLNESS_CLASSES, dsa_get_address(), DSA_SCLASS_LOCK, DSA_SCLASS_SPAN_LARGE, dsa_size_classes, DSA_SUPERBLOCK_SIZE, dsa_area_span::fclass, dsa_area_span::firstfree, dsa_segment_map::fpm, FPM_PAGE_SIZE, FreePageManagerPut(), get_segment_by_index(), InvalidDsaPointer, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), dsa_area_span::nallocatable, NextFreeObjectIndex, dsa_area_span::nmax, dsa_area_span::npages, dsa_segment_map::pagemap, dsa_area_span::prevspan, dsa_area_span::size_class, dsa_area_span::start, and unlink_span().

Referenced by delete_item_from_bucket(), delete_key_from_bucket(), destroy_superblock(), dsa_allocate_extended(), dshash_create(), dshash_destroy(), ExecHashTableDetach(), ExecHashTableDetachBatch(), ExecParallelCleanup(), ExecParallelHashIncreaseNumBatches(), ExecParallelHashIncreaseNumBuckets(), ExecParallelHashRepartitionFirst(), ExecParallelReinitialize(), find_or_make_matching_shared_tupledesc(), pagetable_free(), pgstat_free_entry(), resize(), and tbm_free_shared_area().

◆ dsa_get_address()

void* dsa_get_address ( dsa_area area,
dsa_pointer  dp 
)

Definition at line 944 of file dsa.c.

945 {
947  size_t offset;
948 
949  /* Convert InvalidDsaPointer to NULL. */
950  if (!DsaPointerIsValid(dp))
951  return NULL;
952 
953  /* Process any requests to detach from freed segments. */
955 
956  /* Break the dsa_pointer into its components. */
958  offset = DSA_EXTRACT_OFFSET(dp);
960 
961  /* Check if we need to cause this segment to be mapped in. */
962  if (unlikely(area->segment_maps[index].mapped_address == NULL))
963  {
964  /* Call for effect (we don't need the result). */
966  }
967 
968  return area->segment_maps[index].mapped_address + offset;
969 }

References Assert(), check_for_freed_segments(), DSA_EXTRACT_OFFSET, DSA_EXTRACT_SEGMENT_NUMBER, DSA_MAX_SEGMENTS, DsaPointerIsValid, get_segment_by_index(), dsa_segment_map::mapped_address, dsa_area::segment_maps, and unlikely.

Referenced by add_span_to_fullness_class(), alloc_object(), delete_item_from_bucket(), delete_key_from_bucket(), destroy_superblock(), dsa_allocate_extended(), dsa_dump(), dsa_free(), dsa_trim(), dshash_attach(), dshash_create(), dshash_destroy(), dshash_dump(), dshash_seq_next(), ensure_active_superblock(), ensure_valid_bucket_pointers(), ExecHashTableDetachBatch(), ExecParallelHashEnsureBatchAccessors(), ExecParallelHashFirstTuple(), ExecParallelHashIncreaseNumBatches(), ExecParallelHashIncreaseNumBuckets(), ExecParallelHashJoinSetUpBatches(), ExecParallelHashNextTuple(), ExecParallelHashPopChunkQueue(), ExecParallelHashRepartitionRest(), ExecParallelHashTableAlloc(), ExecParallelHashTableSetCurrentBatch(), ExecParallelHashTupleAlloc(), find_in_bucket(), find_or_make_matching_shared_tupledesc(), init_span(), insert_into_bucket(), insert_item_into_bucket(), lookup_rowtype_tupdesc_internal(), pagetable_allocate(), ParallelQueryMain(), pgstat_build_snapshot(), pgstat_get_entry_ref(), pgstat_init_entry(), pgstat_reinit_entry(), pgstat_reset_matching_entries(), pgstat_write_statsfile(), resize(), SerializeParamExecParams(), share_tupledesc(), shared_record_table_compare(), shared_record_table_hash(), tbm_attach_shared_iterate(), tbm_free_shared_area(), tbm_prepare_shared_iterate(), transfer_first_span(), and unlink_span().

◆ dsa_get_handle()

dsa_handle dsa_get_handle ( dsa_area area)

Definition at line 506 of file dsa.c.

507 {
509  return area->control->handle;
510 }

References Assert(), dsa_area::control, DSA_HANDLE_INVALID, and dsa_area_control::handle.

Referenced by logicalrep_launcher_attach_dshmem().

◆ dsa_minimum_size()

size_t dsa_minimum_size ( void  )

Definition at line 1181 of file dsa.c.

1182 {
1183  size_t size;
1184  int pages = 0;
1185 
1186  size = MAXALIGN(sizeof(dsa_area_control)) +
1187  MAXALIGN(sizeof(FreePageManager));
1188 
1189  /* Figure out how many pages we need, including the page map... */
1190  while (((size + FPM_PAGE_SIZE - 1) / FPM_PAGE_SIZE) > pages)
1191  {
1192  ++pages;
1193  size += sizeof(dsa_pointer);
1194  }
1195 
1196  return pages * FPM_PAGE_SIZE;
1197 }

References FPM_PAGE_SIZE, and MAXALIGN.

Referenced by create_internal(), ExecInitParallelPlan(), and pgstat_dsa_init_size().

◆ dsa_on_dsm_detach_release_in_place()

void dsa_on_dsm_detach_release_in_place ( dsm_segment segment,
Datum  place 
)

Definition at line 584 of file dsa.c.

585 {
587 }
void dsa_release_in_place(void *place)
Definition: dsa.c:613
static Pointer DatumGetPointer(Datum X)
Definition: postgres.h:312

References DatumGetPointer(), and dsa_release_in_place().

Referenced by dsa_attach(), dsa_attach_in_place(), dsa_create(), and dsa_create_in_place().

◆ dsa_on_shmem_exit_release_in_place()

void dsa_on_shmem_exit_release_in_place ( int  code,
Datum  place 
)

Definition at line 598 of file dsa.c.

599 {
601 }

References DatumGetPointer(), and dsa_release_in_place().

◆ dsa_pin()

void dsa_pin ( dsa_area area)

Definition at line 977 of file dsa.c.

978 {
980  if (area->control->pinned)
981  {
983  elog(ERROR, "dsa_area already pinned");
984  }
985  area->control->pinned = true;
986  ++area->control->refcnt;
988 }

References dsa_area::control, DSA_AREA_LOCK, elog(), ERROR, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), dsa_area_control::pinned, and dsa_area_control::refcnt.

Referenced by logicalrep_launcher_attach_dshmem(), and StatsShmemInit().

◆ dsa_pin_mapping()

void dsa_pin_mapping ( dsa_area area)

Definition at line 643 of file dsa.c.

644 {
645  int i;
646 
647  Assert(!area->mapping_pinned);
648  area->mapping_pinned = true;
649 
650  for (i = 0; i <= area->high_segment_index; ++i)
651  if (area->segment_maps[i].segment != NULL)
653 }
void dsm_pin_mapping(dsm_segment *seg)
Definition: dsm.c:888

References Assert(), dsm_pin_mapping(), dsa_area::high_segment_index, i, dsa_area::mapping_pinned, dsa_segment_map::segment, and dsa_area::segment_maps.

Referenced by AttachSession(), GetSessionDsmHandle(), logicalrep_launcher_attach_dshmem(), and pgstat_attach_shmem().

◆ dsa_release_in_place()

void dsa_release_in_place ( void *  place)

Definition at line 613 of file dsa.c.

/*
 * dsa_release_in_place: drop one reference to the DSA area whose control
 * object is located at 'place'.  If this was the last reference, unpin
 * every DSM segment still recorded in the control object so the segments
 * can be reclaimed.
 */
614 {
615  dsa_area_control *control = (dsa_area_control *) place;
616  int i;
617 
618  LWLockAcquire(&control->lock, LW_EXCLUSIVE);
 /*
  * Sanity check: 'place' must point at a DSA control object.  The magic
  * is XORed with the handle and (presumably) the segment index, 0 here
  * for the control segment.
  */
619  Assert(control->segment_header.magic ==
620  (DSA_SEGMENT_HEADER_MAGIC ^ control->handle ^ 0));
621  Assert(control->refcnt > 0);
622  if (--control->refcnt == 0)
623  {
 /* Last reference is gone: unpin all segments that still exist. */
624  for (i = 0; i <= control->high_segment_index; ++i)
625  {
626  dsm_handle handle;
627 
628  handle = control->segment_handles[i];
629  if (handle != DSM_HANDLE_INVALID)
630  dsm_unpin_segment(handle);
631  }
632  }
633  LWLockRelease(&control->lock);
634 }
uint32 dsm_handle
Definition: dsm_impl.h:55
dsa_segment_index high_segment_index
Definition: dsa.c:327

References Assert(), DSA_SEGMENT_HEADER_MAGIC, DSM_HANDLE_INVALID, dsm_unpin_segment(), dsa_area_control::handle, dsa_area_control::high_segment_index, i, dsa_area_control::lock, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), dsa_segment_header::magic, dsa_area_control::refcnt, dsa_area_control::segment_handles, and dsa_area_control::segment_header.

Referenced by dsa_on_dsm_detach_release_in_place(), and dsa_on_shmem_exit_release_in_place().

◆ dsa_set_size_limit()

void dsa_set_size_limit ( dsa_area area,
size_t  limit 
)

◆ dsa_trim()

void dsa_trim ( dsa_area area)

Definition at line 1032 of file dsa.c.

/*
 * dsa_trim: scan every ordinary size class and return any entirely-free
 * superblock found in fullness class 1 to its segment's free page manager,
 * so the pages can be reused for other size classes or segments released.
 */
1033 {
1034  int size_class;
1035 
1036  /*
1037  * Trim in reverse pool order so we get to the spans-of-spans last, just
1038  * in case any become entirely free while processing all the other pools.
1039  */
1040  for (size_class = DSA_NUM_SIZE_CLASSES - 1; size_class >= 0; --size_class)
1041  {
1042  dsa_area_pool *pool = &area->control->pools[size_class];
1043  dsa_pointer span_pointer;
1044 
1045  if (size_class == DSA_SCLASS_SPAN_LARGE)
1046  {
1047  /* Large object frees give back segments aggressively already. */
1048  continue;
1049  }
1050 
1051  /*
1052  * Search fullness class 1 only. That is where we expect to find an
1053  * entirely empty superblock (entirely empty superblocks in other
1054  * fullness classes are returned to the free page map by dsa_free).
1055  */
1056  LWLockAcquire(DSA_SCLASS_LOCK(area, size_class), LW_EXCLUSIVE);
1057  span_pointer = pool->spans[1];
1058  while (DsaPointerIsValid(span_pointer))
1059  {
1060  dsa_area_span *span = dsa_get_address(area, span_pointer);
 /*
  * Fetch the successor before possibly destroying this superblock:
  * destroy_superblock() frees the span descriptor, so 'span' must
  * not be dereferenced after that call.
  */
1061  dsa_pointer next = span->nextspan;
1062 
 /* A span with every object allocatable is entirely empty. */
1063  if (span->nallocatable == span->nmax)
1064  destroy_superblock(area, span_pointer);
1065 
1066  span_pointer = next;
1067  }
1068  LWLockRelease(DSA_SCLASS_LOCK(area, size_class));
1069  }
1070 }
static int32 next
Definition: blutils.c:219

References dsa_area::control, destroy_superblock(), dsa_get_address(), DSA_NUM_SIZE_CLASSES, DSA_SCLASS_LOCK, DSA_SCLASS_SPAN_LARGE, DsaPointerIsValid, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), dsa_area_span::nallocatable, next, dsa_area_span::nextspan, dsa_area_span::nmax, dsa_area_control::pools, and dsa_area_pool::spans.

◆ dsa_unpin()

void dsa_unpin ( dsa_area area)

Definition at line 996 of file dsa.c.

997 {
999  Assert(area->control->refcnt > 1);
1000  if (!area->control->pinned)
1001  {
1003  elog(ERROR, "dsa_area not pinned");
1004  }
1005  area->control->pinned = false;
1006  --area->control->refcnt;
1008 }

References Assert(), dsa_area::control, DSA_AREA_LOCK, elog(), ERROR, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), dsa_area_control::pinned, and dsa_area_control::refcnt.

◆ ensure_active_superblock()

/*
 * Make sure that the span list for fullness class 1 of the given pool is
 * non-empty, so that the caller has a superblock to allocate from.  Returns
 * true on success, false if no span with free space could be found or
 * created.  The caller must hold this size class's pool lock.
 */
static bool
ensure_active_superblock(dsa_area *area, dsa_area_pool *pool,
						 int size_class)
{
	dsa_pointer span_pointer;
	dsa_pointer start_pointer;
	size_t		obsize = dsa_size_classes[size_class];
	size_t		nmax;
	int			fclass;
	size_t		npages = 1;
	size_t		first_page;
	size_t		i;
	dsa_segment_map *segment_map;

	Assert(LWLockHeldByMe(DSA_SCLASS_LOCK(area, size_class)));

	/*
	 * Compute the number of objects that will fit in a block of this size
	 * class.  Span-of-spans blocks are just a single page, and the first
	 * object isn't available for use because it describes the block-of-spans
	 * itself.
	 */
	if (size_class == DSA_SCLASS_BLOCK_OF_SPANS)
		nmax = FPM_PAGE_SIZE / obsize - 1;
	else
		nmax = DSA_SUPERBLOCK_SIZE / obsize;

	/*
	 * If fullness class 1 is empty, try to find a span to put in it by
	 * scanning higher-numbered fullness classes (excluding the last one,
	 * whose blocks are certain to all be completely full).
	 */
	for (fclass = 2; fclass < DSA_FULLNESS_CLASSES - 1; ++fclass)
	{
		span_pointer = pool->spans[fclass];

		while (DsaPointerIsValid(span_pointer))
		{
			int			tfclass;
			dsa_area_span *span;
			dsa_area_span *nextspan;
			dsa_area_span *prevspan;
			dsa_pointer next_span_pointer;

			span = (dsa_area_span *)
				dsa_get_address(area, span_pointer);
			next_span_pointer = span->nextspan;

			/* Figure out what fullness class should contain this span. */
			tfclass = (nmax - span->nallocatable)
				* (DSA_FULLNESS_CLASSES - 1) / nmax;

			/* Look up next span. */
			if (DsaPointerIsValid(span->nextspan))
				nextspan = (dsa_area_span *)
					dsa_get_address(area, span->nextspan);
			else
				nextspan = NULL;

			/*
			 * If utilization has dropped enough that this now belongs in some
			 * other fullness class, move it there.
			 */
			if (tfclass < fclass)
			{
				/* Remove from the current fullness class list. */
				if (pool->spans[fclass] == span_pointer)
				{
					/* It was the head; remove it. */
					pool->spans[fclass] = span->nextspan;
					if (nextspan != NULL)
						nextspan->prevspan = InvalidDsaPointer;
				}
				else
				{
					/* It was not the head. */
					prevspan = (dsa_area_span *)
						dsa_get_address(area, span->prevspan);
					prevspan->nextspan = span->nextspan;
				}
				if (nextspan != NULL)
					nextspan->prevspan = span->prevspan;

				/* Push onto the head of the new fullness class list. */
				span->nextspan = pool->spans[tfclass];
				pool->spans[tfclass] = span_pointer;
				span->prevspan = InvalidDsaPointer;
				if (DsaPointerIsValid(span->nextspan))
				{
					nextspan = (dsa_area_span *)
						dsa_get_address(area, span->nextspan);
					nextspan->prevspan = span_pointer;
				}
				span->fclass = tfclass;
			}

			/* Advance to next span on list. */
			span_pointer = next_span_pointer;
		}

		/* Stop now if we found a suitable block. */
		if (DsaPointerIsValid(pool->spans[1]))
			return true;
	}

	/*
	 * If there are no blocks that properly belong in fullness class 1, pick
	 * one from some other fullness class and move it there anyway, so that we
	 * have an allocation target.  Our last choice is to transfer a block
	 * that's almost empty (and might become completely empty soon if left
	 * alone), but even that is better than failing, which is what we must do
	 * if there are no blocks at all with freespace.
	 */
	Assert(!DsaPointerIsValid(pool->spans[1]));
	for (fclass = 2; fclass < DSA_FULLNESS_CLASSES - 1; ++fclass)
		if (transfer_first_span(area, pool, fclass, 1))
			return true;
	if (!DsaPointerIsValid(pool->spans[1]) &&
		transfer_first_span(area, pool, 0, 1))
		return true;

	/*
	 * We failed to find an existing span with free objects, so we need to
	 * allocate a new superblock and construct a new span to manage it.
	 *
	 * First, get a dsa_area_span object to describe the new superblock ...
	 * unless this allocation is for a dsa_area_span object, in which case
	 * that's surely not going to work.  We handle that case by storing the
	 * span describing a block-of-spans inline.
	 */
	if (size_class != DSA_SCLASS_BLOCK_OF_SPANS)
	{
		span_pointer = alloc_object(area, DSA_SCLASS_BLOCK_OF_SPANS);
		if (!DsaPointerIsValid(span_pointer))
			return false;
		npages = DSA_PAGES_PER_SUPERBLOCK;
	}

	/* Find or create a segment and allocate the superblock. */
	LWLockAcquire(DSA_AREA_LOCK(area), LW_EXCLUSIVE);
	segment_map = get_best_segment(area, npages);
	if (segment_map == NULL)
	{
		segment_map = make_new_segment(area, npages);
		if (segment_map == NULL)
		{
			LWLockRelease(DSA_AREA_LOCK(area));
			return false;
		}
	}

	/*
	 * This shouldn't happen: get_best_segment() or make_new_segment()
	 * promised that we can successfully allocate npages.
	 */
	if (!FreePageManagerGet(segment_map->fpm, npages, &first_page))
		elog(FATAL,
			 "dsa_allocate could not find %zu free pages for superblock",
			 npages);
	LWLockRelease(DSA_AREA_LOCK(area));

	/* Compute the start of the superblock. */
	start_pointer =
		DSA_MAKE_POINTER(get_segment_index(area, segment_map),
						 first_page * FPM_PAGE_SIZE);

	/*
	 * If this is a block-of-spans, carve the descriptor right out of the
	 * allocated space.
	 */
	if (size_class == DSA_SCLASS_BLOCK_OF_SPANS)
	{
		/*
		 * We have a pointer into the segment.  We need to build a dsa_pointer
		 * from the segment index and offset into the segment.
		 */
		span_pointer = start_pointer;
	}

	/* Initialize span and pagemap. */
	init_span(area, span_pointer, pool, start_pointer, npages, size_class);
	for (i = 0; i < npages; ++i)
		segment_map->pagemap[first_page + i] = span_pointer;

	return true;
}
#define DSA_PAGES_PER_SUPERBLOCK
Definition: dsa.c:105

References alloc_object(), Assert(), DSA_AREA_LOCK, DSA_FULLNESS_CLASSES, dsa_get_address(), DSA_MAKE_POINTER, DSA_PAGES_PER_SUPERBLOCK, DSA_SCLASS_BLOCK_OF_SPANS, DSA_SCLASS_LOCK, dsa_size_classes, DSA_SUPERBLOCK_SIZE, DsaPointerIsValid, elog(), FATAL, dsa_area_span::fclass, dsa_segment_map::fpm, FPM_PAGE_SIZE, FreePageManagerGet(), get_best_segment(), get_segment_index, i, init_span(), InvalidDsaPointer, LW_EXCLUSIVE, LWLockAcquire(), LWLockHeldByMe(), LWLockRelease(), make_new_segment(), dsa_area_span::nallocatable, dsa_area_span::nextspan, dsa_segment_map::pagemap, dsa_area_span::prevspan, dsa_area_pool::spans, and transfer_first_span().

Referenced by alloc_object().

◆ get_best_segment()

/*
 * Search the available segments for one with at least npages of contiguous
 * free space, re-binning any segment found to be in the wrong bin along the
 * way.  Returns NULL if no segment qualifies.  The caller must hold the area
 * lock.
 */
static dsa_segment_map *
get_best_segment(dsa_area *area, size_t npages)
{
	size_t		bin;

	Assert(LWLockHeldByMe(DSA_AREA_LOCK(area)));
	check_for_freed_segments_locked(area);

	/*
	 * Start searching from the first bin that *might* have enough contiguous
	 * pages.
	 */
	for (bin = contiguous_pages_to_segment_bin(npages);
		 bin < DSA_NUM_SEGMENT_BINS;
		 ++bin)
	{
		/*
		 * The minimum contiguous size that any segment in this bin should
		 * have.  We'll re-bin if we see segments with fewer.
		 */
		size_t		threshold = (size_t) 1 << (bin - 1);
		dsa_segment_index segment_index;

		/* Search this bin for a segment with enough contiguous space. */
		segment_index = area->control->segment_bins[bin];
		while (segment_index != DSA_SEGMENT_INDEX_NONE)
		{
			dsa_segment_map *segment_map;
			dsa_segment_index next_segment_index;
			size_t		contiguous_pages;

			segment_map = get_segment_by_index(area, segment_index);
			next_segment_index = segment_map->header->next;
			contiguous_pages = fpm_largest(segment_map->fpm);

			/* Not enough for the request, still enough for this bin. */
			if (contiguous_pages >= threshold && contiguous_pages < npages)
			{
				segment_index = next_segment_index;
				continue;
			}

			/* Re-bin it if it's no longer in the appropriate bin. */
			if (contiguous_pages < threshold)
			{
				size_t		new_bin;

				new_bin = contiguous_pages_to_segment_bin(contiguous_pages);

				/* Remove it from its current bin. */
				unlink_segment(area, segment_map);

				/* Push it onto the front of its new bin. */
				segment_map->header->prev = DSA_SEGMENT_INDEX_NONE;
				segment_map->header->next =
					area->control->segment_bins[new_bin];
				segment_map->header->bin = new_bin;
				area->control->segment_bins[new_bin] = segment_index;
				if (segment_map->header->next != DSA_SEGMENT_INDEX_NONE)
				{
					dsa_segment_map *next;

					next = get_segment_by_index(area,
												segment_map->header->next);
					Assert(next->header->bin == new_bin);
					next->header->prev = segment_index;
				}

				/*
				 * But fall through to see if it's enough to satisfy this
				 * request anyway....
				 */
			}

			/* Check if we are done. */
			if (contiguous_pages >= npages)
				return segment_map;

			/* Continue searching the same bin. */
			segment_index = next_segment_index;
		}
	}

	/* Not found. */
	return NULL;
}

References Assert(), dsa_segment_header::bin, check_for_freed_segments_locked(), contiguous_pages_to_segment_bin(), dsa_area::control, DSA_AREA_LOCK, DSA_NUM_SEGMENT_BINS, DSA_SEGMENT_INDEX_NONE, dsa_segment_map::fpm, fpm_largest, get_segment_by_index(), dsa_segment_map::header, LWLockHeldByMe(), next, dsa_segment_header::next, dsa_segment_header::prev, dsa_area_control::segment_bins, and unlink_segment().

Referenced by dsa_allocate_extended(), and ensure_active_superblock().

◆ get_segment_by_index()

/*
 * Return the segment map corresponding to a given segment index, mapping the
 * segment in with dsm_attach() if it isn't already mapped in this backend.
 * It's an error to ask for a segment slot that is unused or has been freed.
 */
static dsa_segment_map *
get_segment_by_index(dsa_area *area, dsa_segment_index index)
{
	if (unlikely(area->segment_maps[index].mapped_address == NULL))
	{
		dsm_handle	handle;
		dsm_segment *segment;
		dsa_segment_map *segment_map;

		/*
		 * If we are reached by dsa_free or dsa_get_address, there must be at
		 * least one object allocated in the referenced segment.  Otherwise,
		 * their caller has a double-free or access-after-free bug, which we
		 * have no hope of detecting.  So we know it's safe to access this
		 * array slot without holding a lock; it won't change underneath us.
		 * Furthermore, we know that we can see the latest contents of the
		 * slot, as explained in check_for_freed_segments, which those
		 * functions call before arriving here.
		 */
		handle = area->control->segment_handles[index];

		/* It's an error to try to access an unused slot. */
		if (handle == DSM_HANDLE_INVALID)
			elog(ERROR,
				 "dsa_area could not attach to a segment that has been freed");

		segment = dsm_attach(handle);
		if (segment == NULL)
			elog(ERROR, "dsa_area could not attach to segment");
		if (area->mapping_pinned)
			dsm_pin_mapping(segment);
		segment_map = &area->segment_maps[index];
		segment_map->segment = segment;
		segment_map->mapped_address = dsm_segment_address(segment);
		segment_map->header =
			(dsa_segment_header *) segment_map->mapped_address;
		/* The FreePageManager and pagemap follow the header, MAXALIGNed. */
		segment_map->fpm = (FreePageManager *)
			(segment_map->mapped_address +
			 MAXALIGN(sizeof(dsa_segment_header)));
		segment_map->pagemap = (dsa_pointer *)
			(segment_map->mapped_address +
			 MAXALIGN(sizeof(dsa_segment_header)) +
			 MAXALIGN(sizeof(FreePageManager)));

		/* Remember the highest index this backend has ever mapped. */
		if (area->high_segment_index < index)
			area->high_segment_index = index;

		/* The magic mixes in the handle and index; see make_new_segment. */
		Assert(segment_map->header->magic ==
			   (DSA_SEGMENT_HEADER_MAGIC ^ area->control->handle ^ index));
	}

	/*
	 * Callers of dsa_get_address() and dsa_free() don't hold the area lock,
	 * but it's a bug in the calling code and undefined behavior if the
	 * address is not live (ie if the segment might possibly have been freed,
	 * they're trying to use a dangling pointer).
	 *
	 * For dsa.c code that holds the area lock to manipulate segment_bins
	 * lists, it would be a bug if we ever reach a freed segment here.  After
	 * it's marked as freed, the only thing any backend should do with it is
	 * unmap it, and it should always have done that in
	 * check_for_freed_segments_locked() before arriving here to resolve an
	 * index to a segment_map.
	 *
	 * Either way we can assert that we aren't returning a freed segment.
	 */
	Assert(!area->segment_maps[index].header->freed);

	return &area->segment_maps[index];
}

References Assert(), dsa_area::control, DSA_SEGMENT_HEADER_MAGIC, dsm_attach(), DSM_HANDLE_INVALID, dsm_pin_mapping(), dsm_segment_address(), elog(), ERROR, dsa_segment_map::fpm, dsa_segment_header::freed, dsa_area_control::handle, dsa_segment_map::header, dsa_area::high_segment_index, dsa_segment_header::magic, dsa_segment_map::mapped_address, dsa_area::mapping_pinned, MAXALIGN, dsa_segment_map::pagemap, dsa_segment_map::segment, dsa_area_control::segment_handles, dsa_area::segment_maps, and unlikely.

Referenced by destroy_superblock(), dsa_dump(), dsa_free(), dsa_get_address(), get_best_segment(), make_new_segment(), and unlink_segment().

◆ init_span()

/*
 * Initialize a new span descriptor for a superblock starting at 'start' and
 * spanning 'npages' pages, and push it onto the front of the pool's span
 * list for fullness class 1.  The caller must hold the lock for the span's
 * size class.
 */
static void
init_span(dsa_area *area,
		  dsa_pointer span_pointer,
		  dsa_area_pool *pool, dsa_pointer start, size_t npages,
		  uint16 size_class)
{
	dsa_area_span *span = dsa_get_address(area, span_pointer);
	size_t		obsize = dsa_size_classes[size_class];

	/*
	 * The per-pool lock must be held because we manipulate the span list for
	 * this pool.
	 */
	Assert(LWLockHeldByMe(DSA_SCLASS_LOCK(area, size_class)));

	/* Push this span onto the front of the span list for fullness class 1. */
	if (DsaPointerIsValid(pool->spans[1]))
	{
		dsa_area_span *head = (dsa_area_span *)
			dsa_get_address(area, pool->spans[1]);

		head->prevspan = span_pointer;
	}
	span->pool = DsaAreaPoolToDsaPointer(area, pool);
	span->nextspan = pool->spans[1];
	span->prevspan = InvalidDsaPointer;
	pool->spans[1] = span_pointer;

	span->start = start;
	span->npages = npages;
	span->size_class = size_class;
	span->ninitialized = 0;
	if (size_class == DSA_SCLASS_BLOCK_OF_SPANS)
	{
		/*
		 * A block-of-spans contains its own descriptor, so mark one object as
		 * initialized and reduce the count of allocatable objects by one.
		 * Doing this here has the side effect of also reducing nmax by one,
		 * which is important to make sure we free this object at the correct
		 * time.
		 */
		span->ninitialized = 1;
		span->nallocatable = FPM_PAGE_SIZE / obsize - 1;
	}
	else if (size_class != DSA_SCLASS_SPAN_LARGE)
		span->nallocatable = DSA_SUPERBLOCK_SIZE / obsize;
	/* No objects have been freed yet, so the free list is empty. */
	span->firstfree = DSA_SPAN_NOTHING_FREE;
	span->nmax = span->nallocatable;
	span->fclass = 1;
}
#define DsaAreaPoolToDsaPointer(area, p)
Definition: dsa.c:341

References Assert(), dsa_get_address(), DSA_SCLASS_BLOCK_OF_SPANS, DSA_SCLASS_LOCK, DSA_SCLASS_SPAN_LARGE, dsa_size_classes, DSA_SPAN_NOTHING_FREE, DSA_SUPERBLOCK_SIZE, DsaAreaPoolToDsaPointer, DsaPointerIsValid, dsa_area_span::fclass, dsa_area_span::firstfree, FPM_PAGE_SIZE, InvalidDsaPointer, LWLockHeldByMe(), dsa_area_span::nallocatable, dsa_area_span::nextspan, dsa_area_span::ninitialized, dsa_area_span::nmax, dsa_area_span::npages, dsa_area_span::pool, dsa_area_span::prevspan, dsa_area_span::size_class, dsa_area_pool::spans, and dsa_area_span::start.

Referenced by dsa_allocate_extended(), and ensure_active_superblock().

◆ make_new_segment()

/*
 * Create a new DSM segment big enough to hold at least requested_pages of
 * usable space, set up its metadata (header, FreePageManager, pagemap), and
 * put it in the appropriate segment bin.  Returns NULL if no more segments
 * can be created (slot, size, or total-size limits reached, or DSM failure).
 * The caller must hold the area lock.
 */
static dsa_segment_map *
make_new_segment(dsa_area *area, size_t requested_pages)
{
	dsa_segment_index new_index;
	size_t		metadata_bytes;
	size_t		total_size;
	size_t		total_pages;
	size_t		usable_pages;
	dsa_segment_map *segment_map;
	dsm_segment *segment;

	Assert(LWLockHeldByMe(DSA_AREA_LOCK(area)));

	/* Find a segment slot that is not in use (linearly for now). */
	for (new_index = 1; new_index < DSA_MAX_SEGMENTS; ++new_index)
	{
		if (area->control->segment_handles[new_index] == DSM_HANDLE_INVALID)
			break;
	}
	if (new_index == DSA_MAX_SEGMENTS)
		return NULL;

	/*
	 * If the total size limit is already exceeded, then we exit early and
	 * avoid arithmetic wraparound in the unsigned expressions below.
	 */
	if (area->control->total_segment_size >=
		area->control->max_total_segment_size)
		return NULL;

	/*
	 * The size should be at least as big as requested, and at least big
	 * enough to follow a geometric series that approximately doubles the
	 * total storage each time we create a new segment.  We use geometric
	 * growth because the underlying DSM system isn't designed for large
	 * numbers of segments (otherwise we might even consider just using one
	 * DSM segment for each large allocation and for each superblock, and then
	 * we wouldn't need to use FreePageManager).
	 *
	 * We decide on a total segment size first, so that we produce tidy
	 * power-of-two sized segments.  This is a good property to have if we
	 * move to huge pages in the future.  Then we work back to the number of
	 * pages we can fit.
	 */
	total_size = DSA_INITIAL_SEGMENT_SIZE *
		((size_t) 1 << (new_index / DSA_NUM_SEGMENTS_AT_EACH_SIZE));
	total_size = Min(total_size, DSA_MAX_SEGMENT_SIZE);
	total_size = Min(total_size,
					 area->control->max_total_segment_size -
					 area->control->total_segment_size);

	total_pages = total_size / FPM_PAGE_SIZE;
	metadata_bytes =
		MAXALIGN(sizeof(dsa_segment_header)) +
		MAXALIGN(sizeof(FreePageManager)) +
		sizeof(dsa_pointer) * total_pages;

	/* Add padding up to next page boundary. */
	if (metadata_bytes % FPM_PAGE_SIZE != 0)
		metadata_bytes += FPM_PAGE_SIZE - (metadata_bytes % FPM_PAGE_SIZE);
	if (total_size <= metadata_bytes)
		return NULL;
	usable_pages = (total_size - metadata_bytes) / FPM_PAGE_SIZE;
	Assert(metadata_bytes + usable_pages * FPM_PAGE_SIZE <= total_size);

	/* See if that is enough... */
	if (requested_pages > usable_pages)
	{
		/*
		 * We'll make an odd-sized segment, working forward from the requested
		 * number of pages.
		 */
		usable_pages = requested_pages;
		metadata_bytes =
			MAXALIGN(sizeof(dsa_segment_header)) +
			MAXALIGN(sizeof(FreePageManager)) +
			usable_pages * sizeof(dsa_pointer);

		/* Add padding up to next page boundary. */
		if (metadata_bytes % FPM_PAGE_SIZE != 0)
			metadata_bytes += FPM_PAGE_SIZE - (metadata_bytes % FPM_PAGE_SIZE);
		total_size = metadata_bytes + usable_pages * FPM_PAGE_SIZE;

		/* Is that too large for dsa_pointer's addressing scheme? */
		if (total_size > DSA_MAX_SEGMENT_SIZE)
			return NULL;

		/* Would that exceed the limit? */
		if (total_size > area->control->max_total_segment_size -
			area->control->total_segment_size)
			return NULL;
	}

	/* Create the segment. */
	segment = dsm_create(total_size, 0);
	if (segment == NULL)
		return NULL;
	dsm_pin_segment(segment);
	if (area->mapping_pinned)
		dsm_pin_mapping(segment);

	/* Store the handle in shared memory to be found by index. */
	area->control->segment_handles[new_index] =
		dsm_segment_handle(segment);
	/* Track the highest segment index in the history of the area. */
	if (area->control->high_segment_index < new_index)
		area->control->high_segment_index = new_index;
	/* Track the highest segment index this backend has ever mapped. */
	if (area->high_segment_index < new_index)
		area->high_segment_index = new_index;
	/* Track total size of all segments. */
	area->control->total_segment_size += total_size;
	Assert(area->control->total_segment_size <=
		   area->control->max_total_segment_size);

	/* Build a segment map for this segment in this backend. */
	segment_map = &area->segment_maps[new_index];
	segment_map->segment = segment;
	segment_map->mapped_address = dsm_segment_address(segment);
	segment_map->header = (dsa_segment_header *) segment_map->mapped_address;
	segment_map->fpm = (FreePageManager *)
		(segment_map->mapped_address +
		 MAXALIGN(sizeof(dsa_segment_header)));
	segment_map->pagemap = (dsa_pointer *)
		(segment_map->mapped_address +
		 MAXALIGN(sizeof(dsa_segment_header)) +
		 MAXALIGN(sizeof(FreePageManager)));

	/* Set up the free page map. */
	FreePageManagerInitialize(segment_map->fpm, segment_map->mapped_address);
	FreePageManagerPut(segment_map->fpm, metadata_bytes / FPM_PAGE_SIZE,
					   usable_pages);

	/* Set up the segment header and put it in the appropriate bin. */
	segment_map->header->magic =
		DSA_SEGMENT_HEADER_MAGIC ^ area->control->handle ^ new_index;
	segment_map->header->usable_pages = usable_pages;
	segment_map->header->size = total_size;
	segment_map->header->bin = contiguous_pages_to_segment_bin(usable_pages);
	segment_map->header->prev = DSA_SEGMENT_INDEX_NONE;
	segment_map->header->next =
		area->control->segment_bins[segment_map->header->bin];
	segment_map->header->freed = false;
	area->control->segment_bins[segment_map->header->bin] = new_index;
	if (segment_map->header->next != DSA_SEGMENT_INDEX_NONE)
	{
		dsa_segment_map *next =
			get_segment_by_index(area, segment_map->header->next);

		Assert(next->header->bin == segment_map->header->bin);
		next->header->prev = new_index;
	}

	return segment_map;
}
#define DSA_NUM_SEGMENTS_AT_EACH_SIZE
Definition: dsa.c:78
#define DSA_MAX_SEGMENT_SIZE
Definition: dsa.c:102
int64 total_size
Definition: pg_checksums.c:72

References Assert(), dsa_segment_header::bin, contiguous_pages_to_segment_bin(), dsa_area::control, DSA_AREA_LOCK, DSA_INITIAL_SEGMENT_SIZE, DSA_MAX_SEGMENT_SIZE, DSA_MAX_SEGMENTS, DSA_NUM_SEGMENTS_AT_EACH_SIZE, DSA_SEGMENT_HEADER_MAGIC, DSA_SEGMENT_INDEX_NONE, dsm_create(), DSM_HANDLE_INVALID, dsm_pin_mapping(), dsm_pin_segment(), dsm_segment_address(), dsm_segment_handle(), dsa_segment_map::fpm, FPM_PAGE_SIZE, dsa_segment_header::freed, FreePageManagerInitialize(), FreePageManagerPut(), get_segment_by_index(), dsa_area_control::handle, dsa_segment_map::header, dsa_area_control::high_segment_index, dsa_area::high_segment_index, LWLockHeldByMe(), dsa_segment_header::magic, dsa_segment_map::mapped_address, dsa_area::mapping_pinned, dsa_area_control::max_total_segment_size, MAXALIGN, Min, next, dsa_segment_header::next, dsa_segment_map::pagemap, dsa_segment_header::prev, dsa_segment_map::segment, dsa_area_control::segment_bins, dsa_area_control::segment_handles, dsa_area::segment_maps, dsa_segment_header::size, dsa_area_control::total_segment_size, total_size, and dsa_segment_header::usable_pages.

Referenced by dsa_allocate_extended(), and ensure_active_superblock().

◆ transfer_first_span()

/*
 * Pop the first span off the list for fullness class 'fromclass' of the
 * given pool and push it onto the list for 'toclass'.  Returns false if the
 * source list was empty, true otherwise.
 */
static bool
transfer_first_span(dsa_area *area, dsa_area_pool *pool,
					int fromclass, int toclass)
{
	dsa_pointer head = pool->spans[fromclass];
	dsa_area_span *span;
	dsa_area_span *neighbor;

	/* Nothing to transfer if the source list is empty. */
	if (!DsaPointerIsValid(head))
		return false;

	span = (dsa_area_span *) dsa_get_address(area, head);

	/* Detach the span from the front of the source list. */
	pool->spans[fromclass] = span->nextspan;
	if (DsaPointerIsValid(span->nextspan))
	{
		neighbor = (dsa_area_span *)
			dsa_get_address(area, span->nextspan);
		neighbor->prevspan = InvalidDsaPointer;
	}

	/* Attach it at the front of the target list. */
	span->nextspan = pool->spans[toclass];
	pool->spans[toclass] = head;
	if (DsaPointerIsValid(span->nextspan))
	{
		neighbor = (dsa_area_span *)
			dsa_get_address(area, span->nextspan);
		neighbor->prevspan = head;
	}
	span->fclass = toclass;

	return true;
}

References dsa_get_address(), DsaPointerIsValid, dsa_area_span::fclass, InvalidDsaPointer, dsa_area_span::nextspan, dsa_area_span::prevspan, and dsa_area_pool::spans.

Referenced by alloc_object(), and ensure_active_superblock().

◆ unlink_segment()

/*
 * Remove a segment from the doubly-linked list of its current segment bin.
 * The caller must hold the area lock.
 */
static void
unlink_segment(dsa_area *area, dsa_segment_map *segment_map)
{
	if (segment_map->header->prev != DSA_SEGMENT_INDEX_NONE)
	{
		dsa_segment_map *prev;

		prev = get_segment_by_index(area, segment_map->header->prev);
		prev->header->next = segment_map->header->next;
	}
	else
	{
		/* No predecessor: it must be the head of its bin's list. */
		Assert(area->control->segment_bins[segment_map->header->bin] ==
			   get_segment_index(area, segment_map));
		area->control->segment_bins[segment_map->header->bin] =
			segment_map->header->next;
	}
	if (segment_map->header->next != DSA_SEGMENT_INDEX_NONE)
	{
		dsa_segment_map *next;

		next = get_segment_by_index(area, segment_map->header->next);
		next->header->prev = segment_map->header->prev;
	}
}

References Assert(), dsa_segment_header::bin, dsa_area::control, DSA_SEGMENT_INDEX_NONE, get_segment_by_index(), get_segment_index, dsa_segment_map::header, next, dsa_segment_header::next, dsa_segment_header::prev, and dsa_area_control::segment_bins.

Referenced by destroy_superblock(), and get_best_segment().

◆ unlink_span()

/*
 * Detach a span from the doubly-linked span list it is currently on,
 * updating the pool's list head if the span was first.
 */
static void
unlink_span(dsa_area *area, dsa_area_span *span)
{
	/* Fix up the backward link from the predecessor, or the list head. */
	if (DsaPointerIsValid(span->prevspan))
	{
		dsa_area_span *before = dsa_get_address(area, span->prevspan);

		before->nextspan = span->nextspan;
	}
	else
	{
		/* The span was the head of its fullness class list. */
		dsa_area_pool *owner = dsa_get_address(area, span->pool);

		owner->spans[span->fclass] = span->nextspan;
	}

	/* Fix up the forward link from the successor, if there is one. */
	if (DsaPointerIsValid(span->nextspan))
	{
		dsa_area_span *after = dsa_get_address(area, span->nextspan);

		after->prevspan = span->prevspan;
	}
}

References dsa_get_address(), DsaPointerIsValid, dsa_area_span::fclass, next, dsa_area_span::nextspan, dsa_area_span::pool, dsa_area_span::prevspan, and dsa_area_pool::spans.

Referenced by destroy_superblock(), and dsa_free().

Variable Documentation

◆ dsa_size_class_map

const uint8 dsa_size_class_map[]
static
Initial value:
= {
2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 11, 11, 12, 12, 13, 13,
14, 14, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16, 17, 17, 17, 17,
18, 18, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 19, 19, 19,
20, 20, 20, 20, 20, 20, 20, 20, 21, 21, 21, 21, 21, 21, 21, 21,
22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25
}

Definition at line 271 of file dsa.c.

Referenced by dsa_allocate_extended().

◆ dsa_size_classes

const uint16 dsa_size_classes[]
static
Initial value:
= {
sizeof(dsa_area_span), 0,
8, 16, 24, 32, 40, 48, 56, 64,
80, 96, 112, 128,
160, 192, 224, 256,
320, 384, 448, 512,
640, 768, 896, 1024,
1280, 1560, 1816, 2048,
2616, 3120, 3640, 4096,
5456, 6552, 7280, 8192
}

Definition at line 248 of file dsa.c.

Referenced by alloc_object(), dsa_allocate_extended(), dsa_dump(), dsa_free(), ensure_active_superblock(), and init_span().