PostgreSQL Source Code  git master
dsa.c File Reference
#include "postgres.h"
#include "port/atomics.h"
#include "port/pg_bitutils.h"
#include "storage/dsm.h"
#include "storage/ipc.h"
#include "storage/lwlock.h"
#include "storage/shmem.h"
#include "utils/dsa.h"
#include "utils/freepage.h"
#include "utils/memutils.h"
#include "utils/resowner.h"
Include dependency graph for dsa.c:

Go to the source code of this file.

Data Structures

struct  dsa_segment_header
 
struct  dsa_area_span
 
struct  dsa_area_pool
 
struct  dsa_area_control
 
struct  dsa_segment_map
 
struct  dsa_area
 

Macros

#define DSA_INITIAL_SEGMENT_SIZE   ((size_t) (1 * 1024 * 1024))
 
#define DSA_NUM_SEGMENTS_AT_EACH_SIZE   2
 
#define DSA_OFFSET_WIDTH   40 /* 1024 segments of size up to 1TB */
 
#define DSA_MAX_SEGMENTS    Min(1024, (1 << ((SIZEOF_DSA_POINTER * 8) - DSA_OFFSET_WIDTH)))
 
#define DSA_OFFSET_BITMASK   (((dsa_pointer) 1 << DSA_OFFSET_WIDTH) - 1)
 
#define DSA_MAX_SEGMENT_SIZE   ((size_t) 1 << DSA_OFFSET_WIDTH)
 
#define DSA_PAGES_PER_SUPERBLOCK   16
 
#define DSA_SEGMENT_HEADER_MAGIC   0x0ce26608
 
#define DSA_MAKE_POINTER(segment_number, offset)    (((dsa_pointer) (segment_number) << DSA_OFFSET_WIDTH) | (offset))
 
#define DSA_EXTRACT_SEGMENT_NUMBER(dp)   ((dp) >> DSA_OFFSET_WIDTH)
 
#define DSA_EXTRACT_OFFSET(dp)   ((dp) & DSA_OFFSET_BITMASK)
 
#define DSA_SEGMENT_INDEX_NONE   (~(dsa_segment_index)0)
 
#define DSA_NUM_SEGMENT_BINS   16
 
#define DSA_AREA_LOCK(area)   (&area->control->lock)
 
#define DSA_SCLASS_LOCK(area, sclass)   (&area->control->pools[sclass].lock)
 
#define NextFreeObjectIndex(object)   (* (uint16 *) (object))
 
#define DSA_NUM_SIZE_CLASSES   lengthof(dsa_size_classes)
 
#define DSA_SCLASS_BLOCK_OF_SPANS   0
 
#define DSA_SCLASS_SPAN_LARGE   1
 
#define DSA_SIZE_CLASS_MAP_QUANTUM   8
 
#define DSA_FULLNESS_CLASSES   4
 
#define DsaAreaPoolToDsaPointer(area, p)    DSA_MAKE_POINTER(0, (char *) p - (char *) area->control)
 
#define DSA_SPAN_NOTHING_FREE   ((uint16) -1)
 
#define DSA_SUPERBLOCK_SIZE   (DSA_PAGES_PER_SUPERBLOCK * FPM_PAGE_SIZE)
 
#define get_segment_index(area, segment_map_ptr)    (segment_map_ptr - &area->segment_maps[0])
 

Typedefs

typedef size_t dsa_segment_index
 

Functions

static size_t contiguous_pages_to_segment_bin (size_t n)
 
static void init_span (dsa_area *area, dsa_pointer span_pointer, dsa_area_pool *pool, dsa_pointer start, size_t npages, uint16 size_class)
 
static bool transfer_first_span (dsa_area *area, dsa_area_pool *pool, int fromclass, int toclass)
 
static dsa_pointer alloc_object (dsa_area *area, int size_class)
 
static bool ensure_active_superblock (dsa_area *area, dsa_area_pool *pool, int size_class)
 
static dsa_segment_map * get_segment_by_index (dsa_area *area, dsa_segment_index index)
 
static void destroy_superblock (dsa_area *area, dsa_pointer span_pointer)
 
static void unlink_span (dsa_area *area, dsa_area_span *span)
 
static void add_span_to_fullness_class (dsa_area *area, dsa_area_span *span, dsa_pointer span_pointer, int fclass)
 
static void unlink_segment (dsa_area *area, dsa_segment_map *segment_map)
 
static dsa_segment_map * get_best_segment (dsa_area *area, size_t npages)
 
static dsa_segment_map * make_new_segment (dsa_area *area, size_t requested_pages)
 
static dsa_area * create_internal (void *place, size_t size, int tranche_id, dsm_handle control_handle, dsm_segment *control_segment)
 
static dsa_area * attach_internal (void *place, dsm_segment *segment, dsa_handle handle)
 
static void check_for_freed_segments (dsa_area *area)
 
static void check_for_freed_segments_locked (dsa_area *area)
 
static void rebin_segment (dsa_area *area, dsa_segment_map *segment_map)
 
dsa_area * dsa_create (int tranche_id)
 
dsa_area * dsa_create_in_place (void *place, size_t size, int tranche_id, dsm_segment *segment)
 
dsa_handle dsa_get_handle (dsa_area *area)
 
dsa_area * dsa_attach (dsa_handle handle)
 
dsa_area * dsa_attach_in_place (void *place, dsm_segment *segment)
 
void dsa_on_dsm_detach_release_in_place (dsm_segment *segment, Datum place)
 
void dsa_on_shmem_exit_release_in_place (int code, Datum place)
 
void dsa_release_in_place (void *place)
 
void dsa_pin_mapping (dsa_area *area)
 
dsa_pointer dsa_allocate_extended (dsa_area *area, size_t size, int flags)
 
void dsa_free (dsa_area *area, dsa_pointer dp)
 
void * dsa_get_address (dsa_area *area, dsa_pointer dp)
 
void dsa_pin (dsa_area *area)
 
void dsa_unpin (dsa_area *area)
 
void dsa_set_size_limit (dsa_area *area, size_t limit)
 
void dsa_trim (dsa_area *area)
 
void dsa_dump (dsa_area *area)
 
size_t dsa_minimum_size (void)
 
void dsa_detach (dsa_area *area)
 

Variables

static const uint16 dsa_size_classes []
 
static const uint8 dsa_size_class_map []
 

Macro Definition Documentation

◆ DSA_AREA_LOCK

#define DSA_AREA_LOCK (   area)    (&area->control->lock)

Definition at line 156 of file dsa.c.

◆ DSA_EXTRACT_OFFSET

#define DSA_EXTRACT_OFFSET (   dp)    ((dp) & DSA_OFFSET_BITMASK)

Definition at line 123 of file dsa.c.

◆ DSA_EXTRACT_SEGMENT_NUMBER

#define DSA_EXTRACT_SEGMENT_NUMBER (   dp)    ((dp) >> DSA_OFFSET_WIDTH)

Definition at line 120 of file dsa.c.

◆ DSA_FULLNESS_CLASSES

#define DSA_FULLNESS_CLASSES   4

Definition at line 290 of file dsa.c.

◆ DSA_INITIAL_SEGMENT_SIZE

#define DSA_INITIAL_SEGMENT_SIZE   ((size_t) (1 * 1024 * 1024))

Definition at line 70 of file dsa.c.

◆ DSA_MAKE_POINTER

#define DSA_MAKE_POINTER (   segment_number,
  offset 
)     (((dsa_pointer) (segment_number) << DSA_OFFSET_WIDTH) | (offset))

Definition at line 116 of file dsa.c.

◆ DSA_MAX_SEGMENT_SIZE

#define DSA_MAX_SEGMENT_SIZE   ((size_t) 1 << DSA_OFFSET_WIDTH)

Definition at line 103 of file dsa.c.

◆ DSA_MAX_SEGMENTS

#define DSA_MAX_SEGMENTS    Min(1024, (1 << ((SIZEOF_DSA_POINTER * 8) - DSA_OFFSET_WIDTH)))

Definition at line 96 of file dsa.c.

◆ DSA_NUM_SEGMENT_BINS

#define DSA_NUM_SEGMENT_BINS   16

Definition at line 135 of file dsa.c.

◆ DSA_NUM_SEGMENTS_AT_EACH_SIZE

#define DSA_NUM_SEGMENTS_AT_EACH_SIZE   2

Definition at line 79 of file dsa.c.

◆ DSA_NUM_SIZE_CLASSES

#define DSA_NUM_SIZE_CLASSES   lengthof(dsa_size_classes)

Definition at line 260 of file dsa.c.

◆ DSA_OFFSET_BITMASK

#define DSA_OFFSET_BITMASK   (((dsa_pointer) 1 << DSA_OFFSET_WIDTH) - 1)

Definition at line 100 of file dsa.c.

◆ DSA_OFFSET_WIDTH

#define DSA_OFFSET_WIDTH   40 /* 1024 segments of size up to 1TB */

Definition at line 89 of file dsa.c.

◆ DSA_PAGES_PER_SUPERBLOCK

#define DSA_PAGES_PER_SUPERBLOCK   16

Definition at line 106 of file dsa.c.

◆ DSA_SCLASS_BLOCK_OF_SPANS

#define DSA_SCLASS_BLOCK_OF_SPANS   0

Definition at line 263 of file dsa.c.

◆ DSA_SCLASS_LOCK

#define DSA_SCLASS_LOCK (   area,
  sclass 
)    (&area->control->pools[sclass].lock)

Definition at line 157 of file dsa.c.

◆ DSA_SCLASS_SPAN_LARGE

#define DSA_SCLASS_SPAN_LARGE   1

Definition at line 264 of file dsa.c.

◆ DSA_SEGMENT_HEADER_MAGIC

#define DSA_SEGMENT_HEADER_MAGIC   0x0ce26608

Definition at line 113 of file dsa.c.

◆ DSA_SEGMENT_INDEX_NONE

#define DSA_SEGMENT_INDEX_NONE   (~(dsa_segment_index)0)

Definition at line 129 of file dsa.c.

◆ DSA_SIZE_CLASS_MAP_QUANTUM

#define DSA_SIZE_CLASS_MAP_QUANTUM   8

Definition at line 282 of file dsa.c.

◆ DSA_SPAN_NOTHING_FREE

#define DSA_SPAN_NOTHING_FREE   ((uint16) -1)

Definition at line 395 of file dsa.c.

◆ DSA_SUPERBLOCK_SIZE

#define DSA_SUPERBLOCK_SIZE   (DSA_PAGES_PER_SUPERBLOCK * FPM_PAGE_SIZE)

Definition at line 396 of file dsa.c.

◆ DsaAreaPoolToDsaPointer

#define DsaAreaPoolToDsaPointer (   area,
  p 
)     DSA_MAKE_POINTER(0, (char *) p - (char *) area->control)

Definition at line 342 of file dsa.c.

◆ get_segment_index

#define get_segment_index (   area,
  segment_map_ptr 
)     (segment_map_ptr - &area->segment_maps[0])

Definition at line 399 of file dsa.c.

◆ NextFreeObjectIndex

#define NextFreeObjectIndex (   object)    (* (uint16 *) (object))

Definition at line 226 of file dsa.c.

Typedef Documentation

◆ dsa_segment_index

typedef size_t dsa_segment_index

Definition at line 126 of file dsa.c.

Function Documentation

◆ add_span_to_fullness_class()

static void add_span_to_fullness_class ( dsa_area * area,
dsa_area_span * span,
dsa_pointer  span_pointer,
int  fclass 
)
static

Definition at line 1919 of file dsa.c.

1922 {
1923  dsa_area_pool *pool = dsa_get_address(area, span->pool);
1924 
1925  if (DsaPointerIsValid(pool->spans[fclass]))
1926  {
1927  dsa_area_span *head = dsa_get_address(area,
1928  pool->spans[fclass]);
1929 
1930  head->prevspan = span_pointer;
1931  }
1932  span->prevspan = InvalidDsaPointer;
1933  span->nextspan = pool->spans[fclass];
1934  pool->spans[fclass] = span_pointer;
1935  span->fclass = fclass;
1936 }
void * dsa_get_address(dsa_area *area, dsa_pointer dp)
Definition: dsa.c:957
#define InvalidDsaPointer
Definition: dsa.h:78
#define DsaPointerIsValid(x)
Definition: dsa.h:81
dsa_pointer spans[DSA_FULLNESS_CLASSES]
Definition: dsa.c:303
dsa_pointer nextspan
Definition: dsa.c:211
uint16 fclass
Definition: dsa.c:219
dsa_pointer prevspan
Definition: dsa.c:210
dsa_pointer pool
Definition: dsa.c:209

References dsa_get_address(), DsaPointerIsValid, dsa_area_span::fclass, InvalidDsaPointer, dsa_area_span::nextspan, dsa_area_span::pool, dsa_area_span::prevspan, and dsa_area_pool::spans.

Referenced by dsa_free().

◆ alloc_object()

static dsa_pointer alloc_object ( dsa_area * area,
int  size_class 
)
inlinestatic

Definition at line 1462 of file dsa.c.

1463 {
1464  dsa_area_pool *pool = &area->control->pools[size_class];
1465  dsa_area_span *span;
1466  dsa_pointer block;
1467  dsa_pointer result;
1468  char *object;
1469  size_t size;
1470 
1471  /*
1472  * Even though ensure_active_superblock can in turn call alloc_object if
1473  * it needs to allocate a new span, that's always from a different pool,
1474  * and the order of lock acquisition is always the same, so it's OK that
1475  * we hold this lock for the duration of this function.
1476  */
1477  Assert(!LWLockHeldByMe(DSA_SCLASS_LOCK(area, size_class)));
1478  LWLockAcquire(DSA_SCLASS_LOCK(area, size_class), LW_EXCLUSIVE);
1479 
1480  /*
1481  * If there's no active superblock, we must successfully obtain one or
1482  * fail the request.
1483  */
1484  if (!DsaPointerIsValid(pool->spans[1]) &&
1485  !ensure_active_superblock(area, pool, size_class))
1486  {
1487  result = InvalidDsaPointer;
1488  }
1489  else
1490  {
1491  /*
1492  * There should be a block in fullness class 1 at this point, and it
1493  * should never be completely full. Thus we can either pop an object
1494  * from the free list or, failing that, initialize a new object.
1495  */
1496  Assert(DsaPointerIsValid(pool->spans[1]));
1497  span = (dsa_area_span *)
1498  dsa_get_address(area, pool->spans[1]);
1499  Assert(span->nallocatable > 0);
1500  block = span->start;
1501  Assert(size_class < DSA_NUM_SIZE_CLASSES);
1502  size = dsa_size_classes[size_class];
1503  if (span->firstfree != DSA_SPAN_NOTHING_FREE)
1504  {
1505  result = block + span->firstfree * size;
1506  object = dsa_get_address(area, result);
1507  span->firstfree = NextFreeObjectIndex(object);
1508  }
1509  else
1510  {
1511  result = block + span->ninitialized * size;
1512  ++span->ninitialized;
1513  }
1514  --span->nallocatable;
1515 
1516  /* If it's now full, move it to the highest-numbered fullness class. */
1517  if (span->nallocatable == 0)
1518  transfer_first_span(area, pool, 1, DSA_FULLNESS_CLASSES - 1);
1519  }
1520 
1521  Assert(LWLockHeldByMe(DSA_SCLASS_LOCK(area, size_class)));
1522  LWLockRelease(DSA_SCLASS_LOCK(area, size_class));
1523 
1524  return result;
1525 }
static const uint16 dsa_size_classes[]
Definition: dsa.c:249
static bool ensure_active_superblock(dsa_area *area, dsa_area_pool *pool, int size_class)
Definition: dsa.c:1550
#define DSA_SPAN_NOTHING_FREE
Definition: dsa.c:395
#define DSA_SCLASS_LOCK(area, sclass)
Definition: dsa.c:157
static bool transfer_first_span(dsa_area *area, dsa_area_pool *pool, int fromclass, int toclass)
Definition: dsa.c:1422
#define DSA_NUM_SIZE_CLASSES
Definition: dsa.c:260
#define NextFreeObjectIndex(object)
Definition: dsa.c:226
#define DSA_FULLNESS_CLASSES
Definition: dsa.c:290
uint64 dsa_pointer
Definition: dsa.h:62
Assert(fmt[strlen(fmt) - 1] !='\n')
bool LWLockHeldByMe(LWLock *lock)
Definition: lwlock.c:1920
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1195
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1808
@ LW_EXCLUSIVE
Definition: lwlock.h:116
dsa_area_pool pools[DSA_NUM_SIZE_CLASSES]
Definition: dsa.c:322
dsa_pointer start
Definition: dsa.c:212
uint16 nallocatable
Definition: dsa.c:216
uint16 ninitialized
Definition: dsa.c:215
uint16 firstfree
Definition: dsa.c:217
dsa_area_control * control
Definition: dsa.c:370

References Assert(), dsa_area::control, DSA_FULLNESS_CLASSES, dsa_get_address(), DSA_NUM_SIZE_CLASSES, DSA_SCLASS_LOCK, dsa_size_classes, DSA_SPAN_NOTHING_FREE, DsaPointerIsValid, ensure_active_superblock(), dsa_area_span::firstfree, InvalidDsaPointer, LW_EXCLUSIVE, LWLockAcquire(), LWLockHeldByMe(), LWLockRelease(), dsa_area_span::nallocatable, NextFreeObjectIndex, dsa_area_span::ninitialized, dsa_area_control::pools, dsa_area_pool::spans, dsa_area_span::start, and transfer_first_span().

Referenced by dsa_allocate_extended(), and ensure_active_superblock().

◆ attach_internal()

static dsa_area * attach_internal ( void *  place,
dsm_segment * segment,
dsa_handle  handle 
)
static

Definition at line 1316 of file dsa.c.

1317 {
1318  dsa_area_control *control;
1319  dsa_area *area;
1320  dsa_segment_map *segment_map;
1321 
1322  control = (dsa_area_control *) place;
1323  Assert(control->handle == handle);
1324  Assert(control->segment_handles[0] == handle);
1325  Assert(control->segment_header.magic ==
1326  (DSA_SEGMENT_HEADER_MAGIC ^ handle ^ 0));
1327 
1328  /* Build the backend-local area object. */
1329  area = palloc(sizeof(dsa_area));
1330  area->control = control;
1332  memset(&area->segment_maps[0], 0,
1333  sizeof(dsa_segment_map) * DSA_MAX_SEGMENTS);
1334  area->high_segment_index = 0;
1335 
1336  /* Set up the segment map for this process's mapping. */
1337  segment_map = &area->segment_maps[0];
1338  segment_map->segment = segment; /* NULL for in-place */
1339  segment_map->mapped_address = place;
1340  segment_map->header = (dsa_segment_header *) segment_map->mapped_address;
1341  segment_map->fpm = (FreePageManager *)
1342  (segment_map->mapped_address + MAXALIGN(sizeof(dsa_area_control)));
1343  segment_map->pagemap = (dsa_pointer *)
1344  (segment_map->mapped_address + MAXALIGN(sizeof(dsa_area_control)) +
1345  MAXALIGN(sizeof(FreePageManager)));
1346 
1347  /* Bump the reference count. */
1349  if (control->refcnt == 0)
1350  {
1351  /* We can't attach to a DSA area that has already been destroyed. */
1352  ereport(ERROR,
1353  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1354  errmsg("could not attach to dynamic shared area")));
1355  }
1356  ++control->refcnt;
1359 
1360  return area;
1361 }
#define MAXALIGN(LEN)
Definition: c.h:800
#define DSA_AREA_LOCK(area)
Definition: dsa.c:156
#define DSA_SEGMENT_HEADER_MAGIC
Definition: dsa.c:113
#define DSA_MAX_SEGMENTS
Definition: dsa.c:96
int errcode(int sqlerrcode)
Definition: elog.c:858
int errmsg(const char *fmt,...)
Definition: elog.c:1069
#define ERROR
Definition: elog.h:39
#define ereport(elevel,...)
Definition: elog.h:149
void * palloc(Size size)
Definition: mcxt.c:1226
ResourceOwner CurrentResourceOwner
Definition: resowner.c:164
dsa_segment_header segment_header
Definition: dsa.c:314
size_t freed_segment_counter
Definition: dsa.c:334
int refcnt
Definition: dsa.c:330
dsa_handle handle
Definition: dsa.c:316
dsm_handle segment_handles[DSA_MAX_SEGMENTS]
Definition: dsa.c:318
Definition: dsa.c:368
dsa_segment_map segment_maps[DSA_MAX_SEGMENTS]
Definition: dsa.c:386
dsa_segment_index high_segment_index
Definition: dsa.c:389
size_t freed_segment_counter
Definition: dsa.c:392
ResourceOwner resowner
Definition: dsa.c:378
uint32 magic
Definition: dsa.c:167
dsa_segment_header * header
Definition: dsa.c:356
FreePageManager * fpm
Definition: dsa.c:357
dsm_segment * segment
Definition: dsa.c:354
dsa_pointer * pagemap
Definition: dsa.c:358
char * mapped_address
Definition: dsa.c:355

References Assert(), dsa_area::control, CurrentResourceOwner, DSA_AREA_LOCK, DSA_MAX_SEGMENTS, DSA_SEGMENT_HEADER_MAGIC, ereport, errcode(), errmsg(), ERROR, dsa_segment_map::fpm, dsa_area_control::freed_segment_counter, dsa_area::freed_segment_counter, dsa_area_control::handle, dsa_segment_map::header, dsa_area::high_segment_index, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), dsa_segment_header::magic, dsa_segment_map::mapped_address, MAXALIGN, dsa_segment_map::pagemap, palloc(), dsa_area_control::refcnt, dsa_area::resowner, dsa_segment_map::segment, dsa_area_control::segment_handles, dsa_area_control::segment_header, and dsa_area::segment_maps.

Referenced by dsa_attach(), and dsa_attach_in_place().

◆ check_for_freed_segments()

static void check_for_freed_segments ( dsa_area * area)
static

Definition at line 2242 of file dsa.c.

2243 {
2244  size_t freed_segment_counter;
2245 
2246  /*
2247  * Any other process that has freed a segment has incremented
2248  * freed_segment_counter while holding an LWLock, and that must precede
2249  * any backend creating a new segment in the same slot while holding an
2250  * LWLock, and that must precede the creation of any dsa_pointer pointing
2251  * into the new segment which might reach us here, and the caller must
2252  * have sent the dsa_pointer to this process using appropriate memory
2253  * synchronization (some kind of locking or atomic primitive or system
2254  * call). So all we need to do on the reading side is ask for the load of
2255  * freed_segment_counter to follow the caller's load of the dsa_pointer it
2256  * has, and we can be sure to detect any segments that had been freed as
2257  * of the time that the dsa_pointer reached this process.
2258  */
2259  pg_read_barrier();
2260  freed_segment_counter = area->control->freed_segment_counter;
2261  if (unlikely(area->freed_segment_counter != freed_segment_counter))
2262  {
2263  /* Check all currently mapped segments to find what's been freed. */
2267  }
2268 }
#define pg_read_barrier()
Definition: atomics.h:153
#define unlikely(x)
Definition: c.h:300
static void check_for_freed_segments_locked(dsa_area *area)
Definition: dsa.c:2278

References check_for_freed_segments_locked(), dsa_area::control, DSA_AREA_LOCK, dsa_area_control::freed_segment_counter, dsa_area::freed_segment_counter, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), pg_read_barrier, and unlikely.

Referenced by dsa_free(), and dsa_get_address().

◆ check_for_freed_segments_locked()

static void check_for_freed_segments_locked ( dsa_area * area)
static

Definition at line 2278 of file dsa.c.

2279 {
2280  size_t freed_segment_counter;
2281  int i;
2282 
2284  freed_segment_counter = area->control->freed_segment_counter;
2285  if (unlikely(area->freed_segment_counter != freed_segment_counter))
2286  {
2287  for (i = 0; i <= area->high_segment_index; ++i)
2288  {
2289  if (area->segment_maps[i].header != NULL &&
2290  area->segment_maps[i].header->freed)
2291  {
2292  dsm_detach(area->segment_maps[i].segment);
2293  area->segment_maps[i].segment = NULL;
2294  area->segment_maps[i].header = NULL;
2295  area->segment_maps[i].mapped_address = NULL;
2296  }
2297  }
2298  area->freed_segment_counter = freed_segment_counter;
2299  }
2300 }
void dsm_detach(dsm_segment *seg)
Definition: dsm.c:804
int i
Definition: isn.c:73

References Assert(), dsa_area::control, DSA_AREA_LOCK, dsm_detach(), dsa_segment_header::freed, dsa_area_control::freed_segment_counter, dsa_area::freed_segment_counter, dsa_segment_map::header, dsa_area::high_segment_index, i, LWLockHeldByMe(), dsa_segment_map::mapped_address, dsa_segment_map::segment, dsa_area::segment_maps, and unlikely.

Referenced by check_for_freed_segments(), destroy_superblock(), dsa_dump(), and get_best_segment().

◆ contiguous_pages_to_segment_bin()

static size_t contiguous_pages_to_segment_bin ( size_t  n)
inlinestatic

Definition at line 143 of file dsa.c.

144 {
145  size_t bin;
146 
147  if (n == 0)
148  bin = 0;
149  else
150  bin = pg_leftmost_one_pos_size_t(n) + 1;
151 
152  return Min(bin, DSA_NUM_SEGMENT_BINS - 1);
153 }
#define Min(x, y)
Definition: c.h:993
#define DSA_NUM_SEGMENT_BINS
Definition: dsa.c:135
#define pg_leftmost_one_pos_size_t
Definition: pg_bitutils.h:338

References DSA_NUM_SEGMENT_BINS, Min, and pg_leftmost_one_pos_size_t.

Referenced by create_internal(), get_best_segment(), make_new_segment(), and rebin_segment().

◆ create_internal()

static dsa_area * create_internal ( void *  place,
size_t  size,
int  tranche_id,
dsm_handle  control_handle,
dsm_segment * control_segment 
)
static

Definition at line 1216 of file dsa.c.

1220 {
1221  dsa_area_control *control;
1222  dsa_area *area;
1223  dsa_segment_map *segment_map;
1224  size_t usable_pages;
1225  size_t total_pages;
1226  size_t metadata_bytes;
1227  int i;
1228 
1229  /* Sanity check on the space we have to work in. */
1230  if (size < dsa_minimum_size())
1231  elog(ERROR, "dsa_area space must be at least %zu, but %zu provided",
1232  dsa_minimum_size(), size);
1233 
1234  /* Now figure out how much space is usable */
1235  total_pages = size / FPM_PAGE_SIZE;
1236  metadata_bytes =
1237  MAXALIGN(sizeof(dsa_area_control)) +
1238  MAXALIGN(sizeof(FreePageManager)) +
1239  total_pages * sizeof(dsa_pointer);
1240  /* Add padding up to next page boundary. */
1241  if (metadata_bytes % FPM_PAGE_SIZE != 0)
1242  metadata_bytes += FPM_PAGE_SIZE - (metadata_bytes % FPM_PAGE_SIZE);
1243  Assert(metadata_bytes <= size);
1244  usable_pages = (size - metadata_bytes) / FPM_PAGE_SIZE;
1245 
1246  /*
1247  * Initialize the dsa_area_control object located at the start of the
1248  * space.
1249  */
1250  control = (dsa_area_control *) place;
1251  memset(place, 0, sizeof(*control));
1252  control->segment_header.magic =
1253  DSA_SEGMENT_HEADER_MAGIC ^ control_handle ^ 0;
1256  control->segment_header.usable_pages = usable_pages;
1257  control->segment_header.freed = false;
1259  control->handle = control_handle;
1260  control->max_total_segment_size = (size_t) -1;
1261  control->total_segment_size = size;
1262  control->segment_handles[0] = control_handle;
1263  for (i = 0; i < DSA_NUM_SEGMENT_BINS; ++i)
1265  control->refcnt = 1;
1266  control->lwlock_tranche_id = tranche_id;
1267 
1268  /*
1269  * Create the dsa_area object that this backend will use to access the
1270  * area. Other backends will need to obtain their own dsa_area object by
1271  * attaching.
1272  */
1273  area = palloc(sizeof(dsa_area));
1274  area->control = control;
1276  memset(area->segment_maps, 0, sizeof(dsa_segment_map) * DSA_MAX_SEGMENTS);
1277  area->high_segment_index = 0;
1278  area->freed_segment_counter = 0;
1279  LWLockInitialize(&control->lock, control->lwlock_tranche_id);
1280  for (i = 0; i < DSA_NUM_SIZE_CLASSES; ++i)
1282  control->lwlock_tranche_id);
1283 
1284  /* Set up the segment map for this process's mapping. */
1285  segment_map = &area->segment_maps[0];
1286  segment_map->segment = control_segment;
1287  segment_map->mapped_address = place;
1288  segment_map->header = (dsa_segment_header *) place;
1289  segment_map->fpm = (FreePageManager *)
1290  (segment_map->mapped_address +
1291  MAXALIGN(sizeof(dsa_area_control)));
1292  segment_map->pagemap = (dsa_pointer *)
1293  (segment_map->mapped_address +
1294  MAXALIGN(sizeof(dsa_area_control)) +
1295  MAXALIGN(sizeof(FreePageManager)));
1296 
1297  /* Set up the free page map. */
1298  FreePageManagerInitialize(segment_map->fpm, segment_map->mapped_address);
1299  /* There can be 0 usable pages if size is dsa_minimum_size(). */
1300 
1301  if (usable_pages > 0)
1302  FreePageManagerPut(segment_map->fpm, metadata_bytes / FPM_PAGE_SIZE,
1303  usable_pages);
1304 
1305  /* Put this segment into the appropriate bin. */
1306  control->segment_bins[contiguous_pages_to_segment_bin(usable_pages)] = 0;
1307  segment_map->header->bin = contiguous_pages_to_segment_bin(usable_pages);
1308 
1309  return area;
1310 }
#define DSA_SEGMENT_INDEX_NONE
Definition: dsa.c:129
#define DSA_INITIAL_SEGMENT_SIZE
Definition: dsa.c:70
static size_t contiguous_pages_to_segment_bin(size_t n)
Definition: dsa.c:143
size_t dsa_minimum_size(void)
Definition: dsa.c:1194
void FreePageManagerPut(FreePageManager *fpm, Size first_page, Size npages)
Definition: freepage.c:379
void FreePageManagerInitialize(FreePageManager *fpm, char *base)
Definition: freepage.c:183
#define FPM_PAGE_SIZE
Definition: freepage.h:30
void LWLockInitialize(LWLock *lock, int tranche_id)
Definition: lwlock.c:730
size_t total_segment_size
Definition: dsa.c:324
int lwlock_tranche_id
Definition: dsa.c:336
size_t max_total_segment_size
Definition: dsa.c:326
dsa_segment_index segment_bins[DSA_NUM_SEGMENT_BINS]
Definition: dsa.c:320
LWLock lock
Definition: dsa.c:338
size_t size
Definition: dsa.c:171
dsa_segment_index next
Definition: dsa.c:183
dsa_segment_index prev
Definition: dsa.c:177
size_t usable_pages
Definition: dsa.c:169
size_t bin
Definition: dsa.c:185

References Assert(), dsa_segment_header::bin, contiguous_pages_to_segment_bin(), dsa_area::control, CurrentResourceOwner, DSA_INITIAL_SEGMENT_SIZE, DSA_MAX_SEGMENTS, dsa_minimum_size(), DSA_NUM_SEGMENT_BINS, DSA_NUM_SIZE_CLASSES, DSA_SCLASS_LOCK, DSA_SEGMENT_HEADER_MAGIC, DSA_SEGMENT_INDEX_NONE, elog(), ERROR, dsa_segment_map::fpm, FPM_PAGE_SIZE, dsa_segment_header::freed, dsa_area::freed_segment_counter, FreePageManagerInitialize(), FreePageManagerPut(), dsa_area_control::handle, dsa_segment_map::header, dsa_area::high_segment_index, i, dsa_area_control::lock, dsa_area_control::lwlock_tranche_id, LWLockInitialize(), dsa_segment_header::magic, dsa_segment_map::mapped_address, dsa_area_control::max_total_segment_size, MAXALIGN, dsa_segment_header::next, dsa_segment_map::pagemap, palloc(), dsa_segment_header::prev, dsa_area_control::refcnt, dsa_area::resowner, dsa_segment_map::segment, dsa_area_control::segment_bins, dsa_area_control::segment_handles, dsa_area_control::segment_header, dsa_area::segment_maps, dsa_segment_header::size, dsa_area_control::total_segment_size, and dsa_segment_header::usable_pages.

Referenced by dsa_create(), and dsa_create_in_place().

◆ destroy_superblock()

static void destroy_superblock ( dsa_area * area,
dsa_pointer  span_pointer 
)
static

Definition at line 1827 of file dsa.c.

1828 {
1829  dsa_area_span *span = dsa_get_address(area, span_pointer);
1830  int size_class = span->size_class;
1831  dsa_segment_map *segment_map;
1832 
1833 
1834  /* Remove it from its fullness class list. */
1835  unlink_span(area, span);
1836 
1837  /*
1838  * Note: Here we acquire the area lock while we already hold a per-pool
1839  * lock. We never hold the area lock and then take a pool lock, or we
1840  * could deadlock.
1841  */
1844  segment_map =
1846  FreePageManagerPut(segment_map->fpm,
1848  span->npages);
1849  /* Check if the segment is now entirely free. */
1850  if (fpm_largest(segment_map->fpm) == segment_map->header->usable_pages)
1851  {
1852  dsa_segment_index index = get_segment_index(area, segment_map);
1853 
1854  /* If it's not the segment with extra control data, free it. */
1855  if (index != 0)
1856  {
1857  /*
1858  * Give it back to the OS, and allow other backends to detect that
1859  * they need to detach.
1860  */
1861  unlink_segment(area, segment_map);
1862  segment_map->header->freed = true;
1864  segment_map->header->size);
1865  area->control->total_segment_size -=
1866  segment_map->header->size;
1868  dsm_detach(segment_map->segment);
1870  ++area->control->freed_segment_counter;
1871  segment_map->segment = NULL;
1872  segment_map->header = NULL;
1873  segment_map->mapped_address = NULL;
1874  }
1875  }
1876 
1877  /* Move segment to appropriate bin if necessary. */
1878  if (segment_map->header != NULL)
1879  rebin_segment(area, segment_map);
1880 
1882 
1883  /*
1884  * Span-of-spans blocks store the span which describes them within the
1885  * block itself, so freeing the storage implicitly frees the descriptor
1886  * also. If this is a block of any other type, we need to separately free
1887  * the span object also. This recursive call to dsa_free will acquire the
1888  * span pool's lock. We can't deadlock because the acquisition order is
1889  * always some other pool and then the span pool.
1890  */
1891  if (size_class != DSA_SCLASS_BLOCK_OF_SPANS)
1892  dsa_free(area, span_pointer);
1893 }
static void unlink_segment(dsa_area *area, dsa_segment_map *segment_map)
Definition: dsa.c:1968
#define DSA_EXTRACT_SEGMENT_NUMBER(dp)
Definition: dsa.c:120
#define get_segment_index(area, segment_map_ptr)
Definition: dsa.c:399
#define DSA_EXTRACT_OFFSET(dp)
Definition: dsa.c:123
size_t dsa_segment_index
Definition: dsa.c:126
static void rebin_segment(dsa_area *area, dsa_segment_map *segment_map)
Definition: dsa.c:2306
static dsa_segment_map * get_segment_by_index(dsa_area *area, dsa_segment_index index)
Definition: dsa.c:1747
#define DSA_SCLASS_BLOCK_OF_SPANS
Definition: dsa.c:263
static void unlink_span(dsa_area *area, dsa_area_span *span)
Definition: dsa.c:1896
void dsa_free(dsa_area *area, dsa_pointer dp)
Definition: dsa.c:841
dsm_handle dsm_segment_handle(dsm_segment *seg)
Definition: dsm.c:1124
void dsm_unpin_segment(dsm_handle handle)
Definition: dsm.c:989
#define DSM_HANDLE_INVALID
Definition: dsm_impl.h:58
#define fpm_largest(fpm)
Definition: freepage.h:88
uint16 size_class
Definition: dsa.c:214
size_t npages
Definition: dsa.c:213
Definition: type.h:95

References Assert(), check_for_freed_segments_locked(), dsa_area::control, DSA_AREA_LOCK, DSA_EXTRACT_OFFSET, DSA_EXTRACT_SEGMENT_NUMBER, dsa_free(), dsa_get_address(), DSA_SCLASS_BLOCK_OF_SPANS, dsm_detach(), DSM_HANDLE_INVALID, dsm_segment_handle(), dsm_unpin_segment(), dsa_segment_map::fpm, fpm_largest, FPM_PAGE_SIZE, dsa_segment_header::freed, dsa_area_control::freed_segment_counter, FreePageManagerPut(), get_segment_by_index(), get_segment_index, dsa_segment_map::header, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), dsa_segment_map::mapped_address, dsa_area_span::npages, rebin_segment(), dsa_segment_map::segment, dsa_area_control::segment_handles, dsa_segment_header::size, dsa_area_span::size_class, dsa_area_span::start, dsa_area_control::total_segment_size, unlink_segment(), unlink_span(), and dsa_segment_header::usable_pages.

Referenced by dsa_free(), and dsa_trim().

◆ dsa_allocate_extended()

dsa_pointer dsa_allocate_extended ( dsa_area area,
size_t  size,
int  flags 
)

Definition at line 686 of file dsa.c.

687 {
688  uint16 size_class;
689  dsa_pointer start_pointer;
690  dsa_segment_map *segment_map;
691  dsa_pointer result;
692 
693  Assert(size > 0);
694 
695  /* Sanity check on huge individual allocation size. */
696  if (((flags & DSA_ALLOC_HUGE) != 0 && !AllocHugeSizeIsValid(size)) ||
697  ((flags & DSA_ALLOC_HUGE) == 0 && !AllocSizeIsValid(size)))
698  elog(ERROR, "invalid DSA memory alloc request size %zu", size);
699 
700  /*
701  * If bigger than the largest size class, just grab a run of pages from
702  * the free page manager, instead of allocating an object from a pool.
703  * There will still be a span, but it's a special class of span that
704  * manages this whole allocation and simply gives all pages back to the
705  * free page manager when dsa_free is called.
706  */
707  if (size > dsa_size_classes[lengthof(dsa_size_classes) - 1])
708  {
709  size_t npages = fpm_size_to_pages(size);
710  size_t first_page;
711  dsa_pointer span_pointer;
713 
714  /* Obtain a span object. */
715  span_pointer = alloc_object(area, DSA_SCLASS_BLOCK_OF_SPANS);
716  if (!DsaPointerIsValid(span_pointer))
717  {
718  /* Raise error unless asked not to. */
719  if ((flags & DSA_ALLOC_NO_OOM) == 0)
720  ereport(ERROR,
721  (errcode(ERRCODE_OUT_OF_MEMORY),
722  errmsg("out of memory"),
723  errdetail("Failed on DSA request of size %zu.",
724  size)));
725  return InvalidDsaPointer;
726  }
727 
729 
730  /* Find a segment from which to allocate. */
731  segment_map = get_best_segment(area, npages);
732  if (segment_map == NULL)
733  segment_map = make_new_segment(area, npages);
734  if (segment_map == NULL)
735  {
736  /* Can't make any more segments: game over. */
738  dsa_free(area, span_pointer);
739 
740  /* Raise error unless asked not to. */
741  if ((flags & DSA_ALLOC_NO_OOM) == 0)
742  ereport(ERROR,
743  (errcode(ERRCODE_OUT_OF_MEMORY),
744  errmsg("out of memory"),
745  errdetail("Failed on DSA request of size %zu.",
746  size)));
747  return InvalidDsaPointer;
748  }
749 
750  /*
751  * Ask the free page manager for a run of pages. This should always
752  * succeed, since both get_best_segment and make_new_segment should
753  * only return a non-NULL pointer if it actually contains enough
754  * contiguous freespace. If it does fail, something in our backend
755  * private state is out of whack, so use FATAL to kill the process.
756  */
757  if (!FreePageManagerGet(segment_map->fpm, npages, &first_page))
758  elog(FATAL,
759  "dsa_allocate could not find %zu free pages", npages);
761 
762  start_pointer = DSA_MAKE_POINTER(get_segment_index(area, segment_map),
763  first_page * FPM_PAGE_SIZE);
764 
765  /* Initialize span and pagemap. */
767  LW_EXCLUSIVE);
768  init_span(area, span_pointer, pool, start_pointer, npages,
770  segment_map->pagemap[first_page] = span_pointer;
772 
773  /* Zero-initialize the memory if requested. */
774  if ((flags & DSA_ALLOC_ZERO) != 0)
775  memset(dsa_get_address(area, start_pointer), 0, size);
776 
777  return start_pointer;
778  }
779 
780  /* Map allocation to a size class. */
782  {
783  int mapidx;
784 
785  /* For smaller sizes we have a lookup table... */
786  mapidx = ((size + DSA_SIZE_CLASS_MAP_QUANTUM - 1) /
788  size_class = dsa_size_class_map[mapidx];
789  }
790  else
791  {
792  uint16 min;
793  uint16 max;
794 
795  /* ... and for the rest we search by binary chop. */
797  max = lengthof(dsa_size_classes) - 1;
798 
799  while (min < max)
800  {
801  uint16 mid = (min + max) / 2;
802  uint16 class_size = dsa_size_classes[mid];
803 
804  if (class_size < size)
805  min = mid + 1;
806  else
807  max = mid;
808  }
809 
810  size_class = min;
811  }
812  Assert(size <= dsa_size_classes[size_class]);
813  Assert(size_class == 0 || size > dsa_size_classes[size_class - 1]);
814 
815  /* Attempt to allocate an object from the appropriate pool. */
816  result = alloc_object(area, size_class);
817 
818  /* Check for failure to allocate. */
819  if (!DsaPointerIsValid(result))
820  {
821  /* Raise error unless asked not to. */
822  if ((flags & DSA_ALLOC_NO_OOM) == 0)
823  ereport(ERROR,
824  (errcode(ERRCODE_OUT_OF_MEMORY),
825  errmsg("out of memory"),
826  errdetail("Failed on DSA request of size %zu.", size)));
827  return InvalidDsaPointer;
828  }
829 
830  /* Zero-initialize the memory if requested. */
831  if ((flags & DSA_ALLOC_ZERO) != 0)
832  memset(dsa_get_address(area, result), 0, size);
833 
834  return result;
835 }
unsigned short uint16
Definition: c.h:494
#define lengthof(array)
Definition: c.h:777
#define DSA_MAKE_POINTER(segment_number, offset)
Definition: dsa.c:116
static dsa_pointer alloc_object(dsa_area *area, int size_class)
Definition: dsa.c:1462
#define DSA_SIZE_CLASS_MAP_QUANTUM
Definition: dsa.c:282
static const uint8 dsa_size_class_map[]
Definition: dsa.c:272
static dsa_segment_map * make_new_segment(dsa_area *area, size_t requested_pages)
Definition: dsa.c:2071
#define DSA_SCLASS_SPAN_LARGE
Definition: dsa.c:264
static void init_span(dsa_area *area, dsa_pointer span_pointer, dsa_area_pool *pool, dsa_pointer start, size_t npages, uint16 size_class)
Definition: dsa.c:1367
static dsa_segment_map * get_best_segment(dsa_area *area, size_t npages)
Definition: dsa.c:2000
#define DSA_ALLOC_NO_OOM
Definition: dsa.h:74
#define DSA_ALLOC_HUGE
Definition: dsa.h:73
#define DSA_ALLOC_ZERO
Definition: dsa.h:75
int errdetail(const char *fmt,...)
Definition: elog.c:1202
#define FATAL
Definition: elog.h:41
bool FreePageManagerGet(FreePageManager *fpm, Size npages, Size *first_page)
Definition: freepage.c:210
#define fpm_size_to_pages(sz)
Definition: freepage.h:74
#define AllocHugeSizeIsValid(size)
Definition: memutils.h:49
#define AllocSizeIsValid(size)
Definition: memutils.h:42

References alloc_object(), AllocHugeSizeIsValid, AllocSizeIsValid, Assert(), dsa_area::control, DSA_ALLOC_HUGE, DSA_ALLOC_NO_OOM, DSA_ALLOC_ZERO, DSA_AREA_LOCK, dsa_free(), dsa_get_address(), DSA_MAKE_POINTER, DSA_SCLASS_BLOCK_OF_SPANS, DSA_SCLASS_LOCK, DSA_SCLASS_SPAN_LARGE, dsa_size_class_map, DSA_SIZE_CLASS_MAP_QUANTUM, dsa_size_classes, DsaPointerIsValid, elog(), ereport, errcode(), errdetail(), errmsg(), ERROR, FATAL, dsa_segment_map::fpm, FPM_PAGE_SIZE, fpm_size_to_pages, FreePageManagerGet(), get_best_segment(), get_segment_index, init_span(), InvalidDsaPointer, lengthof, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), make_new_segment(), dsa_segment_map::pagemap, and dsa_area_control::pools.

Referenced by dshash_create(), and pagetable_allocate().

◆ dsa_attach()

dsa_area* dsa_attach ( dsa_handle  handle)

Definition at line 525 of file dsa.c.

526 {
527  dsm_segment *segment;
528  dsa_area *area;
529 
530  /*
531  * An area handle is really a DSM segment handle for the first segment, so
532  * we go ahead and attach to that.
533  */
534  segment = dsm_attach(handle);
535  if (segment == NULL)
536  ereport(ERROR,
537  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
538  errmsg("could not attach to dynamic shared area")));
539 
540  area = attach_internal(dsm_segment_address(segment), segment, handle);
541 
542  /* Clean up when the control segment detaches. */
545 
546  return area;
547 }
void dsa_on_dsm_detach_release_in_place(dsm_segment *segment, Datum place)
Definition: dsa.c:591
static dsa_area * attach_internal(void *place, dsm_segment *segment, dsa_handle handle)
Definition: dsa.c:1316
void * dsm_segment_address(dsm_segment *seg)
Definition: dsm.c:1096
void on_dsm_detach(dsm_segment *seg, on_dsm_detach_callback function, Datum arg)
Definition: dsm.c:1133
dsm_segment * dsm_attach(dsm_handle h)
Definition: dsm.c:666
static Datum PointerGetDatum(const void *X)
Definition: postgres.h:322

References attach_internal(), dsa_on_dsm_detach_release_in_place(), dsm_attach(), dsm_segment_address(), ereport, errcode(), errmsg(), ERROR, on_dsm_detach(), and PointerGetDatum().

Referenced by logicalrep_launcher_attach_dshmem().

◆ dsa_attach_in_place()

dsa_area* dsa_attach_in_place ( void *  place,
dsm_segment segment 
)

Definition at line 560 of file dsa.c.

561 {
562  dsa_area *area;
563 
564  area = attach_internal(place, NULL, DSA_HANDLE_INVALID);
565 
566  /*
567  * Clean up when the control segment detaches, if a containing DSM segment
568  * was provided.
569  */
570  if (segment != NULL)
572  PointerGetDatum(place));
573 
574  return area;
575 }
#define DSA_HANDLE_INVALID
Definition: dsa.h:103

References attach_internal(), DSA_HANDLE_INVALID, dsa_on_dsm_detach_release_in_place(), on_dsm_detach(), and PointerGetDatum().

Referenced by AttachSession(), ParallelQueryMain(), and pgstat_attach_shmem().

◆ dsa_create()

dsa_area* dsa_create ( int  tranche_id)

Definition at line 439 of file dsa.c.

440 {
441  dsm_segment *segment;
442  dsa_area *area;
443 
444  /*
445  * Create the DSM segment that will hold the shared control object and the
446  * first segment of usable space.
447  */
448  segment = dsm_create(DSA_INITIAL_SEGMENT_SIZE, 0);
449 
450  /*
451  * All segments backing this area are pinned, so that DSA can explicitly
452  * control their lifetime (otherwise a newly created segment belonging to
453  * this area might be freed when the only backend that happens to have it
454  * mapped in ends, corrupting the area).
455  */
456  dsm_pin_segment(segment);
457 
458  /* Create a new DSA area with the control object in this segment. */
459  area = create_internal(dsm_segment_address(segment),
461  tranche_id,
462  dsm_segment_handle(segment), segment);
463 
464  /* Clean up when the control segment detaches. */
467 
468  return area;
469 }
static dsa_area * create_internal(void *place, size_t size, int tranche_id, dsm_handle control_handle, dsm_segment *control_segment)
Definition: dsa.c:1216
dsm_segment * dsm_create(Size size, int flags)
Definition: dsm.c:517
void dsm_pin_segment(dsm_segment *seg)
Definition: dsm.c:956

References create_internal(), DSA_INITIAL_SEGMENT_SIZE, dsa_on_dsm_detach_release_in_place(), dsm_create(), dsm_pin_segment(), dsm_segment_address(), dsm_segment_handle(), on_dsm_detach(), and PointerGetDatum().

Referenced by logicalrep_launcher_attach_dshmem(), test_dsa_basic(), and test_dsa_resowners().

◆ dsa_create_in_place()

dsa_area* dsa_create_in_place ( void *  place,
size_t  size,
int  tranche_id,
dsm_segment segment 
)

Definition at line 488 of file dsa.c.

490 {
491  dsa_area *area;
492 
493  area = create_internal(place, size, tranche_id,
494  DSM_HANDLE_INVALID, NULL);
495 
496  /*
497  * Clean up when the control segment detaches, if a containing DSM segment
498  * was provided.
499  */
500  if (segment != NULL)
502  PointerGetDatum(place));
503 
504  return area;
505 }

References create_internal(), dsa_on_dsm_detach_release_in_place(), DSM_HANDLE_INVALID, on_dsm_detach(), and PointerGetDatum().

Referenced by ExecInitParallelPlan(), GetSessionDsmHandle(), and StatsShmemInit().

◆ dsa_detach()

void dsa_detach ( dsa_area area)

Definition at line 1942 of file dsa.c.

1943 {
1944  int i;
1945 
1946  /* Detach from all segments. */
1947  for (i = 0; i <= area->high_segment_index; ++i)
1948  if (area->segment_maps[i].segment != NULL)
1949  dsm_detach(area->segment_maps[i].segment);
1950 
1951  /*
1952  * Note that 'detaching' (= detaching from DSM segments) doesn't include
1953  * 'releasing' (= adjusting the reference count). It would be nice to
1954  * combine these operations, but client code might never get around to
1955  * calling dsa_detach because of an error path, and a detach hook on any
1956  * particular segment is too late to detach other segments in the area
1957  * without risking a 'leak' warning in the non-error path.
1958  */
1959 
1960  /* Free the backend-local area object. */
1961  pfree(area);
1962 }
void pfree(void *pointer)
Definition: mcxt.c:1456

References dsm_detach(), dsa_area::high_segment_index, i, pfree(), dsa_segment_map::segment, and dsa_area::segment_maps.

Referenced by DetachSession(), ExecParallelCleanup(), ParallelQueryMain(), pgstat_detach_shmem(), StatsShmemInit(), test_dsa_basic(), and test_dsa_resowners().

◆ dsa_dump()

void dsa_dump ( dsa_area area)

Definition at line 1090 of file dsa.c.

1091 {
1092  size_t i,
1093  j;
1094 
1095  /*
1096  * Note: This gives an inconsistent snapshot as it acquires and releases
1097  * individual locks as it goes...
1098  */
1099 
1102  fprintf(stderr, "dsa_area handle %x:\n", area->control->handle);
1103  fprintf(stderr, " max_total_segment_size: %zu\n",
1105  fprintf(stderr, " total_segment_size: %zu\n",
1106  area->control->total_segment_size);
1107  fprintf(stderr, " refcnt: %d\n", area->control->refcnt);
1108  fprintf(stderr, " pinned: %c\n", area->control->pinned ? 't' : 'f');
1109  fprintf(stderr, " segment bins:\n");
1110  for (i = 0; i < DSA_NUM_SEGMENT_BINS; ++i)
1111  {
1113  {
1114  dsa_segment_index segment_index;
1115 
1116  fprintf(stderr,
1117  " segment bin %zu (at least %d contiguous pages free):\n",
1118  i, 1 << (i - 1));
1119  segment_index = area->control->segment_bins[i];
1120  while (segment_index != DSA_SEGMENT_INDEX_NONE)
1121  {
1122  dsa_segment_map *segment_map;
1123 
1124  segment_map =
1125  get_segment_by_index(area, segment_index);
1126 
1127  fprintf(stderr,
1128  " segment index %zu, usable_pages = %zu, "
1129  "contiguous_pages = %zu, mapped at %p\n",
1130  segment_index,
1131  segment_map->header->usable_pages,
1132  fpm_largest(segment_map->fpm),
1133  segment_map->mapped_address);
1134  segment_index = segment_map->header->next;
1135  }
1136  }
1137  }
1139 
1140  fprintf(stderr, " pools:\n");
1141  for (i = 0; i < DSA_NUM_SIZE_CLASSES; ++i)
1142  {
1143  bool found = false;
1144 
1146  for (j = 0; j < DSA_FULLNESS_CLASSES; ++j)
1147  if (DsaPointerIsValid(area->control->pools[i].spans[j]))
1148  found = true;
1149  if (found)
1150  {
1152  fprintf(stderr, " pool for blocks of span objects:\n");
1153  else if (i == DSA_SCLASS_SPAN_LARGE)
1154  fprintf(stderr, " pool for large object spans:\n");
1155  else
1156  fprintf(stderr,
1157  " pool for size class %zu (object size %hu bytes):\n",
1158  i, dsa_size_classes[i]);
1159  for (j = 0; j < DSA_FULLNESS_CLASSES; ++j)
1160  {
1161  if (!DsaPointerIsValid(area->control->pools[i].spans[j]))
1162  fprintf(stderr, " fullness class %zu is empty\n", j);
1163  else
1164  {
1165  dsa_pointer span_pointer = area->control->pools[i].spans[j];
1166 
1167  fprintf(stderr, " fullness class %zu:\n", j);
1168  while (DsaPointerIsValid(span_pointer))
1169  {
1170  dsa_area_span *span;
1171 
1172  span = dsa_get_address(area, span_pointer);
1173  fprintf(stderr,
1174  " span descriptor at "
1175  DSA_POINTER_FORMAT ", superblock at "
1177  ", pages = %zu, objects free = %hu/%hu\n",
1178  span_pointer, span->start, span->npages,
1179  span->nallocatable, span->nmax);
1180  span_pointer = span->nextspan;
1181  }
1182  }
1183  }
1184  }
1186  }
1187 }
#define DSA_POINTER_FORMAT
Definition: dsa.h:69
int j
Definition: isn.c:74
#define fprintf
Definition: port.h:242
bool pinned
Definition: dsa.c:332
uint16 nmax
Definition: dsa.c:218

References check_for_freed_segments_locked(), dsa_area::control, DSA_AREA_LOCK, DSA_FULLNESS_CLASSES, dsa_get_address(), DSA_NUM_SEGMENT_BINS, DSA_NUM_SIZE_CLASSES, DSA_POINTER_FORMAT, DSA_SCLASS_BLOCK_OF_SPANS, DSA_SCLASS_LOCK, DSA_SCLASS_SPAN_LARGE, DSA_SEGMENT_INDEX_NONE, dsa_size_classes, DsaPointerIsValid, dsa_segment_map::fpm, fpm_largest, fprintf, get_segment_by_index(), dsa_area_control::handle, dsa_segment_map::header, i, j, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), dsa_segment_map::mapped_address, dsa_area_control::max_total_segment_size, dsa_area_span::nallocatable, dsa_segment_header::next, dsa_area_span::nextspan, dsa_area_span::nmax, dsa_area_span::npages, dsa_area_control::pinned, dsa_area_control::pools, dsa_area_control::refcnt, dsa_area_control::segment_bins, dsa_area_pool::spans, dsa_area_span::start, dsa_area_control::total_segment_size, and dsa_segment_header::usable_pages.

◆ dsa_free()

void dsa_free ( dsa_area area,
dsa_pointer  dp 
)

Definition at line 841 of file dsa.c.

842 {
843  dsa_segment_map *segment_map;
844  int pageno;
845  dsa_pointer span_pointer;
846  dsa_area_span *span;
847  char *superblock;
848  char *object;
849  size_t size;
850  int size_class;
851 
852  /* Make sure we don't have a stale segment in the slot 'dp' refers to. */
854 
855  /* Locate the object, span and pool. */
856  segment_map = get_segment_by_index(area, DSA_EXTRACT_SEGMENT_NUMBER(dp));
857  pageno = DSA_EXTRACT_OFFSET(dp) / FPM_PAGE_SIZE;
858  span_pointer = segment_map->pagemap[pageno];
859  span = dsa_get_address(area, span_pointer);
860  superblock = dsa_get_address(area, span->start);
861  object = dsa_get_address(area, dp);
862  size_class = span->size_class;
863  size = dsa_size_classes[size_class];
864 
865  /*
866  * Special case for large objects that live in a special span: we return
867  * those pages directly to the free page manager and free the span.
868  */
869  if (span->size_class == DSA_SCLASS_SPAN_LARGE)
870  {
871 
872 #ifdef CLOBBER_FREED_MEMORY
873  memset(object, 0x7f, span->npages * FPM_PAGE_SIZE);
874 #endif
875 
876  /* Give pages back to free page manager. */
878  FreePageManagerPut(segment_map->fpm,
880  span->npages);
881 
882  /* Move segment to appropriate bin if necessary. */
883  rebin_segment(area, segment_map);
885 
886  /* Unlink span. */
888  LW_EXCLUSIVE);
889  unlink_span(area, span);
891  /* Free the span object so it can be reused. */
892  dsa_free(area, span_pointer);
893  return;
894  }
895 
896 #ifdef CLOBBER_FREED_MEMORY
897  memset(object, 0x7f, size);
898 #endif
899 
900  LWLockAcquire(DSA_SCLASS_LOCK(area, size_class), LW_EXCLUSIVE);
901 
902  /* Put the object on the span's freelist. */
903  Assert(object >= superblock);
904  Assert(object < superblock + DSA_SUPERBLOCK_SIZE);
905  Assert((object - superblock) % size == 0);
906  NextFreeObjectIndex(object) = span->firstfree;
907  span->firstfree = (object - superblock) / size;
908  ++span->nallocatable;
909 
910  /*
911  * See if the span needs to moved to a different fullness class, or be
912  * freed so its pages can be given back to the segment.
913  */
914  if (span->nallocatable == 1 && span->fclass == DSA_FULLNESS_CLASSES - 1)
915  {
916  /*
917  * The block was completely full and is located in the
918  * highest-numbered fullness class, which is never scanned for free
919  * chunks. We must move it to the next-lower fullness class.
920  */
921  unlink_span(area, span);
922  add_span_to_fullness_class(area, span, span_pointer,
924 
925  /*
926  * If this is the only span, and there is no active span, then we
927  * should probably move this span to fullness class 1. (Otherwise if
928  * you allocate exactly all the objects in the only span, it moves to
929  * class 3, then you free them all, it moves to 2, and then is given
930  * back, leaving no active span).
931  */
932  }
933  else if (span->nallocatable == span->nmax &&
934  (span->fclass != 1 || span->prevspan != InvalidDsaPointer))
935  {
936  /*
937  * This entire block is free, and it's not the active block for this
938  * size class. Return the memory to the free page manager. We don't
939  * do this for the active block to prevent hysteresis: if we
940  * repeatedly allocate and free the only chunk in the active block, it
941  * will be very inefficient if we deallocate and reallocate the block
942  * every time.
943  */
944  destroy_superblock(area, span_pointer);
945  }
946 
947  LWLockRelease(DSA_SCLASS_LOCK(area, size_class));
948 }
static void check_for_freed_segments(dsa_area *area)
Definition: dsa.c:2242
static void add_span_to_fullness_class(dsa_area *area, dsa_area_span *span, dsa_pointer span_pointer, int fclass)
Definition: dsa.c:1919
#define DSA_SUPERBLOCK_SIZE
Definition: dsa.c:396
static void destroy_superblock(dsa_area *area, dsa_pointer span_pointer)
Definition: dsa.c:1827

References add_span_to_fullness_class(), Assert(), check_for_freed_segments(), destroy_superblock(), DSA_AREA_LOCK, DSA_EXTRACT_OFFSET, DSA_EXTRACT_SEGMENT_NUMBER, DSA_FULLNESS_CLASSES, dsa_get_address(), DSA_SCLASS_LOCK, DSA_SCLASS_SPAN_LARGE, dsa_size_classes, DSA_SUPERBLOCK_SIZE, dsa_area_span::fclass, dsa_area_span::firstfree, dsa_segment_map::fpm, FPM_PAGE_SIZE, FreePageManagerPut(), get_segment_by_index(), InvalidDsaPointer, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), dsa_area_span::nallocatable, NextFreeObjectIndex, dsa_area_span::nmax, dsa_area_span::npages, dsa_segment_map::pagemap, dsa_area_span::prevspan, rebin_segment(), dsa_area_span::size_class, dsa_area_span::start, and unlink_span().

Referenced by delete_item_from_bucket(), delete_key_from_bucket(), destroy_superblock(), dsa_allocate_extended(), dshash_create(), dshash_destroy(), ExecHashTableDetach(), ExecHashTableDetachBatch(), ExecParallelCleanup(), ExecParallelHashIncreaseNumBatches(), ExecParallelHashIncreaseNumBuckets(), ExecParallelHashRepartitionFirst(), ExecParallelReinitialize(), find_or_make_matching_shared_tupledesc(), pagetable_free(), pgstat_free_entry(), resize(), tbm_free_shared_area(), test_dsa_basic(), and test_dsa_resowners().

◆ dsa_get_address()

void* dsa_get_address ( dsa_area area,
dsa_pointer  dp 
)

Definition at line 957 of file dsa.c.

958 {
960  size_t offset;
961 
962  /* Convert InvalidDsaPointer to NULL. */
963  if (!DsaPointerIsValid(dp))
964  return NULL;
965 
966  /* Process any requests to detach from freed segments. */
968 
969  /* Break the dsa_pointer into its components. */
971  offset = DSA_EXTRACT_OFFSET(dp);
973 
974  /* Check if we need to cause this segment to be mapped in. */
975  if (unlikely(area->segment_maps[index].mapped_address == NULL))
976  {
977  /* Call for effect (we don't need the result). */
979  }
980 
981  return area->segment_maps[index].mapped_address + offset;
982 }

References Assert(), check_for_freed_segments(), DSA_EXTRACT_OFFSET, DSA_EXTRACT_SEGMENT_NUMBER, DSA_MAX_SEGMENTS, DsaPointerIsValid, get_segment_by_index(), dsa_segment_map::mapped_address, dsa_area::segment_maps, and unlikely.

Referenced by add_span_to_fullness_class(), alloc_object(), delete_item_from_bucket(), delete_key_from_bucket(), destroy_superblock(), dsa_allocate_extended(), dsa_dump(), dsa_free(), dsa_trim(), dshash_attach(), dshash_create(), dshash_destroy(), dshash_dump(), dshash_seq_next(), ensure_active_superblock(), ensure_valid_bucket_pointers(), ExecHashTableDetachBatch(), ExecParallelHashEnsureBatchAccessors(), ExecParallelHashFirstTuple(), ExecParallelHashIncreaseNumBatches(), ExecParallelHashIncreaseNumBuckets(), ExecParallelHashJoinSetUpBatches(), ExecParallelHashNextTuple(), ExecParallelHashPopChunkQueue(), ExecParallelHashRepartitionRest(), ExecParallelHashTableAlloc(), ExecParallelHashTableSetCurrentBatch(), ExecParallelHashTupleAlloc(), find_in_bucket(), find_or_make_matching_shared_tupledesc(), init_span(), insert_into_bucket(), insert_item_into_bucket(), lookup_rowtype_tupdesc_internal(), pagetable_allocate(), ParallelQueryMain(), pgstat_build_snapshot(), pgstat_get_entry_ref(), pgstat_init_entry(), pgstat_reinit_entry(), pgstat_reset_matching_entries(), pgstat_write_statsfile(), resize(), SerializeParamExecParams(), share_tupledesc(), shared_record_table_compare(), shared_record_table_hash(), tbm_attach_shared_iterate(), tbm_free_shared_area(), tbm_prepare_shared_iterate(), test_dsa_basic(), test_dsa_resowners(), transfer_first_span(), and unlink_span().

◆ dsa_get_handle()

dsa_handle dsa_get_handle ( dsa_area area)

Definition at line 513 of file dsa.c.

514 {
516  return area->control->handle;
517 }

References Assert(), dsa_area::control, DSA_HANDLE_INVALID, and dsa_area_control::handle.

Referenced by logicalrep_launcher_attach_dshmem().

◆ dsa_minimum_size()

size_t dsa_minimum_size ( void  )

Definition at line 1194 of file dsa.c.

1195 {
1196  size_t size;
1197  int pages = 0;
1198 
1199  size = MAXALIGN(sizeof(dsa_area_control)) +
1200  MAXALIGN(sizeof(FreePageManager));
1201 
1202  /* Figure out how many pages we need, including the page map... */
1203  while (((size + FPM_PAGE_SIZE - 1) / FPM_PAGE_SIZE) > pages)
1204  {
1205  ++pages;
1206  size += sizeof(dsa_pointer);
1207  }
1208 
1209  return pages * FPM_PAGE_SIZE;
1210 }

References FPM_PAGE_SIZE, and MAXALIGN.

Referenced by create_internal(), ExecInitParallelPlan(), and pgstat_dsa_init_size().

◆ dsa_on_dsm_detach_release_in_place()

void dsa_on_dsm_detach_release_in_place ( dsm_segment segment,
Datum  place 
)

Definition at line 591 of file dsa.c.

592 {
594 }
void dsa_release_in_place(void *place)
Definition: dsa.c:620
static Pointer DatumGetPointer(Datum X)
Definition: postgres.h:312

References DatumGetPointer(), and dsa_release_in_place().

Referenced by dsa_attach(), dsa_attach_in_place(), dsa_create(), and dsa_create_in_place().

◆ dsa_on_shmem_exit_release_in_place()

void dsa_on_shmem_exit_release_in_place ( int  code,
Datum  place 
)

Definition at line 605 of file dsa.c.

606 {
608 }

References DatumGetPointer(), and dsa_release_in_place().

◆ dsa_pin()

void dsa_pin ( dsa_area area)

Definition at line 990 of file dsa.c.

991 {
993  if (area->control->pinned)
994  {
996  elog(ERROR, "dsa_area already pinned");
997  }
998  area->control->pinned = true;
999  ++area->control->refcnt;
1001 }

References dsa_area::control, DSA_AREA_LOCK, elog(), ERROR, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), dsa_area_control::pinned, and dsa_area_control::refcnt.

Referenced by logicalrep_launcher_attach_dshmem(), and StatsShmemInit().

◆ dsa_pin_mapping()

void dsa_pin_mapping ( dsa_area area)

Definition at line 650 of file dsa.c.

651 {
652  int i;
653 
654  if (area->resowner != NULL)
655  {
656  area->resowner = NULL;
657 
658  for (i = 0; i <= area->high_segment_index; ++i)
659  if (area->segment_maps[i].segment != NULL)
661  }
662 }
void dsm_pin_mapping(dsm_segment *seg)
Definition: dsm.c:916

References dsm_pin_mapping(), dsa_area::high_segment_index, i, dsa_area::resowner, dsa_segment_map::segment, and dsa_area::segment_maps.

Referenced by AttachSession(), GetSessionDsmHandle(), logicalrep_launcher_attach_dshmem(), and pgstat_attach_shmem().

◆ dsa_release_in_place()

void dsa_release_in_place ( void *  place)

Definition at line 620 of file dsa.c.

621 {
622  dsa_area_control *control = (dsa_area_control *) place;
623  int i;
624 
625  LWLockAcquire(&control->lock, LW_EXCLUSIVE);
626  Assert(control->segment_header.magic ==
627  (DSA_SEGMENT_HEADER_MAGIC ^ control->handle ^ 0));
628  Assert(control->refcnt > 0);
629  if (--control->refcnt == 0)
630  {
631  for (i = 0; i <= control->high_segment_index; ++i)
632  {
633  dsm_handle handle;
634 
635  handle = control->segment_handles[i];
636  if (handle != DSM_HANDLE_INVALID)
637  dsm_unpin_segment(handle);
638  }
639  }
640  LWLockRelease(&control->lock);
641 }
uint32 dsm_handle
Definition: dsm_impl.h:55
dsa_segment_index high_segment_index
Definition: dsa.c:328

References Assert(), DSA_SEGMENT_HEADER_MAGIC, DSM_HANDLE_INVALID, dsm_unpin_segment(), dsa_area_control::handle, dsa_area_control::high_segment_index, i, dsa_area_control::lock, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), dsa_segment_header::magic, dsa_area_control::refcnt, dsa_area_control::segment_handles, and dsa_area_control::segment_header.

Referenced by dsa_on_dsm_detach_release_in_place(), and dsa_on_shmem_exit_release_in_place().

◆ dsa_set_size_limit()

void dsa_set_size_limit ( dsa_area area,
size_t  limit 
)

◆ dsa_trim()

void dsa_trim ( dsa_area area)

Definition at line 1045 of file dsa.c.

1046 {
1047  int size_class;
1048 
1049  /*
1050  * Trim in reverse pool order so we get to the spans-of-spans last, just
1051  * in case any become entirely free while processing all the other pools.
1052  */
1053  for (size_class = DSA_NUM_SIZE_CLASSES - 1; size_class >= 0; --size_class)
1054  {
1055  dsa_area_pool *pool = &area->control->pools[size_class];
1056  dsa_pointer span_pointer;
1057 
1058  if (size_class == DSA_SCLASS_SPAN_LARGE)
1059  {
1060  /* Large object frees give back segments aggressively already. */
1061  continue;
1062  }
1063 
1064  /*
1065  * Search fullness class 1 only. That is where we expect to find an
1066  * entirely empty superblock (entirely empty superblocks in other
1067  * fullness classes are returned to the free page map by dsa_free).
1068  */
1069  LWLockAcquire(DSA_SCLASS_LOCK(area, size_class), LW_EXCLUSIVE);
1070  span_pointer = pool->spans[1];
1071  while (DsaPointerIsValid(span_pointer))
1072  {
1073  dsa_area_span *span = dsa_get_address(area, span_pointer);
1074  dsa_pointer next = span->nextspan;
1075 
1076  if (span->nallocatable == span->nmax)
1077  destroy_superblock(area, span_pointer);
1078 
1079  span_pointer = next;
1080  }
1081  LWLockRelease(DSA_SCLASS_LOCK(area, size_class));
1082  }
1083 }
static int32 next
Definition: blutils.c:221

References dsa_area::control, destroy_superblock(), dsa_get_address(), DSA_NUM_SIZE_CLASSES, DSA_SCLASS_LOCK, DSA_SCLASS_SPAN_LARGE, DsaPointerIsValid, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), dsa_area_span::nallocatable, next, dsa_area_span::nextspan, dsa_area_span::nmax, dsa_area_control::pools, and dsa_area_pool::spans.

◆ dsa_unpin()

void dsa_unpin ( dsa_area area)

Definition at line 1009 of file dsa.c.

1010 {
1012  Assert(area->control->refcnt > 1);
1013  if (!area->control->pinned)
1014  {
1016  elog(ERROR, "dsa_area not pinned");
1017  }
1018  area->control->pinned = false;
1019  --area->control->refcnt;
1021 }

References Assert(), dsa_area::control, DSA_AREA_LOCK, elog(), ERROR, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), dsa_area_control::pinned, and dsa_area_control::refcnt.

◆ ensure_active_superblock()

static bool ensure_active_superblock ( dsa_area area,
dsa_area_pool pool,
int  size_class 
)
static

Definition at line 1550 of file dsa.c.

1552 {
1553  dsa_pointer span_pointer;
1554  dsa_pointer start_pointer;
1555  size_t obsize = dsa_size_classes[size_class];
1556  size_t nmax;
1557  int fclass;
1558  size_t npages = 1;
1559  size_t first_page;
1560  size_t i;
1561  dsa_segment_map *segment_map;
1562 
1563  Assert(LWLockHeldByMe(DSA_SCLASS_LOCK(area, size_class)));
1564 
1565  /*
1566  * Compute the number of objects that will fit in a block of this size
1567  * class. Span-of-spans blocks are just a single page, and the first
1568  * object isn't available for use because it describes the block-of-spans
1569  * itself.
1570  */
1571  if (size_class == DSA_SCLASS_BLOCK_OF_SPANS)
1572  nmax = FPM_PAGE_SIZE / obsize - 1;
1573  else
1574  nmax = DSA_SUPERBLOCK_SIZE / obsize;
1575 
1576  /*
1577  * If fullness class 1 is empty, try to find a span to put in it by
1578  * scanning higher-numbered fullness classes (excluding the last one,
1579  * whose blocks are certain to all be completely full).
1580  */
1581  for (fclass = 2; fclass < DSA_FULLNESS_CLASSES - 1; ++fclass)
1582  {
1583  span_pointer = pool->spans[fclass];
1584 
1585  while (DsaPointerIsValid(span_pointer))
1586  {
1587  int tfclass;
1588  dsa_area_span *span;
1589  dsa_area_span *nextspan;
1590  dsa_area_span *prevspan;
1591  dsa_pointer next_span_pointer;
1592 
1593  span = (dsa_area_span *)
1594  dsa_get_address(area, span_pointer);
1595  next_span_pointer = span->nextspan;
1596 
1597  /* Figure out what fullness class should contain this span. */
1598  tfclass = (nmax - span->nallocatable)
1599  * (DSA_FULLNESS_CLASSES - 1) / nmax;
1600 
1601  /* Look up next span. */
1602  if (DsaPointerIsValid(span->nextspan))
1603  nextspan = (dsa_area_span *)
1604  dsa_get_address(area, span->nextspan);
1605  else
1606  nextspan = NULL;
1607 
1608  /*
1609  * If utilization has dropped enough that this now belongs in some
1610  * other fullness class, move it there.
1611  */
1612  if (tfclass < fclass)
1613  {
1614  /* Remove from the current fullness class list. */
1615  if (pool->spans[fclass] == span_pointer)
1616  {
1617  /* It was the head; remove it. */
1618  Assert(!DsaPointerIsValid(span->prevspan));
1619  pool->spans[fclass] = span->nextspan;
1620  if (nextspan != NULL)
1621  nextspan->prevspan = InvalidDsaPointer;
1622  }
1623  else
1624  {
1625  /* It was not the head. */
1626  Assert(DsaPointerIsValid(span->prevspan));
1627  prevspan = (dsa_area_span *)
1628  dsa_get_address(area, span->prevspan);
1629  prevspan->nextspan = span->nextspan;
1630  }
1631  if (nextspan != NULL)
1632  nextspan->prevspan = span->prevspan;
1633 
1634  /* Push onto the head of the new fullness class list. */
1635  span->nextspan = pool->spans[tfclass];
1636  pool->spans[tfclass] = span_pointer;
1637  span->prevspan = InvalidDsaPointer;
1638  if (DsaPointerIsValid(span->nextspan))
1639  {
1640  nextspan = (dsa_area_span *)
1641  dsa_get_address(area, span->nextspan);
1642  nextspan->prevspan = span_pointer;
1643  }
1644  span->fclass = tfclass;
1645  }
1646 
1647  /* Advance to next span on list. */
1648  span_pointer = next_span_pointer;
1649  }
1650 
1651  /* Stop now if we found a suitable block. */
1652  if (DsaPointerIsValid(pool->spans[1]))
1653  return true;
1654  }
1655 
1656  /*
1657  * If there are no blocks that properly belong in fullness class 1, pick
1658  * one from some other fullness class and move it there anyway, so that we
1659  * have an allocation target. Our last choice is to transfer a block
1660  * that's almost empty (and might become completely empty soon if left
1661  * alone), but even that is better than failing, which is what we must do
1662  * if there are no blocks at all with freespace.
1663  */
1664  Assert(!DsaPointerIsValid(pool->spans[1]));
1665  for (fclass = 2; fclass < DSA_FULLNESS_CLASSES - 1; ++fclass)
1666  if (transfer_first_span(area, pool, fclass, 1))
1667  return true;
1668  if (!DsaPointerIsValid(pool->spans[1]) &&
1669  transfer_first_span(area, pool, 0, 1))
1670  return true;
1671 
1672  /*
1673  * We failed to find an existing span with free objects, so we need to
1674  * allocate a new superblock and construct a new span to manage it.
1675  *
1676  * First, get a dsa_area_span object to describe the new superblock block
1677  * ... unless this allocation is for a dsa_area_span object, in which case
1678  * that's surely not going to work. We handle that case by storing the
1679  * span describing a block-of-spans inline.
1680  */
1681  if (size_class != DSA_SCLASS_BLOCK_OF_SPANS)
1682  {
1683  span_pointer = alloc_object(area, DSA_SCLASS_BLOCK_OF_SPANS);
1684  if (!DsaPointerIsValid(span_pointer))
1685  return false;
1686  npages = DSA_PAGES_PER_SUPERBLOCK;
1687  }
1688 
1689  /* Find or create a segment and allocate the superblock. */
1690  LWLockAcquire(DSA_AREA_LOCK(area), LW_EXCLUSIVE);
1691  segment_map = get_best_segment(area, npages);
1692  if (segment_map == NULL)
1693  {
1694  segment_map = make_new_segment(area, npages);
1695  if (segment_map == NULL)
1696  {
1697  LWLockRelease(DSA_AREA_LOCK(area));
1698  return false;
1699  }
1700  }
1701 
1702  /*
1703  * This shouldn't happen: get_best_segment() or make_new_segment()
1704  * promised that we can successfully allocate npages.
1705  */
1706  if (!FreePageManagerGet(segment_map->fpm, npages, &first_page))
1707  elog(FATAL,
1708  "dsa_allocate could not find %zu free pages for superblock",
1709  npages);
1710  LWLockRelease(DSA_AREA_LOCK(area));
1711 
1712  /* Compute the start of the superblock. */
1713  start_pointer =
1714  DSA_MAKE_POINTER(get_segment_index(area, segment_map),
1715  first_page * FPM_PAGE_SIZE);
1716 
1717  /*
1718  * If this is a block-of-spans, carve the descriptor right out of the
1719  * allocated space.
1720  */
1721  if (size_class == DSA_SCLASS_BLOCK_OF_SPANS)
1722  {
1723  /*
1724  * We have a pointer into the segment. We need to build a dsa_pointer
1725  * from the segment index and offset into the segment.
1726  */
1727  span_pointer = start_pointer;
1728  }
1729 
1730  /* Initialize span and pagemap. */
1731  init_span(area, span_pointer, pool, start_pointer, npages, size_class);
1732  for (i = 0; i < npages; ++i)
1733  segment_map->pagemap[first_page + i] = span_pointer;
1734 
1735  return true;
1736 }
#define DSA_PAGES_PER_SUPERBLOCK
Definition: dsa.c:106

References alloc_object(), Assert(), DSA_AREA_LOCK, DSA_FULLNESS_CLASSES, dsa_get_address(), DSA_MAKE_POINTER, DSA_PAGES_PER_SUPERBLOCK, DSA_SCLASS_BLOCK_OF_SPANS, DSA_SCLASS_LOCK, dsa_size_classes, DSA_SUPERBLOCK_SIZE, DsaPointerIsValid, elog(), FATAL, dsa_area_span::fclass, dsa_segment_map::fpm, FPM_PAGE_SIZE, FreePageManagerGet(), get_best_segment(), get_segment_index, i, init_span(), InvalidDsaPointer, LW_EXCLUSIVE, LWLockAcquire(), LWLockHeldByMe(), LWLockRelease(), make_new_segment(), dsa_area_span::nallocatable, dsa_area_span::nextspan, dsa_segment_map::pagemap, dsa_area_span::prevspan, dsa_area_pool::spans, and transfer_first_span().

Referenced by alloc_object().

◆ get_best_segment()

static dsa_segment_map * get_best_segment ( dsa_area area,
size_t  npages 
)
static

Definition at line 2000 of file dsa.c.

2001 {
2002  size_t bin;
2003 
2004  Assert(LWLockHeldByMe(DSA_AREA_LOCK(area)));
2005  check_for_freed_segments_locked(area);
2006 
2007  /*
2008  * Start searching from the first bin that *might* have enough contiguous
2009  * pages.
2010  */
2011  for (bin = contiguous_pages_to_segment_bin(npages);
2012  bin < DSA_NUM_SEGMENT_BINS;
2013  ++bin)
2014  {
2015  /*
2016  * The minimum contiguous size that any segment in this bin should
2017  * have. We'll re-bin if we see segments with fewer.
2018  */
2019  size_t threshold = (size_t) 1 << (bin - 1);
2020  dsa_segment_index segment_index;
2021 
2022  /* Search this bin for a segment with enough contiguous space. */
2023  segment_index = area->control->segment_bins[bin];
2024  while (segment_index != DSA_SEGMENT_INDEX_NONE)
2025  {
2026  dsa_segment_map *segment_map;
2027  dsa_segment_index next_segment_index;
2028  size_t contiguous_pages;
2029 
2030  segment_map = get_segment_by_index(area, segment_index);
2031  next_segment_index = segment_map->header->next;
2032  contiguous_pages = fpm_largest(segment_map->fpm);
2033 
2034  /* Not enough for the request, still enough for this bin. */
2035  if (contiguous_pages >= threshold && contiguous_pages < npages)
2036  {
2037  segment_index = next_segment_index;
2038  continue;
2039  }
2040 
2041  /* Re-bin it if it's no longer in the appropriate bin. */
2042  if (contiguous_pages < threshold)
2043  {
2044  rebin_segment(area, segment_map);
2045 
2046  /*
2047  * But fall through to see if it's enough to satisfy this
2048  * request anyway....
2049  */
2050  }
2051 
2052  /* Check if we are done. */
2053  if (contiguous_pages >= npages)
2054  return segment_map;
2055 
2056  /* Continue searching the same bin. */
2057  segment_index = next_segment_index;
2058  }
2059  }
2060 
2061  /* Not found. */
2062  return NULL;
2063 }

References Assert(), check_for_freed_segments_locked(), contiguous_pages_to_segment_bin(), dsa_area::control, DSA_AREA_LOCK, DSA_NUM_SEGMENT_BINS, DSA_SEGMENT_INDEX_NONE, dsa_segment_map::fpm, fpm_largest, get_segment_by_index(), dsa_segment_map::header, LWLockHeldByMe(), dsa_segment_header::next, rebin_segment(), and dsa_area_control::segment_bins.

Referenced by dsa_allocate_extended(), and ensure_active_superblock().

◆ get_segment_by_index()

static dsa_segment_map * get_segment_by_index ( dsa_area area,
dsa_segment_index  index 
)
static

Definition at line 1747 of file dsa.c.

1748 {
1749  if (unlikely(area->segment_maps[index].mapped_address == NULL))
1750  {
1751  dsm_handle handle;
1752  dsm_segment *segment;
1753  dsa_segment_map *segment_map;
1754  ResourceOwner oldowner;
1755 
1756  /*
1757  * If we are reached by dsa_free or dsa_get_address, there must be at
1758  * least one object allocated in the referenced segment. Otherwise,
1759  * their caller has a double-free or access-after-free bug, which we
1760  * have no hope of detecting. So we know it's safe to access this
1761  * array slot without holding a lock; it won't change underneath us.
1762  * Furthermore, we know that we can see the latest contents of the
1763  * slot, as explained in check_for_freed_segments, which those
1764  * functions call before arriving here.
1765  */
1766  handle = area->control->segment_handles[index];
1767 
1768  /* It's an error to try to access an unused slot. */
1769  if (handle == DSM_HANDLE_INVALID)
1770  elog(ERROR,
1771  "dsa_area could not attach to a segment that has been freed");
1772 
1773  oldowner = CurrentResourceOwner;
1774  CurrentResourceOwner = area->resowner;
1775  segment = dsm_attach(handle);
1776  CurrentResourceOwner = oldowner;
1777  if (segment == NULL)
1778  elog(ERROR, "dsa_area could not attach to segment");
1779  segment_map = &area->segment_maps[index];
1780  segment_map->segment = segment;
1781  segment_map->mapped_address = dsm_segment_address(segment);
1782  segment_map->header =
1783  (dsa_segment_header *) segment_map->mapped_address;
1784  segment_map->fpm = (FreePageManager *)
1785  (segment_map->mapped_address +
1786  MAXALIGN(sizeof(dsa_segment_header)));
1787  segment_map->pagemap = (dsa_pointer *)
1788  (segment_map->mapped_address +
1789  MAXALIGN(sizeof(dsa_segment_header)) +
1790  MAXALIGN(sizeof(FreePageManager)));
1791 
1792  /* Remember the highest index this backend has ever mapped. */
1793  if (area->high_segment_index < index)
1794  area->high_segment_index = index;
1795 
1796  Assert(segment_map->header->magic ==
1797  DSA_SEGMENT_HEADER_MAGIC ^ area->control->handle ^ index);
1798  }
1799 
1800  /*
1801  * Callers of dsa_get_address() and dsa_free() don't hold the area lock,
1802  * but it's a bug in the calling code and undefined behavior if the
1803  * address is not live (ie if the segment might possibly have been freed,
1804  * they're trying to use a dangling pointer).
1805  *
1806  * For dsa.c code that holds the area lock to manipulate segment_bins
1807  * lists, it would be a bug if we ever reach a freed segment here. After
1808  * it's marked as freed, the only thing any backend should do with it is
1809  * unmap it, and it should always have done that in
1810  * check_for_freed_segments_locked() before arriving here to resolve an
1811  * index to a segment_map.
1812  *
1813  * Either way we can assert that we aren't returning a freed segment.
1814  */
1815  Assert(!area->segment_maps[index].header->freed);
1816 
1817  return &area->segment_maps[index];
1818 }

References Assert(), dsa_area::control, CurrentResourceOwner, DSA_SEGMENT_HEADER_MAGIC, dsm_attach(), DSM_HANDLE_INVALID, dsm_segment_address(), elog(), ERROR, dsa_segment_map::fpm, dsa_segment_header::freed, dsa_area_control::handle, dsa_segment_map::header, dsa_area::high_segment_index, dsa_segment_header::magic, dsa_segment_map::mapped_address, MAXALIGN, dsa_segment_map::pagemap, dsa_area::resowner, dsa_segment_map::segment, dsa_area_control::segment_handles, dsa_area::segment_maps, and unlikely.

Referenced by destroy_superblock(), dsa_dump(), dsa_free(), dsa_get_address(), get_best_segment(), make_new_segment(), rebin_segment(), and unlink_segment().

◆ init_span()

static void init_span ( dsa_area area,
dsa_pointer  span_pointer,
dsa_area_pool pool,
dsa_pointer  start,
size_t  npages,
uint16  size_class 
)
static

Definition at line 1367 of file dsa.c.

1371 {
1372  dsa_area_span *span = dsa_get_address(area, span_pointer);
1373  size_t obsize = dsa_size_classes[size_class];
1374 
1375  /*
1376  * The per-pool lock must be held because we manipulate the span list for
1377  * this pool.
1378  */
1379  Assert(LWLockHeldByMe(DSA_SCLASS_LOCK(area, size_class)));
1380 
1381  /* Push this span onto the front of the span list for fullness class 1. */
1382  if (DsaPointerIsValid(pool->spans[1]))
1383  {
1384  dsa_area_span *head = (dsa_area_span *)
1385  dsa_get_address(area, pool->spans[1]);
1386 
1387  head->prevspan = span_pointer;
1388  }
1389  span->pool = DsaAreaPoolToDsaPointer(area, pool);
1390  span->nextspan = pool->spans[1];
1391  span->prevspan = InvalidDsaPointer;
1392  pool->spans[1] = span_pointer;
1393 
1394  span->start = start;
1395  span->npages = npages;
1396  span->size_class = size_class;
1397  span->ninitialized = 0;
1398  if (size_class == DSA_SCLASS_BLOCK_OF_SPANS)
1399  {
1400  /*
1401  * A block-of-spans contains its own descriptor, so mark one object as
1402  * initialized and reduce the count of allocatable objects by one.
1403  * Doing this here has the side effect of also reducing nmax by one,
1404  * which is important to make sure we free this object at the correct
1405  * time.
1406  */
1407  span->ninitialized = 1;
1408  span->nallocatable = FPM_PAGE_SIZE / obsize - 1;
1409  }
1410  else if (size_class != DSA_SCLASS_SPAN_LARGE)
1411  span->nallocatable = DSA_SUPERBLOCK_SIZE / obsize;
1412  span->firstfree = DSA_SPAN_NOTHING_FREE;
1413  span->nmax = span->nallocatable;
1414  span->fclass = 1;
1415 }
#define DsaAreaPoolToDsaPointer(area, p)
Definition: dsa.c:342

References Assert(), dsa_get_address(), DSA_SCLASS_BLOCK_OF_SPANS, DSA_SCLASS_LOCK, DSA_SCLASS_SPAN_LARGE, dsa_size_classes, DSA_SPAN_NOTHING_FREE, DSA_SUPERBLOCK_SIZE, DsaAreaPoolToDsaPointer, DsaPointerIsValid, dsa_area_span::fclass, dsa_area_span::firstfree, FPM_PAGE_SIZE, InvalidDsaPointer, LWLockHeldByMe(), dsa_area_span::nallocatable, dsa_area_span::nextspan, dsa_area_span::ninitialized, dsa_area_span::nmax, dsa_area_span::npages, dsa_area_span::pool, dsa_area_span::prevspan, dsa_area_span::size_class, dsa_area_pool::spans, and dsa_area_span::start.

Referenced by dsa_allocate_extended(), and ensure_active_superblock().

◆ make_new_segment()

static dsa_segment_map * make_new_segment ( dsa_area area,
size_t  requested_pages 
)
static

Definition at line 2071 of file dsa.c.

2072 {
2073  dsa_segment_index new_index;
2074  size_t metadata_bytes;
2075  size_t total_size;
2076  size_t total_pages;
2077  size_t usable_pages;
2078  dsa_segment_map *segment_map;
2079  dsm_segment *segment;
2080  ResourceOwner oldowner;
2081 
2082  Assert(LWLockHeldByMe(DSA_AREA_LOCK(area)));
2083 
2084  /* Find a segment slot that is not in use (linearly for now). */
2085  for (new_index = 1; new_index < DSA_MAX_SEGMENTS; ++new_index)
2086  {
2087  if (area->control->segment_handles[new_index] == DSM_HANDLE_INVALID)
2088  break;
2089  }
2090  if (new_index == DSA_MAX_SEGMENTS)
2091  return NULL;
2092 
2093  /*
2094  * If the total size limit is already exceeded, then we exit early and
2095  * avoid arithmetic wraparound in the unsigned expressions below.
2096  */
2097  if (area->control->total_segment_size >=
2098  area->control->max_total_segment_size)
2099  return NULL;
2100 
2101  /*
2102  * The size should be at least as big as requested, and at least big
2103  * enough to follow a geometric series that approximately doubles the
2104  * total storage each time we create a new segment. We use geometric
2105  * growth because the underlying DSM system isn't designed for large
2106  * numbers of segments (otherwise we might even consider just using one
2107  * DSM segment for each large allocation and for each superblock, and then
2108  * we wouldn't need to use FreePageManager).
2109  *
2110  * We decide on a total segment size first, so that we produce tidy
2111  * power-of-two sized segments. This is a good property to have if we
2112  * move to huge pages in the future. Then we work back to the number of
2113  * pages we can fit.
2114  */
2115  total_size = DSA_INITIAL_SEGMENT_SIZE *
2116  ((size_t) 1 << (new_index / DSA_NUM_SEGMENTS_AT_EACH_SIZE));
2117  total_size = Min(total_size, DSA_MAX_SEGMENT_SIZE);
2118  total_size = Min(total_size,
2119  area->control->max_total_segment_size -
2120  area->control->total_segment_size);
2121 
2122  total_pages = total_size / FPM_PAGE_SIZE;
2123  metadata_bytes =
2124  MAXALIGN(sizeof(dsa_segment_header)) +
2125  MAXALIGN(sizeof(FreePageManager)) +
2126  sizeof(dsa_pointer) * total_pages;
2127 
2128  /* Add padding up to next page boundary. */
2129  if (metadata_bytes % FPM_PAGE_SIZE != 0)
2130  metadata_bytes += FPM_PAGE_SIZE - (metadata_bytes % FPM_PAGE_SIZE);
2131  if (total_size <= metadata_bytes)
2132  return NULL;
2133  usable_pages = (total_size - metadata_bytes) / FPM_PAGE_SIZE;
2134  Assert(metadata_bytes + usable_pages * FPM_PAGE_SIZE <= total_size);
2135 
2136  /* See if that is enough... */
2137  if (requested_pages > usable_pages)
2138  {
2139  /*
2140  * We'll make an odd-sized segment, working forward from the requested
2141  * number of pages.
2142  */
2143  usable_pages = requested_pages;
2144  metadata_bytes =
2145  MAXALIGN(sizeof(dsa_segment_header)) +
2146  MAXALIGN(sizeof(FreePageManager)) +
2147  usable_pages * sizeof(dsa_pointer);
2148 
2149  /* Add padding up to next page boundary. */
2150  if (metadata_bytes % FPM_PAGE_SIZE != 0)
2151  metadata_bytes += FPM_PAGE_SIZE - (metadata_bytes % FPM_PAGE_SIZE);
2152  total_size = metadata_bytes + usable_pages * FPM_PAGE_SIZE;
2153 
2154  /* Is that too large for dsa_pointer's addressing scheme? */
2155  if (total_size > DSA_MAX_SEGMENT_SIZE)
2156  return NULL;
2157 
2158  /* Would that exceed the limit? */
2159  if (total_size > area->control->max_total_segment_size -
2160  area->control->total_segment_size)
2161  return NULL;
2162  }
2163 
2164  /* Create the segment. */
2165  oldowner = CurrentResourceOwner;
2166  CurrentResourceOwner = area->resowner;
2167  segment = dsm_create(total_size, 0);
2168  CurrentResourceOwner = oldowner;
2169  if (segment == NULL)
2170  return NULL;
2171  dsm_pin_segment(segment);
2172 
2173  /* Store the handle in shared memory to be found by index. */
2174  area->control->segment_handles[new_index] =
2175  dsm_segment_handle(segment);
2176  /* Track the highest segment index in the history of the area. */
2177  if (area->control->high_segment_index < new_index)
2178  area->control->high_segment_index = new_index;
2179  /* Track the highest segment index this backend has ever mapped. */
2180  if (area->high_segment_index < new_index)
2181  area->high_segment_index = new_index;
2182  /* Track total size of all segments. */
2183  area->control->total_segment_size += total_size;
2184  Assert(area->control->total_segment_size <=
2185  area->control->max_total_segment_size);
2186 
2187  /* Build a segment map for this segment in this backend. */
2188  segment_map = &area->segment_maps[new_index];
2189  segment_map->segment = segment;
2190  segment_map->mapped_address = dsm_segment_address(segment);
2191  segment_map->header = (dsa_segment_header *) segment_map->mapped_address;
2192  segment_map->fpm = (FreePageManager *)
2193  (segment_map->mapped_address +
2194  MAXALIGN(sizeof(dsa_segment_header)));
2195  segment_map->pagemap = (dsa_pointer *)
2196  (segment_map->mapped_address +
2197  MAXALIGN(sizeof(dsa_segment_header)) +
2198  MAXALIGN(sizeof(FreePageManager)));
2199 
2200  /* Set up the free page map. */
2201  FreePageManagerInitialize(segment_map->fpm, segment_map->mapped_address);
2202  FreePageManagerPut(segment_map->fpm, metadata_bytes / FPM_PAGE_SIZE,
2203  usable_pages);
2204 
2205  /* Set up the segment header and put it in the appropriate bin. */
2206  segment_map->header->magic =
2207  DSA_SEGMENT_HEADER_MAGIC ^ area->control->handle ^ new_index;
2208  segment_map->header->usable_pages = usable_pages;
2209  segment_map->header->size = total_size;
2210  segment_map->header->bin = contiguous_pages_to_segment_bin(usable_pages);
2211  segment_map->header->prev = DSA_SEGMENT_INDEX_NONE;
2212  segment_map->header->next =
2213  area->control->segment_bins[segment_map->header->bin];
2214  segment_map->header->freed = false;
2215  area->control->segment_bins[segment_map->header->bin] = new_index;
2216  if (segment_map->header->next != DSA_SEGMENT_INDEX_NONE)
2217  {
2218  dsa_segment_map *next =
2219  get_segment_by_index(area, segment_map->header->next);
2220 
2221  Assert(next->header->bin == segment_map->header->bin);
2222  next->header->prev = new_index;
2223  }
2224 
2225  return segment_map;
2226 }
#define DSA_NUM_SEGMENTS_AT_EACH_SIZE
Definition: dsa.c:79
#define DSA_MAX_SEGMENT_SIZE
Definition: dsa.c:103
int64 total_size
Definition: pg_checksums.c:63

References Assert(), dsa_segment_header::bin, contiguous_pages_to_segment_bin(), dsa_area::control, CurrentResourceOwner, DSA_AREA_LOCK, DSA_INITIAL_SEGMENT_SIZE, DSA_MAX_SEGMENT_SIZE, DSA_MAX_SEGMENTS, DSA_NUM_SEGMENTS_AT_EACH_SIZE, DSA_SEGMENT_HEADER_MAGIC, DSA_SEGMENT_INDEX_NONE, dsm_create(), DSM_HANDLE_INVALID, dsm_pin_segment(), dsm_segment_address(), dsm_segment_handle(), dsa_segment_map::fpm, FPM_PAGE_SIZE, dsa_segment_header::freed, FreePageManagerInitialize(), FreePageManagerPut(), get_segment_by_index(), dsa_area_control::handle, dsa_segment_map::header, dsa_area_control::high_segment_index, dsa_area::high_segment_index, LWLockHeldByMe(), dsa_segment_header::magic, dsa_segment_map::mapped_address, dsa_area_control::max_total_segment_size, MAXALIGN, Min, next, dsa_segment_header::next, dsa_segment_map::pagemap, dsa_segment_header::prev, dsa_area::resowner, dsa_segment_map::segment, dsa_area_control::segment_bins, dsa_area_control::segment_handles, dsa_area::segment_maps, dsa_segment_header::size, dsa_area_control::total_segment_size, total_size, and dsa_segment_header::usable_pages.

Referenced by dsa_allocate_extended(), and ensure_active_superblock().

◆ rebin_segment()

static void rebin_segment ( dsa_area area,
dsa_segment_map segment_map 
)
static

Definition at line 2306 of file dsa.c.

2307 {
2308  size_t new_bin;
2309  dsa_segment_index segment_index;
2310 
2311  new_bin = contiguous_pages_to_segment_bin(fpm_largest(segment_map->fpm));
2312  if (segment_map->header->bin == new_bin)
2313  return;
2314 
2315  /* Remove it from its current bin. */
2316  unlink_segment(area, segment_map);
2317 
2318  /* Push it onto the front of its new bin. */
2319  segment_index = get_segment_index(area, segment_map);
2320  segment_map->header->prev = DSA_SEGMENT_INDEX_NONE;
2321  segment_map->header->next = area->control->segment_bins[new_bin];
2322  segment_map->header->bin = new_bin;
2323  area->control->segment_bins[new_bin] = segment_index;
2324  if (segment_map->header->next != DSA_SEGMENT_INDEX_NONE)
2325  {
2326  dsa_segment_map *next;
2327 
2328  next = get_segment_by_index(area, segment_map->header->next);
2329  Assert(next->header->bin == new_bin);
2330  next->header->prev = segment_index;
2331  }
2332 }

References Assert(), dsa_segment_header::bin, contiguous_pages_to_segment_bin(), dsa_area::control, DSA_SEGMENT_INDEX_NONE, dsa_segment_map::fpm, fpm_largest, get_segment_by_index(), get_segment_index, dsa_segment_map::header, next, dsa_segment_header::next, dsa_segment_header::prev, dsa_area_control::segment_bins, and unlink_segment().

Referenced by destroy_superblock(), dsa_free(), and get_best_segment().

◆ transfer_first_span()

static bool transfer_first_span ( dsa_area area,
dsa_area_pool pool,
int  fromclass,
int  toclass 
)
static

Definition at line 1422 of file dsa.c.

1424 {
1425  dsa_pointer span_pointer;
1426  dsa_area_span *span;
1427  dsa_area_span *nextspan;
1428 
1429  /* Can't do it if source list is empty. */
1430  span_pointer = pool->spans[fromclass];
1431  if (!DsaPointerIsValid(span_pointer))
1432  return false;
1433 
1434  /* Remove span from head of source list. */
1435  span = dsa_get_address(area, span_pointer);
1436  pool->spans[fromclass] = span->nextspan;
1437  if (DsaPointerIsValid(span->nextspan))
1438  {
1439  nextspan = (dsa_area_span *)
1440  dsa_get_address(area, span->nextspan);
1441  nextspan->prevspan = InvalidDsaPointer;
1442  }
1443 
1444  /* Add span to head of target list. */
1445  span->nextspan = pool->spans[toclass];
1446  pool->spans[toclass] = span_pointer;
1447  if (DsaPointerIsValid(span->nextspan))
1448  {
1449  nextspan = (dsa_area_span *)
1450  dsa_get_address(area, span->nextspan);
1451  nextspan->prevspan = span_pointer;
1452  }
1453  span->fclass = toclass;
1454 
1455  return true;
1456 }

References dsa_get_address(), DsaPointerIsValid, dsa_area_span::fclass, InvalidDsaPointer, dsa_area_span::nextspan, dsa_area_span::prevspan, and dsa_area_pool::spans.

Referenced by alloc_object(), and ensure_active_superblock().

◆ unlink_segment()

static void unlink_segment ( dsa_area area,
dsa_segment_map segment_map 
)
static

Definition at line 1968 of file dsa.c.

1969 {
1970  if (segment_map->header->prev != DSA_SEGMENT_INDEX_NONE)
1971  {
1972  dsa_segment_map *prev;
1973 
1974  prev = get_segment_by_index(area, segment_map->header->prev);
1975  prev->header->next = segment_map->header->next;
1976  }
1977  else
1978  {
1979  Assert(area->control->segment_bins[segment_map->header->bin] ==
1980  get_segment_index(area, segment_map));
1981  area->control->segment_bins[segment_map->header->bin] =
1982  segment_map->header->next;
1983  }
1984  if (segment_map->header->next != DSA_SEGMENT_INDEX_NONE)
1985  {
1986  dsa_segment_map *next;
1987 
1988  next = get_segment_by_index(area, segment_map->header->next);
1989  next->header->prev = segment_map->header->prev;
1990  }
1991 }

References Assert(), dsa_segment_header::bin, dsa_area::control, DSA_SEGMENT_INDEX_NONE, get_segment_by_index(), get_segment_index, dsa_segment_map::header, next, dsa_segment_header::next, dsa_segment_header::prev, and dsa_area_control::segment_bins.

Referenced by destroy_superblock(), and rebin_segment().

◆ unlink_span()

static void unlink_span ( dsa_area area,
dsa_area_span span 
)
static

Definition at line 1896 of file dsa.c.

1897 {
1898  if (DsaPointerIsValid(span->nextspan))
1899  {
1900  dsa_area_span *next = dsa_get_address(area, span->nextspan);
1901 
1902  next->prevspan = span->prevspan;
1903  }
1904  if (DsaPointerIsValid(span->prevspan))
1905  {
1906  dsa_area_span *prev = dsa_get_address(area, span->prevspan);
1907 
1908  prev->nextspan = span->nextspan;
1909  }
1910  else
1911  {
1912  dsa_area_pool *pool = dsa_get_address(area, span->pool);
1913 
1914  pool->spans[span->fclass] = span->nextspan;
1915  }
1916 }

References dsa_get_address(), DsaPointerIsValid, dsa_area_span::fclass, next, dsa_area_span::nextspan, dsa_area_span::pool, dsa_area_span::prevspan, and dsa_area_pool::spans.

Referenced by destroy_superblock(), and dsa_free().

Variable Documentation

◆ dsa_size_class_map

const uint8 dsa_size_class_map[]
static
Initial value:
= {
2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 11, 11, 12, 12, 13, 13,
14, 14, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16, 17, 17, 17, 17,
18, 18, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 19, 19, 19,
20, 20, 20, 20, 20, 20, 20, 20, 21, 21, 21, 21, 21, 21, 21, 21,
22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25
}

Definition at line 272 of file dsa.c.

Referenced by dsa_allocate_extended().

◆ dsa_size_classes

const uint16 dsa_size_classes[]
static
Initial value:
= {
sizeof(dsa_area_span), 0,
8, 16, 24, 32, 40, 48, 56, 64,
80, 96, 112, 128,
160, 192, 224, 256,
320, 384, 448, 512,
640, 768, 896, 1024,
1280, 1560, 1816, 2048,
2616, 3120, 3640, 4096,
5456, 6552, 7280, 8192
}

Definition at line 249 of file dsa.c.

Referenced by alloc_object(), dsa_allocate_extended(), dsa_dump(), dsa_free(), ensure_active_superblock(), and init_span().