/*
 * NOTE(extraction): doxygen navigation header ("PostgreSQL Source Code git
 * master / dsm.c / Go to the documentation of this file.") converted to a
 * comment so the file remains valid C.
 */
1 /*-------------------------------------------------------------------------
2  *
3  * dsm.c
4  * manage dynamic shared memory segments
5  *
6  * This file provides a set of services to make programming with dynamic
7  * shared memory segments more convenient. Unlike the low-level
8  * facilities provided by dsm_impl.h and dsm_impl.c, mappings and segments
9  * created using this module will be cleaned up automatically. Mappings
10  * will be removed when the resource owner under which they were created
11  * is cleaned up, unless dsm_pin_mapping() is used, in which case they
12  * have session lifespan. Segments will be removed when there are no
13  * remaining mappings, or at postmaster shutdown in any case. After a
14  * hard postmaster crash, remaining segments will be removed, if they
15  * still exist, at the next postmaster startup.
16  *
17  * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
18  * Portions Copyright (c) 1994, Regents of the University of California
19  *
20  *
21  * IDENTIFICATION
22  * src/backend/storage/ipc/dsm.c
23  *
24  *-------------------------------------------------------------------------
25  */
26 
27 #include "postgres.h"
28 
29 #include <fcntl.h>
30 #include <unistd.h>
31 #ifndef WIN32
32 #include <sys/mman.h>
33 #endif
34 #include <sys/stat.h>
35 
36 #include "lib/ilist.h"
37 #include "miscadmin.h"
38 #include "port/pg_bitutils.h"
39 #include "storage/dsm.h"
40 #include "storage/ipc.h"
41 #include "storage/lwlock.h"
42 #include "storage/pg_shmem.h"
43 #include "utils/freepage.h"
44 #include "utils/guc.h"
45 #include "utils/memutils.h"
46 #include "utils/resowner_private.h"
47 
/* Magic number stamped into the control segment header for sanity checks. */
#define PG_DYNSHMEM_CONTROL_MAGIC		0x9a503d32

/* Sizing of the control segment's slot array. */
#define PG_DYNSHMEM_FIXED_SLOTS			64
#define PG_DYNSHMEM_SLOTS_PER_BACKEND	5

/* Sentinel meaning "this backend-local segment has no control slot". */
#define INVALID_CONTROL_SLOT		((uint32) -1)
54 
55 /* Backend-local tracking for on-detach callbacks. */
57 {
62 
63 /* Backend-local state for a dynamic shared memory segment. */
65 {
66  dlist_node node; /* List link in dsm_segment_list. */
67  ResourceOwner resowner; /* Resource owner. */
68  dsm_handle handle; /* Segment name. */
69  uint32 control_slot; /* Slot in control segment. */
70  void *impl_private; /* Implementation-specific private data. */
71  void *mapped_address; /* Mapping address, or NULL if unmapped. */
72  Size mapped_size; /* Size of our mapping. */
73  slist_head on_detach; /* On-detach callbacks. */
74 };
75 
76 /* Shared-memory state for a dynamic shared memory segment. */
77 typedef struct dsm_control_item
78 {
80  uint32 refcnt; /* 2+ = active, 1 = moribund, 0 = gone */
81  size_t first_page;
82  size_t npages;
83  void *impl_private_pm_handle; /* only needed on Windows */
84  bool pinned;
86 
87 /* Layout of the dynamic shared memory control segment. */
88 typedef struct dsm_control_header
89 {
95 
96 static void dsm_cleanup_for_mmap(void);
97 static void dsm_postmaster_shutdown(int code, Datum arg);
99 static bool dsm_control_segment_sane(dsm_control_header *control,
100  Size mapped_size);
101 static uint64 dsm_control_bytes_needed(uint32 nitems);
102 static inline dsm_handle make_main_region_dsm_handle(int slot);
103 static inline bool is_main_region_dsm_handle(dsm_handle handle);
104 
105 /* Has this backend initialized the dynamic shared memory system yet? */
106 static bool dsm_init_done = false;
107 
108 /* Preallocated DSM space in the main shared memory region. */
109 static void *dsm_main_space_begin = NULL;
110 
111 /*
112  * List of dynamic shared memory segments used by this backend.
113  *
114  * At process exit time, we must decrement the reference count of each
115  * segment we have attached; this list makes it possible to find all such
116  * segments.
117  *
118  * This list should always be empty in the postmaster. We could probably
119  * allow the postmaster to map dynamic shared memory segments before it
120  * begins to start child processes, provided that each process adjusted
121  * the reference counts for those segments in the control segment at
122  * startup time, but there's no obvious need for such a facility, which
123  * would also be complex to handle in the EXEC_BACKEND case. Once the
124  * postmaster has begun spawning children, there's an additional problem:
125  * each new mapping would require an update to the control segment,
126  * which requires locking, in which the postmaster must not be involved.
127  */
128 static dlist_head dsm_segment_list = DLIST_STATIC_INIT(dsm_segment_list);
129 
130 /*
131  * Control segment information.
132  *
133  * Unlike ordinary shared memory segments, the control segment is not
134  * reference counted; instead, it lasts for the postmaster's entire
135  * life cycle. For simplicity, it doesn't have a dsm_segment object either.
136  */
140 static void *dsm_control_impl_private = NULL;
141 
142 /*
143  * Start up the dynamic shared memory system.
144  *
145  * This is called just once during each cluster lifetime, at postmaster
146  * startup time.
147  */
148 void
150 {
151  void *dsm_control_address = NULL;
152  uint32 maxitems;
153  Size segsize;
154 
156 
157  /*
158  * If we're using the mmap implementations, clean up any leftovers.
159  * Cleanup isn't needed on Windows, and happens earlier in startup for
160  * POSIX and System V shared memory, via a direct call to
161  * dsm_cleanup_using_control_segment.
162  */
165 
166  /* Determine size for new control segment. */
167  maxitems = PG_DYNSHMEM_FIXED_SLOTS
169  elog(DEBUG2, "dynamic shared memory system will support %u segments",
170  maxitems);
171  segsize = dsm_control_bytes_needed(maxitems);
172 
173  /*
174  * Loop until we find an unused identifier for the new control segment. We
175  * sometimes use 0 as a sentinel value indicating that no control segment
176  * is known to exist, so avoid using that value for a real control
177  * segment.
178  */
179  for (;;)
180  {
181  Assert(dsm_control_address == NULL);
183  dsm_control_handle = random() << 1; /* Even numbers only */
185  continue;
187  &dsm_control_impl_private, &dsm_control_address,
189  break;
190  }
191  dsm_control = dsm_control_address;
193  elog(DEBUG2,
194  "created dynamic shared memory control segment %u (%zu bytes)",
195  dsm_control_handle, segsize);
197 
198  /* Initialize control segment. */
199  dsm_control->magic = PG_DYNSHMEM_CONTROL_MAGIC;
200  dsm_control->nitems = 0;
201  dsm_control->maxitems = maxitems;
202 }
203 
204 /*
205  * Determine whether the control segment from the previous postmaster
206  * invocation still exists. If so, remove the dynamic shared memory
207  * segments to which it refers, and then the control segment itself.
208  */
209 void
211 {
212  void *mapped_address = NULL;
213  void *junk_mapped_address = NULL;
214  void *impl_private = NULL;
215  void *junk_impl_private = NULL;
216  Size mapped_size = 0;
217  Size junk_mapped_size = 0;
218  uint32 nitems;
219  uint32 i;
220  dsm_control_header *old_control;
221 
222  /*
223  * Try to attach the segment. If this fails, it probably just means that
224  * the operating system has been rebooted and the segment no longer
225  * exists, or an unrelated process has used the same shm ID. So just fall
226  * out quietly.
227  */
228  if (!dsm_impl_op(DSM_OP_ATTACH, old_control_handle, 0, &impl_private,
229  &mapped_address, &mapped_size, DEBUG1))
230  return;
231 
232  /*
233  * We've managed to reattach it, but the contents might not be sane. If
234  * they aren't, we disregard the segment after all.
235  */
236  old_control = (dsm_control_header *) mapped_address;
237  if (!dsm_control_segment_sane(old_control, mapped_size))
238  {
239  dsm_impl_op(DSM_OP_DETACH, old_control_handle, 0, &impl_private,
240  &mapped_address, &mapped_size, LOG);
241  return;
242  }
243 
244  /*
245  * OK, the control segment looks basically valid, so we can use it to get
246  * a list of segments that need to be removed.
247  */
248  nitems = old_control->nitems;
249  for (i = 0; i < nitems; ++i)
250  {
251  dsm_handle handle;
252  uint32 refcnt;
253 
254  /* If the reference count is 0, the slot is actually unused. */
255  refcnt = old_control->item[i].refcnt;
256  if (refcnt == 0)
257  continue;
258 
259  /* If it was using the main shmem area, there is nothing to do. */
260  handle = old_control->item[i].handle;
261  if (is_main_region_dsm_handle(handle))
262  continue;
263 
264  /* Log debugging information. */
265  elog(DEBUG2, "cleaning up orphaned dynamic shared memory with ID %u (reference count %u)",
266  handle, refcnt);
267 
268  /* Destroy the referenced segment. */
269  dsm_impl_op(DSM_OP_DESTROY, handle, 0, &junk_impl_private,
270  &junk_mapped_address, &junk_mapped_size, LOG);
271  }
272 
273  /* Destroy the old control segment, too. */
274  elog(DEBUG2,
275  "cleaning up dynamic shared memory control segment with ID %u",
276  old_control_handle);
277  dsm_impl_op(DSM_OP_DESTROY, old_control_handle, 0, &impl_private,
278  &mapped_address, &mapped_size, LOG);
279 }
280 
281 /*
282  * When we're using the mmap shared memory implementation, "shared memory"
283  * segments might even manage to survive an operating system reboot.
284  * But there's no guarantee as to exactly what will survive: some segments
285  * may survive, and others may not, and the contents of some may be out
286  * of date. In particular, the control segment may be out of date, so we
287  * can't rely on it to figure out what to remove. However, since we know
288  * what directory contains the files we used as shared memory, we can simply
289  * scan the directory and blow everything away that shouldn't be there.
290  */
291 static void
293 {
294  DIR *dir;
295  struct dirent *dent;
296 
297  /* Scan the directory for something with a name of the correct format. */
299 
300  while ((dent = ReadDir(dir, PG_DYNSHMEM_DIR)) != NULL)
301  {
302  if (strncmp(dent->d_name, PG_DYNSHMEM_MMAP_FILE_PREFIX,
303  strlen(PG_DYNSHMEM_MMAP_FILE_PREFIX)) == 0)
304  {
305  char buf[MAXPGPATH + sizeof(PG_DYNSHMEM_DIR)];
306 
307  snprintf(buf, sizeof(buf), PG_DYNSHMEM_DIR "/%s", dent->d_name);
308 
309  elog(DEBUG2, "removing file \"%s\"", buf);
310 
311  /* We found a matching file; so remove it. */
312  if (unlink(buf) != 0)
313  ereport(ERROR,
315  errmsg("could not remove file \"%s\": %m", buf)));
316  }
317  }
318 
319  /* Cleanup complete. */
320  FreeDir(dir);
321 }
322 
323 /*
324  * At shutdown time, we iterate over the control segment and remove all
325  * remaining dynamic shared memory segments. We avoid throwing errors here;
326  * the postmaster is shutting down either way, and this is just non-critical
327  * resource cleanup.
328  */
329 static void
331 {
332  uint32 nitems;
333  uint32 i;
334  void *dsm_control_address;
335  void *junk_mapped_address = NULL;
336  void *junk_impl_private = NULL;
337  Size junk_mapped_size = 0;
339 
340  /*
341  * If some other backend exited uncleanly, it might have corrupted the
342  * control segment while it was dying. In that case, we warn and ignore
343  * the contents of the control segment. This may end up leaving behind
344  * stray shared memory segments, but there's not much we can do about that
345  * if the metadata is gone.
346  */
347  nitems = dsm_control->nitems;
349  {
350  ereport(LOG,
351  (errmsg("dynamic shared memory control segment is corrupt")));
352  return;
353  }
354 
355  /* Remove any remaining segments. */
356  for (i = 0; i < nitems; ++i)
357  {
358  dsm_handle handle;
359 
360  /* If the reference count is 0, the slot is actually unused. */
361  if (dsm_control->item[i].refcnt == 0)
362  continue;
363 
364  handle = dsm_control->item[i].handle;
365  if (is_main_region_dsm_handle(handle))
366  continue;
367 
368  /* Log debugging information. */
369  elog(DEBUG2, "cleaning up orphaned dynamic shared memory with ID %u",
370  handle);
371 
372  /* Destroy the segment. */
373  dsm_impl_op(DSM_OP_DESTROY, handle, 0, &junk_impl_private,
374  &junk_mapped_address, &junk_mapped_size, LOG);
375  }
376 
377  /* Remove the control segment itself. */
378  elog(DEBUG2,
379  "cleaning up dynamic shared memory control segment with ID %u",
381  dsm_control_address = dsm_control;
383  &dsm_control_impl_private, &dsm_control_address,
385  dsm_control = dsm_control_address;
386  shim->dsm_control = 0;
387 }
388 
389 /*
390  * Prepare this backend for dynamic shared memory usage. Under EXEC_BACKEND,
391  * we must reread the state file and map the control segment; in other cases,
392  * we'll have inherited the postmaster's mapping and global variables.
393  */
394 static void
396 {
397 #ifdef EXEC_BACKEND
398  {
399  void *control_address = NULL;
400 
401  /* Attach control segment. */
404  &dsm_control_impl_private, &control_address,
406  dsm_control = control_address;
407  /* If control segment doesn't look sane, something is badly wrong. */
409  {
411  &dsm_control_impl_private, &control_address,
413  ereport(FATAL,
414  (errcode(ERRCODE_INTERNAL_ERROR),
415  errmsg("dynamic shared memory control segment is not valid")));
416  }
417  }
418 #endif
419 
420  dsm_init_done = true;
421 }
422 
423 #ifdef EXEC_BACKEND
424 /*
425  * When running under EXEC_BACKEND, we get a callback here when the main
426  * shared memory segment is re-attached, so that we can record the control
427  * handle retrieved from it.
428  */
429 void
430 dsm_set_control_handle(dsm_handle h)
431 {
432  Assert(dsm_control_handle == 0 && h != 0);
433  dsm_control_handle = h;
434 }
435 #endif
436 
437 /*
438  * Reserve some space in the main shared memory segment for DSM segments.
439  */
440 size_t
442 {
443  return 1024 * 1024 * (size_t) min_dynamic_shared_memory;
444 }
445 
446 /*
447  * Initialize space in the main shared memory segment for DSM segments.
448  */
449 void
451 {
452  size_t size = dsm_estimate_size();
453  bool found;
454 
455  if (size == 0)
456  return;
457 
458  dsm_main_space_begin = ShmemInitStruct("Preallocated DSM", size, &found);
459  if (!found)
460  {
462  size_t first_page = 0;
463  size_t pages;
464 
465  /* Reserve space for the FreePageManager. */
466  while (first_page * FPM_PAGE_SIZE < sizeof(FreePageManager))
467  ++first_page;
468 
469  /* Initialize it and give it all the rest of the space. */
471  pages = (size / FPM_PAGE_SIZE) - first_page;
472  FreePageManagerPut(fpm, first_page, pages);
473  }
474 }
475 
476 /*
477  * Create a new dynamic shared memory segment.
478  *
479  * If there is a non-NULL CurrentResourceOwner, the new segment is associated
480  * with it and must be detached before the resource owner releases, or a
481  * warning will be logged. If CurrentResourceOwner is NULL, the segment
482  * remains attached until explicitly detached or the session ends.
483  * Creating with a NULL CurrentResourceOwner is equivalent to creating
484  * with a non-NULL CurrentResourceOwner and then calling dsm_pin_mapping.
485  */
486 dsm_segment *
487 dsm_create(Size size, int flags)
488 {
489  dsm_segment *seg;
490  uint32 i;
491  uint32 nitems;
492  size_t npages = 0;
493  size_t first_page = 0;
494  FreePageManager *dsm_main_space_fpm = dsm_main_space_begin;
495  bool using_main_dsm_region = false;
496 
497  /* Unsafe in postmaster (and pointless in a stand-alone backend). */
499 
500  if (!dsm_init_done)
502 
503  /* Create a new segment descriptor. */
504  seg = dsm_create_descriptor();
505 
506  /*
507  * Lock the control segment while we try to allocate from the main shared
508  * memory area, if configured.
509  */
510  if (dsm_main_space_fpm)
511  {
512  npages = size / FPM_PAGE_SIZE;
513  if (size % FPM_PAGE_SIZE > 0)
514  ++npages;
515 
516  LWLockAcquire(DynamicSharedMemoryControlLock, LW_EXCLUSIVE);
517  if (FreePageManagerGet(dsm_main_space_fpm, npages, &first_page))
518  {
519  /* We can carve out a piece of the main shared memory segment. */
520  seg->mapped_address = (char *) dsm_main_space_begin +
521  first_page * FPM_PAGE_SIZE;
522  seg->mapped_size = npages * FPM_PAGE_SIZE;
523  using_main_dsm_region = true;
524  /* We'll choose a handle below. */
525  }
526  }
527 
528  if (!using_main_dsm_region)
529  {
530  /*
531  * We need to create a new memory segment. Loop until we find an
532  * unused segment identifier.
533  */
534  if (dsm_main_space_fpm)
535  LWLockRelease(DynamicSharedMemoryControlLock);
536  for (;;)
537  {
538  Assert(seg->mapped_address == NULL && seg->mapped_size == 0);
539  seg->handle = random() << 1; /* Even numbers only */
540  if (seg->handle == DSM_HANDLE_INVALID) /* Reserve sentinel */
541  continue;
542  if (dsm_impl_op(DSM_OP_CREATE, seg->handle, size, &seg->impl_private,
543  &seg->mapped_address, &seg->mapped_size, ERROR))
544  break;
545  }
546  LWLockAcquire(DynamicSharedMemoryControlLock, LW_EXCLUSIVE);
547  }
548 
549  /* Search the control segment for an unused slot. */
550  nitems = dsm_control->nitems;
551  for (i = 0; i < nitems; ++i)
552  {
553  if (dsm_control->item[i].refcnt == 0)
554  {
555  if (using_main_dsm_region)
556  {
558  dsm_control->item[i].first_page = first_page;
559  dsm_control->item[i].npages = npages;
560  }
561  else
563  dsm_control->item[i].handle = seg->handle;
564  /* refcnt of 1 triggers destruction, so start at 2 */
565  dsm_control->item[i].refcnt = 2;
566  dsm_control->item[i].impl_private_pm_handle = NULL;
567  dsm_control->item[i].pinned = false;
568  seg->control_slot = i;
569  LWLockRelease(DynamicSharedMemoryControlLock);
570  return seg;
571  }
572  }
573 
574  /* Verify that we can support an additional mapping. */
575  if (nitems >= dsm_control->maxitems)
576  {
577  if (using_main_dsm_region)
578  FreePageManagerPut(dsm_main_space_fpm, first_page, npages);
579  LWLockRelease(DynamicSharedMemoryControlLock);
580  if (!using_main_dsm_region)
582  &seg->mapped_address, &seg->mapped_size, WARNING);
583  if (seg->resowner != NULL)
584  ResourceOwnerForgetDSM(seg->resowner, seg);
585  dlist_delete(&seg->node);
586  pfree(seg);
587 
588  if ((flags & DSM_CREATE_NULL_IF_MAXSEGMENTS) != 0)
589  return NULL;
590  ereport(ERROR,
591  (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
592  errmsg("too many dynamic shared memory segments")));
593  }
594 
595  /* Enter the handle into a new array slot. */
596  if (using_main_dsm_region)
597  {
598  seg->handle = make_main_region_dsm_handle(nitems);
599  dsm_control->item[i].first_page = first_page;
600  dsm_control->item[i].npages = npages;
601  }
602  dsm_control->item[nitems].handle = seg->handle;
603  /* refcnt of 1 triggers destruction, so start at 2 */
604  dsm_control->item[nitems].refcnt = 2;
605  dsm_control->item[nitems].impl_private_pm_handle = NULL;
606  dsm_control->item[nitems].pinned = false;
607  seg->control_slot = nitems;
608  dsm_control->nitems++;
609  LWLockRelease(DynamicSharedMemoryControlLock);
610 
611  return seg;
612 }
613 
614 /*
615  * Attach a dynamic shared memory segment.
616  *
617  * See comments for dsm_segment_handle() for an explanation of how this
618  * is intended to be used.
619  *
620  * This function will return NULL if the segment isn't known to the system.
621  * This can happen if we're asked to attach the segment, but then everyone
622  * else detaches it (causing it to be destroyed) before we get around to
623  * attaching it.
624  *
625  * If there is a non-NULL CurrentResourceOwner, the attached segment is
626  * associated with it and must be detached before the resource owner releases,
627  * or a warning will be logged. Otherwise the segment remains attached until
628  * explicitly detached or the session ends. See the note atop dsm_create().
629  */
630 dsm_segment *
632 {
633  dsm_segment *seg;
634  dlist_iter iter;
635  uint32 i;
636  uint32 nitems;
637 
638  /* Unsafe in postmaster (and pointless in a stand-alone backend). */
640 
641  if (!dsm_init_done)
643 
644  /*
645  * Since this is just a debugging cross-check, we could leave it out
646  * altogether, or include it only in assert-enabled builds. But since the
647  * list of attached segments should normally be very short, let's include
648  * it always for right now.
649  *
650  * If you're hitting this error, you probably want to attempt to find an
651  * existing mapping via dsm_find_mapping() before calling dsm_attach() to
652  * create a new one.
653  */
654  dlist_foreach(iter, &dsm_segment_list)
655  {
656  seg = dlist_container(dsm_segment, node, iter.cur);
657  if (seg->handle == h)
658  elog(ERROR, "can't attach the same segment more than once");
659  }
660 
661  /* Create a new segment descriptor. */
662  seg = dsm_create_descriptor();
663  seg->handle = h;
664 
665  /* Bump reference count for this segment in shared memory. */
666  LWLockAcquire(DynamicSharedMemoryControlLock, LW_EXCLUSIVE);
667  nitems = dsm_control->nitems;
668  for (i = 0; i < nitems; ++i)
669  {
670  /*
671  * If the reference count is 0, the slot is actually unused. If the
672  * reference count is 1, the slot is still in use, but the segment is
673  * in the process of going away; even if the handle matches, another
674  * slot may already have started using the same handle value by
675  * coincidence so we have to keep searching.
676  */
677  if (dsm_control->item[i].refcnt <= 1)
678  continue;
679 
680  /* If the handle doesn't match, it's not the slot we want. */
681  if (dsm_control->item[i].handle != seg->handle)
682  continue;
683 
684  /* Otherwise we've found a match. */
685  dsm_control->item[i].refcnt++;
686  seg->control_slot = i;
688  {
689  seg->mapped_address = (char *) dsm_main_space_begin +
690  dsm_control->item[i].first_page * FPM_PAGE_SIZE;
691  seg->mapped_size = dsm_control->item[i].npages * FPM_PAGE_SIZE;
692  }
693  break;
694  }
695  LWLockRelease(DynamicSharedMemoryControlLock);
696 
697  /*
698  * If we didn't find the handle we're looking for in the control segment,
699  * it probably means that everyone else who had it mapped, including the
700  * original creator, died before we got to this point. It's up to the
701  * caller to decide what to do about that.
702  */
704  {
705  dsm_detach(seg);
706  return NULL;
707  }
708 
709  /* Here's where we actually try to map the segment. */
712  &seg->mapped_address, &seg->mapped_size, ERROR);
713 
714  return seg;
715 }
716 
717 /*
718  * At backend shutdown time, detach any segments that are still attached.
719  * (This is similar to dsm_detach_all, except that there's no reason to
720  * unmap the control segment before exiting, so we don't bother.)
721  */
722 void
724 {
725  while (!dlist_is_empty(&dsm_segment_list))
726  {
727  dsm_segment *seg;
728 
729  seg = dlist_head_element(dsm_segment, node, &dsm_segment_list);
730  dsm_detach(seg);
731  }
732 }
733 
734 /*
735  * Detach all shared memory segments, including the control segments. This
736  * should be called, along with PGSharedMemoryDetach, in processes that
737  * might inherit mappings but are not intended to be connected to dynamic
738  * shared memory.
739  */
740 void
742 {
743  void *control_address = dsm_control;
744 
745  while (!dlist_is_empty(&dsm_segment_list))
746  {
747  dsm_segment *seg;
748 
749  seg = dlist_head_element(dsm_segment, node, &dsm_segment_list);
750  dsm_detach(seg);
751  }
752 
753  if (control_address != NULL)
755  &dsm_control_impl_private, &control_address,
757 }
758 
759 /*
760  * Detach from a shared memory segment, destroying the segment if we
761  * remove the last reference.
762  *
763  * This function should never fail. It will often be invoked when aborting
764  * a transaction, and a further error won't serve any purpose. It's not a
765  * complete disaster if we fail to unmap or destroy the segment; it means a
766  * resource leak, but that doesn't necessarily preclude further operations.
767  */
768 void
770 {
771  /*
772  * Invoke registered callbacks. Just in case one of those callbacks
773  * throws a further error that brings us back here, pop the callback
774  * before invoking it, to avoid infinite error recursion.
775  */
776  while (!slist_is_empty(&seg->on_detach))
777  {
778  slist_node *node;
780  on_dsm_detach_callback function;
781  Datum arg;
782 
783  node = slist_pop_head_node(&seg->on_detach);
785  function = cb->function;
786  arg = cb->arg;
787  pfree(cb);
788 
789  function(seg, arg);
790  }
791 
792  /*
793  * Try to remove the mapping, if one exists. Normally, there will be, but
794  * maybe not, if we failed partway through a create or attach operation.
795  * We remove the mapping before decrementing the reference count so that
796  * the process that sees a zero reference count can be certain that no
797  * remaining mappings exist. Even if this fails, we pretend that it
798  * works, because retrying is likely to fail in the same way.
799  */
800  if (seg->mapped_address != NULL)
801  {
804  &seg->mapped_address, &seg->mapped_size, WARNING);
805  seg->impl_private = NULL;
806  seg->mapped_address = NULL;
807  seg->mapped_size = 0;
808  }
809 
810  /* Reduce reference count, if we previously increased it. */
812  {
813  uint32 refcnt;
814  uint32 control_slot = seg->control_slot;
815 
816  LWLockAcquire(DynamicSharedMemoryControlLock, LW_EXCLUSIVE);
817  Assert(dsm_control->item[control_slot].handle == seg->handle);
818  Assert(dsm_control->item[control_slot].refcnt > 1);
819  refcnt = --dsm_control->item[control_slot].refcnt;
821  LWLockRelease(DynamicSharedMemoryControlLock);
822 
823  /* If new reference count is 1, try to destroy the segment. */
824  if (refcnt == 1)
825  {
826  /* A pinned segment should never reach 1. */
827  Assert(!dsm_control->item[control_slot].pinned);
828 
829  /*
830  * If we fail to destroy the segment here, or are killed before we
831  * finish doing so, the reference count will remain at 1, which
832  * will mean that nobody else can attach to the segment. At
833  * postmaster shutdown time, or when a new postmaster is started
834  * after a hard kill, another attempt will be made to remove the
835  * segment.
836  *
837  * The main case we're worried about here is being killed by a
838  * signal before we can finish removing the segment. In that
839  * case, it's important to be sure that the segment still gets
840  * removed. If we actually fail to remove the segment for some
841  * other reason, the postmaster may not have any better luck than
842  * we did. There's not much we can do about that, though.
843  */
844  if (is_main_region_dsm_handle(seg->handle) ||
846  &seg->mapped_address, &seg->mapped_size, WARNING))
847  {
848  LWLockAcquire(DynamicSharedMemoryControlLock, LW_EXCLUSIVE);
851  dsm_control->item[control_slot].first_page,
852  dsm_control->item[control_slot].npages);
853  Assert(dsm_control->item[control_slot].handle == seg->handle);
854  Assert(dsm_control->item[control_slot].refcnt == 1);
855  dsm_control->item[control_slot].refcnt = 0;
856  LWLockRelease(DynamicSharedMemoryControlLock);
857  }
858  }
859  }
860 
861  /* Clean up our remaining backend-private data structures. */
862  if (seg->resowner != NULL)
863  ResourceOwnerForgetDSM(seg->resowner, seg);
864  dlist_delete(&seg->node);
865  pfree(seg);
866 }
867 
868 /*
869  * Keep a dynamic shared memory mapping until end of session.
870  *
871  * By default, mappings are owned by the current resource owner, which
872  * typically means they stick around for the duration of the current query
873  * only.
874  */
875 void
877 {
878  if (seg->resowner != NULL)
879  {
880  ResourceOwnerForgetDSM(seg->resowner, seg);
881  seg->resowner = NULL;
882  }
883 }
884 
885 /*
886  * Arrange to remove a dynamic shared memory mapping at cleanup time.
887  *
888  * dsm_pin_mapping() can be used to preserve a mapping for the entire
889  * lifetime of a process; this function reverses that decision, making
890  * the segment owned by the current resource owner. This may be useful
891  * just before performing some operation that will invalidate the segment
892  * for future use by this backend.
893  */
894 void
896 {
897  Assert(seg->resowner == NULL);
901 }
902 
903 /*
904  * Keep a dynamic shared memory segment until postmaster shutdown, or until
905  * dsm_unpin_segment is called.
906  *
907  * This function should not be called more than once per segment, unless the
908  * segment is explicitly unpinned with dsm_unpin_segment in between calls.
909  *
910  * Note that this function does not arrange for the current process to
911  * keep the segment mapped indefinitely; if that behavior is desired,
912  * dsm_pin_mapping() should be used from each process that needs to
913  * retain the mapping.
914  */
915 void
917 {
918  void *handle;
919 
920  /*
921  * Bump reference count for this segment in shared memory. This will
922  * ensure that even if there is no session which is attached to this
923  * segment, it will remain until postmaster shutdown or an explicit call
924  * to unpin.
925  */
926  LWLockAcquire(DynamicSharedMemoryControlLock, LW_EXCLUSIVE);
927  if (dsm_control->item[seg->control_slot].pinned)
928  elog(ERROR, "cannot pin a segment that is already pinned");
929  dsm_impl_pin_segment(seg->handle, seg->impl_private, &handle);
930  dsm_control->item[seg->control_slot].pinned = true;
931  dsm_control->item[seg->control_slot].refcnt++;
932  dsm_control->item[seg->control_slot].impl_private_pm_handle = handle;
933  LWLockRelease(DynamicSharedMemoryControlLock);
934 }
935 
936 /*
937  * Unpin a dynamic shared memory segment that was previously pinned with
938  * dsm_pin_segment. This function should not be called unless dsm_pin_segment
939  * was previously called for this segment.
940  *
941  * The argument is a dsm_handle rather than a dsm_segment in case you want
942  * to unpin a segment to which you haven't attached. This turns out to be
943  * useful if, for example, a reference to one shared memory segment is stored
944  * within another shared memory segment. You might want to unpin the
945  * referenced segment before destroying the referencing segment.
946  */
947 void
949 {
950  uint32 control_slot = INVALID_CONTROL_SLOT;
951  bool destroy = false;
952  uint32 i;
953 
954  /* Find the control slot for the given handle. */
955  LWLockAcquire(DynamicSharedMemoryControlLock, LW_EXCLUSIVE);
956  for (i = 0; i < dsm_control->nitems; ++i)
957  {
958  /* Skip unused slots and segments that are concurrently going away. */
959  if (dsm_control->item[i].refcnt <= 1)
960  continue;
961 
962  /* If we've found our handle, we can stop searching. */
963  if (dsm_control->item[i].handle == handle)
964  {
965  control_slot = i;
966  break;
967  }
968  }
969 
970  /*
971  * We should definitely have found the slot, and it should not already be
972  * in the process of going away, because this function should only be
973  * called on a segment which is pinned.
974  */
975  if (control_slot == INVALID_CONTROL_SLOT)
976  elog(ERROR, "cannot unpin unknown segment handle");
977  if (!dsm_control->item[control_slot].pinned)
978  elog(ERROR, "cannot unpin a segment that is not pinned");
979  Assert(dsm_control->item[control_slot].refcnt > 1);
980 
981  /*
982  * Allow implementation-specific code to run. We have to do this before
983  * releasing the lock, because impl_private_pm_handle may get modified by
984  * dsm_impl_unpin_segment.
985  */
986  dsm_impl_unpin_segment(handle,
987  &dsm_control->item[control_slot].impl_private_pm_handle);
988 
989  /* Note that 1 means no references (0 means unused slot). */
990  if (--dsm_control->item[control_slot].refcnt == 1)
991  destroy = true;
992  dsm_control->item[control_slot].pinned = false;
993 
994  /* Now we can release the lock. */
995  LWLockRelease(DynamicSharedMemoryControlLock);
996 
997  /* Clean up resources if that was the last reference. */
998  if (destroy)
999  {
1000  void *junk_impl_private = NULL;
1001  void *junk_mapped_address = NULL;
1002  Size junk_mapped_size = 0;
1003 
1004  /*
1005  * For an explanation of how error handling works in this case, see
1006  * comments in dsm_detach. Note that if we reach this point, the
1007  * current process certainly does not have the segment mapped, because
1008  * if it did, the reference count would have still been greater than 1
1009  * even after releasing the reference count held by the pin. The fact
1010  * that there can't be a dsm_segment for this handle makes it OK to
1011  * pass the mapped size, mapped address, and private data as NULL
1012  * here.
1013  */
1014  if (is_main_region_dsm_handle(handle) ||
1015  dsm_impl_op(DSM_OP_DESTROY, handle, 0, &junk_impl_private,
1016  &junk_mapped_address, &junk_mapped_size, WARNING))
1017  {
1018  LWLockAcquire(DynamicSharedMemoryControlLock, LW_EXCLUSIVE);
1019  if (is_main_region_dsm_handle(handle))
1021  dsm_control->item[control_slot].first_page,
1022  dsm_control->item[control_slot].npages);
1023  Assert(dsm_control->item[control_slot].handle == handle);
1024  Assert(dsm_control->item[control_slot].refcnt == 1);
1025  dsm_control->item[control_slot].refcnt = 0;
1026  LWLockRelease(DynamicSharedMemoryControlLock);
1027  }
1028  }
1029 }
1030 
1031 /*
1032  * Find an existing mapping for a shared memory segment, if there is one.
1033  */
1034 dsm_segment *
1036 {
1037  dlist_iter iter;
1038  dsm_segment *seg;
1039 
1040  dlist_foreach(iter, &dsm_segment_list)
1041  {
1042  seg = dlist_container(dsm_segment, node, iter.cur);
1043  if (seg->handle == h)
1044  return seg;
1045  }
1046 
1047  return NULL;
1048 }
1049 
1050 /*
1051  * Get the address at which a dynamic shared memory segment is mapped.
1052  */
1053 void *
1055 {
1056  Assert(seg->mapped_address != NULL);
1057  return seg->mapped_address;
1058 }
1059 
1060 /*
1061  * Get the size of a mapping.
1062  */
1063 Size
1065 {
1066  Assert(seg->mapped_address != NULL);
1067  return seg->mapped_size;
1068 }
1069 
1070 /*
1071  * Get a handle for a mapping.
1072  *
1073  * To establish communication via dynamic shared memory between two backends,
1074  * one of them should first call dsm_create() to establish a new shared
1075  * memory mapping. That process should then call dsm_segment_handle() to
1076  * obtain a handle for the mapping, and pass that handle to the
1077  * coordinating backend via some means (e.g. bgw_main_arg, or via the
1078  * main shared memory segment). The recipient, once in possession of the
1079  * handle, should call dsm_attach().
1080  */
1081 dsm_handle
1083 {
1084  return seg->handle;
1085 }
1086 
1087 /*
1088  * Register an on-detach callback for a dynamic shared memory segment.
1089  */
1090 void
1092 {
1094 
1096  sizeof(dsm_segment_detach_callback));
1097  cb->function = function;
1098  cb->arg = arg;
1099  slist_push_head(&seg->on_detach, &cb->node);
1100 }
1101 
1102 /*
1103  * Unregister an on-detach callback for a dynamic shared memory segment.
1104  */
1105 void
1107  Datum arg)
1108 {
1109  slist_mutable_iter iter;
1110 
1111  slist_foreach_modify(iter, &seg->on_detach)
1112  {
1114 
1116  if (cb->function == function && cb->arg == arg)
1117  {
1118  slist_delete_current(&iter);
1119  pfree(cb);
1120  break;
1121  }
1122  }
1123 }
1124 
1125 /*
1126  * Discard all registered on-detach callbacks without executing them.
1127  */
1128 void
1130 {
1131  dlist_iter iter;
1132 
1133  dlist_foreach(iter, &dsm_segment_list)
1134  {
1136 
1137  /* Throw away explicit on-detach actions one by one. */
1138  while (!slist_is_empty(&seg->on_detach))
1139  {
1140  slist_node *node;
1142 
1143  node = slist_pop_head_node(&seg->on_detach);
1144  cb = slist_container(dsm_segment_detach_callback, node, node);
1145  pfree(cb);
1146  }
1147 
1148  /*
1149  * Decrementing the reference count is a sort of implicit on-detach
1150  * action; make sure we don't do that, either.
1151  */
1153  }
1154 }
1155 
1156 /*
1157  * Create a segment descriptor.
1158  */
1159 static dsm_segment *
1161 {
1162  dsm_segment *seg;
1163 
1166 
1168  dlist_push_head(&dsm_segment_list, &seg->node);
1169 
1170  /* seg->handle must be initialized by the caller */
1172  seg->impl_private = NULL;
1173  seg->mapped_address = NULL;
1174  seg->mapped_size = 0;
1175 
1179 
1180  slist_init(&seg->on_detach);
1181 
1182  return seg;
1183 }
1184 
1185 /*
1186  * Sanity check a control segment.
1187  *
1188  * The goal here isn't to detect everything that could possibly be wrong with
1189  * the control segment; there's not enough information for that. Rather, the
1190  * goal is to make sure that someone can iterate over the items in the segment
1191  * without overrunning the end of the mapping and crashing. We also check
1192  * the magic number since, if that's messed up, this may not even be one of
1193  * our segments at all.
1194  */
1195 static bool
1197 {
1198  if (mapped_size < offsetof(dsm_control_header, item))
1199  return false; /* Mapped size too short to read header. */
1200  if (control->magic != PG_DYNSHMEM_CONTROL_MAGIC)
1201  return false; /* Magic number doesn't match. */
1202  if (dsm_control_bytes_needed(control->maxitems) > mapped_size)
1203  return false; /* Max item count won't fit in map. */
1204  if (control->nitems > control->maxitems)
1205  return false; /* Overfull. */
1206  return true;
1207 }
1208 
1209 /*
1210  * Compute the number of control-segment bytes needed to store a given
1211  * number of items.
1212  */
1213 static uint64
1215 {
1216  return offsetof(dsm_control_header, item)
1217  + sizeof(dsm_control_item) * (uint64) nitems;
1218 }
1219 
1220 static inline dsm_handle
1222 {
1223  dsm_handle handle;
1224 
1225  /*
1226  * We need to create a handle that doesn't collide with any existing extra
1227  * segment created by dsm_impl_op(), so we'll make it odd. It also
1228  * mustn't collide with any other main area pseudo-segment, so we'll
1229  * include the slot number in some of the bits. We also want to make an
1230  * effort to avoid newly created and recently destroyed handles from being
1231  * confused, so we'll make the rest of the bits random.
1232  */
1233  handle = 1;
1234  handle |= slot << 1;
1235  handle |= random() << (pg_leftmost_one_pos32(dsm_control->maxitems) + 1);
1236  return handle;
1237 }
1238 
1239 static inline bool
1241 {
1242  return handle & 1;
1243 }
static void * dsm_control_impl_private
Definition: dsm.c:140
#define DSM_IMPL_MMAP
Definition: dsm_impl.h:20
void dsm_postmaster_startup(PGShmemHeader *shim)
Definition: dsm.c:149
dlist_node node
Definition: dsm.c:66
void ResourceOwnerRememberDSM(ResourceOwner owner, dsm_segment *seg)
Definition: resowner.c:1313
dsm_segment * dsm_find_mapping(dsm_handle h)
Definition: dsm.c:1035
#define DEBUG1
Definition: elog.h:25
void dsm_impl_unpin_segment(dsm_handle handle, void **impl_private)
Definition: dsm_impl.c:1005
void reset_on_dsm_detach(void)
Definition: dsm.c:1129
void dsm_shmem_init(void)
Definition: dsm.c:450
uint32 maxitems
Definition: dsm.c:92
static void * dsm_main_space_begin
Definition: dsm.c:109
#define PG_DYNSHMEM_SLOTS_PER_BACKEND
Definition: dsm.c:51
uint32 dsm_handle
Definition: dsm_impl.h:55
#define PG_DYNSHMEM_DIR
Definition: dsm_impl.h:51
#define PG_DYNSHMEM_FIXED_SLOTS
Definition: dsm.c:50
static void dlist_push_head(dlist_head *head, dlist_node *node)
Definition: ilist.h:300
Size mapped_size
Definition: dsm.c:72
void dsm_cleanup_using_control_segment(dsm_handle old_control_handle)
Definition: dsm.c:210
dsm_handle dsm_control
Definition: pg_shmem.h:36
#define PointerGetDatum(X)
Definition: postgres.h:556
dsm_handle handle
Definition: dsm.c:79
long random(void)
Definition: random.c:22
#define dlist_foreach(iter, lhead)
Definition: ilist.h:507
ResourceOwner CurrentResourceOwner
Definition: resowner.c:142
struct dsm_control_header dsm_control_header
slist_node * cur
Definition: ilist.h:241
dsm_segment * dsm_attach(dsm_handle h)
Definition: dsm.c:631
static dsm_handle dsm_control_handle
Definition: dsm.c:137
static void slist_push_head(slist_head *head, slist_node *node)
Definition: ilist.h:574
#define FLEXIBLE_ARRAY_MEMBER
Definition: c.h:283
dsm_handle dsm_segment_handle(dsm_segment *seg)
Definition: dsm.c:1082
int errcode(int sqlerrcode)
Definition: elog.c:610
static int pg_leftmost_one_pos32(uint32 word)
Definition: pg_bitutils.h:32
static void dsm_backend_startup(void)
Definition: dsm.c:395
static dlist_head dsm_segment_list
Definition: dsm.c:128
#define LOG
Definition: elog.h:26
static dsm_handle make_main_region_dsm_handle(int slot)
Definition: dsm.c:1221
void on_dsm_detach(dsm_segment *seg, on_dsm_detach_callback function, Datum arg)
Definition: dsm.c:1091
static uint64 dsm_control_bytes_needed(uint32 nitems)
Definition: dsm.c:1214
Definition: dirent.h:9
size_t first_page
Definition: dsm.c:81
uint32 nitems
Definition: dsm.c:91
bool pinned
Definition: dsm.c:84
void FreePageManagerPut(FreePageManager *fpm, Size first_page, Size npages)
Definition: freepage.c:379
#define slist_foreach_modify(iter, lhead)
Definition: ilist.h:716
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1812
void dsm_pin_segment(dsm_segment *seg)
Definition: dsm.c:916
#define DSM_HANDLE_INVALID
Definition: dsm.h:23
bool FreePageManagerGet(FreePageManager *fpm, Size npages, Size *first_page)
Definition: freepage.c:210
#define dlist_container(type, membername, ptr)
Definition: ilist.h:477
void pfree(void *pointer)
Definition: mcxt.c:1057
static void slist_init(slist_head *head)
Definition: ilist.h:554
Definition: dirent.c:25
#define ERROR
Definition: elog.h:43
uint32 magic
Definition: dsm.c:90
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition: shmem.c:392
#define FATAL
Definition: elog.h:52
#define MAXPGPATH
int MaxBackends
Definition: globals.c:136
on_dsm_detach_callback function
Definition: dsm.c:58
#define DEBUG2
Definition: elog.h:24
void dsm_pin_mapping(dsm_segment *seg)
Definition: dsm.c:876
void on_shmem_exit(pg_on_exit_callback function, Datum arg)
Definition: ipc.c:361
size_t dsm_estimate_size(void)
Definition: dsm.c:441
static char * buf
Definition: pg_test_fsync.c:68
#define DSM_CREATE_NULL_IF_MAXSEGMENTS
Definition: dsm.h:20
void ResourceOwnerEnlargeDSMs(ResourceOwner owner)
Definition: resowner.c:1302
bool IsUnderPostmaster
Definition: globals.c:109
dsm_control_item item[FLEXIBLE_ARRAY_MEMBER]
Definition: dsm.c:93
static bool is_main_region_dsm_handle(dsm_handle handle)
Definition: dsm.c:1240
int errcode_for_file_access(void)
Definition: elog.c:633
static void dsm_postmaster_shutdown(int code, Datum arg)
Definition: dsm.c:330
int dynamic_shared_memory_type
Definition: dsm_impl.c:114
unsigned int uint32
Definition: c.h:374
DIR * AllocateDir(const char *dirname)
Definition: fd.c:2583
void dsm_unpin_segment(dsm_handle handle)
Definition: dsm.c:948
void * mapped_address
Definition: dsm.c:71
static void dlist_delete(dlist_node *node)
Definition: ilist.h:358
static bool dsm_control_segment_sane(dsm_control_header *control, Size mapped_size)
Definition: dsm.c:1196
MemoryContext TopMemoryContext
Definition: mcxt.c:44
void dsm_backend_shutdown(void)
Definition: dsm.c:723
static slist_node * slist_pop_head_node(slist_head *head)
Definition: ilist.h:596
slist_head on_detach
Definition: dsm.c:73
static bool slist_is_empty(slist_head *head)
Definition: ilist.h:563
#define DLIST_STATIC_INIT(name)
Definition: ilist.h:248
#define WARNING
Definition: elog.h:40
dsm_handle handle
Definition: dsm.c:68
#define dlist_head_element(type, membername, lhead)
Definition: ilist.h:487
#define slist_container(type, membername, ptr)
Definition: ilist.h:674
void FreePageManagerInitialize(FreePageManager *fpm, char *base)
Definition: freepage.c:183
uintptr_t Datum
Definition: postgres.h:367
dsm_segment * dsm_create(Size size, int flags)
Definition: dsm.c:487
static void dsm_cleanup_for_mmap(void)
Definition: dsm.c:292
dlist_node * cur
Definition: ilist.h:161
void dsm_unpin_mapping(dsm_segment *seg)
Definition: dsm.c:895
#define ereport(elevel,...)
Definition: elog.h:144
static dsm_control_header * dsm_control
Definition: dsm.c:138
#define FPM_PAGE_SIZE
Definition: freepage.h:30
ResourceOwner resowner
Definition: dsm.c:67
void * dsm_segment_address(dsm_segment *seg)
Definition: dsm.c:1054
#define Assert(condition)
Definition: c.h:745
struct dirent * ReadDir(DIR *dir, const char *dirname)
Definition: fd.c:2649
void * impl_private
Definition: dsm.c:70
size_t npages
Definition: dsm.c:82
static bool dlist_is_empty(dlist_head *head)
Definition: ilist.h:289
void ResourceOwnerForgetDSM(ResourceOwner owner, dsm_segment *seg)
Definition: resowner.c:1322
size_t Size
Definition: c.h:473
uint32 control_slot
Definition: dsm.c:69
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1208
void dsm_detach_all(void)
Definition: dsm.c:741
#define DatumGetPointer(X)
Definition: postgres.h:549
static dsm_segment * dsm_create_descriptor(void)
Definition: dsm.c:1160
uint32 refcnt
Definition: dsm.c:80
static bool dsm_init_done
Definition: dsm.c:106
void dsm_impl_pin_segment(dsm_handle handle, void *impl_private, void **impl_private_pm_handle)
Definition: dsm_impl.c:955
void dsm_detach(dsm_segment *seg)
Definition: dsm.c:769
int errmsg(const char *fmt,...)
Definition: elog.c:824
struct dsm_control_item dsm_control_item
static Size dsm_control_mapped_size
Definition: dsm.c:139
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:797
struct dsm_segment_detach_callback dsm_segment_detach_callback
#define elog(elevel,...)
Definition: elog.h:214
#define PG_DYNSHMEM_MMAP_FILE_PREFIX
Definition: dsm_impl.h:52
int i
void * impl_private_pm_handle
Definition: dsm.c:83
static void slist_delete_current(slist_mutable_iter *iter)
Definition: ilist.h:652
char d_name[MAX_PATH]
Definition: dirent.h:15
#define INVALID_CONTROL_SLOT
Definition: dsm.c:53
void(* on_dsm_detach_callback)(dsm_segment *, Datum arg)
Definition: dsm.h:57
bool dsm_impl_op(dsm_op op, dsm_handle handle, Size request_size, void **impl_private, void **mapped_address, Size *mapped_size, int elevel)
Definition: dsm_impl.c:161
void cancel_on_dsm_detach(dsm_segment *seg, on_dsm_detach_callback function, Datum arg)
Definition: dsm.c:1106
Size dsm_segment_map_length(dsm_segment *seg)
Definition: dsm.c:1064
#define PG_DYNSHMEM_CONTROL_MAGIC
Definition: dsm.c:48
int min_dynamic_shared_memory
Definition: dsm_impl.c:117
#define snprintf
Definition: port.h:193
int FreeDir(DIR *dir)
Definition: fd.c:2701
#define offsetof(type, field)
Definition: c.h:668