pgstat_shmem.c (PostgreSQL Source Code, git master)
1/* -------------------------------------------------------------------------
2 *
3 * pgstat_shmem.c
4 * Storage of stats entries in shared memory
5 *
6 * Copyright (c) 2001-2025, PostgreSQL Global Development Group
7 *
8 * IDENTIFICATION
9 * src/backend/utils/activity/pgstat_shmem.c
10 * -------------------------------------------------------------------------
11 */
12
13#include "postgres.h"
14
15#include "pgstat.h"
16#include "storage/shmem.h"
17#include "utils/memutils.h"
19
20
21#define PGSTAT_ENTRY_REF_HASH_SIZE 128
22
23/* hash table entry for finding the PgStat_EntryRef for a key */
24typedef struct PgStat_EntryRefHashEntry
25{
26 PgStat_HashKey key; /* hash key */
27 char status; /* for simplehash use */
28 PgStat_EntryRef *entry_ref;
29} PgStat_EntryRefHashEntry;
30
31
32/* for references to shared statistics entries */
33#define SH_PREFIX pgstat_entry_ref_hash
34#define SH_ELEMENT_TYPE PgStat_EntryRefHashEntry
35#define SH_KEY_TYPE PgStat_HashKey
36#define SH_KEY key
37#define SH_HASH_KEY(tb, key) \
38 pgstat_hash_hash_key(&key, sizeof(PgStat_HashKey), NULL)
39#define SH_EQUAL(tb, a, b) \
40 pgstat_cmp_hash_key(&a, &b, sizeof(PgStat_HashKey), NULL) == 0
41#define SH_SCOPE static inline
42#define SH_DEFINE
43#define SH_DECLARE
44#include "lib/simplehash.h"
45
46
47static void pgstat_drop_database_and_contents(Oid dboid);
48
49static void pgstat_free_entry(PgStatShared_HashEntry *shent, dshash_seq_status *hstat);
50
51static void pgstat_release_entry_ref(PgStat_HashKey key, PgStat_EntryRef *entry_ref, bool discard_pending);
52static bool pgstat_need_entry_refs_gc(void);
53static void pgstat_gc_entry_refs(void);
54static void pgstat_release_all_entry_refs(bool discard_pending);
55typedef bool (*ReleaseMatchCB) (PgStat_EntryRefHashEntry *, Datum data);
56static void pgstat_release_matching_entry_refs(bool discard_pending, ReleaseMatchCB match, Datum match_data);
57
58static void pgstat_setup_memcxt(void);
59
60
61/* parameter for the shared hash */
62static const dshash_parameters dsh_params = {
63 sizeof(PgStat_HashKey),
64 sizeof(PgStatShared_HashEntry),
65 pgstat_cmp_hash_key,
66 pgstat_hash_hash_key,
67 dshash_memcpy,
68 LWTRANCHE_PGSTATS_HASH
69};
70
71
72/*
73 * Backend local references to shared stats entries. If there are pending
74 * updates to a stats entry, the PgStat_EntryRef is added to the pgStatPending
75 * list.
76 *
77 * When a stats entry is dropped each backend needs to release its reference
78 * to it before the memory can be released. To trigger that
79 * pgStatLocal.shmem->gc_request_count is incremented - which each backend
80 * compares to their copy of pgStatSharedRefAge on a regular basis.
81 */
82static pgstat_entry_ref_hash_hash *pgStatEntryRefHash = NULL;
83static int pgStatSharedRefAge = 0; /* cache age of pgStatLocal.shmem */
84
85/*
86 * Memory contexts containing the pgStatEntryRefHash table and the
87 * pgStatSharedRef entries respectively. Kept separate to make it easier to
88 * track / attribute memory usage.
89 */
90static MemoryContext pgStatSharedRefContext = NULL;
91static MemoryContext pgStatEntryRefHashContext = NULL;
92
93
94/* ------------------------------------------------------------
95 * Public functions called from postmaster follow
96 * ------------------------------------------------------------
97 */
98
99/*
100 * The size of the shared memory allocation for stats stored in the shared
101 * stats hash table. This allocation will be done as part of the main shared
102 * memory, rather than dynamic shared memory, allowing it to be initialized in
103 * postmaster.
104 */
105static Size
106pgstat_dsa_init_size(void)
107{
108 Size sz;
109
110 /*
111 * The dshash header / initial buckets array needs to fit into "plain"
112 * shared memory, but it's beneficial to not need dsm segments
113 * immediately. A size of 256kB seems to work well and is not
114 * disproportional compared to other constant sized shared memory
115 * allocations. NB: To avoid DSMs further, the user can configure
116 * min_dynamic_shared_memory.
117 */
118 sz = 256 * 1024;
119 Assert(dsa_minimum_size() <= sz);
120 return MAXALIGN(sz);
121}
122
123/*
124 * Compute shared memory space needed for cumulative statistics
125 */
126Size
127StatsShmemSize(void)
128{
129 Size sz;
130
131 sz = MAXALIGN(sizeof(PgStat_ShmemControl));
132 sz = add_size(sz, pgstat_dsa_init_size());
133
134 /* Add shared memory for all the custom fixed-numbered statistics */
135 for (PgStat_Kind kind = PGSTAT_KIND_CUSTOM_MIN; kind <= PGSTAT_KIND_CUSTOM_MAX; kind++)
136 {
137 const PgStat_KindInfo *kind_info = pgstat_get_kind_info(kind);
138
139 if (!kind_info)
140 continue;
141 if (!kind_info->fixed_amount)
142 continue;
143
144 Assert(kind_info->shared_size != 0);
145
146 sz += MAXALIGN(kind_info->shared_size);
147 }
148
149 return sz;
150}
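
/*
 * Illustrative sketch (not part of the original file): the loop above only
 * relies on a custom fixed-numbered kind advertising fixed_amount and a
 * non-zero shared_size, plus an init_shmem_cb that is invoked from
 * StatsShmemInit().  The struct, names, and field values below are
 * assumptions for illustration; real custom kinds are registered through
 * the cumulative statistics registration API elsewhere.
 */
typedef struct ExampleCustomStats
{
	LWLock		lock;			/* protects the counter */
	int64		counter;
} ExampleCustomStats;

static void
example_init_shmem(void *stats)
{
	ExampleCustomStats *s = (ExampleCustomStats *) stats;

	/* zero the freshly allocated area and set up its lock */
	memset(s, 0, sizeof(*s));
	LWLockInitialize(&s->lock, LWTRANCHE_PGSTATS_DATA);
}

static const PgStat_KindInfo example_custom_kind = {
	.name = "example_custom",
	.fixed_amount = true,
	.shared_size = sizeof(ExampleCustomStats),
	.init_shmem_cb = example_init_shmem,
};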
151
152/*
153 * Initialize cumulative statistics system during startup
154 */
155void
156StatsShmemInit(void)
157{
158 bool found;
159 Size sz;
160
161 sz = StatsShmemSize();
162 pgStatLocal.shmem = (PgStat_ShmemControl *)
163 ShmemInitStruct("Shared Memory Stats", sz, &found);
164
165 if (!IsUnderPostmaster)
166 {
167 dsa_area *dsa;
168 dshash_table *dsh;
169 PgStat_ShmemControl *ctl = pgStatLocal.shmem;
170 char *p = (char *) ctl;
171
172 Assert(!found);
173
174 /* the allocation of pgStatLocal.shmem itself */
175 p += MAXALIGN(sizeof(PgStat_ShmemControl));
176
177 /*
178 * Create a small dsa allocation in plain shared memory. This is
179 * required because postmaster cannot use dsm segments. It also
180 * provides a small efficiency win.
181 */
182 ctl->raw_dsa_area = p;
183 p += MAXALIGN(pgstat_dsa_init_size());
184 dsa = dsa_create_in_place(ctl->raw_dsa_area,
185 pgstat_dsa_init_size(),
186 LWTRANCHE_PGSTATS_DSA, NULL);
187 dsa_pin(dsa);
188
189 /*
190 * To ensure dshash is created in "plain" shared memory, temporarily
191 * limit size of dsa to the initial size of the dsa.
192 */
193 dsa_set_size_limit(dsa, pgstat_dsa_init_size());
194
195 /*
196 * With the limit in place, create the dshash table. XXX: It'd be nice
197 * if there were dshash_create_in_place().
198 */
199 dsh = dshash_create(dsa, &dsh_params, NULL);
200 ctl->hash_handle = dshash_get_hash_table_handle(dsh);
201
202 /* lift limit set above */
203 dsa_set_size_limit(dsa, -1);
204
205 /*
206 * Postmaster will never access these again, thus free the local
207 * dsa/dshash references.
208 */
209 dshash_detach(dsh);
210 dsa_detach(dsa);
211
212 pg_atomic_init_u64(&ctl->gc_request_count, 1);
213
214 /* initialize fixed-numbered stats */
215 for (PgStat_Kind kind = PGSTAT_KIND_MIN; kind <= PGSTAT_KIND_MAX; kind++)
216 {
217 const PgStat_KindInfo *kind_info = pgstat_get_kind_info(kind);
218 char *ptr;
219
220 if (!kind_info || !kind_info->fixed_amount)
221 continue;
222
223 if (pgstat_is_kind_builtin(kind))
224 ptr = ((char *) ctl) + kind_info->shared_ctl_off;
225 else
226 {
227 int idx = kind - PGSTAT_KIND_CUSTOM_MIN;
228
229 Assert(kind_info->shared_size != 0);
230 ctl->custom_data[idx] = ShmemAlloc(kind_info->shared_size);
231 ptr = ctl->custom_data[idx];
232 }
233
234 kind_info->init_shmem_cb(ptr);
235 }
236 }
237 else
238 {
239 Assert(found);
240 }
241}
242
243void
244pgstat_attach_shmem(void)
245{
246 MemoryContext oldcontext;
247
248 Assert(pgStatLocal.dsa == NULL);
249
250 /* stats shared memory persists for the backend lifetime */
251 oldcontext = MemoryContextSwitchTo(TopMemoryContext);
252
253 pgStatLocal.dsa = dsa_attach_in_place(pgStatLocal.shmem->raw_dsa_area,
254 NULL);
255 dsa_pin_mapping(pgStatLocal.dsa);
256
257 pgStatLocal.shared_hash = dshash_attach(pgStatLocal.dsa, &dsh_params,
258 pgStatLocal.shmem->hash_handle, NULL);
259
260 MemoryContextSwitchTo(oldcontext);
261}
262
263void
264pgstat_detach_shmem(void)
265{
266 Assert(pgStatLocal.dsa);
267
268 /* we shouldn't leave references to shared stats */
269 pgstat_release_all_entry_refs(false);
270
271 dshash_detach(pgStatLocal.shared_hash);
272 pgStatLocal.shared_hash = NULL;
273
274 dsa_detach(pgStatLocal.dsa);
275
276 /*
277 * dsa_detach() does not decrement the DSA reference count as no segment
278 * was provided to dsa_attach_in_place(), causing no cleanup callbacks to
279 * be registered. Hence, release it manually now.
280 */
281 dsa_release_in_place(pgStatLocal.shmem->raw_dsa_area);
282
283 pgStatLocal.dsa = NULL;
284}
285
286
287/* ------------------------------------------------------------
288 * Maintenance of shared memory stats entries
289 * ------------------------------------------------------------
290 */
291
292PgStatShared_Common *
293pgstat_init_entry(PgStat_Kind kind,
294 PgStatShared_HashEntry *shhashent)
295{
296 /* Create new stats entry. */
297 dsa_pointer chunk;
298 PgStatShared_Common *shheader;
299
300 /*
301 * Initialize refcount to 1, marking it as valid / not dropped. The entry
302 * can't be freed before the initialization because it can't be found as
303 * long as we hold the dshash partition lock. Caller needs to increase
304 * further if a longer lived reference is needed.
305 */
306 pg_atomic_init_u32(&shhashent->refcount, 1);
307
308 /*
309 * Initialize "generation" to 0, as freshly created.
310 */
311 pg_atomic_init_u32(&shhashent->generation, 0);
312 shhashent->dropped = false;
313
314 chunk = dsa_allocate0(pgStatLocal.dsa, pgstat_get_kind_info(kind)->shared_size);
315 shheader = dsa_get_address(pgStatLocal.dsa, chunk);
316 shheader->magic = 0xdeadbeef;
317
318 /* Link the new entry from the hash entry. */
319 shhashent->body = chunk;
320
321 LWLockInitialize(&shheader->lock, LWTRANCHE_PGSTATS_DATA);
322
323 return shheader;
324}
325
326static PgStatShared_Common *
327pgstat_reinit_entry(PgStat_Kind kind, PgStatShared_HashEntry *shhashent)
328{
329 PgStatShared_Common *shheader;
330
331 shheader = dsa_get_address(pgStatLocal.dsa, shhashent->body);
332
333 /* mark as not dropped anymore */
334 pg_atomic_fetch_add_u32(&shhashent->refcount, 1);
335
336 /*
337 * Increment "generation", to let any backend with local references know
338 * that what they point to is outdated.
339 */
340 pg_atomic_fetch_add_u32(&shhashent->generation, 1);
341 shhashent->dropped = false;
342
343 /* reinitialize content */
344 Assert(shheader->magic == 0xdeadbeef);
345 memset(pgstat_get_entry_data(kind, shheader), 0,
346 pgstat_get_entry_len(kind));
347
348 return shheader;
349}
350
351static void
352pgstat_setup_shared_refs(void)
353{
354 if (likely(pgStatEntryRefHash != NULL))
355 return;
356
357 pgStatEntryRefHash =
358 pgstat_entry_ref_hash_create(pgStatEntryRefHashContext,
359 PGSTAT_ENTRY_REF_HASH_SIZE, NULL);
360 pgStatSharedRefAge =
361 pg_atomic_read_u64(&pgStatLocal.shmem->gc_request_count);
362}
363
364/*
365 * Helper function for pgstat_get_entry_ref().
366 */
367static void
368pgstat_acquire_entry_ref(PgStat_EntryRef *entry_ref,
369 PgStatShared_HashEntry *shhashent,
370 PgStatShared_Common *shheader)
371{
372 Assert(shheader->magic == 0xdeadbeef);
373 Assert(pg_atomic_read_u32(&shhashent->refcount) > 0);
374
375 pg_atomic_fetch_add_u32(&shhashent->refcount, 1);
376
377 dshash_release_lock(pgStatLocal.shared_hash, shhashent);
378
379 entry_ref->shared_stats = shheader;
380 entry_ref->shared_entry = shhashent;
381 entry_ref->generation = pg_atomic_read_u32(&shhashent->generation);
382}
383
384/*
385 * Helper function for pgstat_get_entry_ref().
386 */
387static bool
388pgstat_get_entry_ref_cached(PgStat_HashKey key, PgStat_EntryRef **entry_ref_p)
389{
390 bool found;
391 PgStat_EntryRefHashEntry *cache_entry;
392
393 /*
394 * We immediately insert a cache entry, because it avoids 1) multiple
395 * hashtable lookups in case of a cache miss 2) having to deal with
396 * out-of-memory errors after incrementing PgStatShared_Common->refcount.
397 */
398
399 cache_entry = pgstat_entry_ref_hash_insert(pgStatEntryRefHash, key, &found);
400
401 if (!found || !cache_entry->entry_ref)
402 {
403 PgStat_EntryRef *entry_ref;
404
405 cache_entry->entry_ref = entry_ref =
406 MemoryContextAlloc(pgStatSharedRefContext,
407 sizeof(PgStat_EntryRef));
408 entry_ref->shared_stats = NULL;
409 entry_ref->shared_entry = NULL;
410 entry_ref->pending = NULL;
411
412 found = false;
413 }
414 else if (cache_entry->entry_ref->shared_stats == NULL)
415 {
416 Assert(cache_entry->entry_ref->pending == NULL);
417 found = false;
418 }
419 else
420 {
421 PgStat_EntryRef *entry_ref PG_USED_FOR_ASSERTS_ONLY;
422
423 entry_ref = cache_entry->entry_ref;
424 Assert(entry_ref->shared_entry != NULL);
425 Assert(entry_ref->shared_stats != NULL);
426
427 Assert(entry_ref->shared_stats->magic == 0xdeadbeef);
428 /* should have at least our reference */
429 Assert(pg_atomic_read_u32(&entry_ref->shared_entry->refcount) > 0);
430 }
431
432 *entry_ref_p = cache_entry->entry_ref;
433 return found;
434}
435
436/*
437 * Get a shared stats reference. If create is true, the shared stats object is
438 * created if it does not exist.
439 *
440 * When create is true, and created_entry is non-NULL, it'll be set to true
441 * if the entry is newly created, false otherwise.
442 */
443PgStat_EntryRef *
444pgstat_get_entry_ref(PgStat_Kind kind, Oid dboid, uint64 objid, bool create,
445 bool *created_entry)
446{
447 PgStat_HashKey key;
448 PgStatShared_HashEntry *shhashent;
449 PgStatShared_Common *shheader = NULL;
450 PgStat_EntryRef *entry_ref;
451
452 /* clear padding */
453 memset(&key, 0, sizeof(struct PgStat_HashKey));
454
455 key.kind = kind;
456 key.dboid = dboid;
457 key.objid = objid;
458
459 /*
460 * passing in created_entry only makes sense if we possibly could create
461 * entry.
462 */
463 Assert(create || created_entry == NULL);
464
465 pgstat_assert_is_up();
466 Assert(pgStatLocal.shared_hash != NULL);
467 Assert(!pgStatLocal.shmem->is_shutdown);
468
469 pgstat_setup_memcxt();
470 pgstat_setup_shared_refs();
471 if (created_entry != NULL)
472 *created_entry = false;
473
474 /*
475 * Check if other backends dropped stats that could not be deleted because
476 * somebody held references to it. If so, check this backend's references.
477 * This is not expected to happen often. The location of the check is a
478 * bit random, but this is a relatively frequently called path, so better
479 * than most.
480 */
481 */
482 if (pgstat_need_entry_refs_gc())
483 pgstat_gc_entry_refs();
484 /*
485 * First check the lookup cache hashtable in local memory. If we find a
486 * match here we can avoid taking locks / causing contention.
487 */
488 if (pgstat_get_entry_ref_cached(key, &entry_ref))
489 return entry_ref;
490
491 Assert(entry_ref != NULL);
492
493 /*
494 * Do a lookup in the hash table first - it's quite likely that the entry
495 * already exists, and that way we only need a shared lock.
496 */
497 shhashent = dshash_find(pgStatLocal.shared_hash, &key, false);
498
499 if (create && !shhashent)
500 {
501 bool shfound;
502
503 /*
504 * It's possible that somebody created the entry since the above
505 * lookup. If so, fall through to the same path we'd have taken if it
506 * had already been created before the dshash_find() call above.
507 */
508 shhashent = dshash_find_or_insert(pgStatLocal.shared_hash, &key, &shfound);
509 if (!shfound)
510 {
511 shheader = pgstat_init_entry(kind, shhashent);
512 pgstat_acquire_entry_ref(entry_ref, shhashent, shheader);
513
514 if (created_entry != NULL)
515 *created_entry = true;
516
517 return entry_ref;
518 }
519 }
520
521 if (!shhashent)
522 {
523 /*
524 * If we're not creating, delete the reference again. In all
525 * likelihood it's just a stats lookup - no point wasting memory for a
526 * shared ref to nothing...
527 */
528 pgstat_release_entry_ref(key, entry_ref, false);
529
530 return NULL;
531 }
532 else
533 {
534 /*
535 * Can get here either because dshash_find() found a match, or if
536 * dshash_find_or_insert() found a concurrently inserted entry.
537 */
538
539 if (shhashent->dropped && create)
540 {
541 /*
542 * There are legitimate cases where the old stats entry might not
543 * yet have been dropped by the time it's reused. The most obvious
544 * case are replication slot stats, where a new slot can be
545 * created with the same index just after dropping. But oid
546 * wraparound can lead to other cases as well. We just reset the
547 * stats to their plain state, while incrementing its "generation"
548 * in the shared entry for any remaining local references.
549 */
550 shheader = pgstat_reinit_entry(kind, shhashent);
551 pgstat_acquire_entry_ref(entry_ref, shhashent, shheader);
552
553 if (created_entry != NULL)
554 *created_entry = true;
555
556 return entry_ref;
557 }
558 else if (shhashent->dropped)
559 {
559 {
560 dshash_release_lock(pgStatLocal.shared_hash, shhashent);
561 pgstat_release_entry_ref(key, entry_ref, false);
562
563 return NULL;
564 }
565 else
566 {
567 shheader = dsa_get_address(pgStatLocal.dsa, shhashent->body);
568 pgstat_acquire_entry_ref(entry_ref, shhashent, shheader);
569
570 return entry_ref;
571 }
572 }
573}
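
/*
 * Illustrative sketch (not part of the original file): a read-only caller
 * passes create = false and must cope with both a missing entry and an
 * entry that is concurrently marked as dropped, mirroring what
 * pgstat_reset_entry() below does.  Names prefixed with example_ are
 * placeholders.
 */
static bool
example_entry_exists(PgStat_Kind kind, Oid dboid, uint64 objid)
{
	PgStat_EntryRef *entry_ref;

	entry_ref = pgstat_get_entry_ref(kind, dboid, objid, false, NULL);

	return entry_ref != NULL && !entry_ref->shared_entry->dropped;
}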
574
575static void
576pgstat_release_entry_ref(PgStat_HashKey key, PgStat_EntryRef *entry_ref,
577 bool discard_pending)
578{
579 if (entry_ref && entry_ref->pending)
580 {
581 if (discard_pending)
582 pgstat_delete_pending_entry(entry_ref);
583 else
584 elog(ERROR, "releasing ref with pending data");
585 }
586
587 if (entry_ref && entry_ref->shared_stats)
588 {
589 Assert(entry_ref->shared_stats->magic == 0xdeadbeef);
590 Assert(entry_ref->pending == NULL);
591
592 /*
593 * This can't race with another backend looking up the stats entry and
594 * increasing the refcount because it is not "legal" to create
595 * additional references to dropped entries.
596 */
597 if (pg_atomic_fetch_sub_u32(&entry_ref->shared_entry->refcount, 1) == 1)
598 {
599 PgStatShared_HashEntry *shent;
600
601 /*
602 * We're the last referrer to this entry, try to drop the shared
603 * entry.
604 */
605
606 /* only dropped entries can reach a 0 refcount */
607 Assert(entry_ref->shared_entry->dropped);
608
608
609 shent = dshash_find(pgStatLocal.shared_hash,
610 &entry_ref->shared_entry->key,
611 true);
612 if (!shent)
613 elog(ERROR, "could not find just referenced shared stats entry");
614
615 /*
616 * This entry may have been reinitialized while trying to release
617 * it, so double-check that it has not been reused while holding a
618 * lock on its shared entry.
619 */
620 if (pg_atomic_read_u32(&entry_ref->shared_entry->generation) ==
621 entry_ref->generation)
622 {
623 /* Same "generation", so we're OK with the removal */
624 Assert(pg_atomic_read_u32(&shent->refcount) == 0);
625 Assert(entry_ref->shared_entry == shent);
626 pgstat_free_entry(shent, NULL);
627 }
628 else
629 {
630 /*
631 * Shared stats entry has been reinitialized, so do not drop
632 * its shared entry, only release its lock.
633 */
634 dshash_release_lock(pgStatLocal.shared_hash, shent);
635 }
636 }
637 }
638
639 if (!pgstat_entry_ref_hash_delete(pgStatEntryRefHash, key))
640 elog(ERROR, "entry ref vanished before deletion");
641
642 if (entry_ref)
643 pfree(entry_ref);
644}
645
646/*
647 * Acquire exclusive lock on the entry.
648 *
649 * If nowait is true, it's just a conditional acquire, and the result
650 * *must* be checked to verify success.
651 * If nowait is false, waits as necessary, always returning true.
652 */
653bool
654pgstat_lock_entry(PgStat_EntryRef *entry_ref, bool nowait)
655{
656 LWLock *lock = &entry_ref->shared_stats->lock;
657
658 if (nowait)
659 return LWLockConditionalAcquire(lock, LW_EXCLUSIVE);
660
661 LWLockAcquire(lock, LW_EXCLUSIVE);
662 return true;
663}
664
665/*
666 * Acquire shared lock on the entry.
667 *
668 * Separate from pgstat_lock_entry() as most callers will need to lock
669 * exclusively. The wait semantics are identical.
670 */
671bool
672pgstat_lock_entry_shared(PgStat_EntryRef *entry_ref, bool nowait)
673{
674 LWLock *lock = &entry_ref->shared_stats->lock;
675
676 if (nowait)
677 return LWLockConditionalAcquire(lock, LW_SHARED);
678
679 LWLockAcquire(lock, LW_SHARED);
680 return true;
681}
682
683void
684pgstat_unlock_entry(PgStat_EntryRef *entry_ref)
685{
686 LWLockRelease(&entry_ref->shared_stats->lock);
687}
688
689/*
690 * Helper function to fetch and lock shared stats.
691 */
692PgStat_EntryRef *
693pgstat_get_entry_ref_locked(PgStat_Kind kind, Oid dboid, uint64 objid,
694 bool nowait)
695{
696 PgStat_EntryRef *entry_ref;
697
698 /* find shared table stats entry corresponding to the local entry */
699 entry_ref = pgstat_get_entry_ref(kind, dboid, objid, true, NULL);
700
701 /* lock the shared entry to protect the content, skip if failed */
702 if (!pgstat_lock_entry(entry_ref, nowait))
703 return NULL;
704
705 return entry_ref;
706}
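
/*
 * Illustrative sketch (not part of the original file): the usual update
 * pattern is to acquire a locked reference, modify the kind-specific struct
 * behind shared_stats, and release the lock.  With nowait = true the caller
 * must be prepared for a NULL result.  example_flush_counter() and its
 * arguments are hypothetical.
 */
static bool
example_flush_counter(PgStat_Kind kind, Oid dboid, uint64 objid, bool nowait)
{
	PgStat_EntryRef *entry_ref;

	entry_ref = pgstat_get_entry_ref_locked(kind, dboid, objid, nowait);
	if (!entry_ref)
		return false;			/* lock contended, caller retries later */

	/* ... apply pending counters to entry_ref->shared_stats here ... */

	pgstat_unlock_entry(entry_ref);
	return true;
}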
707
708void
709pgstat_request_entry_refs_gc(void)
710{
711 pg_atomic_fetch_add_u64(&pgStatLocal.shmem->gc_request_count, 1);
712}
713
714static bool
715pgstat_need_entry_refs_gc(void)
716{
717 uint64 curage;
718
719 if (!pgStatEntryRefHash)
720 return false;
721
722 /* should have been initialized when creating pgStatEntryRefHash */
723 Assert(pgStatSharedRefAge != 0);
724
725 curage = pg_atomic_read_u64(&pgStatLocal.shmem->gc_request_count);
726
727 return pgStatSharedRefAge != curage;
728}
729
730static void
731pgstat_gc_entry_refs(void)
732{
733 pgstat_entry_ref_hash_iterator i;
735 uint64 curage;
736
737 curage = pg_atomic_read_u64(&pgStatLocal.shmem->gc_request_count);
738 Assert(curage != 0);
739
740 /*
741 * Some entries have been dropped or reinitialized. Invalidate cache
742 * pointer to them.
743 */
744 pgstat_entry_ref_hash_start_iterate(pgStatEntryRefHash, &i);
745 while ((ent = pgstat_entry_ref_hash_iterate(pgStatEntryRefHash, &i)) != NULL)
746 {
747 PgStat_EntryRef *entry_ref = ent->entry_ref;
748
749 Assert(!entry_ref->shared_stats ||
750 entry_ref->shared_stats->magic == 0xdeadbeef);
751
752 /*
753 * "generation" checks for the case of entries being reinitialized,
754 * and "dropped" for the case where these are.. dropped.
755 */
756 if (!entry_ref->shared_entry->dropped &&
757 pg_atomic_read_u32(&entry_ref->shared_entry->generation) ==
758 entry_ref->generation)
759 continue;
760
761 /* cannot gc shared ref that has pending data */
762 if (entry_ref->pending != NULL)
763 continue;
764
765 pgstat_release_entry_ref(ent->key, entry_ref, false);
766 }
767
768 pgStatSharedRefAge = curage;
769}
770
771static void
772pgstat_release_matching_entry_refs(bool discard_pending, ReleaseMatchCB match,
773 Datum match_data)
774{
775 pgstat_entry_ref_hash_iterator i;
776 PgStat_EntryRefHashEntry *ent;
777
778 if (pgStatEntryRefHash == NULL)
779 return;
780
781 pgstat_entry_ref_hash_start_iterate(pgStatEntryRefHash, &i);
782
783 while ((ent = pgstat_entry_ref_hash_iterate(pgStatEntryRefHash, &i))
784 != NULL)
785 {
786 Assert(ent->entry_ref != NULL);
787
788 if (match && !match(ent, match_data))
789 continue;
790
791 pgstat_release_entry_ref(ent->key, ent->entry_ref, discard_pending);
792 }
793}
794
795/*
796 * Release all local references to shared stats entries.
797 *
798 * When a process exits it cannot do so while still holding references onto
799 * stats entries, otherwise the shared stats entries could never be freed.
800 */
801static void
802pgstat_release_all_entry_refs(bool discard_pending)
803{
804 if (pgStatEntryRefHash == NULL)
805 return;
806
807 pgstat_release_matching_entry_refs(discard_pending, NULL, 0);
808 Assert(pgStatEntryRefHash->members == 0);
809 pgstat_entry_ref_hash_destroy(pgStatEntryRefHash);
810 pgStatEntryRefHash = NULL;
811}
812
813static bool
814match_db(PgStat_EntryRefHashEntry *ent, Datum match_data)
815{
816 Oid dboid = DatumGetObjectId(match_data);
817
818 return ent->key.dboid == dboid;
819}
820
821static void
822pgstat_release_db_entry_refs(Oid dboid)
823{
824 pgstat_release_matching_entry_refs( /* discard pending = */ true,
825 match_db,
826 ObjectIdGetDatum(dboid));
827}
828
829
830/* ------------------------------------------------------------
831 * Dropping and resetting of stats entries
832 * ------------------------------------------------------------
833 */
834
835static void
836pgstat_free_entry(PgStatShared_HashEntry *shent, dshash_seq_status *hstat)
837{
838 dsa_pointer pdsa;
839
840 /*
841 * Fetch dsa pointer before deleting entry - that way we can free the
842 * memory after releasing the lock.
843 */
844 pdsa = shent->body;
845
846 if (!hstat)
847 dshash_delete_entry(pgStatLocal.shared_hash, shent);
848 else
849 dshash_delete_current(hstat);
850
851 dsa_free(pgStatLocal.dsa, pdsa);
852}
853
854/*
855 * Helper for both pgstat_drop_database_and_contents() and
856 * pgstat_drop_entry(). If hstat is non-null delete the shared entry using
857 * dshash_delete_current(), otherwise use dshash_delete_entry(). In either
858 * case the entry needs to be already locked.
859 */
860static bool
861pgstat_drop_entry_internal(PgStatShared_HashEntry *shent,
862 dshash_seq_status *hstat)
863{
864 Assert(shent->body != InvalidDsaPointer);
865
866 /* should already have released local reference */
867 if (pgStatEntryRefHash)
868 Assert(!pgstat_entry_ref_hash_lookup(pgStatEntryRefHash, shent->key));
869
870 /*
871 * Signal that the entry is dropped - this will eventually cause other
872 * backends to release their references.
873 */
874 if (shent->dropped)
875 elog(ERROR,
876 "trying to drop stats entry already dropped: kind=%s dboid=%u objid=%" PRIu64 " refcount=%u",
877 pgstat_get_kind_info(shent->key.kind)->name,
878 shent->key.dboid,
879 shent->key.objid,
880 pg_atomic_read_u32(&shent->refcount));
881 shent->dropped = true;
882
883 /* release refcount marking entry as not dropped */
884 if (pg_atomic_sub_fetch_u32(&shent->refcount, 1) == 0)
885 {
886 pgstat_free_entry(shent, hstat);
887 return true;
888 }
889 else
890 {
891 if (!hstat)
892 dshash_release_lock(pgStatLocal.shared_hash, shent);
893 return false;
894 }
895}
896
897/*
898 * Drop stats for the database and all the objects inside that database.
899 */
900static void
901pgstat_drop_database_and_contents(Oid dboid)
902{
903 dshash_seq_status hstat;
904 PgStatShared_HashEntry *p;
905 uint64 not_freed_count = 0;
906
907 Assert(OidIsValid(dboid));
908
910
911 /*
912 * This backend might very well be the only backend holding a reference to
913 * about-to-be-dropped entries. Ensure that we're not preventing it from
914 * being cleaned up till later.
915 *
916 * Doing this separately from the dshash iteration below avoids having to
917 * do so while holding a partition lock on the shared hashtable.
918 */
919 pgstat_release_db_entry_refs(dboid);
920
921 /* some of the dshash entries are to be removed, take exclusive lock. */
922 dshash_seq_init(&hstat, pgStatLocal.shared_hash, true);
923 while ((p = dshash_seq_next(&hstat)) != NULL)
924 {
925 if (p->dropped)
926 continue;
927
928 if (p->key.dboid != dboid)
929 continue;
930
931 if (!pgstat_drop_entry_internal(p, &hstat))
932 {
933 /*
934 * Even statistics for a dropped database might currently be
935 * accessed (consider e.g. database stats for pg_stat_database).
936 */
937 not_freed_count++;
938 }
939 }
940 dshash_seq_term(&hstat);
941
942 /*
943 * If some of the stats data could not be freed, signal the reference
944 * holders to run garbage collection of their cached pgStatLocal.shmem.
945 */
946 if (not_freed_count > 0)
947 pgstat_request_entry_refs_gc();
948}
949
950/*
951 * Drop a single stats entry.
952 *
953 * This routine returns false if the stats entry of the dropped object could
954 * not be freed, true otherwise.
955 *
956 * The callers of this function should call pgstat_request_entry_refs_gc()
957 * if the stats entry could not be freed, to ensure that this entry's memory
958 * can be reclaimed later by a different backend calling
959 * pgstat_gc_entry_refs().
960 */
961bool
962pgstat_drop_entry(PgStat_Kind kind, Oid dboid, uint64 objid)
963{
964 PgStat_HashKey key;
965 PgStatShared_HashEntry *shent;
966 bool freed = true;
967
968 /* clear padding */
969 memset(&key, 0, sizeof(struct PgStat_HashKey));
970
971 key.kind = kind;
972 key.dboid = dboid;
973 key.objid = objid;
974
975 /* delete local reference */
976 if (pgStatEntryRefHash)
977 {
978 PgStat_EntryRefHashEntry *lohashent =
979 pgstat_entry_ref_hash_lookup(pgStatEntryRefHash, key);
980
981 if (lohashent)
982 pgstat_release_entry_ref(lohashent->key, lohashent->entry_ref,
983 true);
984 }
985
986 /* mark entry in shared hashtable as deleted, drop if possible */
987 shent = dshash_find(pgStatLocal.shared_hash, &key, true);
988 if (shent)
989 {
990 freed = pgstat_drop_entry_internal(shent, NULL);
991
992 /*
993 * Database stats contain other stats. Drop those as well when
994 * dropping the database. XXX: Perhaps this should be done in a
995 * slightly more principled way? But not obvious what that'd look
996 * like, and so far this is the only case...
997 */
998 if (key.kind == PGSTAT_KIND_DATABASE)
999 pgstat_drop_database_and_contents(key.dboid);
1000 }
1001
1002 return freed;
1003}
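
/*
 * Illustrative sketch (not part of the original file): as the comment above
 * pgstat_drop_entry() says, a caller asks other backends to garbage-collect
 * their cached references when the entry could not be freed right away.
 */
static void
example_drop_entry_and_gc(PgStat_Kind kind, Oid dboid, uint64 objid)
{
	if (!pgstat_drop_entry(kind, dboid, objid))
		pgstat_request_entry_refs_gc();
}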
1004
1005/*
1006 * Scan through the shared hashtable of stats, dropping statistics if
1007 * approved by the optional do_drop() function.
1008 */
1009void
1010pgstat_drop_matching_entries(bool (*do_drop) (PgStatShared_HashEntry *, Datum),
1011 Datum match_data)
1012{
1013 dshash_seq_status hstat;
1014 PgStatShared_HashEntry *ps;
1015 uint64 not_freed_count = 0;
1016
1017 /* entries are removed, take an exclusive lock */
1018 dshash_seq_init(&hstat, pgStatLocal.shared_hash, true);
1019 while ((ps = dshash_seq_next(&hstat)) != NULL)
1020 {
1021 if (ps->dropped)
1022 continue;
1023
1024 if (do_drop != NULL && !do_drop(ps, match_data))
1025 continue;
1026
1027 /* delete local reference */
1028 if (pgStatEntryRefHash)
1029 {
1030 PgStat_EntryRefHashEntry *lohashent =
1031 pgstat_entry_ref_hash_lookup(pgStatEntryRefHash, ps->key);
1032
1033 if (lohashent)
1034 pgstat_release_entry_ref(lohashent->key, lohashent->entry_ref,
1035 true);
1036 }
1037
1038 if (!pgstat_drop_entry_internal(ps, &hstat))
1039 not_freed_count++;
1040 }
1041 dshash_seq_term(&hstat);
1042
1043 if (not_freed_count > 0)
1044 pgstat_request_entry_refs_gc();
1045}
1046
1047/*
1048 * Scan through the shared hashtable of stats and drop all entries.
1049 */
1050void
1051pgstat_drop_all_entries(void)
1052{
1053 pgstat_drop_matching_entries(NULL, 0);
1054}
1055
1056static void
1057shared_stat_reset_contents(PgStat_Kind kind, PgStatShared_Common *header,
1058 TimestampTz ts)
1059{
1060 const PgStat_KindInfo *kind_info = pgstat_get_kind_info(kind);
1061
1062 memset(pgstat_get_entry_data(kind, header), 0,
1063 pgstat_get_entry_len(kind));
1064
1065 if (kind_info->reset_timestamp_cb)
1066 kind_info->reset_timestamp_cb(header, ts);
1067}
1068
1069/*
1070 * Reset one variable-numbered stats entry.
1071 */
1072void
1073pgstat_reset_entry(PgStat_Kind kind, Oid dboid, uint64 objid, TimestampTz ts)
1074{
1075 PgStat_EntryRef *entry_ref;
1076
1077 Assert(!pgstat_get_kind_info(kind)->fixed_amount);
1078
1079 entry_ref = pgstat_get_entry_ref(kind, dboid, objid, false, NULL);
1080 if (!entry_ref || entry_ref->shared_entry->dropped)
1081 return;
1082
1083 (void) pgstat_lock_entry(entry_ref, false);
1084 shared_stat_reset_contents(kind, entry_ref->shared_stats, ts);
1085 pgstat_unlock_entry(entry_ref);
1086}
1087
1088/*
1089 * Scan through the shared hashtable of stats, resetting statistics if
1090 * approved by the provided do_reset() function.
1091 */
1092void
1093pgstat_reset_matching_entries(bool (*do_reset) (PgStatShared_HashEntry *, Datum),
1094 Datum match_data, TimestampTz ts)
1095{
1096 dshash_seq_status hstat;
1097 PgStatShared_HashEntry *p;
1098
1099 /* dshash entry is not modified, take shared lock */
1100 dshash_seq_init(&hstat, pgStatLocal.shared_hash, false);
1101 while ((p = dshash_seq_next(&hstat)) != NULL)
1102 {
1103 PgStatShared_Common *header;
1104
1105 if (p->dropped)
1106 continue;
1107
1108 if (!do_reset(p, match_data))
1109 continue;
1110
1111 header = dsa_get_address(pgStatLocal.dsa, p->body);
1112
1113 LWLockAcquire(&header->lock, LW_EXCLUSIVE);
1114
1115 shared_stat_reset_contents(p->key.kind, header, ts);
1116
1117 LWLockRelease(&header->lock);
1118 }
1119 dshash_seq_term(&hstat);
1120}
1121
1122static bool
1123match_kind(PgStatShared_HashEntry *p, Datum match_data)
1124{
1125 return p->key.kind == DatumGetInt32(match_data);
1126}
1127
1128void
1129pgstat_reset_entries_of_kind(PgStat_Kind kind, TimestampTz ts)
1130{
1131 pgstat_reset_matching_entries(match_kind, Int32GetDatum(kind), ts);
1132}
1133
1134static void
1135pgstat_setup_memcxt(void)
1136{
1137 if (unlikely(!pgStatSharedRefContext))
1138 pgStatSharedRefContext =
1139 AllocSetContextCreate(TopMemoryContext,
1140 "PgStat Shared Ref",
1141 ALLOCSET_SMALL_SIZES);
1142 if (unlikely(!pgStatEntryRefHashContext))
1143 pgStatEntryRefHashContext =
1144 AllocSetContextCreate(TopMemoryContext,
1145 "PgStat Shared Ref Hash",
1146 ALLOCSET_SMALL_SIZES);
1147}