pgstat.c
1 /* ----------
2  * pgstat.c
3  *
4  * All the statistics collector stuff hacked up in one big, ugly file.
5  *
6  * TODO: - Separate collector, postmaster and backend stuff
7  * into different files.
8  *
9  * - Add some automatic call for pgstat vacuuming.
10  *
11  * - Add a pgstat config column to pg_database, so this
12  * entire thing can be enabled/disabled on a per db basis.
13  *
14  * Copyright (c) 2001-2017, PostgreSQL Global Development Group
15  *
16  * src/backend/postmaster/pgstat.c
17  * ----------
18  */
19 #include "postgres.h"
20 
21 #include <unistd.h>
22 #include <fcntl.h>
23 #include <sys/param.h>
24 #include <sys/time.h>
25 #include <sys/socket.h>
26 #include <netdb.h>
27 #include <netinet/in.h>
28 #include <arpa/inet.h>
29 #include <signal.h>
30 #include <time.h>
31 #ifdef HAVE_SYS_SELECT_H
32 #include <sys/select.h>
33 #endif
34 
35 #include "pgstat.h"
36 
37 #include "access/heapam.h"
38 #include "access/htup_details.h"
39 #include "access/transam.h"
40 #include "access/twophase_rmgr.h"
41 #include "access/xact.h"
42 #include "catalog/pg_database.h"
43 #include "catalog/pg_proc.h"
44 #include "common/ip.h"
45 #include "libpq/libpq.h"
46 #include "libpq/pqsignal.h"
47 #include "mb/pg_wchar.h"
48 #include "miscadmin.h"
49 #include "pg_trace.h"
50 #include "postmaster/autovacuum.h"
52 #include "postmaster/postmaster.h"
53 #include "replication/walsender.h"
54 #include "storage/backendid.h"
55 #include "storage/dsm.h"
56 #include "storage/fd.h"
57 #include "storage/ipc.h"
58 #include "storage/latch.h"
59 #include "storage/lmgr.h"
60 #include "storage/pg_shmem.h"
61 #include "storage/procsignal.h"
62 #include "storage/sinvaladt.h"
63 #include "utils/ascii.h"
64 #include "utils/guc.h"
65 #include "utils/memutils.h"
66 #include "utils/ps_status.h"
67 #include "utils/rel.h"
68 #include "utils/snapmgr.h"
69 #include "utils/timestamp.h"
70 #include "utils/tqual.h"
71 
72 
73 /* ----------
74  * Timer definitions.
75  * ----------
76  */
77 #define PGSTAT_STAT_INTERVAL 500 /* Minimum time between stats file
78  * updates; in milliseconds. */
79 
80 #define PGSTAT_RETRY_DELAY 10 /* How long to wait between checks for a
81  * new file; in milliseconds. */
82 
83 #define PGSTAT_MAX_WAIT_TIME 10000 /* Maximum time to wait for a stats
84  * file update; in milliseconds. */
85 
86 #define PGSTAT_INQ_INTERVAL 640 /* How often to ping the collector for a
87  * new file; in milliseconds. */
88 
89 #define PGSTAT_RESTART_INTERVAL 60 /* How often to attempt to restart a
90  * failed statistics collector; in
91  * seconds. */
92 
93 #define PGSTAT_POLL_LOOP_COUNT (PGSTAT_MAX_WAIT_TIME / PGSTAT_RETRY_DELAY)
94 #define PGSTAT_INQ_LOOP_COUNT (PGSTAT_INQ_INTERVAL / PGSTAT_RETRY_DELAY)
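/*
 * Illustrative check (editorial addition, not part of pgstat.c): with the
 * values above, a backend polls at most 10000 / 10 = 1000 times while waiting
 * for a fresh stats file, re-sending an inquiry every 640 / 10 = 64 polls.
 * A standalone C11 sketch of that arithmetic:
 */
_Static_assert(10000 / 10 == 1000, "PGSTAT_POLL_LOOP_COUNT works out to 1000");
_Static_assert(640 / 10 == 64, "PGSTAT_INQ_LOOP_COUNT works out to 64");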
95 
96 /* Minimum receive buffer size for the collector's socket. */
97 #define PGSTAT_MIN_RCVBUF (100 * 1024)
98 
99 
100 /* ----------
101  * The initial size hints for the hash tables used in the collector.
102  * ----------
103  */
104 #define PGSTAT_DB_HASH_SIZE 16
105 #define PGSTAT_TAB_HASH_SIZE 512
106 #define PGSTAT_FUNCTION_HASH_SIZE 512
107 
108 
109 /* ----------
110  * Total number of backends including auxiliary
111  *
112  * We reserve a slot for each possible BackendId, plus one for each
113  * possible auxiliary process type. (This scheme assumes there is not
114  * more than one of any auxiliary process type at a time.) MaxBackends
115  * includes autovacuum workers and background workers as well.
116  * ----------
117  */
118 #define NumBackendStatSlots (MaxBackends + NUM_AUXPROCTYPES)
119 
120 
121 /* ----------
122  * GUC parameters
123  * ----------
124  */
126 bool pgstat_track_counts = false;
129 
130 /* ----------
131  * Built from GUC parameter
132  * ----------
133  */
137 
138 /*
139  * BgWriter global statistics counters (unused in other processes).
140  * Stored directly in a stats message structure so it can be sent
141  * without needing to copy things around. We assume this inits to zeroes.
142  */
144 
145 /* ----------
146  * Local data
147  * ----------
148  */
150 
152 
154 
155 static bool pgStatRunningInCollector = false;
156 
157 /*
158  * Structures in which backends store per-table info that's waiting to be
159  * sent to the collector.
160  *
161  * NOTE: once allocated, TabStatusArray structures are never moved or deleted
162  * for the life of the backend. Also, we zero out the t_id fields of the
163  * contained PgStat_TableStatus structs whenever they are not actively in use.
164  * This allows relcache pgstat_info pointers to be treated as long-lived data,
165  * avoiding repeated searches in pgstat_initstats() when a relation is
166  * repeatedly opened during a transaction.
167  */
168 #define TABSTAT_QUANTUM 100 /* we alloc this many at a time */
169 
170 typedef struct TabStatusArray
171 {
172  struct TabStatusArray *tsa_next; /* link to next array, if any */
173  int tsa_used; /* # entries currently used */
176 
178 
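/*
 * Illustrative sketch (editorial addition, not part of pgstat.c): the
 * TabStatusArray scheme in miniature.  Entries are allocated in chunks of
 * TABSTAT_QUANTUM; chunks are chained and never moved or freed, so a pointer
 * handed out to a caller stays valid for the life of the process.  The
 * demo_* names are hypothetical, and calloc failure handling is omitted.
 */
#include <stdlib.h>

#define DEMO_QUANTUM 100

typedef struct DemoEntry { unsigned int id; long counts; } DemoEntry;

typedef struct DemoChunk
{
    struct DemoChunk *next;            /* link to next chunk, if any */
    int         used;                  /* # entries currently used */
    DemoEntry   entries[DEMO_QUANTUM]; /* entries never move once handed out */
} DemoChunk;

static DemoChunk *demo_list = NULL;

/* Return a stable pointer to a fresh entry, growing the chain as needed. */
static DemoEntry *
demo_alloc_entry(unsigned int id)
{
    DemoChunk  *chunk;

    if (demo_list == NULL)
        demo_list = calloc(1, sizeof(DemoChunk));
    chunk = demo_list;
    while (chunk->used >= DEMO_QUANTUM)
    {
        if (chunk->next == NULL)
            chunk->next = calloc(1, sizeof(DemoChunk));
        chunk = chunk->next;
    }
    chunk->entries[chunk->used].id = id;
    return &chunk->entries[chunk->used++];
}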
179 /*
180  * pgStatTabHash entry: map from relation OID to PgStat_TableStatus pointer
181  */
182 typedef struct TabStatHashEntry
183 {
187 
188 /*
189  * Hash table for O(1) t_id -> tsa_entry lookup
190  */
192 
193 /*
194  * Backends store per-function info that's waiting to be sent to the collector
195  * in this hash table (indexed by function OID).
196  */
198 
199 /*
200  * Indicates if backend has some function stats that it hasn't yet
201  * sent to the collector.
202  */
203 static bool have_function_stats = false;
204 
205 /*
206  * Tuple insertion/deletion counts for an open transaction can't be propagated
207  * into PgStat_TableStatus counters until we know if it is going to commit
208  * or abort. Hence, we keep these counts in per-subxact structs that live
209  * in TopTransactionContext. This data structure is designed on the assumption
210  * that subxacts won't usually modify very many tables.
211  */
212 typedef struct PgStat_SubXactStatus
213 {
214  int nest_level; /* subtransaction nest level */
215  struct PgStat_SubXactStatus *prev; /* higher-level subxact if any */
216  PgStat_TableXactStatus *first; /* head of list for this subxact */
218 
220 
221 static int pgStatXactCommit = 0;
222 static int pgStatXactRollback = 0;
225 
226 /* Record that's written to 2PC state file when pgstat state is persisted */
227 typedef struct TwoPhasePgStatRecord
228 {
229  PgStat_Counter tuples_inserted; /* tuples inserted in xact */
230  PgStat_Counter tuples_updated; /* tuples updated in xact */
231  PgStat_Counter tuples_deleted; /* tuples deleted in xact */
232  PgStat_Counter inserted_pre_trunc; /* tuples inserted prior to truncate */
233  PgStat_Counter updated_pre_trunc; /* tuples updated prior to truncate */
234  PgStat_Counter deleted_pre_trunc; /* tuples deleted prior to truncate */
235  Oid t_id; /* table's OID */
236  bool t_shared; /* is it a shared catalog? */
237  bool t_truncated; /* was the relation truncated? */
239 
240 /*
241  * Info about current "snapshot" of stats file
242  */
245 
246 /* Status for backends including auxiliary */
248 
249 /* Total number of backends including auxiliary */
250 static int localNumBackends = 0;
251 
252 /*
253  * Cluster wide statistics, kept in the stats collector.
254  * Contains statistics that are not collected per database
255  * or per table.
256  */
259 
260 /*
261  * List of OIDs of databases we need to write out. If an entry is InvalidOid,
262  * it means to write only the shared-catalog stats ("DB 0"); otherwise, we
263  * will write both that DB's data and the shared stats.
264  */
266 
267 /* Signal handler flags */
268 static volatile bool need_exit = false;
269 static volatile bool got_SIGHUP = false;
270 
271 /*
272  * Total time charged to functions so far in the current backend.
273  * We use this to help separate "self" and "other" time charges.
274  * (We assume this initializes to zero.)
275  */
277 
278 
279 /* ----------
280  * Local function forward declarations
281  * ----------
282  */
283 #ifdef EXEC_BACKEND
284 static pid_t pgstat_forkexec(void);
285 #endif
286 
287 NON_EXEC_STATIC void PgstatCollectorMain(int argc, char *argv[]) pg_attribute_noreturn();
288 static void pgstat_exit(SIGNAL_ARGS);
289 static void pgstat_beshutdown_hook(int code, Datum arg);
291 
292 static PgStat_StatDBEntry *pgstat_get_db_entry(Oid databaseid, bool create);
293 static PgStat_StatTabEntry *pgstat_get_tab_entry(PgStat_StatDBEntry *dbentry,
294  Oid tableoid, bool create);
295 static void pgstat_write_statsfiles(bool permanent, bool allDbs);
296 static void pgstat_write_db_statsfile(PgStat_StatDBEntry *dbentry, bool permanent);
297 static HTAB *pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep);
298 static void pgstat_read_db_statsfile(Oid databaseid, HTAB *tabhash, HTAB *funchash, bool permanent);
299 static void backend_read_statsfile(void);
300 static void pgstat_read_current_status(void);
301 
302 static bool pgstat_write_statsfile_needed(void);
303 static bool pgstat_db_requested(Oid databaseid);
304 
305 static void pgstat_send_tabstat(PgStat_MsgTabstat *tsmsg);
306 static void pgstat_send_funcstats(void);
307 static HTAB *pgstat_collect_oids(Oid catalogid);
308 
309 static PgStat_TableStatus *get_tabstat_entry(Oid rel_id, bool isshared);
310 
311 static void pgstat_setup_memcxt(void);
312 
313 static const char *pgstat_get_wait_activity(WaitEventActivity w);
314 static const char *pgstat_get_wait_client(WaitEventClient w);
315 static const char *pgstat_get_wait_ipc(WaitEventIPC w);
316 static const char *pgstat_get_wait_timeout(WaitEventTimeout w);
317 static const char *pgstat_get_wait_io(WaitEventIO w);
318 
319 static void pgstat_setheader(PgStat_MsgHdr *hdr, StatMsgType mtype);
320 static void pgstat_send(void *msg, int len);
321 
322 static void pgstat_recv_inquiry(PgStat_MsgInquiry *msg, int len);
323 static void pgstat_recv_tabstat(PgStat_MsgTabstat *msg, int len);
324 static void pgstat_recv_tabpurge(PgStat_MsgTabpurge *msg, int len);
325 static void pgstat_recv_dropdb(PgStat_MsgDropdb *msg, int len);
326 static void pgstat_recv_resetcounter(PgStat_MsgResetcounter *msg, int len);
329 static void pgstat_recv_autovac(PgStat_MsgAutovacStart *msg, int len);
330 static void pgstat_recv_vacuum(PgStat_MsgVacuum *msg, int len);
331 static void pgstat_recv_analyze(PgStat_MsgAnalyze *msg, int len);
332 static void pgstat_recv_archiver(PgStat_MsgArchiver *msg, int len);
333 static void pgstat_recv_bgwriter(PgStat_MsgBgWriter *msg, int len);
334 static void pgstat_recv_funcstat(PgStat_MsgFuncstat *msg, int len);
335 static void pgstat_recv_funcpurge(PgStat_MsgFuncpurge *msg, int len);
337 static void pgstat_recv_deadlock(PgStat_MsgDeadlock *msg, int len);
338 static void pgstat_recv_tempfile(PgStat_MsgTempFile *msg, int len);
339 
340 /* ------------------------------------------------------------
341  * Public functions called from postmaster follow
342  * ------------------------------------------------------------
343  */
344 
345 /* ----------
346  * pgstat_init() -
347  *
348  * Called from postmaster at startup. Create the resources required
349  * by the statistics collector process. If unable to do so, do not
350  * fail --- better to let the postmaster start with stats collection
351  * disabled.
352  * ----------
353  */
354 void
355 pgstat_init(void)
356 {
357  ACCEPT_TYPE_ARG3 alen;
358  struct addrinfo *addrs = NULL,
359  *addr,
360  hints;
361  int ret;
362  fd_set rset;
363  struct timeval tv;
364  char test_byte;
365  int sel_res;
366  int tries = 0;
367 
368 #define TESTBYTEVAL ((char) 199)
369 
370  /*
371  * This static assertion verifies that we didn't mess up the calculations
372  * involved in selecting maximum payload sizes for our UDP messages.
373  * Because the only consequence of overrunning PGSTAT_MAX_MSG_SIZE would
374  * be silent performance loss from fragmentation, it seems worth having a
375  * compile-time cross-check that we didn't.
376  */
378  "maximum stats message size exceeds PGSTAT_MAX_MSG_SIZE");
379 
380  /*
381  * Create the UDP socket for sending and receiving statistic messages
382  */
383  hints.ai_flags = AI_PASSIVE;
384  hints.ai_family = AF_UNSPEC;
385  hints.ai_socktype = SOCK_DGRAM;
386  hints.ai_protocol = 0;
387  hints.ai_addrlen = 0;
388  hints.ai_addr = NULL;
389  hints.ai_canonname = NULL;
390  hints.ai_next = NULL;
391  ret = pg_getaddrinfo_all("localhost", NULL, &hints, &addrs);
392  if (ret || !addrs)
393  {
394  ereport(LOG,
395  (errmsg("could not resolve \"localhost\": %s",
396  gai_strerror(ret))));
397  goto startup_failed;
398  }
399 
400  /*
401  * On some platforms, pg_getaddrinfo_all() may return multiple addresses,
402  * only one of which will actually work (e.g., both IPv6 and IPv4 addresses
403  * when the kernel will reject IPv6). Worse, the failure may occur at the
404  * bind() or perhaps even connect() stage. So we must loop through the
405  * results till we find a working combination. We will generate LOG
406  * messages, but no error, for bogus combinations.
407  */
408  for (addr = addrs; addr; addr = addr->ai_next)
409  {
410 #ifdef HAVE_UNIX_SOCKETS
411  /* Ignore AF_UNIX sockets, if any are returned. */
412  if (addr->ai_family == AF_UNIX)
413  continue;
414 #endif
415 
416  if (++tries > 1)
417  ereport(LOG,
418  (errmsg("trying another address for the statistics collector")));
419 
420  /*
421  * Create the socket.
422  */
423  if ((pgStatSock = socket(addr->ai_family, SOCK_DGRAM, 0)) == PGINVALID_SOCKET)
424  {
425  ereport(LOG,
427  errmsg("could not create socket for statistics collector: %m")));
428  continue;
429  }
430 
431  /*
432  * Bind it to a kernel assigned port on localhost and get the assigned
433  * port via getsockname().
434  */
435  if (bind(pgStatSock, addr->ai_addr, addr->ai_addrlen) < 0)
436  {
437  ereport(LOG,
439  errmsg("could not bind socket for statistics collector: %m")));
442  continue;
443  }
444 
445  alen = sizeof(pgStatAddr);
446  if (getsockname(pgStatSock, (struct sockaddr *) &pgStatAddr, &alen) < 0)
447  {
448  ereport(LOG,
450  errmsg("could not get address of socket for statistics collector: %m")));
453  continue;
454  }
455 
456  /*
457  * Connect the socket to its own address. This saves a few cycles by
458  * not having to respecify the target address on every send. This also
459  * provides a kernel-level check that only packets from this same
460  * address will be received.
461  */
462  if (connect(pgStatSock, (struct sockaddr *) &pgStatAddr, alen) < 0)
463  {
464  ereport(LOG,
466  errmsg("could not connect socket for statistics collector: %m")));
469  continue;
470  }
471 
472  /*
473  * Try to send and receive a one-byte test message on the socket. This
474  * is to catch situations where the socket can be created but will not
475  * actually pass data (for instance, because kernel packet filtering
476  * rules prevent it).
477  */
478  test_byte = TESTBYTEVAL;
479 
480 retry1:
481  if (send(pgStatSock, &test_byte, 1, 0) != 1)
482  {
483  if (errno == EINTR)
484  goto retry1; /* if interrupted, just retry */
485  ereport(LOG,
487  errmsg("could not send test message on socket for statistics collector: %m")));
490  continue;
491  }
492 
493  /*
494  * There could possibly be a little delay before the message can be
495  * received. We arbitrarily allow up to half a second before deciding
496  * it's broken.
497  */
498  for (;;) /* need a loop to handle EINTR */
499  {
500  FD_ZERO(&rset);
501  FD_SET(pgStatSock, &rset);
502 
503  tv.tv_sec = 0;
504  tv.tv_usec = 500000;
505  sel_res = select(pgStatSock + 1, &rset, NULL, NULL, &tv);
506  if (sel_res >= 0 || errno != EINTR)
507  break;
508  }
509  if (sel_res < 0)
510  {
511  ereport(LOG,
513  errmsg("select() failed in statistics collector: %m")));
516  continue;
517  }
518  if (sel_res == 0 || !FD_ISSET(pgStatSock, &rset))
519  {
520  /*
521  * This is the case we actually think is likely, so take pains to
522  * give a specific message for it.
523  *
524  * errno will not be set meaningfully here, so don't use it.
525  */
526  ereport(LOG,
527  (errcode(ERRCODE_CONNECTION_FAILURE),
528  errmsg("test message did not get through on socket for statistics collector")));
531  continue;
532  }
533 
534  test_byte++; /* just make sure variable is changed */
535 
536 retry2:
537  if (recv(pgStatSock, &test_byte, 1, 0) != 1)
538  {
539  if (errno == EINTR)
540  goto retry2; /* if interrupted, just retry */
541  ereport(LOG,
543  errmsg("could not receive test message on socket for statistics collector: %m")));
546  continue;
547  }
548 
549  if (test_byte != TESTBYTEVAL) /* strictly paranoia ... */
550  {
551  ereport(LOG,
552  (errcode(ERRCODE_INTERNAL_ERROR),
553  errmsg("incorrect test message transmission on socket for statistics collector")));
556  continue;
557  }
558 
559  /* If we get here, we have a working socket */
560  break;
561  }
562 
563  /* Did we find a working address? */
564  if (!addr || pgStatSock == PGINVALID_SOCKET)
565  goto startup_failed;
566 
567  /*
568  * Set the socket to non-blocking IO. This ensures that if the collector
569  * falls behind, statistics messages will be discarded; backends won't
570  * block waiting to send messages to the collector.
571  */
573  {
574  ereport(LOG,
576  errmsg("could not set statistics collector socket to nonblocking mode: %m")));
577  goto startup_failed;
578  }
579 
580  /*
581  * Try to ensure that the socket's receive buffer is at least
582  * PGSTAT_MIN_RCVBUF bytes, so that it won't easily overflow and lose
583  * data. Use of UDP protocol means that we are willing to lose data under
584  * heavy load, but we don't want it to happen just because of ridiculously
585  * small default buffer sizes (such as 8KB on older Windows versions).
586  */
587  {
588  int old_rcvbuf;
589  int new_rcvbuf;
590  ACCEPT_TYPE_ARG3 rcvbufsize = sizeof(old_rcvbuf);
591 
592  if (getsockopt(pgStatSock, SOL_SOCKET, SO_RCVBUF,
593  (char *) &old_rcvbuf, &rcvbufsize) < 0)
594  {
595  elog(LOG, "getsockopt(SO_RCVBUF) failed: %m");
596  /* if we can't get existing size, always try to set it */
597  old_rcvbuf = 0;
598  }
599 
600  new_rcvbuf = PGSTAT_MIN_RCVBUF;
601  if (old_rcvbuf < new_rcvbuf)
602  {
603  if (setsockopt(pgStatSock, SOL_SOCKET, SO_RCVBUF,
604  (char *) &new_rcvbuf, sizeof(new_rcvbuf)) < 0)
605  elog(LOG, "setsockopt(SO_RCVBUF) failed: %m");
606  }
607  }
608 
609  pg_freeaddrinfo_all(hints.ai_family, addrs);
610 
611  return;
612 
613 startup_failed:
614  ereport(LOG,
615  (errmsg("disabling statistics collector for lack of working socket")));
616 
617  if (addrs)
618  pg_freeaddrinfo_all(hints.ai_family, addrs);
619 
623 
624  /*
625  * Adjust GUC variables to suppress useless activity, and for debugging
626  * purposes (seeing track_counts off is a clue that we failed here). We
627  * use PGC_S_OVERRIDE because there is no point in trying to turn it back
628  * on from postgresql.conf without a restart.
629  */
630  SetConfigOption("track_counts", "off", PGC_INTERNAL, PGC_S_OVERRIDE);
631 }
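/*
 * Illustrative sketch (editorial addition, not part of pgstat.c): the essence
 * of the socket setup above in plain POSIX C.  It binds a UDP socket to the
 * IPv4 loopback address directly (the real code resolves "localhost" via
 * pg_getaddrinfo_all and loops over the results), lets the kernel pick the
 * port, connects the socket to itself, verifies a test byte makes the round
 * trip, then switches to non-blocking mode and enlarges the receive buffer.
 * Error handling is reduced to returning -1.
 */
#include <arpa/inet.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int
demo_loopback_udp_socket(void)
{
    struct sockaddr_in addr;
    socklen_t   alen = sizeof(addr);
    int         sock;
    char        test_byte = (char) 199;
    int         rcvbuf = 100 * 1024;    /* cf. PGSTAT_MIN_RCVBUF */

    if ((sock = socket(AF_INET, SOCK_DGRAM, 0)) < 0)
        return -1;

    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
    addr.sin_port = 0;                  /* kernel-assigned port */

    if (bind(sock, (struct sockaddr *) &addr, sizeof(addr)) < 0 ||
        getsockname(sock, (struct sockaddr *) &addr, &alen) < 0 ||
        connect(sock, (struct sockaddr *) &addr, alen) < 0 ||
        send(sock, &test_byte, 1, 0) != 1 ||
        recv(sock, &test_byte, 1, 0) != 1)
    {
        close(sock);
        return -1;
    }

    /* Non-blocking: a slow reader drops packets instead of stalling senders. */
    fcntl(sock, F_SETFL, fcntl(sock, F_GETFL) | O_NONBLOCK);

    /* Ask for a receive buffer big enough to ride out bursts. */
    setsockopt(sock, SOL_SOCKET, SO_RCVBUF, &rcvbuf, sizeof(rcvbuf));

    return sock;
}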
632 
633 /*
634  * subroutine for pgstat_reset_all
635  */
636 static void
637 pgstat_reset_remove_files(const char *directory)
638 {
639  DIR *dir;
640  struct dirent *entry;
641  char fname[MAXPGPATH * 2];
642 
643  dir = AllocateDir(directory);
644  while ((entry = ReadDir(dir, directory)) != NULL)
645  {
646  int nchars;
647  Oid tmp_oid;
648 
649  /*
650  * Skip directory entries that don't match the file names we write.
651  * See get_dbstat_filename for the database-specific pattern.
652  */
653  if (strncmp(entry->d_name, "global.", 7) == 0)
654  nchars = 7;
655  else
656  {
657  nchars = 0;
658  (void) sscanf(entry->d_name, "db_%u.%n",
659  &tmp_oid, &nchars);
660  if (nchars <= 0)
661  continue;
662  /* %u allows leading whitespace, so reject that */
663  if (strchr("0123456789", entry->d_name[3]) == NULL)
664  continue;
665  }
666 
667  if (strcmp(entry->d_name + nchars, "tmp") != 0 &&
668  strcmp(entry->d_name + nchars, "stat") != 0)
669  continue;
670 
671  snprintf(fname, sizeof(fname), "%s/%s", directory,
672  entry->d_name);
673  unlink(fname);
674  }
675  FreeDir(dir);
676 }
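/*
 * Illustrative sketch (editorial addition, not part of pgstat.c): the
 * filename test used above, as a standalone predicate.  It accepts
 * "global.stat", "global.tmp", "db_<oid>.stat" and "db_<oid>.tmp", and
 * rejects anything else, including "db_ 123.stat" (sscanf's %u would
 * silently skip the space, so the digit check catches it).
 */
#include <stdio.h>
#include <string.h>

static int
demo_is_stats_file(const char *name)
{
    int         nchars;
    unsigned int oid;

    if (strncmp(name, "global.", 7) == 0)
        nchars = 7;
    else
    {
        nchars = 0;
        (void) sscanf(name, "db_%u.%n", &oid, &nchars);
        if (nchars <= 0)
            return 0;
        /* %u allows leading whitespace, so insist on a digit after "db_" */
        if (strchr("0123456789", name[3]) == NULL)
            return 0;
    }
    return strcmp(name + nchars, "tmp") == 0 ||
           strcmp(name + nchars, "stat") == 0;
}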
677 
678 /*
679  * pgstat_reset_all() -
680  *
681  * Remove the stats files. This is currently used only if WAL
682  * recovery is needed after a crash.
683  */
684 void
685 pgstat_reset_all(void)
686 {
689 }
690 
691 #ifdef EXEC_BACKEND
692 
693 /*
694  * pgstat_forkexec() -
695  *
696  * Format up the arglist for, then fork and exec, statistics collector process
697  */
698 static pid_t
699 pgstat_forkexec(void)
700 {
701  char *av[10];
702  int ac = 0;
703 
704  av[ac++] = "postgres";
705  av[ac++] = "--forkcol";
706  av[ac++] = NULL; /* filled in by postmaster_forkexec */
707 
708  av[ac] = NULL;
709  Assert(ac < lengthof(av));
710 
711  return postmaster_forkexec(ac, av);
712 }
713 #endif /* EXEC_BACKEND */
714 
715 
716 /*
717  * pgstat_start() -
718  *
719  * Called from postmaster at startup or after an existing collector
720  * died. Attempt to fire up a fresh statistics collector.
721  *
722  * Returns PID of child process, or 0 if fail.
723  *
724  * Note: if fail, we will be called again from the postmaster main loop.
725  */
726 int
727 pgstat_start(void)
728 {
729  time_t curtime;
730  pid_t pgStatPid;
731 
732  /*
733  * Check that the socket is there, else pgstat_init failed and we can do
734  * nothing useful.
735  */
737  return 0;
738 
739  /*
740  * Do nothing if too soon since last collector start. This is a safety
741  * valve to protect against continuous respawn attempts if the collector
742  * is dying immediately at launch. Note that since we will be re-called
743  * from the postmaster main loop, we will get another chance later.
744  */
745  curtime = time(NULL);
746  if ((unsigned int) (curtime - last_pgstat_start_time) <
747  (unsigned int) PGSTAT_RESTART_INTERVAL)
748  return 0;
749  last_pgstat_start_time = curtime;
750 
751  /*
752  * Okay, fork off the collector.
753  */
754 #ifdef EXEC_BACKEND
755  switch ((pgStatPid = pgstat_forkexec()))
756 #else
757  switch ((pgStatPid = fork_process()))
758 #endif
759  {
760  case -1:
761  ereport(LOG,
762  (errmsg("could not fork statistics collector: %m")));
763  return 0;
764 
765 #ifndef EXEC_BACKEND
766  case 0:
767  /* in postmaster child ... */
769 
770  /* Close the postmaster's sockets */
771  ClosePostmasterPorts(false);
772 
773  /* Drop our connection to postmaster's shared memory, as well */
774  dsm_detach_all();
776 
778  break;
779 #endif
780 
781  default:
782  return (int) pgStatPid;
783  }
784 
785  /* shouldn't get here */
786  return 0;
787 }
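/*
 * Illustrative sketch (editorial addition, not part of pgstat.c): the respawn
 * throttle above, reduced to a helper.  It returns 1 if enough time has
 * passed since the last start attempt and records the new attempt; otherwise
 * it returns 0 and the caller simply tries again on a later invocation.
 */
#include <time.h>

#define DEMO_RESTART_INTERVAL 60        /* seconds, cf. PGSTAT_RESTART_INTERVAL */

static time_t demo_last_start_time = 0;

static int
demo_may_restart(void)
{
    time_t      curtime = time(NULL);

    if ((unsigned int) (curtime - demo_last_start_time) <
        (unsigned int) DEMO_RESTART_INTERVAL)
        return 0;                       /* too soon; caller retries later */
    demo_last_start_time = curtime;
    return 1;
}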
788 
789 void
791 {
793 }
794 
795 /* ------------------------------------------------------------
796  * Public functions used by backends follow
797  *------------------------------------------------------------
798  */
799 
800 
801 /* ----------
802  * pgstat_report_stat() -
803  *
804  * Must be called by processes that perform DML: tcop/postgres.c, logical
805  * receiver processes, SPI workers, etc., to send the per-table and function
806  * usage statistics collected so far to the collector. Note that this
807  * is called only when not within a transaction, so it is fair to use
808  * transaction stop time as an approximation of current time.
809  * ----------
810  */
811 void
812 pgstat_report_stat(bool force)
813 {
814  /* we assume this inits to all zeroes: */
815  static const PgStat_TableCounts all_zeroes;
816  static TimestampTz last_report = 0;
817 
819  PgStat_MsgTabstat regular_msg;
820  PgStat_MsgTabstat shared_msg;
821  TabStatusArray *tsa;
822  int i;
823 
824  /* Don't expend a clock check if nothing to do */
825  if ((pgStatTabList == NULL || pgStatTabList->tsa_used == 0) &&
826  pgStatXactCommit == 0 && pgStatXactRollback == 0 &&
828  return;
829 
830  /*
831  * Don't send a message unless it's been at least PGSTAT_STAT_INTERVAL
832  * msec since we last sent one, or the caller wants to force stats out.
833  */
835  if (!force &&
837  return;
838  last_report = now;
839 
840  /*
841  * Destroy pgStatTabHash before we start invalidating PgStat_TableEntry
842  * entries it points to. (Should we fail partway through the loop below,
843  * it's okay to have removed the hashtable already --- the only
844  * consequence is we'd get multiple entries for the same table in the
845  * pgStatTabList, and that's safe.)
846  */
847  if (pgStatTabHash)
848  hash_destroy(pgStatTabHash);
849  pgStatTabHash = NULL;
850 
851  /*
852  * Scan through the TabStatusArray struct(s) to find tables that actually
853  * have counts, and build messages to send. We have to separate shared
854  * relations from regular ones because the databaseid field in the message
855  * header has to depend on that.
856  */
857  regular_msg.m_databaseid = MyDatabaseId;
858  shared_msg.m_databaseid = InvalidOid;
859  regular_msg.m_nentries = 0;
860  shared_msg.m_nentries = 0;
861 
862  for (tsa = pgStatTabList; tsa != NULL; tsa = tsa->tsa_next)
863  {
864  for (i = 0; i < tsa->tsa_used; i++)
865  {
866  PgStat_TableStatus *entry = &tsa->tsa_entries[i];
867  PgStat_MsgTabstat *this_msg;
868  PgStat_TableEntry *this_ent;
869 
870  /* Shouldn't have any pending transaction-dependent counts */
871  Assert(entry->trans == NULL);
872 
873  /*
874  * Ignore entries that didn't accumulate any actual counts, such
875  * as indexes that were opened by the planner but not used.
876  */
877  if (memcmp(&entry->t_counts, &all_zeroes,
878  sizeof(PgStat_TableCounts)) == 0)
879  continue;
880 
881  /*
882  * OK, insert data into the appropriate message, and send if full.
883  */
884  this_msg = entry->t_shared ? &shared_msg : &regular_msg;
885  this_ent = &this_msg->m_entry[this_msg->m_nentries];
886  this_ent->t_id = entry->t_id;
887  memcpy(&this_ent->t_counts, &entry->t_counts,
888  sizeof(PgStat_TableCounts));
889  if (++this_msg->m_nentries >= PGSTAT_NUM_TABENTRIES)
890  {
891  pgstat_send_tabstat(this_msg);
892  this_msg->m_nentries = 0;
893  }
894  }
895  /* zero out TableStatus structs after use */
896  MemSet(tsa->tsa_entries, 0,
897  tsa->tsa_used * sizeof(PgStat_TableStatus));
898  tsa->tsa_used = 0;
899  }
900 
901  /*
902  * Send partial messages. Make sure that any pending xact commit/abort
903  * gets counted, even if there are no table stats to send.
904  */
905  if (regular_msg.m_nentries > 0 ||
907  pgstat_send_tabstat(&regular_msg);
908  if (shared_msg.m_nentries > 0)
909  pgstat_send_tabstat(&shared_msg);
910 
911  /* Now, send function statistics */
913 }
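/*
 * Illustrative sketch (editorial addition, not part of pgstat.c): the
 * accumulate-and-flush pattern used above.  Entries are appended to a
 * fixed-capacity message; whenever the message fills up it is flushed and
 * reused, and a final flush sends whatever is left over.  demo_flush() is a
 * stand-in for pgstat_send_tabstat(); all demo_* names are hypothetical.
 */
#include <stdio.h>

#define DEMO_NUM_ENTRIES 8

typedef struct DemoMsg
{
    int         nentries;
    unsigned int entry[DEMO_NUM_ENTRIES];
} DemoMsg;

static void
demo_flush(DemoMsg *msg)
{
    printf("sending %d entries\n", msg->nentries);
    msg->nentries = 0;
}

static void
demo_report(const unsigned int *ids, int n)
{
    DemoMsg     msg = {0};
    int         i;

    for (i = 0; i < n; i++)
    {
        msg.entry[msg.nentries] = ids[i];
        if (++msg.nentries >= DEMO_NUM_ENTRIES)
            demo_flush(&msg);           /* message full: send and start over */
    }
    if (msg.nentries > 0)
        demo_flush(&msg);               /* send the partial final message */
}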
914 
915 /*
916  * Subroutine for pgstat_report_stat: finish and send a tabstat message
917  */
918 static void
919 pgstat_send_tabstat(PgStat_MsgTabstat *tsmsg)
920 {
921  int n;
922  int len;
923 
924  /* It's unlikely we'd get here with no socket, but maybe not impossible */
926  return;
927 
928  /*
929  * Report and reset accumulated xact commit/rollback and I/O timings
930  * whenever we send a normal tabstat message
931  */
932  if (OidIsValid(tsmsg->m_databaseid))
933  {
938  pgStatXactCommit = 0;
939  pgStatXactRollback = 0;
942  }
943  else
944  {
945  tsmsg->m_xact_commit = 0;
946  tsmsg->m_xact_rollback = 0;
947  tsmsg->m_block_read_time = 0;
948  tsmsg->m_block_write_time = 0;
949  }
950 
951  n = tsmsg->m_nentries;
952  len = offsetof(PgStat_MsgTabstat, m_entry[0]) +
953  n * sizeof(PgStat_TableEntry);
954 
956  pgstat_send(tsmsg, len);
957 }
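/*
 * Illustrative sketch (editorial addition, not part of pgstat.c): the length
 * computation above.  Only the fixed header plus the entries actually filled
 * in are transmitted, so a mostly-empty message stays small on the wire.
 * DemoTabMsg is a hypothetical stand-in; offsetof(T, entry) is the same value
 * as the offsetof(T, entry[0]) form used in the real code.
 */
#include <stddef.h>

typedef struct DemoTabMsg
{
    unsigned int databaseid;
    int         nentries;
    long        entry[64];              /* stand-in for m_entry[PGSTAT_NUM_TABENTRIES] */
} DemoTabMsg;

static size_t
demo_msg_len(const DemoTabMsg *msg)
{
    return offsetof(DemoTabMsg, entry) + msg->nentries * sizeof(msg->entry[0]);
}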
958 
959 /*
960  * Subroutine for pgstat_report_stat: populate and send a function stat message
961  */
962 static void
963 pgstat_send_funcstats(void)
964 {
965  /* we assume this inits to all zeroes: */
966  static const PgStat_FunctionCounts all_zeroes;
967 
968  PgStat_MsgFuncstat msg;
970  HASH_SEQ_STATUS fstat;
971 
972  if (pgStatFunctions == NULL)
973  return;
974 
977  msg.m_nentries = 0;
978 
979  hash_seq_init(&fstat, pgStatFunctions);
980  while ((entry = (PgStat_BackendFunctionEntry *) hash_seq_search(&fstat)) != NULL)
981  {
982  PgStat_FunctionEntry *m_ent;
983 
984  /* Skip it if no counts accumulated since last time */
985  if (memcmp(&entry->f_counts, &all_zeroes,
986  sizeof(PgStat_FunctionCounts)) == 0)
987  continue;
988 
989  /* need to convert format of time accumulators */
990  m_ent = &msg.m_entry[msg.m_nentries];
991  m_ent->f_id = entry->f_id;
992  m_ent->f_numcalls = entry->f_counts.f_numcalls;
995 
996  if (++msg.m_nentries >= PGSTAT_NUM_FUNCENTRIES)
997  {
998  pgstat_send(&msg, offsetof(PgStat_MsgFuncstat, m_entry[0]) +
999  msg.m_nentries * sizeof(PgStat_FunctionEntry));
1000  msg.m_nentries = 0;
1001  }
1002 
1003  /* reset the entry's counts */
1004  MemSet(&entry->f_counts, 0, sizeof(PgStat_FunctionCounts));
1005  }
1006 
1007  if (msg.m_nentries > 0)
1008  pgstat_send(&msg, offsetof(PgStat_MsgFuncstat, m_entry[0]) +
1009  msg.m_nentries * sizeof(PgStat_FunctionEntry));
1010 
1011  have_function_stats = false;
1012 }
1013 
1014 
1015 /* ----------
1016  * pgstat_vacuum_stat() -
1017  *
1018  * Will tell the collector about objects it can get rid of.
1019  * ----------
1020  */
1021 void
1022 pgstat_vacuum_stat(void)
1023 {
1024  HTAB *htab;
1025  PgStat_MsgTabpurge msg;
1026  PgStat_MsgFuncpurge f_msg;
1027  HASH_SEQ_STATUS hstat;
1028  PgStat_StatDBEntry *dbentry;
1029  PgStat_StatTabEntry *tabentry;
1030  PgStat_StatFuncEntry *funcentry;
1031  int len;
1032 
1034  return;
1035 
1036  /*
1037  * If not done for this transaction, read the statistics collector stats
1038  * file into some hash tables.
1039  */
1041 
1042  /*
1043  * Read pg_database and make a list of OIDs of all existing databases
1044  */
1046 
1047  /*
1048  * Search the database hash table for dead databases and tell the
1049  * collector to drop them.
1050  */
1051  hash_seq_init(&hstat, pgStatDBHash);
1052  while ((dbentry = (PgStat_StatDBEntry *) hash_seq_search(&hstat)) != NULL)
1053  {
1054  Oid dbid = dbentry->databaseid;
1055 
1057 
1058  /* the DB entry for shared tables (with InvalidOid) is never dropped */
1059  if (OidIsValid(dbid) &&
1060  hash_search(htab, (void *) &dbid, HASH_FIND, NULL) == NULL)
1061  pgstat_drop_database(dbid);
1062  }
1063 
1064  /* Clean up */
1065  hash_destroy(htab);
1066 
1067  /*
1068  * Lookup our own database entry; if not found, nothing more to do.
1069  */
1070  dbentry = (PgStat_StatDBEntry *) hash_search(pgStatDBHash,
1071  (void *) &MyDatabaseId,
1072  HASH_FIND, NULL);
1073  if (dbentry == NULL || dbentry->tables == NULL)
1074  return;
1075 
1076  /*
1077  * Similarly to above, make a list of all known relations in this DB.
1078  */
1080 
1081  /*
1082  * Initialize our messages table counter to zero
1083  */
1084  msg.m_nentries = 0;
1085 
1086  /*
1087  * Check for all tables listed in stats hashtable if they still exist.
1088  */
1089  hash_seq_init(&hstat, dbentry->tables);
1090  while ((tabentry = (PgStat_StatTabEntry *) hash_seq_search(&hstat)) != NULL)
1091  {
1092  Oid tabid = tabentry->tableid;
1093 
1095 
1096  if (hash_search(htab, (void *) &tabid, HASH_FIND, NULL) != NULL)
1097  continue;
1098 
1099  /*
1100  * Not there, so add this table's Oid to the message
1101  */
1102  msg.m_tableid[msg.m_nentries++] = tabid;
1103 
1104  /*
1105  * If the message is full, send it out and reinitialize to empty
1106  */
1107  if (msg.m_nentries >= PGSTAT_NUM_TABPURGE)
1108  {
1109  len = offsetof(PgStat_MsgTabpurge, m_tableid[0])
1110  + msg.m_nentries * sizeof(Oid);
1111 
1113  msg.m_databaseid = MyDatabaseId;
1114  pgstat_send(&msg, len);
1115 
1116  msg.m_nentries = 0;
1117  }
1118  }
1119 
1120  /*
1121  * Send the rest
1122  */
1123  if (msg.m_nentries > 0)
1124  {
1125  len = offsetof(PgStat_MsgTabpurge, m_tableid[0])
1126  + msg.m_nentries * sizeof(Oid);
1127 
1129  msg.m_databaseid = MyDatabaseId;
1130  pgstat_send(&msg, len);
1131  }
1132 
1133  /* Clean up */
1134  hash_destroy(htab);
1135 
1136  /*
1137  * Now repeat the above steps for functions. However, we needn't bother
1138  * in the common case where no function stats are being collected.
1139  */
1140  if (dbentry->functions != NULL &&
1141  hash_get_num_entries(dbentry->functions) > 0)
1142  {
1144 
1146  f_msg.m_databaseid = MyDatabaseId;
1147  f_msg.m_nentries = 0;
1148 
1149  hash_seq_init(&hstat, dbentry->functions);
1150  while ((funcentry = (PgStat_StatFuncEntry *) hash_seq_search(&hstat)) != NULL)
1151  {
1152  Oid funcid = funcentry->functionid;
1153 
1155 
1156  if (hash_search(htab, (void *) &funcid, HASH_FIND, NULL) != NULL)
1157  continue;
1158 
1159  /*
1160  * Not there, so add this function's Oid to the message
1161  */
1162  f_msg.m_functionid[f_msg.m_nentries++] = funcid;
1163 
1164  /*
1165  * If the message is full, send it out and reinitialize to empty
1166  */
1167  if (f_msg.m_nentries >= PGSTAT_NUM_FUNCPURGE)
1168  {
1169  len = offsetof(PgStat_MsgFuncpurge, m_functionid[0])
1170  + f_msg.m_nentries * sizeof(Oid);
1171 
1172  pgstat_send(&f_msg, len);
1173 
1174  f_msg.m_nentries = 0;
1175  }
1176  }
1177 
1178  /*
1179  * Send the rest
1180  */
1181  if (f_msg.m_nentries > 0)
1182  {
1183  len = offsetof(PgStat_MsgFuncpurge, m_functionid[0])
1184  + f_msg.m_nentries * sizeof(Oid);
1185 
1186  pgstat_send(&f_msg, len);
1187  }
1188 
1189  hash_destroy(htab);
1190  }
1191 }
1192 
1193 
1194 /* ----------
1195  * pgstat_collect_oids() -
1196  *
1197  * Collect the OIDs of all objects listed in the specified system catalog
1198  * into a temporary hash table. Caller should hash_destroy the result
1199  * when done with it. (However, we make the table in CurrentMemoryContext
1200  * so that it will be freed properly in the event of an error.)
1201  * ----------
1202  */
1203 static HTAB *
1204 pgstat_collect_oids(Oid catalogid)
1205 {
1206  HTAB *htab;
1207  HASHCTL hash_ctl;
1208  Relation rel;
1209  HeapScanDesc scan;
1210  HeapTuple tup;
1211  Snapshot snapshot;
1212 
1213  memset(&hash_ctl, 0, sizeof(hash_ctl));
1214  hash_ctl.keysize = sizeof(Oid);
1215  hash_ctl.entrysize = sizeof(Oid);
1216  hash_ctl.hcxt = CurrentMemoryContext;
1217  htab = hash_create("Temporary table of OIDs",
1219  &hash_ctl,
1221 
1222  rel = heap_open(catalogid, AccessShareLock);
1223  snapshot = RegisterSnapshot(GetLatestSnapshot());
1224  scan = heap_beginscan(rel, snapshot, 0, NULL);
1225  while ((tup = heap_getnext(scan, ForwardScanDirection)) != NULL)
1226  {
1227  Oid thisoid = HeapTupleGetOid(tup);
1228 
1230 
1231  (void) hash_search(htab, (void *) &thisoid, HASH_ENTER, NULL);
1232  }
1233  heap_endscan(scan);
1234  UnregisterSnapshot(snapshot);
1236 
1237  return htab;
1238 }
1239 
1240 
1241 /* ----------
1242  * pgstat_drop_database() -
1243  *
1244  * Tell the collector that we just dropped a database.
1245  * (If the message gets lost, we will still clean the dead DB eventually
1246  * via future invocations of pgstat_vacuum_stat().)
1247  * ----------
1248  */
1249 void
1250 pgstat_drop_database(Oid databaseid)
1251 {
1252  PgStat_MsgDropdb msg;
1253 
1255  return;
1256 
1258  msg.m_databaseid = databaseid;
1259  pgstat_send(&msg, sizeof(msg));
1260 }
1261 
1262 
1263 /* ----------
1264  * pgstat_drop_relation() -
1265  *
1266  * Tell the collector that we just dropped a relation.
1267  * (If the message gets lost, we will still clean the dead entry eventually
1268  * via future invocations of pgstat_vacuum_stat().)
1269  *
1270  * Currently not used for lack of any good place to call it; we rely
1271  * entirely on pgstat_vacuum_stat() to clean out stats for dead rels.
1272  * ----------
1273  */
1274 #ifdef NOT_USED
1275 void
1276 pgstat_drop_relation(Oid relid)
1277 {
1278  PgStat_MsgTabpurge msg;
1279  int len;
1280 
1282  return;
1283 
1284  msg.m_tableid[0] = relid;
1285  msg.m_nentries = 1;
1286 
1287  len = offsetof(PgStat_MsgTabpurge, m_tableid[0]) + sizeof(Oid);
1288 
1290  msg.m_databaseid = MyDatabaseId;
1291  pgstat_send(&msg, len);
1292 }
1293 #endif /* NOT_USED */
1294 
1295 
1296 /* ----------
1297  * pgstat_reset_counters() -
1298  *
1299  * Tell the statistics collector to reset counters for our database.
1300  *
1301  * Permission checking for this function is managed through the normal
1302  * GRANT system.
1303  * ----------
1304  */
1305 void
1306 pgstat_reset_counters(void)
1307 {
1309 
1311  return;
1312 
1314  msg.m_databaseid = MyDatabaseId;
1315  pgstat_send(&msg, sizeof(msg));
1316 }
1317 
1318 /* ----------
1319  * pgstat_reset_shared_counters() -
1320  *
1321  * Tell the statistics collector to reset cluster-wide shared counters.
1322  *
1323  * Permission checking for this function is managed through the normal
1324  * GRANT system.
1325  * ----------
1326  */
1327 void
1328 pgstat_reset_shared_counters(const char *target)
1329 {
1331 
1333  return;
1334 
1335  if (strcmp(target, "archiver") == 0)
1337  else if (strcmp(target, "bgwriter") == 0)
1339  else
1340  ereport(ERROR,
1341  (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
1342  errmsg("unrecognized reset target: \"%s\"", target),
1343  errhint("Target must be \"archiver\" or \"bgwriter\".")));
1344 
1346  pgstat_send(&msg, sizeof(msg));
1347 }
1348 
1349 /* ----------
1350  * pgstat_reset_single_counter() -
1351  *
1352  * Tell the statistics collector to reset a single counter.
1353  *
1354  * Permission checking for this function is managed through the normal
1355  * GRANT system.
1356  * ----------
1357  */
1358 void
1359 pgstat_reset_single_counter(Oid objoid, PgStat_Single_Reset_Type type)
1360 {
1362 
1364  return;
1365 
1367  msg.m_databaseid = MyDatabaseId;
1368  msg.m_resettype = type;
1369  msg.m_objectid = objoid;
1370 
1371  pgstat_send(&msg, sizeof(msg));
1372 }
1373 
1374 /* ----------
1375  * pgstat_report_autovac() -
1376  *
1377  * Called from autovacuum.c to report startup of an autovacuum process.
1378  * We are called before InitPostgres is done, so can't rely on MyDatabaseId;
1379  * the db OID must be passed in, instead.
1380  * ----------
1381  */
1382 void
1383 pgstat_report_autovac(Oid dboid)
1384 {
1386 
1388  return;
1389 
1391  msg.m_databaseid = dboid;
1393 
1394  pgstat_send(&msg, sizeof(msg));
1395 }
1396 
1397 
1398 /* ---------
1399  * pgstat_report_vacuum() -
1400  *
1401  * Tell the collector about the table we just vacuumed.
1402  * ---------
1403  */
1404 void
1405 pgstat_report_vacuum(Oid tableoid, bool shared,
1406  PgStat_Counter livetuples, PgStat_Counter deadtuples)
1407 {
1408  PgStat_MsgVacuum msg;
1409 
1411  return;
1412 
1414  msg.m_databaseid = shared ? InvalidOid : MyDatabaseId;
1415  msg.m_tableoid = tableoid;
1418  msg.m_live_tuples = livetuples;
1419  msg.m_dead_tuples = deadtuples;
1420  pgstat_send(&msg, sizeof(msg));
1421 }
1422 
1423 /* --------
1424  * pgstat_report_analyze() -
1425  *
1426  * Tell the collector about the table we just analyzed.
1427  *
1428  * Caller must provide new live- and dead-tuples estimates, as well as a
1429  * flag indicating whether to reset the changes_since_analyze counter.
1430  * --------
1431  */
1432 void
1433 pgstat_report_analyze(Relation rel,
1434  PgStat_Counter livetuples, PgStat_Counter deadtuples,
1435  bool resetcounter)
1436 {
1437  PgStat_MsgAnalyze msg;
1438 
1440  return;
1441 
1442  /*
1443  * Unlike VACUUM, ANALYZE might be running inside a transaction that has
1444  * already inserted and/or deleted rows in the target table. ANALYZE will
1445  * have counted such rows as live or dead respectively. Because we will
1446  * report our counts of such rows at transaction end, we should subtract
1447  * off these counts from what we send to the collector now, else they'll
1448  * be double-counted after commit. (This approach also ensures that the
1449  * collector ends up with the right numbers if we abort instead of
1450  * committing.)
1451  */
1452  if (rel->pgstat_info != NULL)
1453  {
1455 
1456  for (trans = rel->pgstat_info->trans; trans; trans = trans->upper)
1457  {
1458  livetuples -= trans->tuples_inserted - trans->tuples_deleted;
1459  deadtuples -= trans->tuples_updated + trans->tuples_deleted;
1460  }
1461  /* count stuff inserted by already-aborted subxacts, too */
1462  deadtuples -= rel->pgstat_info->t_counts.t_delta_dead_tuples;
1463  /* Since ANALYZE's counts are estimates, we could have underflowed */
1464  livetuples = Max(livetuples, 0);
1465  deadtuples = Max(deadtuples, 0);
1466  }
1467 
1469  msg.m_databaseid = rel->rd_rel->relisshared ? InvalidOid : MyDatabaseId;
1470  msg.m_tableoid = RelationGetRelid(rel);
1472  msg.m_resetcounter = resetcounter;
1474  msg.m_live_tuples = livetuples;
1475  msg.m_dead_tuples = deadtuples;
1476  pgstat_send(&msg, sizeof(msg));
1477 }
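/*
 * Illustrative sketch (editorial addition, not part of pgstat.c): the
 * adjustment above as plain arithmetic.  The real code walks the open
 * (sub)transaction stack and also accounts for already-aborted subxacts;
 * this sketch collapses that to a single set of in-progress deltas.  If the
 * current transaction has inserted ins, updated upd and deleted del rows,
 * ANALYZE already saw those effects, but the same counts will be reported
 * again at commit, so the numbers sent now are reduced and clamped at zero.
 */
static void
demo_adjust_analyze_counts(long *livetuples, long *deadtuples,
                           long ins, long upd, long del)
{
    *livetuples -= ins - del;   /* commit will add ins - del live tuples */
    *deadtuples -= upd + del;   /* commit will add upd + del dead tuples */
    if (*livetuples < 0)
        *livetuples = 0;        /* ANALYZE's numbers are only estimates */
    if (*deadtuples < 0)
        *deadtuples = 0;
}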
1478 
1479 /* --------
1480  * pgstat_report_recovery_conflict() -
1481  *
1482  * Tell the collector about a Hot Standby recovery conflict.
1483  * --------
1484  */
1485 void
1486 pgstat_report_recovery_conflict(int reason)
1487 {
1489 
1491  return;
1492 
1494  msg.m_databaseid = MyDatabaseId;
1495  msg.m_reason = reason;
1496  pgstat_send(&msg, sizeof(msg));
1497 }
1498 
1499 /* --------
1500  * pgstat_report_deadlock() -
1501  *
1502  * Tell the collector about a deadlock detected.
1503  * --------
1504  */
1505 void
1506 pgstat_report_deadlock(void)
1507 {
1508  PgStat_MsgDeadlock msg;
1509 
1511  return;
1512 
1514  msg.m_databaseid = MyDatabaseId;
1515  pgstat_send(&msg, sizeof(msg));
1516 }
1517 
1518 /* --------
1519  * pgstat_report_tempfile() -
1520  *
1521  * Tell the collector about a temporary file.
1522  * --------
1523  */
1524 void
1525 pgstat_report_tempfile(size_t filesize)
1526 {
1527  PgStat_MsgTempFile msg;
1528 
1530  return;
1531 
1533  msg.m_databaseid = MyDatabaseId;
1534  msg.m_filesize = filesize;
1535  pgstat_send(&msg, sizeof(msg));
1536 }
1537 
1538 
1539 /* ----------
1540  * pgstat_ping() -
1541  *
1542  * Send some junk data to the collector to increase traffic.
1543  * ----------
1544  */
1545 void
1546 pgstat_ping(void)
1547 {
1548  PgStat_MsgDummy msg;
1549 
1551  return;
1552 
1554  pgstat_send(&msg, sizeof(msg));
1555 }
1556 
1557 /* ----------
1558  * pgstat_send_inquiry() -
1559  *
1560  * Notify collector that we need fresh data.
1561  * ----------
1562  */
1563 static void
1564 pgstat_send_inquiry(TimestampTz clock_time, TimestampTz cutoff_time, Oid databaseid)
1565 {
1566  PgStat_MsgInquiry msg;
1567 
1569  msg.clock_time = clock_time;
1570  msg.cutoff_time = cutoff_time;
1571  msg.databaseid = databaseid;
1572  pgstat_send(&msg, sizeof(msg));
1573 }
1574 
1575 
1576 /*
1577  * Initialize function call usage data.
1578  * Called by the executor before invoking a function.
1579  */
1580 void
1581 pgstat_init_function_usage(FunctionCallInfoData *fcinfo,
1582  PgStat_FunctionCallUsage *fcu)
1583 {
1584  PgStat_BackendFunctionEntry *htabent;
1585  bool found;
1586 
1587  if (pgstat_track_functions <= fcinfo->flinfo->fn_stats)
1588  {
1589  /* stats not wanted */
1590  fcu->fs = NULL;
1591  return;
1592  }
1593 
1594  if (!pgStatFunctions)
1595  {
1596  /* First time through - initialize function stat table */
1597  HASHCTL hash_ctl;
1598 
1599  memset(&hash_ctl, 0, sizeof(hash_ctl));
1600  hash_ctl.keysize = sizeof(Oid);
1601  hash_ctl.entrysize = sizeof(PgStat_BackendFunctionEntry);
1602  pgStatFunctions = hash_create("Function stat entries",
1604  &hash_ctl,
1605  HASH_ELEM | HASH_BLOBS);
1606  }
1607 
1608  /* Get the stats entry for this function, create if necessary */
1609  htabent = hash_search(pgStatFunctions, &fcinfo->flinfo->fn_oid,
1610  HASH_ENTER, &found);
1611  if (!found)
1612  MemSet(&htabent->f_counts, 0, sizeof(PgStat_FunctionCounts));
1613 
1614  fcu->fs = &htabent->f_counts;
1615 
1616  /* save stats for this function, later used to compensate for recursion */
1617  fcu->save_f_total_time = htabent->f_counts.f_total_time;
1618 
1619  /* save current backend-wide total time */
1620  fcu->save_total = total_func_time;
1621 
1622  /* get clock time as of function start */
1624 }
1625 
1626 /*
1627  * find_funcstat_entry - find any existing PgStat_BackendFunctionEntry entry
1628  * for specified function
1629  *
1630  * If no entry, return NULL, don't create a new one
1631  */
1632 PgStat_BackendFunctionEntry *
1633 find_funcstat_entry(Oid func_id)
1634 {
1635  if (pgStatFunctions == NULL)
1636  return NULL;
1637 
1638  return (PgStat_BackendFunctionEntry *) hash_search(pgStatFunctions,
1639  (void *) &func_id,
1640  HASH_FIND, NULL);
1641 }
1642 
1643 /*
1644  * Calculate function call usage and update stat counters.
1645  * Called by the executor after invoking a function.
1646  *
1647  * In the case of a set-returning function that runs in value-per-call mode,
1648  * we will see multiple pgstat_init_function_usage/pgstat_end_function_usage
1649  * calls for what the user considers a single call of the function. The
1650  * finalize flag should be TRUE on the last call.
1651  */
1652 void
1653 pgstat_end_function_usage(PgStat_FunctionCallUsage *fcu, bool finalize)
1654 {
1655  PgStat_FunctionCounts *fs = fcu->fs;
1656  instr_time f_total;
1657  instr_time f_others;
1658  instr_time f_self;
1659 
1660  /* stats not wanted? */
1661  if (fs == NULL)
1662  return;
1663 
1664  /* total elapsed time in this function call */
1665  INSTR_TIME_SET_CURRENT(f_total);
1666  INSTR_TIME_SUBTRACT(f_total, fcu->f_start);
1667 
1668  /* self usage: elapsed minus anything already charged to other calls */
1669  f_others = total_func_time;
1670  INSTR_TIME_SUBTRACT(f_others, fcu->save_total);
1671  f_self = f_total;
1672  INSTR_TIME_SUBTRACT(f_self, f_others);
1673 
1674  /* update backend-wide total time */
1676 
1677  /*
1678  * Compute the new f_total_time as the total elapsed time added to the
1679  * pre-call value of f_total_time. This is necessary to avoid
1680  * double-counting any time taken by recursive calls of myself. (We do
1681  * not need any similar kluge for self time, since that already excludes
1682  * any recursive calls.)
1683  */
1684  INSTR_TIME_ADD(f_total, fcu->save_f_total_time);
1685 
1686  /* update counters in function stats table */
1687  if (finalize)
1688  fs->f_numcalls++;
1689  fs->f_total_time = f_total;
1690  INSTR_TIME_ADD(fs->f_self_time, f_self);
1691 
1692  /* indicate that we have something to send */
1693  have_function_stats = true;
1694 }
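/*
 * Illustrative sketch (editorial addition, not part of pgstat.c): why the
 * pre-call value of f_total_time is saved and restored.  A fake integer clock
 * stands in for INSTR_TIME; demo_enter()/demo_leave() mirror the logic of
 * pgstat_init_function_usage()/pgstat_end_function_usage().  With the save/
 * restore trick, a recursive call chain charges its elapsed time to
 * total_time only once, while self_time still adds up to exactly the work
 * done at each level.  All demo_* names are hypothetical.
 */
#include <assert.h>

static long demo_clock = 0;             /* fake clock, advanced by "work" */
static long demo_total_func_time = 0;   /* backend-wide total of self times */

typedef struct DemoFuncStats { long total_time, self_time, numcalls; } DemoFuncStats;
typedef struct DemoCallUsage { long start, save_total, save_f_total_time; } DemoCallUsage;

static void
demo_enter(DemoFuncStats *fs, DemoCallUsage *cu)
{
    cu->save_f_total_time = fs->total_time;     /* pre-call total_time */
    cu->save_total = demo_total_func_time;
    cu->start = demo_clock;
}

static void
demo_leave(DemoFuncStats *fs, DemoCallUsage *cu)
{
    long        f_total = demo_clock - cu->start;
    long        f_others = demo_total_func_time - cu->save_total;   /* charged to callees */
    long        f_self = f_total - f_others;

    demo_total_func_time += f_self;
    fs->total_time = cu->save_f_total_time + f_total;   /* not "+=": avoids double count */
    fs->self_time += f_self;
    fs->numcalls++;
}

static void
demo_recursive(DemoFuncStats *fs, int depth)
{
    DemoCallUsage cu;

    demo_enter(fs, &cu);
    demo_clock += 10;                   /* pretend to do 10 units of work */
    if (depth > 0)
        demo_recursive(fs, depth - 1);
    demo_leave(fs, &cu);
}

int
main(void)
{
    DemoFuncStats fs = {0};

    demo_recursive(&fs, 2);             /* three nested calls, 30 units of work */
    assert(fs.numcalls == 3);
    assert(fs.self_time == 30);         /* 10 units of own work per level */
    assert(fs.total_time == 30);        /* elapsed charged once, not 60 */
    return 0;
}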
1695 
1696 
1697 /* ----------
1698  * pgstat_initstats() -
1699  *
1700  * Initialize a relcache entry to count access statistics.
1701  * Called whenever a relation is opened.
1702  *
1703  * We assume that a relcache entry's pgstat_info field is zeroed by
1704  * relcache.c when the relcache entry is made; thereafter it is long-lived
1705  * data. We can avoid repeated searches of the TabStatus arrays when the
1706  * same relation is touched repeatedly within a transaction.
1707  * ----------
1708  */
1709 void
1710 pgstat_initstats(Relation rel)
1711 {
1712  Oid rel_id = rel->rd_id;
1713  char relkind = rel->rd_rel->relkind;
1714 
1715  /* We only count stats for things that have storage */
1716  if (!(relkind == RELKIND_RELATION ||
1717  relkind == RELKIND_MATVIEW ||
1718  relkind == RELKIND_INDEX ||
1719  relkind == RELKIND_TOASTVALUE ||
1720  relkind == RELKIND_SEQUENCE))
1721  {
1722  rel->pgstat_info = NULL;
1723  return;
1724  }
1725 
1727  {
1728  /* We're not counting at all */
1729  rel->pgstat_info = NULL;
1730  return;
1731  }
1732 
1733  /*
1734  * If we already set up this relation in the current transaction, nothing
1735  * to do.
1736  */
1737  if (rel->pgstat_info != NULL &&
1738  rel->pgstat_info->t_id == rel_id)
1739  return;
1740 
1741  /* Else find or make the PgStat_TableStatus entry, and update link */
1742  rel->pgstat_info = get_tabstat_entry(rel_id, rel->rd_rel->relisshared);
1743 }
1744 
1745 /*
1746  * get_tabstat_entry - find or create a PgStat_TableStatus entry for rel
1747  */
1748 static PgStat_TableStatus *
1749 get_tabstat_entry(Oid rel_id, bool isshared)
1750 {
1751  TabStatHashEntry *hash_entry;
1752  PgStat_TableStatus *entry;
1753  TabStatusArray *tsa;
1754  bool found;
1755 
1756  /*
1757  * Create hash table if we don't have it already.
1758  */
1759  if (pgStatTabHash == NULL)
1760  {
1761  HASHCTL ctl;
1762 
1763  memset(&ctl, 0, sizeof(ctl));
1764  ctl.keysize = sizeof(Oid);
1765  ctl.entrysize = sizeof(TabStatHashEntry);
1766 
1767  pgStatTabHash = hash_create("pgstat TabStatusArray lookup hash table",
1769  &ctl,
1770  HASH_ELEM | HASH_BLOBS);
1771  }
1772 
1773  /*
1774  * Find an entry or create a new one.
1775  */
1776  hash_entry = hash_search(pgStatTabHash, &rel_id, HASH_ENTER, &found);
1777  if (!found)
1778  {
1779  /* initialize new entry with null pointer */
1780  hash_entry->tsa_entry = NULL;
1781  }
1782 
1783  /*
1784  * If entry is already valid, we're done.
1785  */
1786  if (hash_entry->tsa_entry)
1787  return hash_entry->tsa_entry;
1788 
1789  /*
1790  * Locate the first pgStatTabList entry with free space, making a new list
1791  * entry if needed. Note that we could get an OOM failure here, but if so
1792  * we have left the hashtable and the list in a consistent state.
1793  */
1794  if (pgStatTabList == NULL)
1795  {
1796  /* Set up first pgStatTabList entry */
1797  pgStatTabList = (TabStatusArray *)
1799  sizeof(TabStatusArray));
1800  }
1801 
1802  tsa = pgStatTabList;
1803  while (tsa->tsa_used >= TABSTAT_QUANTUM)
1804  {
1805  if (tsa->tsa_next == NULL)
1806  tsa->tsa_next = (TabStatusArray *)
1808  sizeof(TabStatusArray));
1809  tsa = tsa->tsa_next;
1810  }
1811 
1812  /*
1813  * Allocate a PgStat_TableStatus entry within this list entry. We assume
1814  * the entry was already zeroed, either at creation or after last use.
1815  */
1816  entry = &tsa->tsa_entries[tsa->tsa_used++];
1817  entry->t_id = rel_id;
1818  entry->t_shared = isshared;
1819 
1820  /*
1821  * Now we can fill the entry in pgStatTabHash.
1822  */
1823  hash_entry->tsa_entry = entry;
1824 
1825  return entry;
1826 }
1827 
1828 /*
1829  * find_tabstat_entry - find any existing PgStat_TableStatus entry for rel
1830  *
1831  * If no entry, return NULL, don't create a new one
1832  *
1833  * Note: if we got an error in the most recent execution of pgstat_report_stat,
1834  * it's possible that an entry exists but there's no hashtable entry for it.
1835  * That's okay, we'll treat this case as "doesn't exist".
1836  */
1837 PgStat_TableStatus *
1838 find_tabstat_entry(Oid rel_id)
1839 {
1840  TabStatHashEntry *hash_entry;
1841 
1842  /* If hashtable doesn't exist, there are no entries at all */
1843  if (!pgStatTabHash)
1844  return NULL;
1845 
1846  hash_entry = hash_search(pgStatTabHash, &rel_id, HASH_FIND, NULL);
1847  if (!hash_entry)
1848  return NULL;
1849 
1850  /* Note that this step could also return NULL, but that's correct */
1851  return hash_entry->tsa_entry;
1852 }
1853 
1854 /*
1855  * get_tabstat_stack_level - add a new (sub)transaction stack entry if needed
1856  */
1857 static PgStat_SubXactStatus *
1858 get_tabstat_stack_level(int nest_level)
1859 {
1860  PgStat_SubXactStatus *xact_state;
1861 
1862  xact_state = pgStatXactStack;
1863  if (xact_state == NULL || xact_state->nest_level != nest_level)
1864  {
1865  xact_state = (PgStat_SubXactStatus *)
1867  sizeof(PgStat_SubXactStatus));
1868  xact_state->nest_level = nest_level;
1869  xact_state->prev = pgStatXactStack;
1870  xact_state->first = NULL;
1871  pgStatXactStack = xact_state;
1872  }
1873  return xact_state;
1874 }
1875 
1876 /*
1877  * add_tabstat_xact_level - add a new (sub)transaction state record
1878  */
1879 static void
1880 add_tabstat_xact_level(PgStat_TableStatus *pgstat_info, int nest_level)
1881 {
1882  PgStat_SubXactStatus *xact_state;
1884 
1885  /*
1886  * If this is the first rel to be modified at the current nest level, we
1887  * first have to push a transaction stack entry.
1888  */
1889  xact_state = get_tabstat_stack_level(nest_level);
1890 
1891  /* Now make a per-table stack entry */
1892  trans = (PgStat_TableXactStatus *)
1894  sizeof(PgStat_TableXactStatus));
1895  trans->nest_level = nest_level;
1896  trans->upper = pgstat_info->trans;
1897  trans->parent = pgstat_info;
1898  trans->next = xact_state->first;
1899  xact_state->first = trans;
1900  pgstat_info->trans = trans;
1901 }
1902 
1903 /*
1904  * pgstat_count_heap_insert - count a tuple insertion of n tuples
1905  */
1906 void
1907 pgstat_count_heap_insert(Relation rel, PgStat_Counter n)
1908 {
1909  PgStat_TableStatus *pgstat_info = rel->pgstat_info;
1910 
1911  if (pgstat_info != NULL)
1912  {
1913  /* We have to log the effect at the proper transactional level */
1914  int nest_level = GetCurrentTransactionNestLevel();
1915 
1916  if (pgstat_info->trans == NULL ||
1917  pgstat_info->trans->nest_level != nest_level)
1918  add_tabstat_xact_level(pgstat_info, nest_level);
1919 
1920  pgstat_info->trans->tuples_inserted += n;
1921  }
1922 }
1923 
1924 /*
1925  * pgstat_count_heap_update - count a tuple update
1926  */
1927 void
1928 pgstat_count_heap_update(Relation rel, bool hot)
1929 {
1930  PgStat_TableStatus *pgstat_info = rel->pgstat_info;
1931 
1932  if (pgstat_info != NULL)
1933  {
1934  /* We have to log the effect at the proper transactional level */
1935  int nest_level = GetCurrentTransactionNestLevel();
1936 
1937  if (pgstat_info->trans == NULL ||
1938  pgstat_info->trans->nest_level != nest_level)
1939  add_tabstat_xact_level(pgstat_info, nest_level);
1940 
1941  pgstat_info->trans->tuples_updated++;
1942 
1943  /* t_tuples_hot_updated is nontransactional, so just advance it */
1944  if (hot)
1945  pgstat_info->t_counts.t_tuples_hot_updated++;
1946  }
1947 }
1948 
1949 /*
1950  * pgstat_count_heap_delete - count a tuple deletion
1951  */
1952 void
1953 pgstat_count_heap_delete(Relation rel)
1954 {
1955  PgStat_TableStatus *pgstat_info = rel->pgstat_info;
1956 
1957  if (pgstat_info != NULL)
1958  {
1959  /* We have to log the effect at the proper transactional level */
1960  int nest_level = GetCurrentTransactionNestLevel();
1961 
1962  if (pgstat_info->trans == NULL ||
1963  pgstat_info->trans->nest_level != nest_level)
1964  add_tabstat_xact_level(pgstat_info, nest_level);
1965 
1966  pgstat_info->trans->tuples_deleted++;
1967  }
1968 }
1969 
1970 /*
1971  * pgstat_truncate_save_counters
1972  *
1973  * Whenever a table is truncated, we save its i/u/d counters so that they can
1974  * be cleared, and if the (sub)xact that executed the truncate later aborts,
1975  * the counters can be restored to the saved (pre-truncate) values. Note we do
1976  * this on the first truncate in any particular subxact level only.
1977  */
1978 static void
1979 pgstat_truncate_save_counters(PgStat_TableXactStatus *trans)
1980 {
1981  if (!trans->truncated)
1982  {
1983  trans->inserted_pre_trunc = trans->tuples_inserted;
1984  trans->updated_pre_trunc = trans->tuples_updated;
1985  trans->deleted_pre_trunc = trans->tuples_deleted;
1986  trans->truncated = true;
1987  }
1988 }
1989 
1990 /*
1991  * pgstat_truncate_restore_counters - restore counters when a truncate aborts
1992  */
1993 static void
1994 pgstat_truncate_restore_counters(PgStat_TableXactStatus *trans)
1995 {
1996  if (trans->truncated)
1997  {
1998  trans->tuples_inserted = trans->inserted_pre_trunc;
1999  trans->tuples_updated = trans->updated_pre_trunc;
2000  trans->tuples_deleted = trans->deleted_pre_trunc;
2001  }
2002 }
2003 
2004 /*
2005  * pgstat_count_truncate - update tuple counters due to truncate
2006  */
2007 void
2008 pgstat_count_truncate(Relation rel)
2009 {
2010  PgStat_TableStatus *pgstat_info = rel->pgstat_info;
2011 
2012  if (pgstat_info != NULL)
2013  {
2014  /* We have to log the effect at the proper transactional level */
2015  int nest_level = GetCurrentTransactionNestLevel();
2016 
2017  if (pgstat_info->trans == NULL ||
2018  pgstat_info->trans->nest_level != nest_level)
2019  add_tabstat_xact_level(pgstat_info, nest_level);
2020 
2021  pgstat_truncate_save_counters(pgstat_info->trans);
2022  pgstat_info->trans->tuples_inserted = 0;
2023  pgstat_info->trans->tuples_updated = 0;
2024  pgstat_info->trans->tuples_deleted = 0;
2025  }
2026 }
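/*
 * Illustrative sketch (editorial addition, not part of pgstat.c): the
 * truncate bookkeeping above.  The first truncate in a (sub)transaction
 * snapshots the running i/u/d counters before they are zeroed; if that
 * (sub)transaction later aborts, the snapshot is put back so the truncate's
 * effect disappears along with it.  The demo_* names are hypothetical.
 */
typedef struct DemoXactCounts
{
    long        inserted, updated, deleted;
    long        inserted_pre_trunc, updated_pre_trunc, deleted_pre_trunc;
    int         truncated;
} DemoXactCounts;

static void
demo_truncate(DemoXactCounts *c)
{
    if (!c->truncated)
    {
        c->inserted_pre_trunc = c->inserted;    /* save once per subxact level */
        c->updated_pre_trunc = c->updated;
        c->deleted_pre_trunc = c->deleted;
        c->truncated = 1;
    }
    c->inserted = c->updated = c->deleted = 0;  /* truncate wipes the counts */
}

static void
demo_truncate_abort(DemoXactCounts *c)
{
    if (c->truncated)
    {
        c->inserted = c->inserted_pre_trunc;    /* restore pre-truncate values */
        c->updated = c->updated_pre_trunc;
        c->deleted = c->deleted_pre_trunc;
    }
}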
2027 
2028 /*
2029  * pgstat_update_heap_dead_tuples - update dead-tuples count
2030  *
2031  * The semantics of this are that we are reporting the nontransactional
2032  * recovery of "delta" dead tuples; so t_delta_dead_tuples decreases
2033  * rather than increasing, and the change goes straight into the per-table
2034  * counter, not into transactional state.
2035  */
2036 void
2037 pgstat_update_heap_dead_tuples(Relation rel, int delta)
2038 {
2039  PgStat_TableStatus *pgstat_info = rel->pgstat_info;
2040 
2041  if (pgstat_info != NULL)
2042  pgstat_info->t_counts.t_delta_dead_tuples -= delta;
2043 }
2044 
2045 
2046 /* ----------
2047  * AtEOXact_PgStat
2048  *
2049  * Called from access/transam/xact.c at top-level transaction commit/abort.
2050  * ----------
2051  */
2052 void
2053 AtEOXact_PgStat(bool isCommit)
2054 {
2055  PgStat_SubXactStatus *xact_state;
2056 
2057  /*
2058  * Count transaction commit or abort. (We use counters, not just bools,
2059  * in case the reporting message isn't sent right away.)
2060  */
2061  if (isCommit)
2062  pgStatXactCommit++;
2063  else
2065 
2066  /*
2067  * Transfer transactional insert/update counts into the base tabstat
2068  * entries. We don't bother to free any of the transactional state, since
2069  * it's all in TopTransactionContext and will go away anyway.
2070  */
2071  xact_state = pgStatXactStack;
2072  if (xact_state != NULL)
2073  {
2075 
2076  Assert(xact_state->nest_level == 1);
2077  Assert(xact_state->prev == NULL);
2078  for (trans = xact_state->first; trans != NULL; trans = trans->next)
2079  {
2080  PgStat_TableStatus *tabstat;
2081 
2082  Assert(trans->nest_level == 1);
2083  Assert(trans->upper == NULL);
2084  tabstat = trans->parent;
2085  Assert(tabstat->trans == trans);
2086  /* restore pre-truncate stats (if any) in case of aborted xact */
2087  if (!isCommit)
2089  /* count attempted actions regardless of commit/abort */
2090  tabstat->t_counts.t_tuples_inserted += trans->tuples_inserted;
2091  tabstat->t_counts.t_tuples_updated += trans->tuples_updated;
2092  tabstat->t_counts.t_tuples_deleted += trans->tuples_deleted;
2093  if (isCommit)
2094  {
2095  tabstat->t_counts.t_truncated = trans->truncated;
2096  if (trans->truncated)
2097  {
2098  /* forget live/dead stats seen by backend thus far */
2099  tabstat->t_counts.t_delta_live_tuples = 0;
2100  tabstat->t_counts.t_delta_dead_tuples = 0;
2101  }
2102  /* insert adds a live tuple, delete removes one */
2103  tabstat->t_counts.t_delta_live_tuples +=
2104  trans->tuples_inserted - trans->tuples_deleted;
2105  /* update and delete each create a dead tuple */
2106  tabstat->t_counts.t_delta_dead_tuples +=
2107  trans->tuples_updated + trans->tuples_deleted;
2108  /* insert, update, delete each count as one change event */
2109  tabstat->t_counts.t_changed_tuples +=
2110  trans->tuples_inserted + trans->tuples_updated +
2111  trans->tuples_deleted;
2112  }
2113  else
2114  {
2115  /* inserted tuples are dead, deleted tuples are unaffected */
2116  tabstat->t_counts.t_delta_dead_tuples +=
2117  trans->tuples_inserted + trans->tuples_updated;
2118  /* an aborted xact generates no changed_tuple events */
2119  }
2120  tabstat->trans = NULL;
2121  }
2122  }
2123  pgStatXactStack = NULL;
2124 
2125  /* Make sure any stats snapshot is thrown away */
2126  pgstat_clear_snapshot();
2127 }
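/*
 * Editor's note: self-contained sketch (not part of pgstat.c) working through
 * the commit-vs-abort arithmetic applied above.  For a transaction that
 * inserted 10, updated 3 and deleted 2 tuples, the per-table deltas become:
 *
 *   commit:  live += 10 - 2 = +8,  dead += 3 + 2 = +5,  changed += 15
 *   abort:   dead += 10 + 3 = +13 (inserted tuples are dead, deletes undone)
 */
#include <stdio.h>

static void
sketch_eoxact(long ins, long upd, long del, int is_commit,
			  long *live, long *dead, long *changed)
{
	if (is_commit)
	{
		*live += ins - del;		/* insert adds a live tuple, delete removes one */
		*dead += upd + del;		/* update and delete each create a dead tuple */
		*changed += ins + upd + del;
	}
	else
		*dead += ins + upd;		/* inserted tuples are dead, deleted tuples unaffected */
}

int
main(void)
{
	long		live = 0, dead = 0, changed = 0;

	sketch_eoxact(10, 3, 2, 1, &live, &dead, &changed);
	printf("commit: live=%ld dead=%ld changed=%ld\n", live, dead, changed);
	return 0;
}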
2128 
2129 /* ----------
2130  * AtEOSubXact_PgStat
2131  *
2132  * Called from access/transam/xact.c at subtransaction commit/abort.
2133  * ----------
2134  */
2135 void
2136 AtEOSubXact_PgStat(bool isCommit, int nestDepth)
2137 {
2138  PgStat_SubXactStatus *xact_state;
2139 
2140  /*
2141  * Transfer transactional insert/update counts into the next higher
2142  * subtransaction state.
2143  */
2144  xact_state = pgStatXactStack;
2145  if (xact_state != NULL &&
2146  xact_state->nest_level >= nestDepth)
2147  {
2148  PgStat_TableXactStatus *trans;
2149  PgStat_TableXactStatus *next_trans;
2150 
2151  /* delink xact_state from stack immediately to simplify reuse case */
2152  pgStatXactStack = xact_state->prev;
2153 
2154  for (trans = xact_state->first; trans != NULL; trans = next_trans)
2155  {
2156  PgStat_TableStatus *tabstat;
2157 
2158  next_trans = trans->next;
2159  Assert(trans->nest_level == nestDepth);
2160  tabstat = trans->parent;
2161  Assert(tabstat->trans == trans);
2162  if (isCommit)
2163  {
2164  if (trans->upper && trans->upper->nest_level == nestDepth - 1)
2165  {
2166  if (trans->truncated)
2167  {
2168  /* propagate the truncate status one level up */
2169  pgstat_truncate_save_counters(trans->upper);
2170  /* replace upper xact stats with ours */
2171  trans->upper->tuples_inserted = trans->tuples_inserted;
2172  trans->upper->tuples_updated = trans->tuples_updated;
2173  trans->upper->tuples_deleted = trans->tuples_deleted;
2174  }
2175  else
2176  {
2177  trans->upper->tuples_inserted += trans->tuples_inserted;
2178  trans->upper->tuples_updated += trans->tuples_updated;
2179  trans->upper->tuples_deleted += trans->tuples_deleted;
2180  }
2181  tabstat->trans = trans->upper;
2182  pfree(trans);
2183  }
2184  else
2185  {
2186  /*
2187  * When there isn't an immediate parent state, we can just
2188  * reuse the record instead of going through a
2189  * palloc/pfree pushup (this works since it's all in
2190  * TopTransactionContext anyway). We have to re-link it
2191  * into the parent level, though, and that might mean
2192  * pushing a new entry into the pgStatXactStack.
2193  */
2194  PgStat_SubXactStatus *upper_xact_state;
2195 
2196  upper_xact_state = get_tabstat_stack_level(nestDepth - 1);
2197  trans->next = upper_xact_state->first;
2198  upper_xact_state->first = trans;
2199  trans->nest_level = nestDepth - 1;
2200  }
2201  }
2202  else
2203  {
2204  /*
2205  * On abort, update top-level tabstat counts, then forget the
2206  * subtransaction
2207  */
2208 
2209  /* first restore values obliterated by truncate */
2210  pgstat_truncate_restore_counters(trans);
2211  /* count attempted actions regardless of commit/abort */
2212  tabstat->t_counts.t_tuples_inserted += trans->tuples_inserted;
2213  tabstat->t_counts.t_tuples_updated += trans->tuples_updated;
2214  tabstat->t_counts.t_tuples_deleted += trans->tuples_deleted;
2215  /* inserted tuples are dead, deleted tuples are unaffected */
2216  tabstat->t_counts.t_delta_dead_tuples +=
2217  trans->tuples_inserted + trans->tuples_updated;
2218  tabstat->trans = trans->upper;
2219  pfree(trans);
2220  }
2221  }
2222  pfree(xact_state);
2223  }
2224 }
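/*
 * Editor's note: minimal sketch (not part of pgstat.c) of the subcommit merge
 * above: a committed subtransaction normally adds its counts to its parent,
 * but if it truncated the table its counts replace the parent's (after the
 * parent's pre-truncate values have been saved).  Field names are simplified.
 */
#include <stdbool.h>

struct subxact_counts
{
	long		inserted, updated, deleted;
	bool		truncated;
};

static void
sketch_subcommit_merge(struct subxact_counts *upper, const struct subxact_counts *lower)
{
	if (lower->truncated)
	{
		/* the truncate wiped whatever the parent had accumulated */
		upper->inserted = lower->inserted;
		upper->updated = lower->updated;
		upper->deleted = lower->deleted;
		upper->truncated = true;
	}
	else
	{
		upper->inserted += lower->inserted;
		upper->updated += lower->updated;
		upper->deleted += lower->deleted;
	}
}

int
main(void)
{
	struct subxact_counts upper = {5, 0, 1, false};
	struct subxact_counts lower = {2, 0, 0, true};

	sketch_subcommit_merge(&upper, &lower);
	return upper.inserted == 2 ? 0 : 1;	/* truncating child replaced parent's counts */
}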
2225 
2226 
2227 /*
2228  * AtPrepare_PgStat
2229  * Save the transactional stats state at 2PC transaction prepare.
2230  *
2231  * In this phase we just generate 2PC records for all the pending
2232  * transaction-dependent stats work.
2233  */
2234 void
2235 AtPrepare_PgStat(void)
2236 {
2237  PgStat_SubXactStatus *xact_state;
2238 
2239  xact_state = pgStatXactStack;
2240  if (xact_state != NULL)
2241  {
2242  PgStat_TableXactStatus *trans;
2243 
2244  Assert(xact_state->nest_level == 1);
2245  Assert(xact_state->prev == NULL);
2246  for (trans = xact_state->first; trans != NULL; trans = trans->next)
2247  {
2248  PgStat_TableStatus *tabstat;
2249  TwoPhasePgStatRecord record;
2250 
2251  Assert(trans->nest_level == 1);
2252  Assert(trans->upper == NULL);
2253  tabstat = trans->parent;
2254  Assert(tabstat->trans == trans);
2255 
2256  record.tuples_inserted = trans->tuples_inserted;
2257  record.tuples_updated = trans->tuples_updated;
2258  record.tuples_deleted = trans->tuples_deleted;
2259  record.inserted_pre_trunc = trans->inserted_pre_trunc;
2260  record.updated_pre_trunc = trans->updated_pre_trunc;
2261  record.deleted_pre_trunc = trans->deleted_pre_trunc;
2262  record.t_id = tabstat->t_id;
2263  record.t_shared = tabstat->t_shared;
2264  record.t_truncated = trans->truncated;
2265 
2266  RegisterTwoPhaseRecord(TWOPHASE_RM_PGSTAT_ID, 0,
2267  &record, sizeof(TwoPhasePgStatRecord));
2268  }
2269  }
2270 }
2271 
2272 /*
2273  * PostPrepare_PgStat
2274  * Clean up after successful PREPARE.
2275  *
2276  * All we need do here is unlink the transaction stats state from the
2277  * nontransactional state. The nontransactional action counts will be
2278  * reported to the stats collector immediately, while the effects on live
2279  * and dead tuple counts are preserved in the 2PC state file.
2280  *
2281  * Note: AtEOXact_PgStat is not called during PREPARE.
2282  */
2283 void
2284 PostPrepare_PgStat(void)
2285 {
2286  PgStat_SubXactStatus *xact_state;
2287 
2288  /*
2289  * We don't bother to free any of the transactional state, since it's all
2290  * in TopTransactionContext and will go away anyway.
2291  */
2292  xact_state = pgStatXactStack;
2293  if (xact_state != NULL)
2294  {
2295  PgStat_TableXactStatus *trans;
2296 
2297  for (trans = xact_state->first; trans != NULL; trans = trans->next)
2298  {
2299  PgStat_TableStatus *tabstat;
2300 
2301  tabstat = trans->parent;
2302  tabstat->trans = NULL;
2303  }
2304  }
2305  pgStatXactStack = NULL;
2306 
2307  /* Make sure any stats snapshot is thrown away */
2308  pgstat_clear_snapshot();
2309 }
2310 
2311 /*
2312  * 2PC processing routine for COMMIT PREPARED case.
2313  *
2314  * Load the saved counts into our local pgstats state.
2315  */
2316 void
2317 pgstat_twophase_postcommit(TransactionId xid, uint16 info,
2318  void *recdata, uint32 len)
2319 {
2320  TwoPhasePgStatRecord *rec = (TwoPhasePgStatRecord *) recdata;
2321  PgStat_TableStatus *pgstat_info;
2322 
2323  /* Find or create a tabstat entry for the rel */
2324  pgstat_info = get_tabstat_entry(rec->t_id, rec->t_shared);
2325 
2326  /* Same math as in AtEOXact_PgStat, commit case */
2327  pgstat_info->t_counts.t_tuples_inserted += rec->tuples_inserted;
2328  pgstat_info->t_counts.t_tuples_updated += rec->tuples_updated;
2329  pgstat_info->t_counts.t_tuples_deleted += rec->tuples_deleted;
2330  pgstat_info->t_counts.t_truncated = rec->t_truncated;
2331  if (rec->t_truncated)
2332  {
2333  /* forget live/dead stats seen by backend thus far */
2334  pgstat_info->t_counts.t_delta_live_tuples = 0;
2335  pgstat_info->t_counts.t_delta_dead_tuples = 0;
2336  }
2337  pgstat_info->t_counts.t_delta_live_tuples +=
2338  rec->tuples_inserted - rec->tuples_deleted;
2339  pgstat_info->t_counts.t_delta_dead_tuples +=
2340  rec->tuples_updated + rec->tuples_deleted;
2341  pgstat_info->t_counts.t_changed_tuples +=
2342  rec->tuples_inserted + rec->tuples_updated +
2343  rec->tuples_deleted;
2344 }
2345 
2346 /*
2347  * 2PC processing routine for ROLLBACK PREPARED case.
2348  *
2349  * Load the saved counts into our local pgstats state, but treat them
2350  * as aborted.
2351  */
2352 void
2353 pgstat_twophase_postabort(TransactionId xid, uint16 info,
2354  void *recdata, uint32 len)
2355 {
2356  TwoPhasePgStatRecord *rec = (TwoPhasePgStatRecord *) recdata;
2357  PgStat_TableStatus *pgstat_info;
2358 
2359  /* Find or create a tabstat entry for the rel */
2360  pgstat_info = get_tabstat_entry(rec->t_id, rec->t_shared);
2361 
2362  /* Same math as in AtEOXact_PgStat, abort case */
2363  if (rec->t_truncated)
2364  {
2365  rec->tuples_inserted = rec->inserted_pre_trunc;
2366  rec->tuples_updated = rec->updated_pre_trunc;
2367  rec->tuples_deleted = rec->deleted_pre_trunc;
2368  }
2369  pgstat_info->t_counts.t_tuples_inserted += rec->tuples_inserted;
2370  pgstat_info->t_counts.t_tuples_updated += rec->tuples_updated;
2371  pgstat_info->t_counts.t_tuples_deleted += rec->tuples_deleted;
2372  pgstat_info->t_counts.t_delta_dead_tuples +=
2373  rec->tuples_inserted + rec->tuples_updated;
2374 }
2375 
2376 
2377 /* ----------
2378  * pgstat_fetch_stat_dbentry() -
2379  *
2380  * Support function for the SQL-callable pgstat* functions. Returns
2381  * the collected statistics for one database or NULL. NULL doesn't mean
2382  * that the database doesn't exist, it is just not yet known by the
2383  * collector, so the caller is better off to report ZERO instead.
2384  * ----------
2385  */
2386 PgStat_StatDBEntry *
2387 pgstat_fetch_stat_dbentry(Oid dbid)
2388 {
2389  /*
2390  * If not done for this transaction, read the statistics collector stats
2391  * file into some hash tables.
2392  */
2393  backend_read_statsfile();
2394 
2395  /*
2396  * Lookup the requested database; return NULL if not found
2397  */
2398  return (PgStat_StatDBEntry *) hash_search(pgStatDBHash,
2399  (void *) &dbid,
2400  HASH_FIND, NULL);
2401 }
2402 
2403 
2404 /* ----------
2405  * pgstat_fetch_stat_tabentry() -
2406  *
2407  * Support function for the SQL-callable pgstat* functions. Returns
2408  * the collected statistics for one table or NULL. NULL doesn't mean
2409  * that the table doesn't exist, it is just not yet known by the
2410  * collector, so the caller is better off to report ZERO instead.
2411  * ----------
2412  */
2413 PgStat_StatTabEntry *
2414 pgstat_fetch_stat_tabentry(Oid relid)
2415 {
2416  Oid dbid;
2417  PgStat_StatDBEntry *dbentry;
2418  PgStat_StatTabEntry *tabentry;
2419 
2420  /*
2421  * If not done for this transaction, read the statistics collector stats
2422  * file into some hash tables.
2423  */
2424  backend_read_statsfile();
2425 
2426  /*
2427  * Lookup our database, then look in its table hash table.
2428  */
2429  dbid = MyDatabaseId;
2430  dbentry = (PgStat_StatDBEntry *) hash_search(pgStatDBHash,
2431  (void *) &dbid,
2432  HASH_FIND, NULL);
2433  if (dbentry != NULL && dbentry->tables != NULL)
2434  {
2435  tabentry = (PgStat_StatTabEntry *) hash_search(dbentry->tables,
2436  (void *) &relid,
2437  HASH_FIND, NULL);
2438  if (tabentry)
2439  return tabentry;
2440  }
2441 
2442  /*
2443  * If we didn't find it, maybe it's a shared table.
2444  */
2445  dbid = InvalidOid;
2446  dbentry = (PgStat_StatDBEntry *) hash_search(pgStatDBHash,
2447  (void *) &dbid,
2448  HASH_FIND, NULL);
2449  if (dbentry != NULL && dbentry->tables != NULL)
2450  {
2451  tabentry = (PgStat_StatTabEntry *) hash_search(dbentry->tables,
2452  (void *) &relid,
2453  HASH_FIND, NULL);
2454  if (tabentry)
2455  return tabentry;
2456  }
2457 
2458  return NULL;
2459 }
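/*
 * Editor's note: illustrative sketch (not part of pgstat.c) of how a
 * SQL-callable wrapper is expected to use the fetch function above: a NULL
 * result only means the collector has not reported the table yet, so the
 * caller substitutes zero rather than raising an error.  The wrapper name is
 * hypothetical (the real wrappers live in utils/adt/pgstatfuncs.c) and the
 * sketch assumes the headers already included at the top of this file.
 */
static int64
sketch_tuples_inserted(Oid relid)
{
	PgStat_StatTabEntry *tabentry = pgstat_fetch_stat_tabentry(relid);

	/* not yet known by the collector: report zero, not an error */
	return (tabentry == NULL) ? 0 : (int64) tabentry->tuples_inserted;
}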
2460 
2461 
2462 /* ----------
2463  * pgstat_fetch_stat_funcentry() -
2464  *
2465  * Support function for the SQL-callable pgstat* functions. Returns
2466  * the collected statistics for one function or NULL.
2467  * ----------
2468  */
2469 PgStat_StatFuncEntry *
2470 pgstat_fetch_stat_funcentry(Oid func_id)
2471 {
2472  PgStat_StatDBEntry *dbentry;
2473  PgStat_StatFuncEntry *funcentry = NULL;
2474 
2475  /* load the stats file if needed */
2476  backend_read_statsfile();
2477 
2478  /* Lookup our database, then find the requested function. */
2479  dbentry = pgstat_fetch_stat_dbentry(MyDatabaseId);
2480  if (dbentry != NULL && dbentry->functions != NULL)
2481  {
2482  funcentry = (PgStat_StatFuncEntry *) hash_search(dbentry->functions,
2483  (void *) &func_id,
2484  HASH_FIND, NULL);
2485  }
2486 
2487  return funcentry;
2488 }
2489 
2490 
2491 /* ----------
2492  * pgstat_fetch_stat_beentry() -
2493  *
2494  * Support function for the SQL-callable pgstat* functions. Returns
2495  * our local copy of the current-activity entry for one backend.
2496  *
2497  * NB: caller is responsible for a check if the user is permitted to see
2498  * this info (especially the querystring).
2499  * ----------
2500  */
2501 PgBackendStatus *
2502 pgstat_fetch_stat_beentry(int beid)
2503 {
2504  pgstat_read_current_status();
2505 
2506  if (beid < 1 || beid > localNumBackends)
2507  return NULL;
2508 
2509  return &localBackendStatusTable[beid - 1].backendStatus;
2510 }
2511 
2512 
2513 /* ----------
2514  * pgstat_fetch_stat_local_beentry() -
2515  *
2516  * Like pgstat_fetch_stat_beentry() but with locally computed additions (like
2517  * xid and xmin values of the backend)
2518  *
2519  * NB: caller is responsible for a check if the user is permitted to see
2520  * this info (especially the querystring).
2521  * ----------
2522  */
2523 LocalPgBackendStatus *
2524 pgstat_fetch_stat_local_beentry(int beid)
2525 {
2526  pgstat_read_current_status();
2527 
2528  if (beid < 1 || beid > localNumBackends)
2529  return NULL;
2530 
2531  return &localBackendStatusTable[beid - 1];
2532 }
2533 
2534 
2535 /* ----------
2536  * pgstat_fetch_stat_numbackends() -
2537  *
2538  * Support function for the SQL-callable pgstat* functions. Returns
2539  * the maximum current backend id.
2540  * ----------
2541  */
2542 int
2543 pgstat_fetch_stat_numbackends(void)
2544 {
2545  pgstat_read_current_status();
2546 
2547  return localNumBackends;
2548 }
2549 
2550 /*
2551  * ---------
2552  * pgstat_fetch_stat_archiver() -
2553  *
2554  * Support function for the SQL-callable pgstat* functions. Returns
2555  * a pointer to the archiver statistics struct.
2556  * ---------
2557  */
2558 PgStat_ArchiverStats *
2559 pgstat_fetch_stat_archiver(void)
2560 {
2561  backend_read_statsfile();
2562 
2563  return &archiverStats;
2564 }
2565 
2566 
2567 /*
2568  * ---------
2569  * pgstat_fetch_global() -
2570  *
2571  * Support function for the SQL-callable pgstat* functions. Returns
2572  * a pointer to the global statistics struct.
2573  * ---------
2574  */
2575 PgStat_GlobalStats *
2576 pgstat_fetch_global(void)
2577 {
2578  backend_read_statsfile();
2579 
2580  return &globalStats;
2581 }
2582 
2583 
2584 /* ------------------------------------------------------------
2585  * Functions for management of the shared-memory PgBackendStatus array
2586  * ------------------------------------------------------------
2587  */
2588 
2589 static PgBackendStatus *BackendStatusArray = NULL;
2590 static PgBackendStatus *MyBEEntry = NULL;
2591 static char *BackendAppnameBuffer = NULL;
2592 static char *BackendClientHostnameBuffer = NULL;
2593 static char *BackendActivityBuffer = NULL;
2594 static Size BackendActivityBufferSize = 0;
2595 #ifdef USE_SSL
2596 static PgBackendSSLStatus *BackendSslStatusBuffer = NULL;
2597 #endif
2598 
2599 
2600 /*
2601  * Report shared-memory space needed by CreateSharedBackendStatus.
2602  */
2603 Size
2604 BackendStatusShmemSize(void)
2605 {
2606  Size size;
2607 
2608  /* BackendStatusArray: */
2609  size = mul_size(sizeof(PgBackendStatus), NumBackendStatSlots);
2610  /* BackendAppnameBuffer: */
2611  size = add_size(size,
2612  mul_size(NAMEDATALEN, NumBackendStatSlots));
2613  /* BackendClientHostnameBuffer: */
2614  size = add_size(size,
2615  mul_size(NAMEDATALEN, NumBackendStatSlots));
2616  /* BackendActivityBuffer: */
2617  size = add_size(size,
2618  mul_size(pgstat_track_activity_query_size, NumBackendStatSlots));
2619 #ifdef USE_SSL
2620  /* BackendSslStatusBuffer: */
2621  size = add_size(size,
2622  mul_size(sizeof(PgBackendSSLStatus), NumBackendStatSlots));
2623 #endif
2624  return size;
2625 }
2626 
2627 /*
2628  * Initialize the shared status array and several string buffers
2629  * during postmaster startup.
2630  */
2631 void
2632 CreateSharedBackendStatus(void)
2633 {
2634  Size size;
2635  bool found;
2636  int i;
2637  char *buffer;
2638 
2639  /* Create or attach to the shared array */
2640  size = mul_size(sizeof(PgBackendStatus), NumBackendStatSlots);
2641  BackendStatusArray = (PgBackendStatus *)
2642  ShmemInitStruct("Backend Status Array", size, &found);
2643 
2644  if (!found)
2645  {
2646  /*
2647  * We're the first - initialize.
2648  */
2649  MemSet(BackendStatusArray, 0, size);
2650  }
2651 
2652  /* Create or attach to the shared appname buffer */
2653  size = mul_size(NAMEDATALEN, MaxBackends);
2654  BackendAppnameBuffer = (char *)
2655  ShmemInitStruct("Backend Application Name Buffer", size, &found);
2656 
2657  if (!found)
2658  {
2659  MemSet(BackendAppnameBuffer, 0, size);
2660 
2661  /* Initialize st_appname pointers. */
2662  buffer = BackendAppnameBuffer;
2663  for (i = 0; i < NumBackendStatSlots; i++)
2664  {
2665  BackendStatusArray[i].st_appname = buffer;
2666  buffer += NAMEDATALEN;
2667  }
2668  }
2669 
2670  /* Create or attach to the shared client hostname buffer */
2671  size = mul_size(NAMEDATALEN, MaxBackends);
2672  BackendClientHostnameBuffer = (char *)
2673  ShmemInitStruct("Backend Client Host Name Buffer", size, &found);
2674 
2675  if (!found)
2676  {
2677  MemSet(BackendClientHostnameBuffer, 0, size);
2678 
2679  /* Initialize st_clienthostname pointers. */
2680  buffer = BackendClientHostnameBuffer;
2681  for (i = 0; i < NumBackendStatSlots; i++)
2682  {
2683  BackendStatusArray[i].st_clienthostname = buffer;
2684  buffer += NAMEDATALEN;
2685  }
2686  }
2687 
2688  /* Create or attach to the shared activity buffer */
2689  BackendActivityBufferSize = mul_size(pgstat_track_activity_query_size,
2690  NumBackendStatSlots);
2691  BackendActivityBuffer = (char *)
2692  ShmemInitStruct("Backend Activity Buffer",
2693  BackendActivityBufferSize,
2694  &found);
2695 
2696  if (!found)
2697  {
2698  MemSet(BackendActivityBuffer, 0, size);
2699 
2700  /* Initialize st_activity pointers. */
2701  buffer = BackendActivityBuffer;
2702  for (i = 0; i < NumBackendStatSlots; i++)
2703  {
2704  BackendStatusArray[i].st_activity = buffer;
2705  buffer += pgstat_track_activity_query_size;
2706  }
2707  }
2708 
2709 #ifdef USE_SSL
2710  /* Create or attach to the shared SSL status buffer */
2711  size = mul_size(sizeof(PgBackendSSLStatus), NumBackendStatSlots);
2712  BackendSslStatusBuffer = (PgBackendSSLStatus *)
2713  ShmemInitStruct("Backend SSL Status Buffer", size, &found);
2714 
2715  if (!found)
2716  {
2717  PgBackendSSLStatus *ptr;
2718 
2719  MemSet(BackendSslStatusBuffer, 0, size);
2720 
2721  /* Initialize st_sslstatus pointers. */
2722  ptr = BackendSslStatusBuffer;
2723  for (i = 0; i < NumBackendStatSlots; i++)
2724  {
2725  BackendStatusArray[i].st_sslstatus = ptr;
2726  ptr++;
2727  }
2728  }
2729 #endif
2730 }
2731 
2732 
2733 /* ----------
2734  * pgstat_initialize() -
2735  *
2736  * Initialize pgstats state, and set up our on-proc-exit hook.
2737  * Called from InitPostgres and AuxiliaryProcessMain. For auxiliary process,
2738  * MyBackendId is invalid. Otherwise, MyBackendId must be set,
2739  * but we must not have started any transaction yet (since the
2740  * exit hook must run after the last transaction exit).
2741  * NOTE: MyDatabaseId isn't set yet; so the shutdown hook has to be careful.
2742  * ----------
2743  */
2744 void
2745 pgstat_initialize(void)
2746 {
2747  /* Initialize MyBEEntry */
2748  if (MyBackendId != InvalidBackendId)
2749  {
2750  Assert(MyBackendId >= 1 && MyBackendId <= MaxBackends);
2751  MyBEEntry = &BackendStatusArray[MyBackendId - 1];
2752  }
2753  else
2754  {
2755  /* Must be an auxiliary process */
2756  Assert(MyAuxProcType != NotAnAuxProcess);
2757 
2758  /*
2759  * Assign the MyBEEntry for an auxiliary process. Since it doesn't
2760  * have a BackendId, the slot is statically allocated based on the
2761  * auxiliary process type (MyAuxProcType). Backends use slots indexed
2762  * in the range from 1 to MaxBackends (inclusive), so we use
2763  * MaxBackends + AuxBackendType + 1 as the index of the slot for an
2764  * auxiliary process.
2765  */
2766  MyBEEntry = &BackendStatusArray[MaxBackends + MyAuxProcType];
2767  }
2768 
2769  /* Set up a process-exit hook to clean up */
2770  on_shmem_exit(pgstat_beshutdown_hook, 0);
2771 }
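/*
 * Editor's note: standalone sketch (not part of pgstat.c) of the slot layout
 * described above.  Regular backends use array slots 0 .. MaxBackends-1
 * (indexed by MyBackendId - 1); auxiliary processes are appended after them,
 * one fixed slot per auxiliary process type.  The constant and names below
 * are illustrative only.
 */
#include <stdio.h>

#define SKETCH_MAX_BACKENDS		100		/* stand-in for MaxBackends */

static int
sketch_slot_index(int backend_id, int aux_proc_type)
{
	if (backend_id > 0)					/* ordinary backend */
		return backend_id - 1;
	return SKETCH_MAX_BACKENDS + aux_proc_type;		/* auxiliary process */
}

int
main(void)
{
	printf("backend 1 -> slot %d\n", sketch_slot_index(1, -1));
	printf("aux type 2 -> slot %d\n", sketch_slot_index(0, 2));
	return 0;
}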
2772 
2773 /* ----------
2774  * pgstat_bestart() -
2775  *
2776  * Initialize this backend's entry in the PgBackendStatus array.
2777  * Called from InitPostgres.
2778  *
2779  * Apart from auxiliary processes, MyBackendId, MyDatabaseId,
2780  * session userid, and application_name must be set for a
2781  * backend (hence, this cannot be combined with pgstat_initialize).
2782  * ----------
2783  */
2784 void
2785 pgstat_bestart(void)
2786 {
2787  TimestampTz proc_start_timestamp;
2788  SockAddr clientaddr;
2789  volatile PgBackendStatus *beentry;
2790 
2791  /*
2792  * To minimize the time spent modifying the PgBackendStatus entry, fetch
2793  * all the needed data first.
2794  *
2795  * If we have a MyProcPort, use its session start time (for consistency,
2796  * and to save a kernel call).
2797  */
2798  if (MyProcPort)
2799  proc_start_timestamp = MyProcPort->SessionStartTime;
2800  else
2801  proc_start_timestamp = GetCurrentTimestamp();
2802 
2803  /*
2804  * We may not have a MyProcPort (eg, if this is the autovacuum process).
2805  * If so, use all-zeroes client address, which is dealt with specially in
2806  * pg_stat_get_backend_client_addr and pg_stat_get_backend_client_port.
2807  */
2808  if (MyProcPort)
2809  memcpy(&clientaddr, &MyProcPort->raddr, sizeof(clientaddr));
2810  else
2811  MemSet(&clientaddr, 0, sizeof(clientaddr));
2812 
2813  /*
2814  * Initialize my status entry, following the protocol of bumping
2815  * st_changecount before and after; and make sure it's even afterwards. We
2816  * use a volatile pointer here to ensure the compiler doesn't try to get
2817  * cute.
2818  */
2819  beentry = MyBEEntry;
2820 
2821  /* pgstats state must be initialized from pgstat_initialize() */
2822  Assert(beentry != NULL);
2823 
2824  if (MyBackendId != InvalidBackendId)
2825  {
2826  if (IsAutoVacuumLauncherProcess())
2827  {
2828  /* Autovacuum Launcher */
2829  beentry->st_backendType = B_AUTOVAC_LAUNCHER;
2830  }
2831  else if (IsAutoVacuumWorkerProcess())
2832  {
2833  /* Autovacuum Worker */
2834  beentry->st_backendType = B_AUTOVAC_WORKER;
2835  }
2836  else if (am_walsender)
2837  {
2838  /* Wal sender */
2839  beentry->st_backendType = B_WAL_SENDER;
2840  }
2841  else if (IsBackgroundWorker)
2842  {
2843  /* bgworker */
2844  beentry->st_backendType = B_BG_WORKER;
2845  }
2846  else
2847  {
2848  /* client-backend */
2849  beentry->st_backendType = B_BACKEND;
2850  }
2851  }
2852  else
2853  {
2854  /* Must be an auxiliary process */
2855  Assert(MyBackendId == InvalidBackendId);
2856  switch (MyAuxProcType)
2857  {
2858  case StartupProcess:
2859  beentry->st_backendType = B_STARTUP;
2860  break;
2861  case BgWriterProcess:
2862  beentry->st_backendType = B_BG_WRITER;
2863  break;
2864  case CheckpointerProcess:
2865  beentry->st_backendType = B_CHECKPOINTER;
2866  break;
2867  case WalWriterProcess:
2868  beentry->st_backendType = B_WAL_WRITER;
2869  break;
2870  case WalReceiverProcess:
2871  beentry->st_backendType = B_WAL_RECEIVER;
2872  break;
2873  default:
2874  elog(FATAL, "unrecognized process type: %d",
2875  (int) MyAuxProcType);
2876  proc_exit(1);
2877  }
2878  }
2879 
2880  do
2881  {
2883  } while ((beentry->st_changecount & 1) == 0);
2884 
2885  beentry->st_procpid = MyProcPid;
2886  beentry->st_proc_start_timestamp = proc_start_timestamp;
2887  beentry->st_activity_start_timestamp = 0;
2888  beentry->st_state_start_timestamp = 0;
2889  beentry->st_xact_start_timestamp = 0;
2890  beentry->st_databaseid = MyDatabaseId;
2891 
2892  /* We have userid for client-backends, wal-sender and bgworker processes */
2893  if (beentry->st_backendType == B_BACKEND
2894  || beentry->st_backendType == B_WAL_SENDER
2895  || beentry->st_backendType == B_BG_WORKER)
2896  beentry->st_userid = GetSessionUserId();
2897  else
2898  beentry->st_userid = InvalidOid;
2899 
2900  beentry->st_clientaddr = clientaddr;
2901  if (MyProcPort && MyProcPort->remote_hostname)
2902  strlcpy(beentry->st_clienthostname, MyProcPort->remote_hostname,
2903  NAMEDATALEN);
2904  else
2905  beentry->st_clienthostname[0] = '\0';
2906 #ifdef USE_SSL
2907  if (MyProcPort && MyProcPort->ssl != NULL)
2908  {
2909  beentry->st_ssl = true;
2915  }
2916  else
2917  {
2918  beentry->st_ssl = false;
2919  }
2920 #else
2921  beentry->st_ssl = false;
2922 #endif
2923  beentry->st_state = STATE_UNDEFINED;
2924  beentry->st_appname[0] = '\0';
2925  beentry->st_activity[0] = '\0';
2926  /* Also make sure the last byte in each string area is always 0 */
2927  beentry->st_clienthostname[NAMEDATALEN - 1] = '\0';
2928  beentry->st_appname[NAMEDATALEN - 1] = '\0';
2929  beentry->st_activity[pgstat_track_activity_query_size - 1] = '\0';
2932 
2933  /*
2934  * we don't zero st_progress_param here to save cycles; nobody should
2935  * examine it until st_progress_command has been set to something other
2936  * than PROGRESS_COMMAND_INVALID
2937  */
2938 
2940 
2941  /* Update app name to current GUC setting */
2942  if (application_name)
2943  pgstat_report_appname(application_name);
2944 }
2945 
2946 /*
2947  * Shut down a single backend's statistics reporting at process exit.
2948  *
2949  * Flush any remaining statistics counts out to the collector.
2950  * Without this, operations triggered during backend exit (such as
2951  * temp table deletions) won't be counted.
2952  *
2953  * Lastly, clear out our entry in the PgBackendStatus array.
2954  */
2955 static void
2956 pgstat_beshutdown_hook(int code, Datum arg)
2957 {
2958  volatile PgBackendStatus *beentry = MyBEEntry;
2959 
2960  /*
2961  * If we got as far as discovering our own database ID, we can report what
2962  * we did to the collector. Otherwise, we'd be sending an invalid
2963  * database ID, so forget it. (This means that accesses to pg_database
2964  * during failed backend starts might never get counted.)
2965  */
2966  if (OidIsValid(MyDatabaseId))
2967  pgstat_report_stat(true);
2968 
2969  /*
2970  * Clear my status entry, following the protocol of bumping st_changecount
2971  * before and after. We use a volatile pointer here to ensure the
2972  * compiler doesn't try to get cute.
2973  */
2975 
2976  beentry->st_procpid = 0; /* mark invalid */
2977 
2979 }
2980 
2981 
2982 /* ----------
2983  * pgstat_report_activity() -
2984  *
2985  * Called from tcop/postgres.c to report what the backend is actually doing
2986  * (but note cmd_str can be NULL for certain cases).
2987  *
2988  * All updates of the status entry follow the protocol of bumping
2989  * st_changecount before and after. We use a volatile pointer here to
2990  * ensure the compiler doesn't try to get cute.
2991  * ----------
2992  */
2993 void
2994 pgstat_report_activity(BackendState state, const char *cmd_str)
2995 {
2996  volatile PgBackendStatus *beentry = MyBEEntry;
2997  TimestampTz start_timestamp;
2998  TimestampTz current_timestamp;
2999  int len = 0;
3000 
3001  TRACE_POSTGRESQL_STATEMENT_STATUS(cmd_str);
3002 
3003  if (!beentry)
3004  return;
3005 
3006  if (!pgstat_track_activities)
3007  {
3008  if (beentry->st_state != STATE_DISABLED)
3009  {
3010  volatile PGPROC *proc = MyProc;
3011 
3012  /*
3013  * track_activities is disabled, but we last reported a
3014  * non-disabled state. As our final update, change the state and
3015  * clear fields we will not be updating anymore.
3016  */
3018  beentry->st_state = STATE_DISABLED;
3019  beentry->st_state_start_timestamp = 0;
3020  beentry->st_activity[0] = '\0';
3021  beentry->st_activity_start_timestamp = 0;
3022  /* st_xact_start_timestamp and wait_event_info are also disabled */
3023  beentry->st_xact_start_timestamp = 0;
3024  proc->wait_event_info = 0;
3026  }
3027  return;
3028  }
3029 
3030  /*
3031  * To minimize the time spent modifying the entry, fetch all the needed
3032  * data first.
3033  */
3034  start_timestamp = GetCurrentStatementStartTimestamp();
3035  if (cmd_str != NULL)
3036  {
3037  len = pg_mbcliplen(cmd_str, strlen(cmd_str),
3038  pgstat_track_activity_query_size - 1);
3039  }
3040  current_timestamp = GetCurrentTimestamp();
3041 
3042  /*
3043  * Now update the status entry
3044  */
3046 
3047  beentry->st_state = state;
3048  beentry->st_state_start_timestamp = current_timestamp;
3049 
3050  if (cmd_str != NULL)
3051  {
3052  memcpy((char *) beentry->st_activity, cmd_str, len);
3053  beentry->st_activity[len] = '\0';
3054  beentry->st_activity_start_timestamp = start_timestamp;
3055  }
3056 
3058 }
3059 
3060 /*-----------
3061  * pgstat_progress_start_command() -
3062  *
3063  * Set st_progress_command (and st_progress_command_target) in own backend
3064  * entry. Also, zero-initialize st_progress_param array.
3065  *-----------
3066  */
3067 void
3068 pgstat_progress_start_command(ProgressCommandType cmdtype, Oid relid)
3069 {
3070  volatile PgBackendStatus *beentry = MyBEEntry;
3071 
3072  if (!beentry || !pgstat_track_activities)
3073  return;
3074 
3076  beentry->st_progress_command = cmdtype;
3077  beentry->st_progress_command_target = relid;
3078  MemSet(&beentry->st_progress_param, 0, sizeof(beentry->st_progress_param));
3080 }
3081 
3082 /*-----------
3083  * pgstat_progress_update_param() -
3084  *
3085  * Update index'th member in st_progress_param[] of own backend entry.
3086  *-----------
3087  */
3088 void
3089 pgstat_progress_update_param(int index, int64 val)
3090 {
3091  volatile PgBackendStatus *beentry = MyBEEntry;
3092 
3093  Assert(index >= 0 && index < PGSTAT_NUM_PROGRESS_PARAM);
3094 
3095  if (!beentry || !pgstat_track_activities)
3096  return;
3097 
3099  beentry->st_progress_param[index] = val;
3101 }
3102 
3103 /*-----------
3104  * pgstat_progress_update_multi_param() -
3105  *
3106  * Update multiple members in st_progress_param[] of own backend entry.
3107  * This is atomic; readers won't see intermediate states.
3108  *-----------
3109  */
3110 void
3111 pgstat_progress_update_multi_param(int nparam, const int *index,
3112  const int64 *val)
3113 {
3114  volatile PgBackendStatus *beentry = MyBEEntry;
3115  int i;
3116 
3117  if (!beentry || !pgstat_track_activities || nparam == 0)
3118  return;
3119 
3121 
3122  for (i = 0; i < nparam; ++i)
3123  {
3124  Assert(index[i] >= 0 && index[i] < PGSTAT_NUM_PROGRESS_PARAM);
3125 
3126  beentry->st_progress_param[index[i]] = val[i];
3127  }
3128 
3130 }
3131 
3132 /*-----------
3133  * pgstat_progress_end_command() -
3134  *
3135  * Reset st_progress_command (and st_progress_command_target) in own backend
3136  * entry. This signals the end of the command.
3137  *-----------
3138  */
3139 void
3140 pgstat_progress_end_command(void)
3141 {
3142  volatile PgBackendStatus *beentry = MyBEEntry;
3143 
3144  if (!beentry)
3145  return;
3146  if (!pgstat_track_activities
3147  && beentry->st_progress_command == PROGRESS_COMMAND_INVALID)
3148  return;
3149 
3150  pgstat_increment_changecount_before(beentry);
3151  beentry->st_progress_command = PROGRESS_COMMAND_INVALID;
3152  beentry->st_progress_command_target = InvalidOid;
3153  pgstat_increment_changecount_after(beentry);
3154 }
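/*
 * Editor's note: illustrative sketch (not part of pgstat.c) of the intended
 * calling pattern for the progress API above, roughly as a command like
 * VACUUM would use it.  The parameter number and value are hypothetical (real
 * users define their parameter numbers elsewhere); the sketch assumes the
 * headers already included at the top of this file.
 */
static void
sketch_progress_usage(Oid relid)
{
	pgstat_progress_start_command(PROGRESS_COMMAND_VACUUM, relid);

	/* ... per-phase work ... */
	pgstat_progress_update_param(0, 42);	/* hypothetical parameter and value */

	pgstat_progress_end_command();
}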
3155 
3156 /* ----------
3157  * pgstat_report_appname() -
3158  *
3159  * Called to update our application name.
3160  * ----------
3161  */
3162 void
3163 pgstat_report_appname(const char *appname)
3164 {
3165  volatile PgBackendStatus *beentry = MyBEEntry;
3166  int len;
3167 
3168  if (!beentry)
3169  return;
3170 
3171  /* This should be unnecessary if GUC did its job, but be safe */
3172  len = pg_mbcliplen(appname, strlen(appname), NAMEDATALEN - 1);
3173 
3174  /*
3175  * Update my status entry, following the protocol of bumping
3176  * st_changecount before and after. We use a volatile pointer here to
3177  * ensure the compiler doesn't try to get cute.
3178  */
3180 
3181  memcpy((char *) beentry->st_appname, appname, len);
3182  beentry->st_appname[len] = '\0';
3183 
3185 }
3186 
3187 /*
3188  * Report current transaction start timestamp as the specified value.
3189  * Zero means there is no active transaction.
3190  */
3191 void
3192 pgstat_report_xact_timestamp(TimestampTz tstamp)
3193 {
3194  volatile PgBackendStatus *beentry = MyBEEntry;
3195 
3196  if (!pgstat_track_activities || !beentry)
3197  return;
3198 
3199  /*
3200  * Update my status entry, following the protocol of bumping
3201  * st_changecount before and after. We use a volatile pointer here to
3202  * ensure the compiler doesn't try to get cute.
3203  */
3205  beentry->st_xact_start_timestamp = tstamp;
3207 }
3208 
3209 /* ----------
3210  * pgstat_read_current_status() -
3211  *
3212  * Copy the current contents of the PgBackendStatus array to local memory,
3213  * if not already done in this transaction.
3214  * ----------
3215  */
3216 static void
3217 pgstat_read_current_status(void)
3218 {
3219  volatile PgBackendStatus *beentry;
3220  LocalPgBackendStatus *localtable;
3221  LocalPgBackendStatus *localentry;
3222  char *localappname,
3223  *localactivity;
3224 #ifdef USE_SSL
3225  PgBackendSSLStatus *localsslstatus;
3226 #endif
3227  int i;
3228 
3230  if (localBackendStatusTable)
3231  return; /* already done */
3232 
3233  pgstat_setup_memcxt();
3234 
3235  localtable = (LocalPgBackendStatus *)
3236  MemoryContextAlloc(pgStatLocalContext,
3237  sizeof(LocalPgBackendStatus) * NumBackendStatSlots);
3238  localappname = (char *)
3239  MemoryContextAlloc(pgStatLocalContext,
3240  NAMEDATALEN * NumBackendStatSlots);
3241  localactivity = (char *)
3242  MemoryContextAlloc(pgStatLocalContext,
3243  pgstat_track_activity_query_size * NumBackendStatSlots);
3244 #ifdef USE_SSL
3245  localsslstatus = (PgBackendSSLStatus *)
3246  MemoryContextAlloc(pgStatLocalContext,
3247  sizeof(PgBackendSSLStatus) * NumBackendStatSlots);
3248 #endif
3249 
3250  localNumBackends = 0;
3251 
3252  beentry = BackendStatusArray;
3253  localentry = localtable;
3254  for (i = 1; i <= NumBackendStatSlots; i++)
3255  {
3256  /*
3257  * Follow the protocol of retrying if st_changecount changes while we
3258  * copy the entry, or if it's odd. (The check for odd is needed to
3259  * cover the case where we are able to completely copy the entry while
3260  * the source backend is between increment steps.) We use a volatile
3261  * pointer here to ensure the compiler doesn't try to get cute.
3262  */
3263  for (;;)
3264  {
3265  int before_changecount;
3266  int after_changecount;
3267 
3268  pgstat_save_changecount_before(beentry, before_changecount);
3269 
3270  localentry->backendStatus.st_procpid = beentry->st_procpid;
3271  if (localentry->backendStatus.st_procpid > 0)
3272  {
3273  memcpy(&localentry->backendStatus, (char *) beentry, sizeof(PgBackendStatus));
3274 
3275  /*
3276  * strcpy is safe even if the string is modified concurrently,
3277  * because there's always a \0 at the end of the buffer.
3278  */
3279  strcpy(localappname, (char *) beentry->st_appname);
3280  localentry->backendStatus.st_appname = localappname;
3281  strcpy(localactivity, (char *) beentry->st_activity);
3282  localentry->backendStatus.st_activity = localactivity;
3283  localentry->backendStatus.st_ssl = beentry->st_ssl;
3284 #ifdef USE_SSL
3285  if (beentry->st_ssl)
3286  {
3287  memcpy(localsslstatus, beentry->st_sslstatus, sizeof(PgBackendSSLStatus));
3288  localentry->backendStatus.st_sslstatus = localsslstatus;
3289  }
3290 #endif
3291  }
3292 
3293  pgstat_save_changecount_after(beentry, after_changecount);
3294  if (before_changecount == after_changecount &&
3295  (before_changecount & 1) == 0)
3296  break;
3297 
3298  /* Make sure we can break out of loop if stuck... */
3299  CHECK_FOR_INTERRUPTS();
3300  }
3301 
3302  beentry++;
3303  /* Only valid entries get included into the local array */
3304  if (localentry->backendStatus.st_procpid > 0)
3305  {
3306  BackendIdGetTransactionIds(i,
3307  &localentry->backend_xid,
3308  &localentry->backend_xmin);
3309 
3310  localentry++;
3311  localappname += NAMEDATALEN;
3312  localactivity += pgstat_track_activity_query_size;
3313 #ifdef USE_SSL
3314  localsslstatus++;
3315 #endif
3316  localNumBackends++;
3317  }
3318  }
3319 
3320  /* Set the pointer only after completion of a valid table */
3321  localBackendStatusTable = localtable;
3322 }
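/*
 * Editor's note: self-contained sketch (not part of pgstat.c) of the
 * st_changecount protocol used above.  The writer makes the counter odd while
 * it is mutating the entry and even again afterwards; a reader retries until
 * it sees the same even value before and after taking its copy.  The sketch
 * ignores the memory barriers the real macros also provide.
 */
#include <stddef.h>
#include <string.h>

struct sketch_entry
{
	volatile int changecount;
	char		activity[64];
};

static void
sketch_write(struct sketch_entry *e, const char *s)
{
	e->changecount++;			/* now odd: update in progress */
	strncpy(e->activity, s, sizeof(e->activity) - 1);
	e->activity[sizeof(e->activity) - 1] = '\0';
	e->changecount++;			/* even again: entry is consistent */
}

static void
sketch_read(struct sketch_entry *e, char *out, size_t outlen)
{
	int			before, after;

	do
	{
		before = e->changecount;
		strncpy(out, e->activity, outlen - 1);
		out[outlen - 1] = '\0';
		after = e->changecount;
	} while (before != after || (before & 1) != 0);	/* retry if changed or odd */
}

int
main(void)
{
	struct sketch_entry e = {0, ""};
	char		buf[64];

	sketch_write(&e, "SELECT 1");
	sketch_read(&e, buf, sizeof(buf));
	return buf[0] == 'S' ? 0 : 1;
}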
3323 
3324 /* ----------
3325  * pgstat_get_wait_event_type() -
3326  *
3327  * Return a string representing the current wait event type, backend is
3328  * waiting on.
3329  */
3330 const char *
3331 pgstat_get_wait_event_type(uint32 wait_event_info)
3332 {
3333  uint32 classId;
3334  const char *event_type;
3335 
3336  /* report process as not waiting. */
3337  if (wait_event_info == 0)
3338  return NULL;
3339 
3340  classId = wait_event_info & 0xFF000000;
3341 
3342  switch (classId)
3343  {
3344  case PG_WAIT_LWLOCK:
3345  event_type = "LWLock";
3346  break;
3347  case PG_WAIT_LOCK:
3348  event_type = "Lock";
3349  break;
3350  case PG_WAIT_BUFFER_PIN:
3351  event_type = "BufferPin";
3352  break;
3353  case PG_WAIT_ACTIVITY:
3354  event_type = "Activity";
3355  break;
3356  case PG_WAIT_CLIENT:
3357  event_type = "Client";
3358  break;
3359  case PG_WAIT_EXTENSION:
3360  event_type = "Extension";
3361  break;
3362  case PG_WAIT_IPC:
3363  event_type = "IPC";
3364  break;
3365  case PG_WAIT_TIMEOUT:
3366  event_type = "Timeout";
3367  break;
3368  case PG_WAIT_IO:
3369  event_type = "IO";
3370  break;
3371  default:
3372  event_type = "???";
3373  break;
3374  }
3375 
3376  return event_type;
3377 }
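/*
 * Editor's note: small standalone sketch (not part of pgstat.c) of how
 * wait_event_info is unpacked, matching the masks used above and in
 * pgstat_get_wait_event(): the wait class lives in the top byte and the event
 * id in the low 16 bits.  The encoded value below is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t	wait_event_info = 0x09000005;	/* hypothetical encoded value */
	uint32_t	class_id = wait_event_info & 0xFF000000;
	uint16_t	event_id = wait_event_info & 0x0000FFFF;

	printf("class=0x%08X event=%u\n", class_id, (unsigned) event_id);
	return 0;
}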
3378 
3379 /* ----------
3380  * pgstat_get_wait_event() -
3381  *
3382  * Return a string representing the current wait event, backend is
3383  * waiting on.
3384  */
3385 const char *
3386 pgstat_get_wait_event(uint32 wait_event_info)
3387 {
3388  uint32 classId;
3389  uint16 eventId;
3390  const char *event_name;
3391 
3392  /* report process as not waiting. */
3393  if (wait_event_info == 0)
3394  return NULL;
3395 
3396  classId = wait_event_info & 0xFF000000;
3397  eventId = wait_event_info & 0x0000FFFF;
3398 
3399  switch (classId)
3400  {
3401  case PG_WAIT_LWLOCK:
3402  event_name = GetLWLockIdentifier(classId, eventId);
3403  break;
3404  case PG_WAIT_LOCK:
3405  event_name = GetLockNameFromTagType(eventId);
3406  break;
3407  case PG_WAIT_BUFFER_PIN:
3408  event_name = "BufferPin";
3409  break;
3410  case PG_WAIT_ACTIVITY:
3411  {
3412  WaitEventActivity w = (WaitEventActivity) wait_event_info;
3413 
3414  event_name = pgstat_get_wait_activity(w);
3415  break;
3416  }
3417  case PG_WAIT_CLIENT:
3418  {
3419  WaitEventClient w = (WaitEventClient) wait_event_info;
3420 
3421  event_name = pgstat_get_wait_client(w);
3422  break;
3423  }
3424  case PG_WAIT_EXTENSION:
3425  event_name = "Extension";
3426  break;
3427  case PG_WAIT_IPC:
3428  {
3429  WaitEventIPC w = (WaitEventIPC) wait_event_info;
3430 
3431  event_name = pgstat_get_wait_ipc(w);
3432  break;
3433  }
3434  case PG_WAIT_TIMEOUT:
3435  {
3436  WaitEventTimeout w = (WaitEventTimeout) wait_event_info;
3437 
3438  event_name = pgstat_get_wait_timeout(w);
3439  break;
3440  }
3441  case PG_WAIT_IO:
3442  {
3443  WaitEventIO w = (WaitEventIO) wait_event_info;
3444 
3445  event_name = pgstat_get_wait_io(w);
3446  break;
3447  }
3448  default:
3449  event_name = "unknown wait event";
3450  break;
3451  }
3452 
3453  return event_name;
3454 }
3455 
3456 /* ----------
3457  * pgstat_get_wait_activity() -
3458  *
3459  * Convert WaitEventActivity to string.
3460  * ----------
3461  */
3462 static const char *
3463 pgstat_get_wait_activity(WaitEventActivity w)
3464 {
3465  const char *event_name = "unknown wait event";
3466 
3467  switch (w)
3468  {
3470  event_name = "ArchiverMain";
3471  break;
3473  event_name = "AutoVacuumMain";
3474  break;
3476  event_name = "BgWriterHibernate";
3477  break;
3479  event_name = "BgWriterMain";
3480  break;
3482  event_name = "CheckpointerMain";
3483  break;
3485  event_name = "PgStatMain";
3486  break;
3488  event_name = "RecoveryWalAll";
3489  break;
3491  event_name = "RecoveryWalStream";
3492  break;
3494  event_name = "SysLoggerMain";
3495  break;
3497  event_name = "WalReceiverMain";
3498  break;
3500  event_name = "WalSenderMain";
3501  break;
3503  event_name = "WalWriterMain";
3504  break;
3506  event_name = "LogicalLauncherMain";
3507  break;
3509  event_name = "LogicalApplyMain";
3510  break;
3511  /* no default case, so that compiler will warn */
3512  }
3513 
3514  return event_name;
3515 }
3516 
3517 /* ----------
3518  * pgstat_get_wait_client() -
3519  *
3520  * Convert WaitEventClient to string.
3521  * ----------
3522  */
3523 static const char *
3524 pgstat_get_wait_client(WaitEventClient w)
3525 {
3526  const char *event_name = "unknown wait event";
3527 
3528  switch (w)
3529  {
3531  event_name = "ClientRead";
3532  break;
3534  event_name = "ClientWrite";
3535  break;
3537  event_name = "SSLOpenServer";
3538  break;
3540  event_name = "WalReceiverWaitStart";
3541  break;
3543  event_name = "LibPQWalReceiver";
3544  break;
3546  event_name = "WalSenderWaitForWAL";
3547  break;
3549  event_name = "WalSenderWriteData";
3550  break;
3551  /* no default case, so that compiler will warn */
3552  }
3553 
3554  return event_name;
3555 }
3556 
3557 /* ----------
3558  * pgstat_get_wait_ipc() -
3559  *
3560  * Convert WaitEventIPC to string.
3561  * ----------
3562  */
3563 static const char *
3564 pgstat_get_wait_ipc(WaitEventIPC w)
3565 {
3566  const char *event_name = "unknown wait event";
3567 
3568  switch (w)
3569  {
3571  event_name = "BgWorkerShutdown";
3572  break;
3574  event_name = "BgWorkerStartup";
3575  break;
3576  case WAIT_EVENT_BTREE_PAGE:
3577  event_name = "BtreePage";
3578  break;
3580  event_name = "ExecuteGather";
3581  break;
3583  event_name = "MessageQueueInternal";
3584  break;
3586  event_name = "MessageQueuePutMessage";
3587  break;
3588  case WAIT_EVENT_MQ_RECEIVE:
3589  event_name = "MessageQueueReceive";
3590  break;
3591  case WAIT_EVENT_MQ_SEND:
3592  event_name = "MessageQueueSend";
3593  break;
3595  event_name = "ParallelFinish";
3596  break;
3598  event_name = "ParallelBitmapScan";
3599  break;
3601  event_name = "ProcArrayGroupUpdate";
3602  break;
3604  event_name = "SafeSnapshot";
3605  break;
3606  case WAIT_EVENT_SYNC_REP:
3607  event_name = "SyncRep";
3608  break;
3610  event_name = "LogicalSyncData";
3611  break;
3613  event_name = "LogicalSyncStateChange";
3614  break;
3615  /* no default case, so that compiler will warn */
3616  }
3617 
3618  return event_name;
3619 }
3620 
3621 /* ----------
3622  * pgstat_get_wait_timeout() -
3623  *
3624  * Convert WaitEventTimeout to string.
3625  * ----------
3626  */
3627 static const char *
3628 pgstat_get_wait_timeout(WaitEventTimeout w)
3629 {
3630  const char *event_name = "unknown wait event";
3631 
3632  switch (w)
3633  {
3635  event_name = "BaseBackupThrottle";
3636  break;
3637  case WAIT_EVENT_PG_SLEEP:
3638  event_name = "PgSleep";
3639  break;
3641  event_name = "RecoveryApplyDelay";
3642  break;
3643  /* no default case, so that compiler will warn */
3644  }
3645 
3646  return event_name;
3647 }
3648 
3649 /* ----------
3650  * pgstat_get_wait_io() -
3651  *
3652  * Convert WaitEventIO to string.
3653  * ----------
3654  */
3655 static const char *
3656 pgstat_get_wait_io(WaitEventIO w)
3657 {
3658  const char *event_name = "unknown wait event";
3659 
3660  switch (w)
3661  {
3663  event_name = "BufFileRead";
3664  break;
3666  event_name = "BufFileWrite";
3667  break;
3669  event_name = "ControlFileRead";
3670  break;
3672  event_name = "ControlFileSync";
3673  break;
3675  event_name = "ControlFileSyncUpdate";
3676  break;
3678  event_name = "ControlFileWrite";
3679  break;
3681  event_name = "ControlFileWriteUpdate";
3682  break;
3684  event_name = "CopyFileRead";
3685  break;
3687  event_name = "CopyFileWrite";
3688  break;
3690  event_name = "DataFileExtend";
3691  break;
3693  event_name = "DataFileFlush";
3694  break;
3696  event_name = "DataFileImmediateSync";
3697  break;
3699  event_name = "DataFilePrefetch";
3700  break;
3702  event_name = "DataFileRead";
3703  break;
3705  event_name = "DataFileSync";
3706  break;
3708  event_name = "DataFileTruncate";
3709  break;
3711  event_name = "DataFileWrite";
3712  break;
3714  event_name = "DSMFillZeroWrite";
3715  break;
3717  event_name = "LockFileAddToDataDirRead";
3718  break;
3720  event_name = "LockFileAddToDataDirSync";
3721  break;
3723  event_name = "LockFileAddToDataDirWrite";
3724  break;
3726  event_name = "LockFileCreateRead";
3727  break;
3729  event_name = "LockFileCreateSync";
3730  break;
3732  event_name = "LockFileCreateWRITE";
3733  break;
3735  event_name = "LockFileReCheckDataDirRead";
3736  break;
3738  event_name = "LogicalRewriteCheckpointSync";
3739  break;
3741  event_name = "LogicalRewriteMappingSync";
3742  break;
3744  event_name = "LogicalRewriteMappingWrite";
3745  break;
3747  event_name = "LogicalRewriteSync";
3748  break;
3750  event_name = "LogicalRewriteTruncate";
3751  break;
3753  event_name = "LogicalRewriteWrite";
3754  break;
3756  event_name = "RelationMapRead";
3757  break;
3759  event_name = "RelationMapSync";
3760  break;
3762  event_name = "RelationMapWrite";
3763  break;
3765  event_name = "ReorderBufferRead";
3766  break;
3768  event_name = "ReorderBufferWrite";
3769  break;
3771  event_name = "ReorderLogicalMappingRead";
3772  break;
3774  event_name = "ReplicationSlotRead";
3775  break;
3777  event_name = "ReplicationSlotRestoreSync";
3778  break;
3780  event_name = "ReplicationSlotSync";
3781  break;
3783  event_name = "ReplicationSlotWrite";
3784  break;
3786  event_name = "SLRUFlushSync";
3787  break;
3788  case WAIT_EVENT_SLRU_READ:
3789  event_name = "SLRURead";
3790  break;
3791  case WAIT_EVENT_SLRU_SYNC:
3792  event_name = "SLRUSync";
3793  break;
3794  case WAIT_EVENT_SLRU_WRITE:
3795  event_name = "SLRUWrite";
3796  break;
3798  event_name = "SnapbuildRead";
3799  break;
3801  event_name = "SnapbuildSync";
3802  break;
3804  event_name = "SnapbuildWrite";
3805  break;
3807  event_name = "TimelineHistoryFileSync";
3808  break;
3810  event_name = "TimelineHistoryFileWrite";
3811  break;
3813  event_name = "TimelineHistoryRead";
3814  break;
3816  event_name = "TimelineHistorySync";
3817  break;
3819  event_name = "TimelineHistoryWrite";
3820  break;
3822  event_name = "TwophaseFileRead";
3823  break;
3825  event_name = "TwophaseFileSync";
3826  break;
3828  event_name = "TwophaseFileWrite";
3829  break;
3831  event_name = "WALSenderTimelineHistoryRead";
3832  break;
3834  event_name = "WALBootstrapSync";
3835  break;
3837  event_name = "WALBootstrapWrite";
3838  break;
3840  event_name = "WALCopyRead";
3841  break;
3843  event_name = "WALCopySync";
3844  break;
3846  event_name = "WALCopyWrite";
3847  break;
3849  event_name = "WALInitSync";
3850  break;
3852  event_name = "WALInitWrite";
3853  break;
3854  case WAIT_EVENT_WAL_READ:
3855  event_name = "WALRead";
3856  break;
3858  event_name = "WALSyncMethodAssign";
3859  break;
3860  case WAIT_EVENT_WAL_WRITE:
3861  event_name = "WALWrite";
3862  break;
3863 
3864  /* no default case, so that compiler will warn */
3865  }
3866 
3867  return event_name;
3868 }
3869 
3870 
3871 /* ----------
3872  * pgstat_get_backend_current_activity() -
3873  *
3874  * Return a string representing the current activity of the backend with
3875  * the specified PID. This looks directly at the BackendStatusArray,
3876  * and so will provide current information regardless of the age of our
3877  * transaction's snapshot of the status array.
3878  *
3879  * It is the caller's responsibility to invoke this only for backends whose
3880  * state is expected to remain stable while the result is in use. The
3881  * only current use is in deadlock reporting, where we can expect that
3882  * the target backend is blocked on a lock. (There are corner cases
3883  * where the target's wait could get aborted while we are looking at it,
3884  * but the very worst consequence is to return a pointer to a string
3885  * that's been changed, so we won't worry too much.)
3886  *
3887  * Note: return strings for special cases match pg_stat_get_backend_activity.
3888  * ----------
3889  */
3890 const char *
3891 pgstat_get_backend_current_activity(int pid, bool checkUser)
3892 {
3893  PgBackendStatus *beentry;
3894  int i;
3895 
3896  beentry = BackendStatusArray;
3897  for (i = 1; i <= MaxBackends; i++)
3898  {
3899  /*
3900  * Although we expect the target backend's entry to be stable, that
3901  * doesn't imply that anyone else's is. To avoid identifying the
3902  * wrong backend, while we check for a match to the desired PID we
3903  * must follow the protocol of retrying if st_changecount changes
3904  * while we examine the entry, or if it's odd. (This might be
3905  * unnecessary, since fetching or storing an int is almost certainly
3906  * atomic, but let's play it safe.) We use a volatile pointer here to
3907  * ensure the compiler doesn't try to get cute.
3908  */
3909  volatile PgBackendStatus *vbeentry = beentry;
3910  bool found;
3911 
3912  for (;;)
3913  {
3914  int before_changecount;
3915  int after_changecount;
3916 
3917  pgstat_save_changecount_before(vbeentry, before_changecount);
3918 
3919  found = (vbeentry->st_procpid == pid);
3920 
3921  pgstat_save_changecount_after(vbeentry, after_changecount);
3922 
3923  if (before_changecount == after_changecount &&
3924  (before_changecount & 1) == 0)
3925  break;
3926 
3927  /* Make sure we can break out of loop if stuck... */
3928  CHECK_FOR_INTERRUPTS();
3929  }
3930 
3931  if (found)
3932  {
3933  /* Now it is safe to use the non-volatile pointer */
3934  if (checkUser && !superuser() && beentry->st_userid != GetUserId())
3935  return "<insufficient privilege>";
3936  else if (*(beentry->st_activity) == '\0')
3937  return "<command string not enabled>";
3938  else
3939  return beentry->st_activity;
3940  }
3941 
3942  beentry++;
3943  }
3944 
3945  /* If we get here, caller is in error ... */
3946  return "<backend information not available>";
3947 }
3948 
3949 /* ----------
3950  * pgstat_get_crashed_backend_activity() -
3951  *
3952  * Return a string representing the current activity of the backend with
3953  * the specified PID. Like the function above, but reads shared memory with
3954  * the expectation that it may be corrupt. On success, copy the string
3955  * into the "buffer" argument and return that pointer. On failure,
3956  * return NULL.
3957  *
3958  * This function is only intended to be used by the postmaster to report the
3959  * query that crashed a backend. In particular, no attempt is made to
3960  * follow the correct concurrency protocol when accessing the
3961  * BackendStatusArray. But that's OK, in the worst case we'll return a
3962  * corrupted message. We also must take care not to trip on ereport(ERROR).
3963  * ----------
3964  */
3965 const char *
3966 pgstat_get_crashed_backend_activity(int pid, char *buffer, int buflen)
3967 {
3968  volatile PgBackendStatus *beentry;
3969  int i;
3970 
3971  beentry = BackendStatusArray;
3972 
3973  /*
3974  * We probably shouldn't get here before shared memory has been set up,
3975  * but be safe.
3976  */
3977  if (beentry == NULL || BackendActivityBuffer == NULL)
3978  return NULL;
3979 
3980  for (i = 1; i <= MaxBackends; i++)
3981  {
3982  if (beentry->st_procpid == pid)
3983  {
3984  /* Read pointer just once, so it can't change after validation */
3985  const char *activity = beentry->st_activity;
3986  const char *activity_last;
3987 
3988  /*
3989  * We mustn't access activity string before we verify that it
3990  * falls within the BackendActivityBuffer. To make sure that the
3991  * entire string including its ending is contained within the
3992  * buffer, subtract one activity length from the buffer size.
3993  */
3994  activity_last = BackendActivityBuffer + BackendActivityBufferSize
3995  - pgstat_track_activity_query_size;
3996 
3997  if (activity < BackendActivityBuffer ||
3998  activity > activity_last)
3999  return NULL;
4000 
4001  /* If no string available, no point in a report */
4002  if (activity[0] == '\0')
4003  return NULL;
4004 
4005  /*
4006  * Copy only ASCII-safe characters so we don't run into encoding
4007  * problems when reporting the message; and be sure not to run off
4008  * the end of memory.
4009  */
4010  ascii_safe_strlcpy(buffer, activity,
4011  Min(buflen, pgstat_track_activity_query_size));
4012 
4013  return buffer;
4014  }
4015 
4016  beentry++;
4017  }
4018 
4019  /* PID not found */
4020  return NULL;
4021 }
4022 
4023 const char *
4024 pgstat_get_backend_desc(BackendType backendType)
4025 {
4026  const char *backendDesc = "unknown process type";
4027 
4028  switch (backendType)
4029  {
4030  case B_AUTOVAC_LAUNCHER:
4031  backendDesc = "autovacuum launcher";
4032  break;
4033  case B_AUTOVAC_WORKER:
4034  backendDesc = "autovacuum worker";
4035  break;
4036  case B_BACKEND:
4037  backendDesc = "client backend";
4038  break;
4039  case B_BG_WORKER:
4040  backendDesc = "background worker";
4041  break;
4042  case B_BG_WRITER:
4043  backendDesc = "background writer";
4044  break;
4045  case B_CHECKPOINTER:
4046  backendDesc = "checkpointer";
4047  break;
4048  case B_STARTUP:
4049  backendDesc = "startup";
4050  break;
4051  case B_WAL_RECEIVER:
4052  backendDesc = "walreceiver";
4053  break;
4054  case B_WAL_SENDER:
4055  backendDesc = "walsender";
4056  break;
4057  case B_WAL_WRITER:
4058  backendDesc = "walwriter";
4059  break;
4060  }
4061 
4062  return backendDesc;
4063 }
4064 
4065 /* ------------------------------------------------------------
4066  * Local support functions follow
4067  * ------------------------------------------------------------
4068  */
4069 
4070 
4071 /* ----------
4072  * pgstat_setheader() -
4073  *
4074  * Set common header fields in a statistics message
4075  * ----------
4076  */
4077 static void
4078 pgstat_setheader(PgStat_MsgHdr *hdr, StatMsgType mtype)
4079 {
4080  hdr->m_type = mtype;
4081 }
4082 
4083 
4084 /* ----------
4085  * pgstat_send() -
4086  *
4087  * Send out one statistics message to the collector
4088  * ----------
4089  */
4090 static void
4091 pgstat_send(void *msg, int len)
4092 {
4093  int rc;
4094 
4095  if (pgStatSock == PGINVALID_SOCKET)
4096  return;
4097 
4098  ((PgStat_MsgHdr *) msg)->m_size = len;
4099 
4100  /* We'll retry after EINTR, but ignore all other failures */
4101  do
4102  {
4103  rc = send(pgStatSock, msg, len, 0);
4104  } while (rc < 0 && errno == EINTR);
4105 
4106 #ifdef USE_ASSERT_CHECKING
4107  /* In debug builds, log send failures ... */
4108  if (rc < 0)
4109  elog(LOG, "could not send to statistics collector: %m");
4110 #endif
4111 }
4112 
4113 /* ----------
4114  * pgstat_send_archiver() -
4115  *
4116  * Tell the collector about the WAL file that we successfully
4117  * archived or failed to archive.
4118  * ----------
4119  */
4120 void
4121 pgstat_send_archiver(const char *xlog, bool failed)
4122 {
4123  PgStat_MsgArchiver msg;
4124 
4125  /*
4126  * Prepare and send the message
4127  */
4128  pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_ARCHIVER);
4129  msg.m_failed = failed;
4130  StrNCpy(msg.m_xlog, xlog, sizeof(msg.m_xlog));
4131  msg.m_timestamp = GetCurrentTimestamp();
4132  pgstat_send(&msg, sizeof(msg));
4133 }
4134 
4135 /* ----------
4136  * pgstat_send_bgwriter() -
4137  *
4138  * Send bgwriter statistics to the collector
4139  * ----------
4140  */
4141 void
4142 pgstat_send_bgwriter(void)
4143 {
4144  /* We assume this initializes to zeroes */
4145  static const PgStat_MsgBgWriter all_zeroes;
4146 
4147  /*
4148  * This function can be called even if nothing at all has happened. In
4149  * this case, avoid sending a completely empty message to the stats
4150  * collector.
4151  */
4152  if (memcmp(&BgWriterStats, &all_zeroes, sizeof(PgStat_MsgBgWriter)) == 0)
4153  return;
4154 
4155  /*
4156  * Prepare and send the message
4157  */
4158  pgstat_setheader(&BgWriterStats.m_hdr, PGSTAT_MTYPE_BGWRITER);
4159  pgstat_send(&BgWriterStats, sizeof(BgWriterStats));
4160 
4161  /*
4162  * Clear out the statistics buffer, so it can be re-used.
4163  */
4164  MemSet(&BgWriterStats, 0, sizeof(BgWriterStats));
4165 }
4166 
4167 
4168 /* ----------
4169  * PgstatCollectorMain() -
4170  *
4171  * Start up the statistics collector process. This is the body of the
4172  * postmaster child process.
4173  *
4174  * The argc/argv parameters are valid only in EXEC_BACKEND case.
4175  * ----------
4176  */
4177 NON_EXEC_STATIC void
4178 PgstatCollectorMain(int argc, char *argv[])
4179 {
4180  int len;
4181  PgStat_Msg msg;
4182  int wr;
4183 
4184  /*
4185  * Ignore all signals usually bound to some action in the postmaster,
4186  * except SIGHUP and SIGQUIT. Note we don't need a SIGUSR1 handler to
4187  * support latch operations, because we only use a local latch.
4188  */
4190  pqsignal(SIGINT, SIG_IGN);
4191  pqsignal(SIGTERM, SIG_IGN);
4203 
4204  /*
4205  * Identify myself via ps
4206  */
4207  init_ps_display("stats collector process", "", "", "");
4208 
4209  /*
4210  * Read in existing stats files or initialize the stats to zero.
4211  */
4212  pgStatRunningInCollector = true;
4213  pgStatDBHash = pgstat_read_statsfiles(InvalidOid, true, true);
4214 
4215  /*
4216  * Loop to process messages until we get SIGQUIT or detect ungraceful
4217  * death of our parent postmaster.
4218  *
4219  * For performance reasons, we don't want to do ResetLatch/WaitLatch after
4220  * every message; instead, do that only after a recv() fails to obtain a
4221  * message. (This effectively means that if backends are sending us stuff
4222  * like mad, we won't notice postmaster death until things slack off a
4223  * bit; which seems fine.) To do that, we have an inner loop that
4224  * iterates as long as recv() succeeds. We do recognize got_SIGHUP inside
4225  * the inner loop, which means that such interrupts will get serviced but
4226  * the latch won't get cleared until next time there is a break in the
4227  * action.
4228  */
4229  for (;;)
4230  {
4231  /* Clear any already-pending wakeups */
4233 
4234  /*
4235  * Quit if we get SIGQUIT from the postmaster.
4236  */
4237  if (need_exit)
4238  break;
4239 
4240  /*
4241  * Inner loop iterates as long as we keep getting messages, or until
4242  * need_exit becomes set.
4243  */
4244  while (!need_exit)
4245  {
4246  /*
4247  * Reload configuration if we got SIGHUP from the postmaster.
4248  */
4249  if (got_SIGHUP)
4250  {
4251  got_SIGHUP = false;
4253  }
4254 
4255  /*
4256  * Write the stats file(s) if a new request has arrived that is
4257  * not satisfied by existing file(s).
4258  */
4260  pgstat_write_statsfiles(false, false);
4261 
4262  /*
4263  * Try to receive and process a message. This will not block,
4264  * since the socket is set to non-blocking mode.
4265  *
4266  * XXX On Windows, we have to force pgwin32_recv to cooperate,
4267  * despite the previous use of pg_set_noblock() on the socket.
4268  * This is extremely broken and should be fixed someday.
4269  */
4270 #ifdef WIN32
4271  pgwin32_noblock = 1;
4272 #endif
4273 
4274  len = recv(pgStatSock, (char *) &msg,
4275  sizeof(PgStat_Msg), 0);
4276 
4277 #ifdef WIN32
4278  pgwin32_noblock = 0;
4279 #endif
4280 
4281  if (len < 0)
4282  {
4283  if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)
4284  break; /* out of inner loop */
4285  ereport(ERROR,
4287  errmsg("could not read statistics message: %m")));
4288  }
4289 
4290  /*
4291  * We ignore messages that are smaller than our common header
4292  */
4293  if (len < sizeof(PgStat_MsgHdr))
4294  continue;
4295 
4296  /*
4297  * The received length must match the length in the header
4298  */
4299  if (msg.msg_hdr.m_size != len)
4300  continue;
4301 
4302  /*
4303  * O.K. - we accept this message. Process it.
4304  */
4305  switch (msg.msg_hdr.m_type)
4306  {
4307  case PGSTAT_MTYPE_DUMMY:
4308  break;
4309 
4310  case PGSTAT_MTYPE_INQUIRY:
4311  pgstat_recv_inquiry((PgStat_MsgInquiry *) &msg, len);
4312  break;
4313 
4314  case PGSTAT_MTYPE_TABSTAT:
4315  pgstat_recv_tabstat((PgStat_MsgTabstat *) &msg, len);
4316  break;
4317 
4318  case PGSTAT_MTYPE_TABPURGE:
4320  break;
4321 
4322  case PGSTAT_MTYPE_DROPDB:
4323  pgstat_recv_dropdb((PgStat_MsgDropdb *) &msg, len);
4324  break;
4325 
4328  len);
4329  break;
4330 
4334  len);
4335  break;
4336 
4340  len);
4341  break;
4342 
4345  break;
4346 
4347  case PGSTAT_MTYPE_VACUUM:
4348  pgstat_recv_vacuum((PgStat_MsgVacuum *) &msg, len);
4349  break;
4350 
4351  case PGSTAT_MTYPE_ANALYZE:
4352  pgstat_recv_analyze((PgStat_MsgAnalyze *) &msg, len);
4353  break;
4354 
4355  case PGSTAT_MTYPE_ARCHIVER:
4356  pgstat_recv_archiver((PgStat_MsgArchiver *) &msg, len);
4357  break;
4358 
4359  case PGSTAT_MTYPE_BGWRITER:
4360  pgstat_recv_bgwriter((PgStat_MsgBgWriter *) &msg, len);
4361  break;
4362 
4363  case PGSTAT_MTYPE_FUNCSTAT:
4364  pgstat_recv_funcstat((PgStat_MsgFuncstat *) &msg, len);
4365  break;
4366 
4367  case PGSTAT_MTYPE_FUNCPURGE:
4368  pgstat_recv_funcpurge((PgStat_MsgFuncpurge *) &msg, len);
4369  break;
4370 
4371  case PGSTAT_MTYPE_RECOVERYCONFLICT:
4372  pgstat_recv_recoveryconflict((PgStat_MsgRecoveryConflict *) &msg, len);
4373  break;
4374 
4375  case PGSTAT_MTYPE_DEADLOCK:
4376  pgstat_recv_deadlock((PgStat_MsgDeadlock *) &msg, len);
4377  break;
4378 
4379  case PGSTAT_MTYPE_TEMPFILE:
4380  pgstat_recv_tempfile((PgStat_MsgTempFile *) &msg, len);
4381  break;
4382 
4383  default:
4384  break;
4385  }
4386  } /* end of inner message-processing loop */
4387 
4388  /* Sleep until there's something to do */
4389 #ifndef WIN32
4390  wr = WaitLatchOrSocket(MyLatch,
4391  WL_LATCH_SET | WL_POSTMASTER_DEATH | WL_SOCKET_READABLE,
4392  pgStatSock, -1L,
4393  WAIT_EVENT_PGSTAT_MAIN);
4394 #else
4395 
4396  /*
4397  * Windows, at least in its Windows Server 2003 R2 incarnation,
4398  * sometimes loses FD_READ events. Waking up and retrying the recv()
4399  * fixes that, so don't sleep indefinitely. This is a crock of the
4400  * first water, but until somebody wants to debug exactly what's
4401  * happening there, this is the best we can do. The two-second
4402  * timeout matches our pre-9.2 behavior, and needs to be short enough
4403  * to not provoke "using stale statistics" complaints from
4404  * backend_read_statsfile.
4405  */
4406  wr = WaitLatchOrSocket(MyLatch,
4407  WL_LATCH_SET | WL_POSTMASTER_DEATH | WL_SOCKET_READABLE | WL_TIMEOUT,
4408  pgStatSock,
4409  2 * 1000L /* msec */ ,
4410  WAIT_EVENT_PGSTAT_MAIN);
4411 #endif
4412 
4413  /*
4414  * Emergency bailout if postmaster has died. This is to avoid the
4415  * necessity for manual cleanup of all postmaster children.
4416  */
4417  if (wr & WL_POSTMASTER_DEATH)
4418  break;
4419  } /* end of outer loop */
4420 
4421  /*
4422  * Save the final stats to reuse at next startup.
4423  */
4424  pgstat_write_statsfiles(true, true);
4425 
4426  exit(0);
4427 }
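The loop above drains the statistics socket until recv() reports EAGAIN/EWOULDBLOCK and only then goes back to sleep waiting for the socket or the latch. A minimal standalone sketch of that drain-then-wait shape, using plain poll() in place of WaitLatchOrSocket and assuming `sock` is already a non-blocking datagram socket (every name below is a placeholder, not part of pgstat.c):

#include <errno.h>
#include <poll.h>
#include <stdio.h>
#include <sys/socket.h>
#include <sys/types.h>

/* Drain a non-blocking datagram socket, then sleep until it is readable. */
static void
drain_and_wait(int sock)
{
	char		buf[1024];

	for (;;)
	{
		/* inner loop: consume every message that is already queued */
		for (;;)
		{
			ssize_t		len = recv(sock, buf, sizeof(buf), 0);

			if (len < 0)
			{
				if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)
					break;		/* nothing more right now */
				perror("recv");
				return;
			}
			printf("received %zd-byte message\n", len);
		}

		/* outer loop: block until more data arrives on the socket */
		struct pollfd pfd = {.fd = sock, .events = POLLIN};

		if (poll(&pfd, 1, -1) < 0 && errno != EINTR)
		{
			perror("poll");
			return;
		}
	}
}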
4428 
4429 
4430 /* SIGQUIT signal handler for collector process */
4431 static void
4432  pgstat_exit(SIGNAL_ARGS)
4433  {
4434  int save_errno = errno;
4435 
4436  need_exit = true;
4437  SetLatch(MyLatch);
4438 
4439  errno = save_errno;
4440 }
4441 
4442 /* SIGHUP handler for collector process */
4443 static void
4444  pgstat_sighup_handler(SIGNAL_ARGS)
4445  {
4446  int save_errno = errno;
4447 
4448  got_SIGHUP = true;
4449  SetLatch(MyLatch);
4450 
4451  errno = save_errno;
4452 }
4453 
4454 /*
4455  * Subroutine to clear stats in a database entry
4456  *
4457  * Tables and functions hashes are initialized to empty.
4458  */
4459 static void
4460  reset_dbentry_counters(PgStat_StatDBEntry *dbentry)
4461  {
4462  HASHCTL hash_ctl;
4463 
4464  dbentry->n_xact_commit = 0;
4465  dbentry->n_xact_rollback = 0;
4466  dbentry->n_blocks_fetched = 0;
4467  dbentry->n_blocks_hit = 0;
4468  dbentry->n_tuples_returned = 0;
4469  dbentry->n_tuples_fetched = 0;
4470  dbentry->n_tuples_inserted = 0;
4471  dbentry->n_tuples_updated = 0;
4472  dbentry->n_tuples_deleted = 0;
4473  dbentry->last_autovac_time = 0;
4474  dbentry->n_conflict_tablespace = 0;
4475  dbentry->n_conflict_lock = 0;
4476  dbentry->n_conflict_snapshot = 0;
4477  dbentry->n_conflict_bufferpin = 0;
4478  dbentry->n_conflict_startup_deadlock = 0;
4479  dbentry->n_temp_files = 0;
4480  dbentry->n_temp_bytes = 0;
4481  dbentry->n_deadlocks = 0;
4482  dbentry->n_block_read_time = 0;
4483  dbentry->n_block_write_time = 0;
4484 
4485  dbentry->stat_reset_timestamp = GetCurrentTimestamp();
4486  dbentry->stats_timestamp = 0;
4487 
4488  memset(&hash_ctl, 0, sizeof(hash_ctl));
4489  hash_ctl.keysize = sizeof(Oid);
4490  hash_ctl.entrysize = sizeof(PgStat_StatTabEntry);
4491  dbentry->tables = hash_create("Per-database table",
4492  PGSTAT_TAB_HASH_SIZE,
4493  &hash_ctl,
4494  HASH_ELEM | HASH_BLOBS);
4495 
4496  hash_ctl.keysize = sizeof(Oid);
4497  hash_ctl.entrysize = sizeof(PgStat_StatFuncEntry);
4498  dbentry->functions = hash_create("Per-database function",
4499  PGSTAT_FUNCTION_HASH_SIZE,
4500  &hash_ctl,
4501  HASH_ELEM | HASH_BLOBS);
4502 }
4503 
4504 /*
4505  * Lookup the hash table entry for the specified database. If no hash
4506  * table entry exists, initialize it, if the create parameter is true.
4507  * Else, return NULL.
4508  */
4509 static PgStat_StatDBEntry *
4510 pgstat_get_db_entry(Oid databaseid, bool create)
4511 {
4512  PgStat_StatDBEntry *result;
4513  bool found;
4514  HASHACTION action = (create ? HASH_ENTER : HASH_FIND);
4515 
4516  /* Lookup or create the hash table entry for this database */
4517  result = (PgStat_StatDBEntry *) hash_search(pgStatDBHash,
4518  &databaseid,
4519  action, &found);
4520 
4521  if (!create && !found)
4522  return NULL;
4523 
4524  /*
4525  * If not found, initialize the new one. This creates empty hash tables
4526  * for tables and functions, too.
4527  */
4528  if (!found)
4529  reset_dbentry_counters(result);
4530 
4531  return result;
4532 }
4533 
4534 
4535 /*
4536  * Lookup the hash table entry for the specified table. If no hash
4537  * table entry exists, initialize it, if the create parameter is true.
4538  * Else, return NULL.
4539  */
4540 static PgStat_StatTabEntry *
4541 pgstat_get_tab_entry(PgStat_StatDBEntry *dbentry, Oid tableoid, bool create)
4542 {
4543  PgStat_StatTabEntry *result;
4544  bool found;
4545  HASHACTION action = (create ? HASH_ENTER : HASH_FIND);
4546 
4547  /* Lookup or create the hash table entry for this table */
4548  result = (PgStat_StatTabEntry *) hash_search(dbentry->tables,
4549  &tableoid,
4550  action, &found);
4551 
4552  if (!create && !found)
4553  return NULL;
4554 
4555  /* If not found, initialize the new one. */
4556  if (!found)
4557  {
4558  result->numscans = 0;
4559  result->tuples_returned = 0;
4560  result->tuples_fetched = 0;
4561  result->tuples_inserted = 0;
4562  result->tuples_updated = 0;
4563  result->tuples_deleted = 0;
4564  result->tuples_hot_updated = 0;
4565  result->n_live_tuples = 0;
4566  result->n_dead_tuples = 0;
4567  result->changes_since_analyze = 0;
4568  result->blocks_fetched = 0;
4569  result->blocks_hit = 0;
4570  result->vacuum_timestamp = 0;
4571  result->vacuum_count = 0;
4572  result->autovac_vacuum_timestamp = 0;
4573  result->autovac_vacuum_count = 0;
4574  result->analyze_timestamp = 0;
4575  result->analyze_count = 0;
4576  result->autovac_analyze_timestamp = 0;
4577  result->autovac_analyze_count = 0;
4578  }
4579 
4580  return result;
4581 }
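Both lookup helpers above follow the same dynahash idiom: hash_search() with HASH_ENTER returns either the existing entry or a freshly allocated, uninitialized one, and the `found` flag tells the caller whether the counters still need to be zeroed. A toy, self-contained sketch of that look-up-or-create-and-initialize pattern on a fixed-size open-addressing table (placeholder types and names; this is not PostgreSQL's dynahash):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

#define TBL_SIZE 64

typedef struct
{
	bool		used;
	unsigned	id;
	long		numscans;		/* stand-in for the per-table counters */
} ToyEntry;

static ToyEntry toy_table[TBL_SIZE];

static ToyEntry *
toy_get_entry(unsigned id, bool create)
{
	unsigned	slot = id % TBL_SIZE;

	for (size_t probes = 0; probes < TBL_SIZE; probes++)
	{
		ToyEntry   *e = &toy_table[(slot + probes) % TBL_SIZE];

		if (e->used && e->id == id)
			return e;			/* found an existing entry */
		if (!e->used)
		{
			if (!create)
				return NULL;	/* HASH_FIND-style behavior */
			/* HASH_ENTER-style behavior: claim the slot, zero the counters */
			memset(e, 0, sizeof(*e));
			e->used = true;
			e->id = id;
			return e;
		}
	}
	return NULL;				/* table full */
}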
4582 
4583 
4584 /* ----------
4585  * pgstat_write_statsfiles() -
4586  * Write the global statistics file, as well as requested DB files.
4587  *
4588  * 'permanent' specifies writing to the permanent files not temporary ones.
4589  * When true (happens only when the collector is shutting down), also remove
4590  * the temporary files so that backends starting up under a new postmaster
4591  * can't read old data before the new collector is ready.
4592  *
4593  * When 'allDbs' is false, only the requested databases (listed in
4594  * pending_write_requests) will be written; otherwise, all databases
4595  * will be written.
4596  * ----------
4597  */
4598 static void
4599 pgstat_write_statsfiles(bool permanent, bool allDbs)
4600 {
4601  HASH_SEQ_STATUS hstat;
4602  PgStat_StatDBEntry *dbentry;
4603  FILE *fpout;
4604  int32 format_id;
4605  const char *tmpfile = permanent ? PGSTAT_STAT_PERMANENT_TMPFILE : pgstat_stat_tmpname;
4606  const char *statfile = permanent ? PGSTAT_STAT_PERMANENT_FILENAME : pgstat_stat_filename;
4607  int rc;
4608 
4609  elog(DEBUG2, "writing stats file \"%s\"", statfile);
4610 
4611  /*
4612  * Open the statistics temp file to write out the current values.
4613  */
4614  fpout = AllocateFile(tmpfile, PG_BINARY_W);
4615  if (fpout == NULL)
4616  {
4617  ereport(LOG,
4619  errmsg("could not open temporary statistics file \"%s\": %m",
4620  tmpfile)));
4621  return;
4622  }
4623 
4624  /*
4625  * Set the timestamp of the stats file.
4626  */
4627  globalStats.stats_timestamp = GetCurrentTimestamp();
4628 
4629  /*
4630  * Write the file header --- currently just a format ID.
4631  */
4632  format_id = PGSTAT_FILE_FORMAT_ID;
4633  rc = fwrite(&format_id, sizeof(format_id), 1, fpout);
4634  (void) rc; /* we'll check for error with ferror */
4635 
4636  /*
4637  * Write global stats struct
4638  */
4639  rc = fwrite(&globalStats, sizeof(globalStats), 1, fpout);
4640  (void) rc; /* we'll check for error with ferror */
4641 
4642  /*
4643  * Write archiver stats struct
4644  */
4645  rc = fwrite(&archiverStats, sizeof(archiverStats), 1, fpout);
4646  (void) rc; /* we'll check for error with ferror */
4647 
4648  /*
4649  * Walk through the database table.
4650  */
4651  hash_seq_init(&hstat, pgStatDBHash);
4652  while ((dbentry = (PgStat_StatDBEntry *) hash_seq_search(&hstat)) != NULL)
4653  {
4654  /*
4655  * Write out the table and function stats for this DB into the
4656  * appropriate per-DB stat file, if required.
4657  */
4658  if (allDbs || pgstat_db_requested(dbentry->databaseid))
4659  {
4660  /* Make DB's timestamp consistent with the global stats */
4661  dbentry->stats_timestamp = globalStats.stats_timestamp;
4662 
4663  pgstat_write_db_statsfile(dbentry, permanent);
4664  }
4665 
4666  /*
4667  * Write out the DB entry. We don't write the tables or functions
4668  * pointers, since they're of no use to any other process.
4669  */
4670  fputc('D', fpout);
4671  rc = fwrite(dbentry, offsetof(PgStat_StatDBEntry, tables), 1, fpout);
4672  (void) rc; /* we'll check for error with ferror */
4673  }
4674 
4675  /*
4676  * No more output to be done. Close the temp file and replace the old
4677  * pgstat.stat with it. The ferror() check replaces testing for error
4678  * after each individual fputc or fwrite above.
4679  */
4680  fputc('E', fpout);
4681 
4682  if (ferror(fpout))
4683  {
4684  ereport(LOG,
4686  errmsg("could not write temporary statistics file \"%s\": %m",
4687  tmpfile)));
4688  FreeFile(fpout);
4689  unlink(tmpfile);
4690  }
4691  else if (FreeFile(fpout) < 0)
4692  {
4693  ereport(LOG,
4695  errmsg("could not close temporary statistics file \"%s\": %m",
4696  tmpfile)));
4697  unlink(tmpfile);
4698  }
4699  else if (rename(tmpfile, statfile) < 0)
4700  {
4701  ereport(LOG,
4703  errmsg("could not rename temporary statistics file \"%s\" to \"%s\": %m",
4704  tmpfile, statfile)));
4705  unlink(tmpfile);
4706  }
4707 
4708  if (permanent)
4709  unlink(pgstat_stat_filename);
4710 
4711  /*
4712  * Now throw away the list of requests. Note that requests sent after we
4713  * started the write are still waiting on the network socket.
4714  */
4715  list_free(pending_write_requests);
4716  pending_write_requests = NIL;
4717 }
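The function above leans on two idioms: defer error checking to a single ferror() test after all the fputc()/fwrite() calls, and write into a temporary file that is rename()d over the real one so readers never observe a half-written stats file. A condensed standalone sketch of that write-then-rename idiom (the paths and signature are placeholders, not the actual stats-file code):

#include <stdio.h>

static int
write_file_atomically(const char *tmppath, const char *finalpath,
					  const void *data, size_t len)
{
	FILE	   *fp = fopen(tmppath, "wb");

	if (fp == NULL)
		return -1;

	fwrite(data, 1, len, fp);	/* write errors are caught by ferror() below */

	if (ferror(fp))
	{
		fclose(fp);
		remove(tmppath);
		return -1;
	}
	if (fclose(fp) != 0)
	{
		remove(tmppath);
		return -1;
	}
	/* rename() atomically replaces any previous version of the file */
	if (rename(tmppath, finalpath) != 0)
	{
		remove(tmppath);
		return -1;
	}
	return 0;
}

Because rename() within one filesystem replaces the target atomically, backends can keep reading a consistent older stats file while the new one is being produced.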
4718 
4719 /*
4720  * return the filename for a DB stat file; filename is the output buffer,
4721  * of length len.
4722  */
4723 static void
4724 get_dbstat_filename(bool permanent, bool tempname, Oid databaseid,
4725  char *filename, int len)
4726 {
4727  int printed;
4728 
4729  /* NB -- pgstat_reset_remove_files knows about the pattern this uses */
4730  printed = snprintf(filename, len, "%s/db_%u.%s",
4731  permanent ? PGSTAT_STAT_PERMANENT_DIRECTORY :
4732  pgstat_stat_directory,
4733  databaseid,
4734  tempname ? "tmp" : "stat");
4735  if (printed > len)
4736  elog(ERROR, "overlength pgstat path");
4737 }
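get_dbstat_filename() relies on the snprintf() return value to detect an over-long path. For reference, a tiny standalone sketch of that truncation check with a hypothetical path pattern (snprintf() returns the length it would have needed, so a value at or above the buffer size means the output was cut short):

#include <stdio.h>

static int
build_db_path(char *buf, int len, unsigned dboid)
{
	int			printed = snprintf(buf, len, "stats/db_%u.tmp", dboid);

	/* a negative result is an encoding error; >= len means truncation */
	if (printed < 0 || printed >= len)
		return -1;
	return 0;
}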
4738 
4739 /* ----------
4740  * pgstat_write_db_statsfile() -
4741  * Write the stat file for a single database.
4742  *
4743  * If writing to the permanent file (happens when the collector is
4744  * shutting down only), remove the temporary file so that backends
4745  * starting up under a new postmaster can't read the old data before
4746  * the new collector is ready.
4747  * ----------
4748  */
4749 static void
4750  pgstat_write_db_statsfile(PgStat_StatDBEntry *dbentry, bool permanent)
4751  {
4752  HASH_SEQ_STATUS tstat;
4753  HASH_SEQ_STATUS fstat;
4754  PgStat_StatTabEntry *tabentry;
4755  PgStat_StatFuncEntry *funcentry;
4756  FILE *fpout;
4757  int32 format_id;
4758  Oid dbid = dbentry->databaseid;
4759  int rc;
4760  char tmpfile[MAXPGPATH];
4761  char statfile[MAXPGPATH];
4762 
4763  get_dbstat_filename(permanent, true, dbid, tmpfile, MAXPGPATH);
4764  get_dbstat_filename(permanent, false, dbid, statfile, MAXPGPATH);
4765 
4766  elog(DEBUG2, "writing stats file \"%s\"", statfile);
4767 
4768  /*
4769  * Open the statistics temp file to write out the current values.
4770  */
4771  fpout = AllocateFile(tmpfile, PG_BINARY_W);
4772  if (fpout == NULL)
4773  {
4774  ereport(LOG,
4776  errmsg("could not open temporary statistics file \"%s\": %m",
4777  tmpfile)));
4778  return;
4779  }
4780 
4781  /*
4782  * Write the file header --- currently just a format ID.
4783  */
4784  format_id = PGSTAT_FILE_FORMAT_ID;
4785  rc = fwrite(&format_id, sizeof(format_id), 1, fpout);
4786  (void) rc; /* we'll check for error with ferror */
4787 
4788  /*
4789  * Walk through the database's access stats per table.
4790  */
4791  hash_seq_init(&tstat, dbentry->tables);
4792  while ((tabentry = (PgStat_StatTabEntry *) hash_seq_search(&tstat)) != NULL)
4793  {
4794  fputc('T', fpout);
4795  rc = fwrite(tabentry, sizeof(PgStat_StatTabEntry), 1, fpout);
4796  (void) rc; /* we'll check for error with ferror */
4797  }
4798 
4799  /*
4800  * Walk through the database's function stats table.
4801  */
4802  hash_seq_init(&fstat, dbentry->functions);
4803  while ((funcentry = (PgStat_StatFuncEntry *) hash_seq_search(&fstat)) != NULL)
4804  {
4805  fputc('F', fpout);
4806  rc = fwrite(funcentry, sizeof(PgStat_StatFuncEntry), 1, fpout);
4807  (void) rc; /* we'll check for error with ferror */
4808  }
4809 
4810  /*
4811  * No more output to be done. Close the temp file and replace the old
4812  * pgstat.stat with it. The ferror() check replaces testing for error
4813  * after each individual fputc or fwrite above.
4814  */
4815  fputc('E', fpout);
4816 
4817  if (ferror(fpout))
4818  {
4819  ereport(LOG,
4821  errmsg("could not write temporary statistics file \"%s\": %m",
4822  tmpfile)));
4823  FreeFile(fpout);
4824  unlink(tmpfile);
4825  }
4826  else if (FreeFile(fpout) < 0)
4827  {
4828  ereport(LOG,
4830  errmsg("could not close temporary statistics file \"%s\": %m",
4831  tmpfile)));
4832  unlink(tmpfile);
4833  }
4834  else if (rename(tmpfile, statfile) < 0)
4835  {
4836  ereport(LOG,
4838  errmsg("could not rename temporary statistics file \"%s\" to \"%s\": %m",
4839  tmpfile, statfile)));
4840  unlink(tmpfile);
4841  }
4842 
4843  if (permanent)
4844  {
4845  get_dbstat_filename(false, false, dbid, statfile, MAXPGPATH);
4846 
4847  elog(DEBUG2, "removing temporary stats file \"%s\"", statfile);
4848  unlink(statfile);
4849  }
4850 }
4851 
4852 /* ----------
4853  * pgstat_read_statsfiles() -
4854  *
4855  * Reads in some existing statistics collector files and returns the
4856  * databases hash table that is the top level of the data.
4857  *
4858  * If 'onlydb' is not InvalidOid, it means we only want data for that DB
4859  * plus the shared catalogs ("DB 0"). We'll still populate the DB hash
4860  * table for all databases, but we don't bother even creating table/function
4861  * hash tables for other databases.
4862  *
4863  * 'permanent' specifies reading from the permanent files not temporary ones.
4864  * When true (happens only when the collector is starting up), remove the
4865  * files after reading; the in-memory status is now authoritative, and the
4866  * files would be out of date in case somebody else reads them.
4867  *
4868  * If a 'deep' read is requested, table/function stats are read, otherwise
4869  * the table/function hash tables remain empty.
4870  * ----------
4871  */
4872 static HTAB *
4873 pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep)
4874 {
4875  PgStat_StatDBEntry *dbentry;
4876  PgStat_StatDBEntry dbbuf;
4877  HASHCTL hash_ctl;
4878  HTAB *dbhash;
4879  FILE *fpin;
4880  int32 format_id;
4881  bool found;
4882  const char *statfile = permanent ? PGSTAT_STAT_PERMANENT_FILENAME : pgstat_stat_filename;
4883 
4884  /*
4885  * The tables will live in pgStatLocalContext.
4886  */
4887  pgstat_setup_memcxt();
4888 
4889  /*
4890  * Create the DB hashtable
4891  */
4892  memset(&hash_ctl, 0, sizeof(hash_ctl));
4893  hash_ctl.keysize = sizeof(Oid);
4894  hash_ctl.entrysize = sizeof(PgStat_StatDBEntry);
4895  hash_ctl.hcxt = pgStatLocalContext;
4896  dbhash = hash_create("Databases hash", PGSTAT_DB_HASH_SIZE, &hash_ctl,
4897  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
4898 
4899  /*
4900  * Clear out global and archiver statistics so they start from zero in
4901  * case we can't load an existing statsfile.
4902  */
4903  memset(&globalStats, 0, sizeof(globalStats));
4904  memset(&archiverStats, 0, sizeof(archiverStats));
4905 
4906  /*
4907  * Set the current timestamp (will be kept only in case we can't load an
4908  * existing statsfile).
4909  */
4910  globalStats.stat_reset_timestamp = GetCurrentTimestamp();
4911  archiverStats.stat_reset_timestamp = globalStats.stat_reset_timestamp;
4912 
4913  /*
4914  * Try to open the stats file. If it doesn't exist, the backends simply
4915  * return zero for anything and the collector simply starts from scratch
4916  * with empty counters.
4917  *
4918  * ENOENT is a possibility if the stats collector is not running or has
4919  * not yet written the stats file the first time. Any other failure
4920  * condition is suspicious.
4921  */
4922  if ((fpin = AllocateFile(statfile, PG_BINARY_R)) == NULL)
4923  {
4924  if (errno != ENOENT)
4925  ereport(pgStatRunningInCollector ? LOG : WARNING,
4926  (errcode_for_file_access(),
4927  errmsg("could not open statistics file \"%s\": %m",
4928  statfile)));
4929  return dbhash;
4930  }
4931 
4932  /*
4933  * Verify it's of the expected format.
4934  */
4935  if (fread(&format_id, 1, sizeof(format_id), fpin) != sizeof(format_id) ||
4936  format_id != PGSTAT_FILE_FORMAT_ID)
4937  {
4939  (errmsg("corrupted statistics file \"%s\"", statfile)));
4940  goto done;
4941  }
4942 
4943  /*
4944  * Read global stats struct
4945  */
4946  if (fread(&globalStats, 1, sizeof(globalStats), fpin) != sizeof(globalStats))
4947  {
4949  (errmsg("corrupted statistics file \"%s\"", statfile)));
4950  memset(&globalStats, 0, sizeof(globalStats));
4951  goto done;
4952  }
4953 
4954  /*
4955  * In the collector, disregard the timestamp we read from the permanent
4956  * stats file; we should be willing to write a temp stats file immediately
4957  * upon the first request from any backend. This only matters if the old
4958  * file's timestamp is less than PGSTAT_STAT_INTERVAL ago, but that's not
4959  * an unusual scenario.
4960  */
4961  if (pgStatRunningInCollector)
4962  globalStats.stats_timestamp = 0;
4963 
4964  /*
4965  * Read archiver stats struct
4966  */
4967  if (fread(&archiverStats, 1, sizeof(archiverStats), fpin) != sizeof(archiverStats))
4968  {
4970  (errmsg("corrupted statistics file \"%s\"", statfile)));
4971  memset(&archiverStats, 0, sizeof(archiverStats));
4972  goto done;
4973  }
4974 
4975  /*
4976  * We found an existing collector stats file. Read it and put all the
4977  * hashtable entries into place.
4978  */
4979  for (;;)
4980  {
4981  switch (fgetc(fpin))
4982  {
4983  /*
4984  * 'D' A PgStat_StatDBEntry struct describing a database
4985  * follows.
4986  */
4987  case 'D':
4988  if (fread(&dbbuf, 1, offsetof(PgStat_StatDBEntry, tables),
4989  fpin) != offsetof(PgStat_StatDBEntry, tables))
4990  {
4992  (errmsg("corrupted statistics file \"%s\"",
4993  statfile)));
4994  goto done;
4995  }
4996 
4997  /*
4998  * Add to the DB hash
4999  */
5000  dbentry = (PgStat_StatDBEntry *) hash_search(dbhash,
5001  (void *) &dbbuf.databaseid,
5002  HASH_ENTER,
5003  &found);
5004  if (found)
5005  {
5007  (errmsg("corrupted statistics file \"%s\"",
5008  statfile)));
5009  goto done;
5010  }
5011 
5012  memcpy(dbentry, &dbbuf, sizeof(PgStat_StatDBEntry));
5013  dbentry->tables = NULL;
5014  dbentry->functions = NULL;
5015 
5016  /*
5017  * In the collector, disregard the timestamp we read from the
5018  * permanent stats file; we should be willing to write a temp
5019  * stats file immediately upon the first request from any
5020  * backend.
5021  */
5022  if (pgStatRunningInCollector)
5023  dbentry->stats_timestamp = 0;
5024 
5025  /*
5026  * Don't create tables/functions hashtables for uninteresting
5027  * databases.
5028  */
5029  if (onlydb != InvalidOid)
5030  {
5031  if (dbbuf.databaseid != onlydb &&
5032  dbbuf.databaseid != InvalidOid)
5033  break;
5034  }
5035 
5036  memset(&hash_ctl, 0, sizeof(hash_ctl));
5037  hash_ctl.keysize = sizeof(Oid);
5038  hash_ctl.entrysize = sizeof(PgStat_StatTabEntry);
5039  hash_ctl.hcxt = pgStatLocalContext;
5040  dbentry->tables = hash_create("Per-database table",
5041  PGSTAT_TAB_HASH_SIZE,
5042  &hash_ctl,
5043  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
5044 
5045  hash_ctl.keysize = sizeof(Oid);
5046  hash_ctl.entrysize = sizeof(PgStat_StatFuncEntry);
5047  hash_ctl.hcxt = pgStatLocalContext;
5048  dbentry->functions = hash_create("Per-database function",
5049  PGSTAT_FUNCTION_HASH_SIZE,
5050  &hash_ctl,
5051  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
5052 
5053  /*
5054  * If requested, read the data from the database-specific
5055  * file. Otherwise we just leave the hashtables empty.
5056  */
5057  if (deep)
5058  pgstat_read_db_statsfile(dbbuf.databaseid,
5059  dbentry->tables,
5060  dbentry->functions,
5061  permanent);
5062 
5063  break;
5064 
5065  case 'E':
5066  goto done;
5067 
5068  default:
5070  (errmsg("corrupted statistics file \"%s\"",
5071  statfile)));
5072  goto done;
5073  }
5074  }
5075 
5076 done:
5077  FreeFile(fpin);
5078 
5079  /* If requested to read the permanent file, also get rid of it. */
5080  if (permanent)
5081  {
5082  elog(DEBUG2, "removing permanent stats file \"%s\"", statfile);
5083  unlink(statfile);
5084  }
5085 
5086  return dbhash;
5087 }
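The reader above mirrors the writer: verify the 32-bit format id, then consume tagged records ('D' introduces a database entry) until the 'E' end marker, and treat anything unexpected as corruption. A self-contained sketch of reading such a tag-marked file with a toy record type (the format id, struct, and names are placeholders):

#include <stdint.h>
#include <stdio.h>

#define TOY_FORMAT_ID 0x01A5BC9D

typedef struct
{
	uint32_t	id;
	long		counter;
} ToyRecord;

static int
read_toy_statsfile(const char *path)
{
	FILE	   *fp = fopen(path, "rb");
	int32_t		format_id;
	ToyRecord	rec;

	if (fp == NULL)
		return -1;

	if (fread(&format_id, sizeof(format_id), 1, fp) != 1 ||
		format_id != TOY_FORMAT_ID)
	{
		fclose(fp);
		return -1;				/* wrong or corrupted file */
	}

	for (;;)
	{
		int			tag = fgetc(fp);

		if (tag == 'D')
		{
			if (fread(&rec, sizeof(rec), 1, fp) != 1)
				break;			/* truncated record: treat as corrupt */
			printf("record %u: %ld\n", rec.id, rec.counter);
		}
		else if (tag == 'E')
		{
			fclose(fp);
			return 0;			/* clean end-of-file marker */
		}
		else
			break;				/* unknown tag or unexpected EOF */
	}

	fclose(fp);
	return -1;
}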
5088 
5089 
5090 /* ----------
5091  * pgstat_read_db_statsfile() -
5092  *
5093  * Reads in the existing statistics collector file for the given database,
5094  * filling the passed-in tables and functions hash tables.
5095  *
5096  * As in pgstat_read_statsfiles, if the permanent file is requested, it is
5097  * removed after reading.
5098  *
5099  * Note: this code has the ability to skip storing per-table or per-function
5100  * data, if NULL is passed for the corresponding hashtable. That's not used
5101  * at the moment though.
5102  * ----------
5103  */
5104 static void
5105 pgstat_read_db_statsfile(Oid databaseid, HTAB *tabhash, HTAB *funchash,
5106  bool permanent)
5107 {
5108  PgStat_StatTabEntry *tabentry;
5109  PgStat_StatTabEntry tabbuf;
5110  PgStat_StatFuncEntry funcbuf;
5111  PgStat_StatFuncEntry *funcentry;
5112  FILE *fpin;
5113  int32 format_id;
5114  bool found;
5115  char statfile[MAXPGPATH];
5116 
5117  get_dbstat_filename(permanent, false, databaseid, statfile, MAXPGPATH);
5118 
5119  /*
5120  * Try to open the stats file. If it doesn't exist, the backends simply
5121  * return zero for anything and the collector simply starts from scratch
5122  * with empty counters.
5123  *
5124  * ENOENT is a possibility if the stats collector is not running or has
5125  * not yet written the stats file the first time. Any other failure
5126  * condition is suspicious.
5127  */
5128  if ((fpin = AllocateFile(statfile, PG_BINARY_R)) == NULL)
5129  {
5130  if (errno != ENOENT)
5131  ereport(pgStatRunningInCollector ? LOG : WARNING,
5132  (errcode_for_file_access(),
5133  errmsg("could not open statistics file \"%s\": %m",
5134  statfile)));
5135  return;
5136  }
5137 
5138  /*
5139  * Verify it's of the expected format.
5140  */
5141  if (fread(&format_id, 1, sizeof(format_id), fpin) != sizeof(format_id) ||
5142  format_id != PGSTAT_FILE_FORMAT_ID)
5143  {
5145  (errmsg("corrupted statistics file \"%s\"", statfile)));
5146  goto done;
5147  }
5148 
5149  /*
5150  * We found an existing collector stats file. Read it and put all the
5151  * hashtable entries into place.
5152  */
5153  for (;;)
5154  {
5155  switch (fgetc(fpin))
5156  {
5157  /*
5158  * 'T' A PgStat_StatTabEntry follows.
5159  */
5160  case 'T':
5161  if (fread(&tabbuf, 1, sizeof(PgStat_StatTabEntry),
5162  fpin) != sizeof(PgStat_StatTabEntry))
5163  {
5165  (errmsg("corrupted statistics file \"%s\"",
5166  statfile)));
5167  goto done;
5168  }
5169 
5170  /*
5171  * Skip if table data not wanted.
5172  */
5173  if (tabhash == NULL)
5174  break;
5175 
5176  tabentry = (PgStat_StatTabEntry *) hash_search(tabhash,
5177  (void *) &tabbuf.tableid,
5178  HASH_ENTER, &found);
5179 
5180  if (found)
5181  {
5183  (errmsg("corrupted statistics file \"%s\"",
5184  statfile)));
5185  goto done;
5186  }
5187 
5188  memcpy(tabentry, &tabbuf, sizeof(tabbuf));
5189  break;
5190 
5191  /*
5192  * 'F' A PgStat_StatFuncEntry follows.
5193  */
5194  case 'F':
5195  if (fread(&funcbuf, 1, sizeof(PgStat_StatFuncEntry),
5196  fpin) != sizeof(PgStat_StatFuncEntry))
5197  {
5199  (errmsg("corrupted statistics file \"%s\"",
5200  statfile)));
5201  goto done;
5202  }
5203 
5204  /*
5205  * Skip if function data not wanted.
5206  */
5207  if (funchash == NULL)
5208  break;
5209 
5210  funcentry = (PgStat_StatFuncEntry *) hash_search(funchash,
5211  (void *) &funcbuf.functionid,
5212  HASH_ENTER, &found);
5213 
5214  if (found)
5215  {
5217  (errmsg("corrupted statistics file \"%s\"",
5218  statfile)));
5219  goto done;
5220  }
5221 
5222  memcpy(funcentry, &funcbuf, sizeof(funcbuf));
5223  break;
5224 
5225  /*
5226  * 'E' The EOF marker of a complete stats file.
5227  */
5228  case 'E':
5229  goto done;
5230 
5231  default:
5233  (errmsg("corrupted statistics file \"%s\"",
5234  statfile)));
5235  goto done;
5236  }
5237  }
5238 
5239 done:
5240  FreeFile(fpin);
5241 
5242  if (permanent)
5243  {
5244  elog(DEBUG2, "removing permanent stats file \"%s\"", statfile);
5245  unlink(statfile);
5246  }
5247 }
5248 
5249 /* ----------
5250  * pgstat_read_db_statsfile_timestamp() -
5251  *
5252  * Attempt to determine the timestamp of the last db statfile write.
5253  * Returns TRUE if successful; the timestamp is stored in *ts.
5254  *
5255  * This needs to be careful about handling databases for which no stats file
5256  * exists, such as databases without a stat entry or those not yet written:
5257  *
5258  * - if there's a database entry in the global file, return the corresponding
5259  * stats_timestamp value.
5260  *
5261  * - if there's no db stat entry (e.g. for a new or inactive database),
5262  * there's no stats_timestamp value, but also nothing to write so we return
5263  * the timestamp of the global statfile.
5264  * ----------
5265  */
5266 static bool
5267 pgstat_read_db_statsfile_timestamp(Oid databaseid, bool permanent,
5268  TimestampTz *ts)
5269 {
5270  PgStat_StatDBEntry dbentry;
5271  PgStat_GlobalStats myGlobalStats;
5272  PgStat_ArchiverStats myArchiverStats;
5273  FILE *fpin;
5274  int32 format_id;
5275  const char *statfile = permanent ? PGSTAT_STAT_PERMANENT_FILENAME : pgstat_stat_filename;
5276 
5277  /*
5278  * Try to open the stats file. As above, anything but ENOENT is worthy of
5279  * complaining about.
5280  */
5281  if ((fpin = AllocateFile(statfile, PG_BINARY_R)) == NULL)
5282  {
5283  if (errno != ENOENT)
5284  ereport(pgStatRunningInCollector ? LOG : WARNING,
5285  (errcode_for_file_access(),
5286  errmsg("could not open statistics file \"%s\": %m",
5287  statfile)));
5288  return false;
5289  }
5290 
5291  /*
5292  * Verify it's of the expected format.
5293  */
5294  if (fread(&format_id, 1, sizeof(format_id), fpin) != sizeof(format_id) ||
5295  format_id != PGSTAT_FILE_FORMAT_ID)
5296  {
5298  (errmsg("corrupted statistics file \"%s\"", statfile)));
5299  FreeFile(fpin);
5300  return false;
5301  }
5302 
5303  /*
5304  * Read global stats struct
5305  */
5306  if (fread(&myGlobalStats, 1, sizeof(myGlobalStats),
5307  fpin) != sizeof(myGlobalStats))
5308  {
5310  (errmsg("corrupted statistics file \"%s\"", statfile)));
5311  FreeFile(fpin);
5312  return false;
5313  }
5314 
5315  /*
5316  * Read archiver stats struct
5317  */
5318  if (fread(&myArchiverStats, 1, sizeof(myArchiverStats),
5319  fpin) != sizeof(myArchiverStats))
5320  {
5322  (errmsg("corrupted statistics file \"%s\"", statfile)));
5323  FreeFile(fpin);
5324  return false;
5325  }
5326 
5327  /* By default, we're going to return the timestamp of the global file. */
5328  *ts = myGlobalStats.stats_timestamp;
5329 
5330  /*
5331  * We found an existing collector stats file. Read it and look for a
5332  * record for the requested database. If found, use its timestamp.
5333  */
5334  for (;;)
5335  {
5336  switch (fgetc(fpin))
5337  {
5338  /*
5339  * 'D' A PgStat_StatDBEntry struct describing a database
5340  * follows.
5341  */
5342  case 'D':
5343  if (fread(&dbentry, 1, offsetof(PgStat_StatDBEntry, tables),
5344  fpin) != offsetof(PgStat_StatDBEntry, tables))
5345  {
5347  (errmsg("corrupted statistics file \"%s\"",
5348  statfile)));
5349  goto done;
5350  }
5351 
5352  /*
5353  * If this is the DB we're looking for, save its timestamp and
5354  * we're done.
5355  */
5356  if (dbentry.databaseid == databaseid)
5357  {
5358  *ts = dbentry.stats_timestamp;
5359  goto done;
5360  }
5361 
5362  break;
5363 
5364  case 'E':
5365  goto done;
5366 
5367  default:
5369  (errmsg("corrupted statistics file \"%s\"",
5370  statfile)));
5371  goto done;
5372  }
5373  }
5374 
5375 done:
5376  FreeFile(fpin);
5377  return true;
5378 }
5379 
5380 /*
5381  * If not already done, read the statistics collector stats file into
5382  * some hash tables. The results will be kept until pgstat_clear_snapshot()
5383  * is called (typically, at end of transaction).
5384  */
5385 static void
5386  backend_read_statsfile(void)
5387  {
5388  TimestampTz min_ts = 0;
5389  TimestampTz ref_ts = 0;
5390  Oid inquiry_db;
5391  int count;
5392 
5393  /* already read it? */
5394  if (pgStatDBHash)
5395  return;
5396  Assert(!pgStatRunningInCollector);
5397 
5398  /*
5399  * In a normal backend, we check staleness of the data for our own DB, and
5400  * so we send MyDatabaseId in inquiry messages. In the autovac launcher,
5401  * check staleness of the shared-catalog data, and send InvalidOid in
5402  * inquiry messages so as not to force writing unnecessary data.
5403  */
5404  if (IsAutoVacuumLauncherProcess())
5405  inquiry_db = InvalidOid;
5406  else
5407  inquiry_db = MyDatabaseId;
5408 
5409  /*
5410  * Loop until fresh enough stats file is available or we ran out of time.
5411  * The stats inquiry message is sent repeatedly in case collector drops
5412  * it; but not every single time, as that just swamps the collector.
5413  */
5414  for (count = 0; count < PGSTAT_POLL_LOOP_COUNT; count++)
5415  {
5416  bool ok;
5417  TimestampTz file_ts = 0;
5418  TimestampTz cur_ts;
5419 
5421 
5422  ok = pgstat_read_db_statsfile_timestamp(inquiry_db, false, &file_ts);
5423 
5424  cur_ts = GetCurrentTimestamp();
5425  /* Calculate min acceptable timestamp, if we didn't already */
5426  if (count == 0 || cur_ts < ref_ts)
5427  {
5428  /*
5429  * We set the minimum acceptable timestamp to PGSTAT_STAT_INTERVAL
5430  * msec before now. This indirectly ensures that the collector
5431  * needn't write the file more often than PGSTAT_STAT_INTERVAL. In
5432  * an autovacuum worker, however, we want a lower delay to avoid
5433  * using stale data, so we use PGSTAT_RETRY_DELAY (since the
5434  * number of workers is low, this shouldn't be a problem).
5435  *
5436  * We don't recompute min_ts after sleeping, except in the
5437  * unlikely case that cur_ts went backwards. So we might end up
5438  * accepting a file a bit older than PGSTAT_STAT_INTERVAL. In
5439  * practice that shouldn't happen, though, as long as the sleep
5440  * time is less than PGSTAT_STAT_INTERVAL; and we don't want to
5441  * tell the collector that our cutoff time is less than what we'd
5442  * actually accept.
5443  */
5444  ref_ts = cur_ts;
5445  if (IsAutoVacuumWorkerProcess())
5446  min_ts = TimestampTzPlusMilliseconds(ref_ts,
5447  -PGSTAT_RETRY_DELAY);
5448  else
5449  min_ts = TimestampTzPlusMilliseconds(ref_ts,
5450  -PGSTAT_STAT_INTERVAL);
5451  }
5452 
5453  /*
5454  * If the file timestamp is actually newer than cur_ts, we must have
5455  * had a clock glitch (system time went backwards) or there is clock
5456  * skew between our processor and the stats collector's processor.
5457  * Accept the file, but send an inquiry message anyway to make
5458  * pgstat_recv_inquiry do a sanity check on the collector's time.
5459  */
5460  if (ok && file_ts > cur_ts)
5461  {
5462  /*
5463  * A small amount of clock skew between processors isn't terribly
5464  * surprising, but a large difference is worth logging. We
5465  * arbitrarily define "large" as 1000 msec.
5466  */
5467  if (file_ts >= TimestampTzPlusMilliseconds(cur_ts, 1000))
5468  {
5469  char *filetime;
5470  char *mytime;
5471 
5472  /* Copy because timestamptz_to_str returns a static buffer */
5473  filetime = pstrdup(timestamptz_to_str(file_ts));
5474  mytime = pstrdup(timestamptz_to_str(cur_ts));
5475  elog(LOG, "stats collector's time %s is later than backend local time %s",
5476  filetime, mytime);
5477  pfree(filetime);
5478  pfree(mytime);
5479  }
5480 
5481  pgstat_send_inquiry(cur_ts, min_ts, inquiry_db);
5482  break;
5483  }
5484 
5485  /* Normal acceptance case: file is not older than cutoff time */
5486  if (ok && file_ts >= min_ts)
5487  break;
5488 
5489  /* Not there or too old, so kick the collector and wait a bit */
5490  if ((count % PGSTAT_INQ_LOOP_COUNT) == 0)
5491  pgstat_send_inquiry(cur_ts, min_ts, inquiry_db);
5492 
5493  pg_usleep(PGSTAT_RETRY_DELAY * 1000L);
5494  }
5495 
5496  if (count >= PGSTAT_POLL_LOOP_COUNT)
5497  ereport(LOG,
5498  (errmsg("using stale statistics instead of current ones "
5499  "because stats collector is not responding")));
5500 
5501  /*
5502  * Autovacuum launcher wants stats about all databases, but a shallow read
5503  * is sufficient. Regular backends want a deep read for just the tables
5504  * they can see (MyDatabaseId + shared catalogs).
5505  */
5506  if (IsAutoVacuumLauncherProcess())
5507  pgStatDBHash = pgstat_read_statsfiles(InvalidOid, false, false);
5508  else
5509  pgStatDBHash = pgstat_read_statsfiles(MyDatabaseId, false, true);
5510 }
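backend_read_statsfile() is essentially a freshness poll: compute a cutoff timestamp, then retry (sending an occasional inquiry) until the on-disk file is at least that new or the retry budget runs out, at which point stale data is accepted. A much-simplified standalone sketch of the same polling idea using file modification time and plain time_t seconds (the constants and helper names are stand-ins, not the real PGSTAT_* values or APIs):

#include <stdbool.h>
#include <sys/stat.h>
#include <time.h>
#include <unistd.h>

static bool
file_write_time(const char *path, time_t *ts)
{
	struct stat st;

	if (stat(path, &st) != 0)
		return false;
	*ts = st.st_mtime;
	return true;
}

static bool
wait_for_fresh_file(const char *path)
{
	time_t		min_ts = time(NULL) - 1;	/* stand-in for the staleness cutoff */

	for (int attempt = 0; attempt < 1000; attempt++)
	{
		time_t		file_ts;

		if (file_write_time(path, &file_ts) && file_ts >= min_ts)
			return true;		/* file is fresh enough */
		usleep(10 * 1000);		/* stand-in for the retry delay */
	}
	return false;				/* give up and use stale data */
}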
5511 
5512 
5513 /* ----------
5514  * pgstat_setup_memcxt() -
5515  *
5516  * Create pgStatLocalContext, if not already done.
5517  * ----------
5518  */
5519 static void
5520  pgstat_setup_memcxt(void)
5521  {
5522  if (!pgStatLocalContext)
5523  pgStatLocalContext = AllocSetContextCreate(TopMemoryContext,
5524  "Statistics snapshot",
5525  ALLOCSET_SMALL_SIZES);
5526 }
5527 
5528 
5529 /* ----------
5530  * pgstat_clear_snapshot() -
5531  *
5532  * Discard any data collected in the current transaction. Any subsequent
5533  * request will cause new snapshots to be read.
5534  *
5535  * This is also invoked during transaction commit or abort to discard
5536  * the no-longer-wanted snapshot.
5537  * ----------
5538  */
5539 void
5540  pgstat_clear_snapshot(void)
5541  {
5542  /* Release memory, if any was allocated */
5543  if (pgStatLocalContext)
5544  MemoryContextDelete(pgStatLocalContext);
5545 
5546  /* Reset variables */
5547  pgStatLocalContext = NULL;
5548  pgStatDBHash = NULL;
5549  localBackendStatusTable = NULL;
5550  localNumBackends = 0;
5551 }
5552 
5553 
5554 /* ----------
5555  * pgstat_recv_inquiry() -
5556  *
5557  * Process stat inquiry requests.
5558  * ----------
5559  */
5560 static void
5561  pgstat_recv_inquiry(PgStat_MsgInquiry *msg, int len)
5562  {
5563  PgStat_StatDBEntry *dbentry;
5564 
5565  elog(DEBUG2, "received inquiry for database %u", msg->databaseid);
5566 
5567  /*
5568  * If there's already a write request for this DB, there's nothing to do.
5569  *
5570  * Note that if a request is found, we return early and skip the below
5571  * check for clock skew. This is okay, since the only way for a DB
5572  * request to be present in the list is that we have been here since the
5573  * last write round. It seems sufficient to check for clock skew once per
5574  * write round.
5575  */
5576  if (list_member_oid(pending_write_requests, msg->databaseid))
5577  return;
5578 
5579  /*
5580  * Check to see if we last wrote this database at a time >= the requested
5581  * cutoff time. If so, this is a stale request that was generated before
5582  * we updated the DB file, and we don't need to do so again.
5583  *
5584  * If the requestor's local clock time is older than stats_timestamp, we
5585  * should suspect a clock glitch, ie system time going backwards; though
5586  * the more likely explanation is just delayed message receipt. It is
5587  * worth expending a GetCurrentTimestamp call to be sure, since a large
5588  * retreat in the system clock reading could otherwise cause us to neglect
5589  * to update the stats file for a long time.
5590  */
5591  dbentry = pgstat_get_db_entry(msg->databaseid, false);
5592  if (dbentry == NULL)
5593  {
5594  /*
5595  * We have no data for this DB. Enter a write request anyway so that
5596  * the global stats will get updated. This is needed to prevent
5597  * backend_read_statsfile from waiting for data that we cannot supply,
5598  * in the case of a new DB that nobody has yet reported any stats for.
5599  * See the behavior of pgstat_read_db_statsfile_timestamp.
5600  */
5601  }
5602  else if (msg->clock_time < dbentry->stats_timestamp)
5603  {
5604  TimestampTz cur_ts = GetCurrentTimestamp();
5605 
5606  if (cur_ts < dbentry->stats_timestamp)
5607  {
5608  /*
5609  * Sure enough, time went backwards. Force a new stats file write
5610  * to get back in sync; but first, log a complaint.
5611  */
5612  char *writetime;
5613  char *mytime;
5614 
5615  /* Copy because timestamptz_to_str returns a static buffer */
5616  writetime = pstrdup(timestamptz_to_str(dbentry->stats_timestamp));
5617  mytime = pstrdup(timestamptz_to_str(cur_ts));
5618  elog(LOG,
5619  "stats_timestamp %s is later than collector's time %s for database %u",
5620  writetime, mytime, dbentry->databaseid);
5621  pfree(writetime);
5622  pfree(mytime);
5623  }
5624  else
5625  {
5626  /*
5627  * Nope, it's just an old request. Assuming msg's clock_time is
5628  * >= its cutoff_time, it must be stale, so we can ignore it.
5629  */
5630  return;
5631  }
5632  }
5633  else if (msg->cutoff_time <= dbentry->stats_timestamp)
5634  {
5635  /* Stale request, ignore it */
5636  return;
5637  }
5638 
5639  /*
5640  * We need to write this DB, so create a request.
5641  */
5642  pending_write_requests = lappend_oid(pending_write_requests,
5643  msg->databaseid);
5644 }
5645 
5646 
5647 /* ----------
5648  * pgstat_recv_tabstat() -
5649  *
5650  * Count what the backend has done.
5651  * ----------
5652  */
5653 static void
5654  pgstat_recv_tabstat(PgStat_MsgTabstat *msg, int len)
5655  {
5656  PgStat_StatDBEntry *dbentry;
5657  PgStat_StatTabEntry *tabentry;
5658  int i;
5659  bool found;
5660 
5661  dbentry = pgstat_get_db_entry(msg->m_databaseid, true);
5662 
5663  /*
5664  * Update database-wide stats.
5665  */
5666  dbentry->n_xact_commit += (PgStat_Counter) (msg->m_xact_commit);
5667  dbentry->n_xact_rollback += (PgStat_Counter) (msg->m_xact_rollback);
5668  dbentry->n_block_read_time += msg->m_block_read_time;
5669  dbentry->n_block_write_time += msg->m_block_write_time;
5670 
5671  /*
5672  * Process all table entries in the message.
5673  */
5674  for (i = 0; i < msg->m_nentries; i++)
5675  {
5676  PgStat_TableEntry *tabmsg = &(msg->m_entry[i]);
5677 
5678  tabentry = (PgStat_StatTabEntry *) hash_search(dbentry->tables,
5679  (void *) &(tabmsg->t_id),
5680  HASH_ENTER, &found);
5681 
5682  if (!found)
5683  {
5684  /*
5685  * If it's a new table entry, initialize counters to the values we
5686  * just got.
5687  */
5688  tabentry->numscans = tabmsg->t_counts.t_numscans;
5689  tabentry->tuples_returned = tabmsg->t_counts.t_tuples_returned;
5690  tabentry->tuples_fetched = tabmsg->t_counts.t_tuples_fetched;
5691  tabentry->tuples_inserted = tabmsg->t_counts.t_tuples_inserted;
5692  tabentry->tuples_updated = tabmsg->t_counts.t_tuples_updated;
5693  tabentry->tuples_deleted = tabmsg->t_counts.t_tuples_deleted;
5694  tabentry->tuples_hot_updated = tabmsg->t_counts.t_tuples_hot_updated;
5695  tabentry->n_live_tuples = tabmsg->t_counts.t_delta_live_tuples;
5696  tabentry->n_dead_tuples = tabmsg->t_counts.t_delta_dead_tuples;
5697  tabentry->changes_since_analyze = tabmsg->t_counts.t_changed_tuples;
5698  tabentry->blocks_fetched = tabmsg->t_counts.t_blocks_fetched;
5699  tabentry->blocks_hit = tabmsg->t_counts.t_blocks_hit;
5700 
5701  tabentry->vacuum_timestamp = 0;
5702  tabentry->vacuum_count = 0;
5703  tabentry->autovac_vacuum_timestamp = 0;
5704  tabentry->autovac_vacuum_count = 0;
5705  tabentry->analyze_timestamp = 0;
5706  tabentry->analyze_count = 0;
5707  tabentry->autovac_analyze_timestamp = 0;
5708  tabentry->autovac_analyze_count = 0;
5709  }
5710  else
5711  {
5712  /*
5713  * Otherwise add the values to the existing entry.
5714  */
5715  tabentry->numscans += tabmsg->t_counts.t_numscans;
5716  tabentry->tuples_returned += tabmsg->t_counts.t_tuples_returned;
5717  tabentry->tuples_fetched += tabmsg->t_counts.t_tuples_fetched;
5718  tabentry->tuples_inserted += tabmsg->t_counts.t_tuples_inserted;
5719  tabentry->tuples_updated += tabmsg->t_counts.t_tuples_updated;
5720  tabentry->tuples_deleted += tabmsg->t_counts.t_tuples_deleted;
5721  tabentry->tuples_hot_updated += tabmsg->t_counts.t_tuples_hot_updated;
5722  /* If table was truncated, first reset the live/dead counters */
5723  if (tabmsg->t_counts.t_truncated)
5724  {
5725  tabentry->n_live_tuples = 0;
5726  tabentry->n_dead_tuples = 0;
5727  }
5728  tabentry->n_live_tuples += tabmsg->t_counts.t_delta_live_tuples;
5729  tabentry->n_dead_tuples += tabmsg->t_counts.t_delta_dead_tuples;
5730  tabentry->changes_since_analyze += tabmsg->t_counts.t_changed_tuples;
5731  tabentry->blocks_fetched += tabmsg->t_counts.t_blocks_fetched;
5732  tabentry->blocks_hit += tabmsg->t_counts.t_blocks_hit;
5733  }
5734 
5735  /* Clamp n_live_tuples in case of negative delta_live_tuples */
5736  tabentry->n_live_tuples = Max(tabentry->n_live_tuples, 0);
5737  /* Likewise for n_dead_tuples */
5738  tabentry->n_dead_tuples = Max(tabentry->n_dead_tuples, 0);
5739 
5740  /*
5741  * Add per-table stats to the per-database entry, too.
5742  */
5743  dbentry->n_tuples_returned += tabmsg->t_counts.t_tuples_returned;
5744  dbentry->n_tuples_fetched += tabmsg->t_counts.t_tuples_fetched;
5745  dbentry->n_tuples_inserted += tabmsg->t_counts.t_tuples_inserted;
5746  dbentry->n_tuples_updated += tabmsg->t_counts.t_tuples_updated;
5747  dbentry->n_tuples_deleted += tabmsg->t_counts.t_tuples_deleted;
5748  dbentry->n_blocks_fetched += tabmsg->t_counts.t_blocks_fetched;
5749  dbentry->n_blocks_hit += tabmsg->t_counts.t_blocks_hit;
5750  }
5751 }
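The merge above has three cases worth noting: a brand-new hash entry takes the reported deltas as its starting values, an existing entry accumulates them (after resetting the live/dead counts if the table was truncated), and the live/dead totals are clamped at zero because the deltas can be negative. A small standalone sketch of that merge logic with placeholder types, reduced to two counters:

#include <stdbool.h>

typedef struct
{
	long		live_tuples;
	long		dead_tuples;
} ToyTabCounts;

static void
merge_counts(ToyTabCounts *dest, const ToyTabCounts *delta,
			 bool is_new_entry, bool was_truncated)
{
	if (is_new_entry)
	{
		/* first report for this table: take the deltas as-is */
		*dest = *delta;
	}
	else
	{
		/* a truncate wipes the absolute counts before applying deltas */
		if (was_truncated)
		{
			dest->live_tuples = 0;
			dest->dead_tuples = 0;
		}
		dest->live_tuples += delta->live_tuples;
		dest->dead_tuples += delta->dead_tuples;
	}

	/* deltas may be negative, so never let the totals go below zero */
	if (dest->live_tuples < 0)
		dest->live_tuples = 0;
	if (dest->dead_tuples < 0)
		dest->dead_tuples = 0;
}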
5752 
5753 
5754 /* ----------
5755  * pgstat_recv_tabpurge() -
5756  *
5757  * Arrange for dead table removal.
5758  * ----------
5759  */
5760 static void
5761  pgstat_recv_tabpurge(PgStat_MsgTabpurge *msg, int len)
5762  {
5763  PgStat_StatDBEntry *dbentry;
5764  int i;
5765 
5766  dbentry = pgstat_get_db_entry(msg->m_databaseid, false);
5767 
5768  /*
5769  * No need to purge if we don't even know the database.
5770  */
5771  if (!dbentry || !dbentry->tables)
5772  return;
5773 
5774  /*
5775  * Process all table entries in the message.
5776  */
5777  for (i = 0; i < msg->m_nentries; i++)
5778  {
5779  /* Remove from hashtable if present; we don't care if it's not. */
5780  (void) hash_search(dbentry->tables,
5781  (void *) &(msg->