1 /* ----------
2  * pgstat.c
3  *
4  * All the statistics collector stuff hacked up in one big, ugly file.
5  *
6  * TODO: - Separate collector, postmaster and backend stuff
7  * into different files.
8  *
9  * - Add some automatic call for pgstat vacuuming.
10  *
11  * - Add a pgstat config column to pg_database, so this
12  * entire thing can be enabled/disabled on a per db basis.
13  *
14  * Copyright (c) 2001-2018, PostgreSQL Global Development Group
15  *
16  * src/backend/postmaster/pgstat.c
17  * ----------
18  */
19 #include "postgres.h"
20 
21 #include <unistd.h>
22 #include <fcntl.h>
23 #include <sys/param.h>
24 #include <sys/time.h>
25 #include <sys/socket.h>
26 #include <netdb.h>
27 #include <netinet/in.h>
28 #include <arpa/inet.h>
29 #include <signal.h>
30 #include <time.h>
31 #ifdef HAVE_SYS_SELECT_H
32 #include <sys/select.h>
33 #endif
34 
35 #include "pgstat.h"
36 
37 #include "access/heapam.h"
38 #include "access/htup_details.h"
39 #include "access/transam.h"
40 #include "access/twophase_rmgr.h"
41 #include "access/xact.h"
42 #include "catalog/pg_database.h"
43 #include "catalog/pg_proc.h"
44 #include "common/ip.h"
45 #include "libpq/libpq.h"
46 #include "libpq/pqsignal.h"
47 #include "mb/pg_wchar.h"
48 #include "miscadmin.h"
49 #include "pg_trace.h"
50 #include "postmaster/autovacuum.h"
51 #include "postmaster/fork_process.h"
52 #include "postmaster/postmaster.h"
53 #include "replication/walsender.h"
54 #include "storage/backendid.h"
55 #include "storage/dsm.h"
56 #include "storage/fd.h"
57 #include "storage/ipc.h"
58 #include "storage/latch.h"
59 #include "storage/lmgr.h"
60 #include "storage/pg_shmem.h"
61 #include "storage/procsignal.h"
62 #include "storage/sinvaladt.h"
63 #include "utils/ascii.h"
64 #include "utils/guc.h"
65 #include "utils/memutils.h"
66 #include "utils/ps_status.h"
67 #include "utils/rel.h"
68 #include "utils/snapmgr.h"
69 #include "utils/timestamp.h"
70 #include "utils/tqual.h"
71 
72 
73 /* ----------
74  * Timer definitions.
75  * ----------
76  */
77 #define PGSTAT_STAT_INTERVAL 500 /* Minimum time between stats file
78  * updates; in milliseconds. */
79 
80 #define PGSTAT_RETRY_DELAY 10 /* How long to wait between checks for a
81  * new file; in milliseconds. */
82 
83 #define PGSTAT_MAX_WAIT_TIME 10000 /* Maximum time to wait for a stats
84  * file update; in milliseconds. */
85 
86 #define PGSTAT_INQ_INTERVAL 640 /* How often to ping the collector for a
87  * new file; in milliseconds. */
88 
89 #define PGSTAT_RESTART_INTERVAL 60 /* How often to attempt to restart a
90  * failed statistics collector; in
91  * seconds. */
92 
93 #define PGSTAT_POLL_LOOP_COUNT (PGSTAT_MAX_WAIT_TIME / PGSTAT_RETRY_DELAY)
94 #define PGSTAT_INQ_LOOP_COUNT (PGSTAT_INQ_INTERVAL / PGSTAT_RETRY_DELAY)
95 
96 /* Minimum receive buffer size for the collector's socket. */
97 #define PGSTAT_MIN_RCVBUF (100 * 1024)
98 
99 
100 /* ----------
101  * The initial size hints for the hash tables used in the collector.
102  * ----------
103  */
104 #define PGSTAT_DB_HASH_SIZE 16
105 #define PGSTAT_TAB_HASH_SIZE 512
106 #define PGSTAT_FUNCTION_HASH_SIZE 512
107 
108 
109 /* ----------
110  * Total number of backends including auxiliary
111  *
112  * We reserve a slot for each possible BackendId, plus one for each
113  * possible auxiliary process type. (This scheme assumes there is not
114  * more than one of any auxiliary process type at a time.) MaxBackends
115  * includes autovacuum workers and background workers as well.
116  * ----------
117  */
118 #define NumBackendStatSlots (MaxBackends + NUM_AUXPROCTYPES)
119 
120 
121 /* ----------
122  * GUC parameters
123  * ----------
124  */
125 bool pgstat_track_activities = false;
126 bool pgstat_track_counts = false;
127 int pgstat_track_functions = TRACK_FUNC_OFF;
128 int pgstat_track_activity_query_size = 1024;
129 
130 /* ----------
131  * Built from GUC parameter
132  * ----------
133  */
134 char *pgstat_stat_directory = NULL;
135 char *pgstat_stat_filename = NULL;
136 char *pgstat_stat_tmpname = NULL;
137 
138 /*
139  * BgWriter global statistics counters (unused in other processes).
140  * Stored directly in a stats message structure so it can be sent
141  * without needing to copy things around. We assume this inits to zeroes.
142  */
143 PgStat_MsgBgWriter BgWriterStats;
144 
145 /* ----------
146  * Local data
147  * ----------
148  */
149 NON_EXEC_STATIC pgsocket pgStatSock = PGINVALID_SOCKET;
150 
151 static struct sockaddr_storage pgStatAddr;
152 
153 static time_t last_pgstat_start_time;
154 
155 static bool pgStatRunningInCollector = false;
156 
157 /*
158  * Structures in which backends store per-table info that's waiting to be
159  * sent to the collector.
160  *
161  * NOTE: once allocated, TabStatusArray structures are never moved or deleted
162  * for the life of the backend. Also, we zero out the t_id fields of the
163  * contained PgStat_TableStatus structs whenever they are not actively in use.
164  * This allows relcache pgstat_info pointers to be treated as long-lived data,
165  * avoiding repeated searches in pgstat_initstats() when a relation is
166  * repeatedly opened during a transaction.
167  */
168 #define TABSTAT_QUANTUM 100 /* we alloc this many at a time */
169 
170 typedef struct TabStatusArray
171 {
172  struct TabStatusArray *tsa_next; /* link to next array, if any */
173  int tsa_used; /* # entries currently used */
174  PgStat_TableStatus tsa_entries[TABSTAT_QUANTUM]; /* per-table data */
175 } TabStatusArray;
176 
177 static TabStatusArray *pgStatTabList = NULL;
178 
179 /*
180  * pgStatTabHash entry: map from relation OID to PgStat_TableStatus pointer
181  */
182 typedef struct TabStatHashEntry
183 {
184  Oid t_id;
185  PgStat_TableStatus *tsa_entry;
186 } TabStatHashEntry;
187 
188 /*
189  * Hash table for O(1) t_id -> tsa_entry lookup
190  */
191 static HTAB *pgStatTabHash = NULL;
192 
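
The two structures above implement a common pattern: per-table entries live in fixed-size arrays that are never moved or freed for the life of the backend (so cached pointers stay valid), while a separate hash table gives O(1) lookup by OID. A minimal, self-contained sketch of that pattern in plain C (illustrative only; a linear scan stands in for the real dynahash table, and all names are invented):

#include <stdlib.h>

#define CHUNK_QUANTUM 100               /* like TABSTAT_QUANTUM */

typedef struct Entry
{
    unsigned int id;                    /* 0 means "slot unused" */
    long        count;
} Entry;

typedef struct Chunk
{
    struct Chunk *next;                 /* link to next chunk, if any */
    int         used;                   /* # entries currently used */
    Entry       entries[CHUNK_QUANTUM];
} Chunk;

static Chunk *chunk_list = NULL;

/* Return the entry for "id", creating it if necessary.  Existing entries
 * never move, so callers may cache the returned pointer. */
static Entry *
get_entry(unsigned int id)
{
    Chunk     **link = &chunk_list;
    Chunk      *c;

    /* lookup; pgstat.c uses a hash table here instead of scanning */
    for (c = chunk_list; c != NULL; c = c->next)
        for (int i = 0; i < c->used; i++)
            if (c->entries[i].id == id)
                return &c->entries[i];

    /* find a chunk with free space, appending a new chunk if needed */
    while (*link != NULL && (*link)->used >= CHUNK_QUANTUM)
        link = &(*link)->next;
    if (*link == NULL && (*link = calloc(1, sizeof(Chunk))) == NULL)
        abort();

    c = *link;
    c->entries[c->used].id = id;
    return &c->entries[c->used++];
}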
193 /*
194  * Backends store per-function info that's waiting to be sent to the collector
195  * in this hash table (indexed by function OID).
196  */
197 static HTAB *pgStatFunctions = NULL;
198 
199 /*
200  * Indicates if backend has some function stats that it hasn't yet
201  * sent to the collector.
202  */
203 static bool have_function_stats = false;
204 
205 /*
206  * Tuple insertion/deletion counts for an open transaction can't be propagated
207  * into PgStat_TableStatus counters until we know if it is going to commit
208  * or abort. Hence, we keep these counts in per-subxact structs that live
209  * in TopTransactionContext. This data structure is designed on the assumption
210  * that subxacts won't usually modify very many tables.
211  */
212 typedef struct PgStat_SubXactStatus
213 {
214  int nest_level; /* subtransaction nest level */
215  struct PgStat_SubXactStatus *prev; /* higher-level subxact if any */
216  PgStat_TableXactStatus *first; /* head of list for this subxact */
217 } PgStat_SubXactStatus;
218 
219 static PgStat_SubXactStatus *pgStatXactStack = NULL;
220 
221 static int pgStatXactCommit = 0;
222 static int pgStatXactRollback = 0;
223 PgStat_Counter pgStatBlockReadTime = 0;
224 PgStat_Counter pgStatBlockWriteTime = 0;
225 
226 /* Record that's written to 2PC state file when pgstat state is persisted */
227 typedef struct TwoPhasePgStatRecord
228 {
229  PgStat_Counter tuples_inserted; /* tuples inserted in xact */
230  PgStat_Counter tuples_updated; /* tuples updated in xact */
231  PgStat_Counter tuples_deleted; /* tuples deleted in xact */
232  PgStat_Counter inserted_pre_trunc; /* tuples inserted prior to truncate */
233  PgStat_Counter updated_pre_trunc; /* tuples updated prior to truncate */
234  PgStat_Counter deleted_pre_trunc; /* tuples deleted prior to truncate */
235  Oid t_id; /* table's OID */
236  bool t_shared; /* is it a shared catalog? */
237  bool t_truncated; /* was the relation truncated? */
238 } TwoPhasePgStatRecord;
239 
240 /*
241  * Info about current "snapshot" of stats file
242  */
243 static MemoryContext pgStatLocalContext = NULL;
244 static HTAB *pgStatDBHash = NULL;
245 
246 /* Status for backends including auxiliary */
247 static LocalPgBackendStatus *localBackendStatusTable = NULL;
248 
249 /* Total number of backends including auxiliary */
250 static int localNumBackends = 0;
251 
252 /*
253  * Cluster wide statistics, kept in the stats collector.
254  * Contains statistics that are not collected per database
255  * or per table.
256  */
257 static PgStat_ArchiverStats archiverStats;
258 static PgStat_GlobalStats globalStats;
259 
260 /*
261  * List of OIDs of databases we need to write out. If an entry is InvalidOid,
262  * it means to write only the shared-catalog stats ("DB 0"); otherwise, we
263  * will write both that DB's data and the shared stats.
264  */
265 static List *pending_write_requests = NIL;
266 
267 /* Signal handler flags */
268 static volatile bool need_exit = false;
269 static volatile bool got_SIGHUP = false;
270 
271 /*
272  * Total time charged to functions so far in the current backend.
273  * We use this to help separate "self" and "other" time charges.
274  * (We assume this initializes to zero.)
275  */
276 static instr_time total_func_time;
277 
278 
279 /* ----------
280  * Local function forward declarations
281  * ----------
282  */
283 #ifdef EXEC_BACKEND
284 static pid_t pgstat_forkexec(void);
285 #endif
286 
287 NON_EXEC_STATIC void PgstatCollectorMain(int argc, char *argv[]) pg_attribute_noreturn();
288 static void pgstat_exit(SIGNAL_ARGS);
289 static void pgstat_beshutdown_hook(int code, Datum arg);
290 static void pgstat_sighup_handler(SIGNAL_ARGS);
291 
292 static PgStat_StatDBEntry *pgstat_get_db_entry(Oid databaseid, bool create);
293 static PgStat_StatTabEntry *pgstat_get_tab_entry(PgStat_StatDBEntry *dbentry,
294  Oid tableoid, bool create);
295 static void pgstat_write_statsfiles(bool permanent, bool allDbs);
296 static void pgstat_write_db_statsfile(PgStat_StatDBEntry *dbentry, bool permanent);
297 static HTAB *pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep);
298 static void pgstat_read_db_statsfile(Oid databaseid, HTAB *tabhash, HTAB *funchash, bool permanent);
299 static void backend_read_statsfile(void);
300 static void pgstat_read_current_status(void);
301 
302 static bool pgstat_write_statsfile_needed(void);
303 static bool pgstat_db_requested(Oid databaseid);
304 
305 static void pgstat_send_tabstat(PgStat_MsgTabstat *tsmsg);
306 static void pgstat_send_funcstats(void);
307 static HTAB *pgstat_collect_oids(Oid catalogid);
308 
309 static PgStat_TableStatus *get_tabstat_entry(Oid rel_id, bool isshared);
310 
311 static void pgstat_setup_memcxt(void);
312 
313 static const char *pgstat_get_wait_activity(WaitEventActivity w);
314 static const char *pgstat_get_wait_client(WaitEventClient w);
315 static const char *pgstat_get_wait_ipc(WaitEventIPC w);
316 static const char *pgstat_get_wait_timeout(WaitEventTimeout w);
317 static const char *pgstat_get_wait_io(WaitEventIO w);
318 
319 static void pgstat_setheader(PgStat_MsgHdr *hdr, StatMsgType mtype);
320 static void pgstat_send(void *msg, int len);
321 
322 static void pgstat_recv_inquiry(PgStat_MsgInquiry *msg, int len);
323 static void pgstat_recv_tabstat(PgStat_MsgTabstat *msg, int len);
324 static void pgstat_recv_tabpurge(PgStat_MsgTabpurge *msg, int len);
325 static void pgstat_recv_dropdb(PgStat_MsgDropdb *msg, int len);
326 static void pgstat_recv_resetcounter(PgStat_MsgResetcounter *msg, int len);
327 static void pgstat_recv_resetsharedcounter(PgStat_MsgResetsharedcounter *msg, int len);
328 static void pgstat_recv_resetsinglecounter(PgStat_MsgResetsinglecounter *msg, int len);
329 static void pgstat_recv_autovac(PgStat_MsgAutovacStart *msg, int len);
330 static void pgstat_recv_vacuum(PgStat_MsgVacuum *msg, int len);
331 static void pgstat_recv_analyze(PgStat_MsgAnalyze *msg, int len);
332 static void pgstat_recv_archiver(PgStat_MsgArchiver *msg, int len);
333 static void pgstat_recv_bgwriter(PgStat_MsgBgWriter *msg, int len);
334 static void pgstat_recv_funcstat(PgStat_MsgFuncstat *msg, int len);
335 static void pgstat_recv_funcpurge(PgStat_MsgFuncpurge *msg, int len);
336 static void pgstat_recv_recoveryconflict(PgStat_MsgRecoveryConflict *msg, int len);
337 static void pgstat_recv_deadlock(PgStat_MsgDeadlock *msg, int len);
338 static void pgstat_recv_tempfile(PgStat_MsgTempFile *msg, int len);
339 
340 /* ------------------------------------------------------------
341  * Public functions called from postmaster follow
342  * ------------------------------------------------------------
343  */
344 
345 /* ----------
346  * pgstat_init() -
347  *
348  * Called from postmaster at startup. Create the resources required
349  * by the statistics collector process. If unable to do so, do not
350  * fail --- better to let the postmaster start with stats collection
351  * disabled.
352  * ----------
353  */
354 void
355 pgstat_init(void)
356 {
357  ACCEPT_TYPE_ARG3 alen;
358  struct addrinfo *addrs = NULL,
359  *addr,
360  hints;
361  int ret;
362  fd_set rset;
363  struct timeval tv;
364  char test_byte;
365  int sel_res;
366  int tries = 0;
367 
368 #define TESTBYTEVAL ((char) 199)
369 
370  /*
371  * This static assertion verifies that we didn't mess up the calculations
372  * involved in selecting maximum payload sizes for our UDP messages.
373  * Because the only consequence of overrunning PGSTAT_MAX_MSG_SIZE would
374  * be silent performance loss from fragmentation, it seems worth having a
375  * compile-time cross-check that we didn't.
376  */
377  StaticAssertStmt(sizeof(PgStat_Msg) <= PGSTAT_MAX_MSG_SIZE,
378  "maximum stats message size exceeds PGSTAT_MAX_MSG_SIZE");
379 
380  /*
381  * Create the UDP socket for sending and receiving statistic messages
382  */
383  hints.ai_flags = AI_PASSIVE;
384  hints.ai_family = AF_UNSPEC;
385  hints.ai_socktype = SOCK_DGRAM;
386  hints.ai_protocol = 0;
387  hints.ai_addrlen = 0;
388  hints.ai_addr = NULL;
389  hints.ai_canonname = NULL;
390  hints.ai_next = NULL;
391  ret = pg_getaddrinfo_all("localhost", NULL, &hints, &addrs);
392  if (ret || !addrs)
393  {
394  ereport(LOG,
395  (errmsg("could not resolve \"localhost\": %s",
396  gai_strerror(ret))));
397  goto startup_failed;
398  }
399 
400  /*
401  * On some platforms, pg_getaddrinfo_all() may return multiple addresses
402  * only one of which will actually work (eg, both IPv6 and IPv4 addresses
403  when the kernel will reject IPv6). Worse, the failure may occur at the
404  * bind() or perhaps even connect() stage. So we must loop through the
405  * results till we find a working combination. We will generate LOG
406  * messages, but no error, for bogus combinations.
407  */
408  for (addr = addrs; addr; addr = addr->ai_next)
409  {
410 #ifdef HAVE_UNIX_SOCKETS
411  /* Ignore AF_UNIX sockets, if any are returned. */
412  if (addr->ai_family == AF_UNIX)
413  continue;
414 #endif
415 
416  if (++tries > 1)
417  ereport(LOG,
418  (errmsg("trying another address for the statistics collector")));
419 
420  /*
421  * Create the socket.
422  */
423  if ((pgStatSock = socket(addr->ai_family, SOCK_DGRAM, 0)) == PGINVALID_SOCKET)
424  {
425  ereport(LOG,
427  errmsg("could not create socket for statistics collector: %m")));
428  continue;
429  }
430 
431  /*
432  * Bind it to a kernel assigned port on localhost and get the assigned
433  * port via getsockname().
434  */
435  if (bind(pgStatSock, addr->ai_addr, addr->ai_addrlen) < 0)
436  {
437  ereport(LOG,
439  errmsg("could not bind socket for statistics collector: %m")));
442  continue;
443  }
444 
445  alen = sizeof(pgStatAddr);
446  if (getsockname(pgStatSock, (struct sockaddr *) &pgStatAddr, &alen) < 0)
447  {
448  ereport(LOG,
450  errmsg("could not get address of socket for statistics collector: %m")));
453  continue;
454  }
455 
456  /*
457  * Connect the socket to its own address. This saves a few cycles by
458  * not having to respecify the target address on every send. This also
459  * provides a kernel-level check that only packets from this same
460  * address will be received.
461  */
462  if (connect(pgStatSock, (struct sockaddr *) &pgStatAddr, alen) < 0)
463  {
464  ereport(LOG,
466  errmsg("could not connect socket for statistics collector: %m")));
469  continue;
470  }
471 
472  /*
473  * Try to send and receive a one-byte test message on the socket. This
474  * is to catch situations where the socket can be created but will not
475  * actually pass data (for instance, because kernel packet filtering
476  * rules prevent it).
477  */
478  test_byte = TESTBYTEVAL;
479 
480 retry1:
481  if (send(pgStatSock, &test_byte, 1, 0) != 1)
482  {
483  if (errno == EINTR)
484  goto retry1; /* if interrupted, just retry */
485  ereport(LOG,
487  errmsg("could not send test message on socket for statistics collector: %m")));
490  continue;
491  }
492 
493  /*
494  * There could possibly be a little delay before the message can be
495  * received. We arbitrarily allow up to half a second before deciding
496  * it's broken.
497  */
498  for (;;) /* need a loop to handle EINTR */
499  {
500  FD_ZERO(&rset);
501  FD_SET(pgStatSock, &rset);
502 
503  tv.tv_sec = 0;
504  tv.tv_usec = 500000;
505  sel_res = select(pgStatSock + 1, &rset, NULL, NULL, &tv);
506  if (sel_res >= 0 || errno != EINTR)
507  break;
508  }
509  if (sel_res < 0)
510  {
511  ereport(LOG,
513  errmsg("select() failed in statistics collector: %m")));
516  continue;
517  }
518  if (sel_res == 0 || !FD_ISSET(pgStatSock, &rset))
519  {
520  /*
521  * This is the case we actually think is likely, so take pains to
522  * give a specific message for it.
523  *
524  * errno will not be set meaningfully here, so don't use it.
525  */
526  ereport(LOG,
527  (errcode(ERRCODE_CONNECTION_FAILURE),
528  errmsg("test message did not get through on socket for statistics collector")));
531  continue;
532  }
533 
534  test_byte++; /* just make sure variable is changed */
535 
536 retry2:
537  if (recv(pgStatSock, &test_byte, 1, 0) != 1)
538  {
539  if (errno == EINTR)
540  goto retry2; /* if interrupted, just retry */
541  ereport(LOG,
543  errmsg("could not receive test message on socket for statistics collector: %m")));
546  continue;
547  }
548 
549  if (test_byte != TESTBYTEVAL) /* strictly paranoia ... */
550  {
551  ereport(LOG,
552  (errcode(ERRCODE_INTERNAL_ERROR),
553  errmsg("incorrect test message transmission on socket for statistics collector")));
556  continue;
557  }
558 
559  /* If we get here, we have a working socket */
560  break;
561  }
562 
563  /* Did we find a working address? */
564  if (!addr || pgStatSock == PGINVALID_SOCKET)
565  goto startup_failed;
566 
567  /*
568  * Set the socket to non-blocking IO. This ensures that if the collector
569  * falls behind, statistics messages will be discarded; backends won't
570  * block waiting to send messages to the collector.
571  */
572  if (!pg_set_noblock(pgStatSock))
573  {
574  ereport(LOG,
576  errmsg("could not set statistics collector socket to nonblocking mode: %m")));
577  goto startup_failed;
578  }
579 
580  /*
581  * Try to ensure that the socket's receive buffer is at least
582  * PGSTAT_MIN_RCVBUF bytes, so that it won't easily overflow and lose
583  * data. Use of UDP protocol means that we are willing to lose data under
584  * heavy load, but we don't want it to happen just because of ridiculously
585  * small default buffer sizes (such as 8KB on older Windows versions).
586  */
587  {
588  int old_rcvbuf;
589  int new_rcvbuf;
590  ACCEPT_TYPE_ARG3 rcvbufsize = sizeof(old_rcvbuf);
591 
592  if (getsockopt(pgStatSock, SOL_SOCKET, SO_RCVBUF,
593  (char *) &old_rcvbuf, &rcvbufsize) < 0)
594  {
595  elog(LOG, "getsockopt(SO_RCVBUF) failed: %m");
596  /* if we can't get existing size, always try to set it */
597  old_rcvbuf = 0;
598  }
599 
600  new_rcvbuf = PGSTAT_MIN_RCVBUF;
601  if (old_rcvbuf < new_rcvbuf)
602  {
603  if (setsockopt(pgStatSock, SOL_SOCKET, SO_RCVBUF,
604  (char *) &new_rcvbuf, sizeof(new_rcvbuf)) < 0)
605  elog(LOG, "setsockopt(SO_RCVBUF) failed: %m");
606  }
607  }
608 
609  pg_freeaddrinfo_all(hints.ai_family, addrs);
610 
611  return;
612 
613 startup_failed:
614  ereport(LOG,
615  (errmsg("disabling statistics collector for lack of working socket")));
616 
617  if (addrs)
618  pg_freeaddrinfo_all(hints.ai_family, addrs);
619 
620  if (pgStatSock != PGINVALID_SOCKET)
621  closesocket(pgStatSock);
622  pgStatSock = PGINVALID_SOCKET;
623 
624  /*
625  * Adjust GUC variables to suppress useless activity, and for debugging
626  * purposes (seeing track_counts off is a clue that we failed here). We
627  * use PGC_S_OVERRIDE because there is no point in trying to turn it back
628  * on from postgresql.conf without a restart.
629  */
630  SetConfigOption("track_counts", "off", PGC_INTERNAL, PGC_S_OVERRIDE);
631 }
632 
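
The socket setup above packs several techniques into one loop: bind to a kernel-chosen loopback port, connect the socket to its own address so plain send()/recv() can be used and stray packets are rejected, then prove the path works by bouncing a probe byte with a bounded select() wait. A stripped-down, self-contained sketch of the same sequence (IPv4 only, no EINTR retry or getaddrinfo iteration, errors reduced to a return code; invented helper name, not PostgreSQL code):

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/select.h>
#include <sys/time.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Return a connected loopback UDP socket, or -1 if the probe fails. */
static int
make_loopback_udp(void)
{
    struct sockaddr_in addr;
    socklen_t   alen = sizeof(addr);
    char        probe = 'x';
    fd_set      rset;
    struct timeval tv;
    int         fd;

    if ((fd = socket(AF_INET, SOCK_DGRAM, 0)) < 0)
        return -1;

    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
    addr.sin_port = 0;                  /* let the kernel pick a port */

    /* bind, learn the assigned port, then connect the socket to itself */
    if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0 ||
        getsockname(fd, (struct sockaddr *) &addr, &alen) < 0 ||
        connect(fd, (struct sockaddr *) &addr, alen) < 0)
        goto fail;

    /* send a probe byte and wait up to half a second for it to come back */
    if (send(fd, &probe, 1, 0) != 1)
        goto fail;
    FD_ZERO(&rset);
    FD_SET(fd, &rset);
    tv.tv_sec = 0;
    tv.tv_usec = 500000;
    if (select(fd + 1, &rset, NULL, NULL, &tv) <= 0 ||
        recv(fd, &probe, 1, 0) != 1)
        goto fail;

    return fd;

fail:
    close(fd);
    return -1;
}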
633 /*
634  * subroutine for pgstat_reset_all
635  */
636 static void
637 pgstat_reset_remove_files(const char *directory)
638 {
639  DIR *dir;
640  struct dirent *entry;
641  char fname[MAXPGPATH * 2];
642 
643  dir = AllocateDir(directory);
644  while ((entry = ReadDir(dir, directory)) != NULL)
645  {
646  int nchars;
647  Oid tmp_oid;
648 
649  /*
650  * Skip directory entries that don't match the file names we write.
651  * See get_dbstat_filename for the database-specific pattern.
652  */
653  if (strncmp(entry->d_name, "global.", 7) == 0)
654  nchars = 7;
655  else
656  {
657  nchars = 0;
658  (void) sscanf(entry->d_name, "db_%u.%n",
659  &tmp_oid, &nchars);
660  if (nchars <= 0)
661  continue;
662  /* %u allows leading whitespace, so reject that */
663  if (strchr("0123456789", entry->d_name[3]) == NULL)
664  continue;
665  }
666 
667  if (strcmp(entry->d_name + nchars, "tmp") != 0 &&
668  strcmp(entry->d_name + nchars, "stat") != 0)
669  continue;
670 
671  snprintf(fname, sizeof(fname), "%s/%s", directory,
672  entry->d_name);
673  unlink(fname);
674  }
675  FreeDir(dir);
676 }
677 
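
The directory scan above leans on a small sscanf() trick: a trailing %n records how many characters the "db_<oid>." prefix consumed, so the remainder can be compared against "stat"/"tmp", and the explicit digit check closes the loophole that %u would also accept leading whitespace or a sign. A self-contained illustration of just that matching logic (hypothetical helper, not part of pgstat.c):

#include <stdio.h>
#include <string.h>

/* Return 1 if "name" looks like "db_<oid>.stat" or "db_<oid>.tmp". */
static int
is_db_stats_file(const char *name)
{
    unsigned int oid;
    int         nchars = 0;

    /* %n stores how much of the string the prefix consumed */
    if (sscanf(name, "db_%u.%n", &oid, &nchars) < 1 || nchars <= 0)
        return 0;
    (void) oid;                 /* only the syntactic match matters here */
    /* %u allows leading whitespace or a sign, so insist on a digit */
    if (strchr("0123456789", name[3]) == NULL)
        return 0;
    return strcmp(name + nchars, "stat") == 0 ||
        strcmp(name + nchars, "tmp") == 0;
}

int
main(void)
{
    printf("%d %d %d\n",
           is_db_stats_file("db_16384.stat"),   /* 1 */
           is_db_stats_file("db_16384.tmp"),    /* 1 */
           is_db_stats_file("db_-1.stat"));     /* 0 */
    return 0;
}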
678 /*
679  * pgstat_reset_all() -
680  *
681  * Remove the stats files. This is currently used only if WAL
682  * recovery is needed after a crash.
683  */
684 void
685 pgstat_reset_all(void)
686 {
687  pgstat_reset_remove_files(pgstat_stat_directory);
688  pgstat_reset_remove_files(PGSTAT_STAT_PERMANENT_DIRECTORY);
689 }
690 
691 #ifdef EXEC_BACKEND
692 
693 /*
694  * pgstat_forkexec() -
695  *
696  * Format up the arglist for, then fork and exec, statistics collector process
697  */
698 static pid_t
699 pgstat_forkexec(void)
700 {
701  char *av[10];
702  int ac = 0;
703 
704  av[ac++] = "postgres";
705  av[ac++] = "--forkcol";
706  av[ac++] = NULL; /* filled in by postmaster_forkexec */
707 
708  av[ac] = NULL;
709  Assert(ac < lengthof(av));
710 
711  return postmaster_forkexec(ac, av);
712 }
713 #endif /* EXEC_BACKEND */
714 
715 
716 /*
717  * pgstat_start() -
718  *
719  * Called from postmaster at startup or after an existing collector
720  * died. Attempt to fire up a fresh statistics collector.
721  *
722  * Returns PID of child process, or 0 if fail.
723  *
724  * Note: if fail, we will be called again from the postmaster main loop.
725  */
726 int
727 pgstat_start(void)
728 {
729  time_t curtime;
730  pid_t pgStatPid;
731 
732  /*
733  * Check that the socket is there, else pgstat_init failed and we can do
734  * nothing useful.
735  */
736  if (pgStatSock == PGINVALID_SOCKET)
737  return 0;
738 
739  /*
740  * Do nothing if too soon since last collector start. This is a safety
741  * valve to protect against continuous respawn attempts if the collector
742  * is dying immediately at launch. Note that since we will be re-called
743  * from the postmaster main loop, we will get another chance later.
744  */
745  curtime = time(NULL);
746  if ((unsigned int) (curtime - last_pgstat_start_time) <
747  (unsigned int) PGSTAT_RESTART_INTERVAL)
748  return 0;
749  last_pgstat_start_time = curtime;
750 
751  /*
752  * Okay, fork off the collector.
753  */
754 #ifdef EXEC_BACKEND
755  switch ((pgStatPid = pgstat_forkexec()))
756 #else
757  switch ((pgStatPid = fork_process()))
758 #endif
759  {
760  case -1:
761  ereport(LOG,
762  (errmsg("could not fork statistics collector: %m")));
763  return 0;
764 
765 #ifndef EXEC_BACKEND
766  case 0:
767  /* in postmaster child ... */
769 
770  /* Close the postmaster's sockets */
771  ClosePostmasterPorts(false);
772 
773  /* Drop our connection to postmaster's shared memory, as well */
774  dsm_detach_all();
776 
777  PgstatCollectorMain(0, NULL);
778  break;
779 #endif
780 
781  default:
782  return (int) pgStatPid;
783  }
784 
785  /* shouldn't get here */
786  return 0;
787 }
788 
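
pgstat_start() throttles respawn attempts with nothing more than a timestamp comparison: remember when we last tried, and refuse to try again within PGSTAT_RESTART_INTERVAL seconds; resetting the saved time to zero (as allow_immediate_pgstat_restart() below does) re-arms the guard. The same check in isolation, as a toy sketch with invented names:

#include <stdio.h>
#include <time.h>

#define RESTART_INTERVAL 60             /* seconds, like PGSTAT_RESTART_INTERVAL */

static time_t last_start_time;          /* 0 until the first attempt */

/* Return 1 if a (re)start attempt is allowed now, recording the attempt. */
static int
restart_allowed(void)
{
    time_t      curtime = time(NULL);

    if ((unsigned int) (curtime - last_start_time) <
        (unsigned int) RESTART_INTERVAL)
        return 0;                       /* too soon since the last attempt */
    last_start_time = curtime;
    return 1;
}

int
main(void)
{
    printf("%d\n", restart_allowed());  /* 1: first attempt is allowed */
    printf("%d\n", restart_allowed());  /* 0: immediately after, it is not */
    return 0;
}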
789 void
790 allow_immediate_pgstat_restart(void)
791 {
792  last_pgstat_start_time = 0;
793 }
794 
795 /* ------------------------------------------------------------
796  * Public functions used by backends follow
797  *------------------------------------------------------------
798  */
799 
800 
801 /* ----------
802  * pgstat_report_stat() -
803  *
804  * Must be called by processes that perform DML: tcop/postgres.c, logical
805  * receiver processes, SPI worker, etc. to send the so far collected
806  * per-table and function usage statistics to the collector. Note that this
807  * is called only when not within a transaction, so it is fair to use
808  * transaction stop time as an approximation of current time.
809  * ----------
810  */
811 void
812 pgstat_report_stat(bool force)
813 {
814  /* we assume this inits to all zeroes: */
815  static const PgStat_TableCounts all_zeroes;
816  static TimestampTz last_report = 0;
817 
818  TimestampTz now;
819  PgStat_MsgTabstat regular_msg;
820  PgStat_MsgTabstat shared_msg;
821  TabStatusArray *tsa;
822  int i;
823 
824  /* Don't expend a clock check if nothing to do */
825  if ((pgStatTabList == NULL || pgStatTabList->tsa_used == 0) &&
826  pgStatXactCommit == 0 && pgStatXactRollback == 0 &&
827  !have_function_stats)
828  return;
829 
830  /*
831  * Don't send a message unless it's been at least PGSTAT_STAT_INTERVAL
832  * msec since we last sent one, or the caller wants to force stats out.
833  */
834  now = GetCurrentTransactionStopTimestamp();
835  if (!force &&
836  !TimestampDifferenceExceeds(last_report, now, PGSTAT_STAT_INTERVAL))
837  return;
838  last_report = now;
839 
840  /*
841  * Destroy pgStatTabHash before we start invalidating PgStat_TableEntry
842  * entries it points to. (Should we fail partway through the loop below,
843  * it's okay to have removed the hashtable already --- the only
844  * consequence is we'd get multiple entries for the same table in the
845  * pgStatTabList, and that's safe.)
846  */
847  if (pgStatTabHash)
848  hash_destroy(pgStatTabHash);
849  pgStatTabHash = NULL;
850 
851  /*
852  * Scan through the TabStatusArray struct(s) to find tables that actually
853  * have counts, and build messages to send. We have to separate shared
854  * relations from regular ones because the databaseid field in the message
855  * header has to depend on that.
856  */
857  regular_msg.m_databaseid = MyDatabaseId;
858  shared_msg.m_databaseid = InvalidOid;
859  regular_msg.m_nentries = 0;
860  shared_msg.m_nentries = 0;
861 
862  for (tsa = pgStatTabList; tsa != NULL; tsa = tsa->tsa_next)
863  {
864  for (i = 0; i < tsa->tsa_used; i++)
865  {
866  PgStat_TableStatus *entry = &tsa->tsa_entries[i];
867  PgStat_MsgTabstat *this_msg;
868  PgStat_TableEntry *this_ent;
869 
870  /* Shouldn't have any pending transaction-dependent counts */
871  Assert(entry->trans == NULL);
872 
873  /*
874  * Ignore entries that didn't accumulate any actual counts, such
875  * as indexes that were opened by the planner but not used.
876  */
877  if (memcmp(&entry->t_counts, &all_zeroes,
878  sizeof(PgStat_TableCounts)) == 0)
879  continue;
880 
881  /*
882  * OK, insert data into the appropriate message, and send if full.
883  */
884  this_msg = entry->t_shared ? &shared_msg : &regular_msg;
885  this_ent = &this_msg->m_entry[this_msg->m_nentries];
886  this_ent->t_id = entry->t_id;
887  memcpy(&this_ent->t_counts, &entry->t_counts,
888  sizeof(PgStat_TableCounts));
889  if (++this_msg->m_nentries >= PGSTAT_NUM_TABENTRIES)
890  {
891  pgstat_send_tabstat(this_msg);
892  this_msg->m_nentries = 0;
893  }
894  }
895  /* zero out TableStatus structs after use */
896  MemSet(tsa->tsa_entries, 0,
897  tsa->tsa_used * sizeof(PgStat_TableStatus));
898  tsa->tsa_used = 0;
899  }
900 
901  /*
902  * Send partial messages. Make sure that any pending xact commit/abort
903  * gets counted, even if there are no table stats to send.
904  */
905  if (regular_msg.m_nentries > 0 ||
906  pgStatXactCommit > 0 || pgStatXactRollback > 0)
907  pgstat_send_tabstat(&regular_msg);
908  if (shared_msg.m_nentries > 0)
909  pgstat_send_tabstat(&shared_msg);
910 
911  /* Now, send function statistics */
912  pgstat_send_funcstats();
913 }
914 
915 /*
916  * Subroutine for pgstat_report_stat: finish and send a tabstat message
917  */
918 static void
919 pgstat_send_tabstat(PgStat_MsgTabstat *tsmsg)
920 {
921  int n;
922  int len;
923 
924  /* It's unlikely we'd get here with no socket, but maybe not impossible */
925  if (pgStatSock == PGINVALID_SOCKET)
926  return;
927 
928  /*
929  * Report and reset accumulated xact commit/rollback and I/O timings
930  * whenever we send a normal tabstat message
931  */
932  if (OidIsValid(tsmsg->m_databaseid))
933  {
934  tsmsg->m_xact_commit = pgStatXactCommit;
935  tsmsg->m_xact_rollback = pgStatXactRollback;
936  tsmsg->m_block_read_time = pgStatBlockReadTime;
937  tsmsg->m_block_write_time = pgStatBlockWriteTime;
938  pgStatXactCommit = 0;
939  pgStatXactRollback = 0;
940  pgStatBlockReadTime = 0;
941  pgStatBlockWriteTime = 0;
942  }
943  else
944  {
945  tsmsg->m_xact_commit = 0;
946  tsmsg->m_xact_rollback = 0;
947  tsmsg->m_block_read_time = 0;
948  tsmsg->m_block_write_time = 0;
949  }
950 
951  n = tsmsg->m_nentries;
952  len = offsetof(PgStat_MsgTabstat, m_entry[0]) +
953  n * sizeof(PgStat_TableEntry);
954 
955  pgstat_setheader(&tsmsg->m_hdr, PGSTAT_MTYPE_TABSTAT);
956  pgstat_send(tsmsg, len);
957 }
958 
959 /*
960  * Subroutine for pgstat_report_stat: populate and send a function stat message
961  */
962 static void
963 pgstat_send_funcstats(void)
964 {
965  /* we assume this inits to all zeroes: */
966  static const PgStat_FunctionCounts all_zeroes;
967 
968  PgStat_MsgFuncstat msg;
969  PgStat_BackendFunctionEntry *entry;
970  HASH_SEQ_STATUS fstat;
971 
972  if (pgStatFunctions == NULL)
973  return;
974 
975  pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_FUNCSTAT);
976  msg.m_databaseid = MyDatabaseId;
977  msg.m_nentries = 0;
978 
979  hash_seq_init(&fstat, pgStatFunctions);
980  while ((entry = (PgStat_BackendFunctionEntry *) hash_seq_search(&fstat)) != NULL)
981  {
982  PgStat_FunctionEntry *m_ent;
983 
984  /* Skip it if no counts accumulated since last time */
985  if (memcmp(&entry->f_counts, &all_zeroes,
986  sizeof(PgStat_FunctionCounts)) == 0)
987  continue;
988 
989  /* need to convert format of time accumulators */
990  m_ent = &msg.m_entry[msg.m_nentries];
991  m_ent->f_id = entry->f_id;
992  m_ent->f_numcalls = entry->f_counts.f_numcalls;
993  m_ent->f_total_time = INSTR_TIME_GET_MICROSEC(entry->f_counts.f_total_time);
994  m_ent->f_self_time = INSTR_TIME_GET_MICROSEC(entry->f_counts.f_self_time);
995 
996  if (++msg.m_nentries >= PGSTAT_NUM_FUNCENTRIES)
997  {
998  pgstat_send(&msg, offsetof(PgStat_MsgFuncstat, m_entry[0]) +
999  msg.m_nentries * sizeof(PgStat_FunctionEntry));
1000  msg.m_nentries = 0;
1001  }
1002 
1003  /* reset the entry's counts */
1004  MemSet(&entry->f_counts, 0, sizeof(PgStat_FunctionCounts));
1005  }
1006 
1007  if (msg.m_nentries > 0)
1008  pgstat_send(&msg, offsetof(PgStat_MsgFuncstat, m_entry[0]) +
1009  msg.m_nentries * sizeof(PgStat_FunctionEntry));
1010 
1011  have_function_stats = false;
1012 }
1013 
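
Both pgstat_report_stat() and pgstat_send_funcstats() use the same batching shape: append entries to a fixed-capacity message, flush whenever it fills, and flush whatever is left at the end so a partial final batch is never lost. Reduced to its skeleton (send_msg() is a hypothetical stand-in for pgstat_send(); illustrative only):

#include <stdio.h>

#define MSG_CAPACITY 8

typedef struct Msg
{
    int         n;
    int         items[MSG_CAPACITY];
} Msg;

static void
send_msg(const Msg *msg)
{
    printf("sending %d item(s)\n", msg->n);
}

int
main(void)
{
    Msg         msg = {0};

    for (int item = 0; item < 20; item++)
    {
        msg.items[msg.n] = item;
        if (++msg.n >= MSG_CAPACITY)
        {
            send_msg(&msg);
            msg.n = 0;
        }
    }
    if (msg.n > 0)              /* don't forget the partial final message */
        send_msg(&msg);
    return 0;
}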
1014 
1015 /* ----------
1016  * pgstat_vacuum_stat() -
1017  *
1018  * Tell the collector about objects it can get rid of.
1019  * ----------
1020  */
1021 void
1022 pgstat_vacuum_stat(void)
1023 {
1024  HTAB *htab;
1025  PgStat_MsgTabpurge msg;
1026  PgStat_MsgFuncpurge f_msg;
1027  HASH_SEQ_STATUS hstat;
1028  PgStat_StatDBEntry *dbentry;
1029  PgStat_StatTabEntry *tabentry;
1030  PgStat_StatFuncEntry *funcentry;
1031  int len;
1032 
1034  return;
1035 
1036  /*
1037  * If not done for this transaction, read the statistics collector stats
1038  * file into some hash tables.
1039  */
1041 
1042  /*
1043  * Read pg_database and make a list of OIDs of all existing databases
1044  */
1046 
1047  /*
1048  * Search the database hash table for dead databases and tell the
1049  * collector to drop them.
1050  */
1051  hash_seq_init(&hstat, pgStatDBHash);
1052  while ((dbentry = (PgStat_StatDBEntry *) hash_seq_search(&hstat)) != NULL)
1053  {
1054  Oid dbid = dbentry->databaseid;
1055 
1057 
1058  /* the DB entry for shared tables (with InvalidOid) is never dropped */
1059  if (OidIsValid(dbid) &&
1060  hash_search(htab, (void *) &dbid, HASH_FIND, NULL) == NULL)
1061  pgstat_drop_database(dbid);
1062  }
1063 
1064  /* Clean up */
1065  hash_destroy(htab);
1066 
1067  /*
1068  * Lookup our own database entry; if not found, nothing more to do.
1069  */
1070  dbentry = (PgStat_StatDBEntry *) hash_search(pgStatDBHash,
1071  (void *) &MyDatabaseId,
1072  HASH_FIND, NULL);
1073  if (dbentry == NULL || dbentry->tables == NULL)
1074  return;
1075 
1076  /*
1077  * Similarly to above, make a list of all known relations in this DB.
1078  */
1080 
1081  /*
1082  * Initialize our messages table counter to zero
1083  */
1084  msg.m_nentries = 0;
1085 
1086  /*
1087  * Check for all tables listed in stats hashtable if they still exist.
1088  */
1089  hash_seq_init(&hstat, dbentry->tables);
1090  while ((tabentry = (PgStat_StatTabEntry *) hash_seq_search(&hstat)) != NULL)
1091  {
1092  Oid tabid = tabentry->tableid;
1093 
1095 
1096  if (hash_search(htab, (void *) &tabid, HASH_FIND, NULL) != NULL)
1097  continue;
1098 
1099  /*
1100  * Not there, so add this table's Oid to the message
1101  */
1102  msg.m_tableid[msg.m_nentries++] = tabid;
1103 
1104  /*
1105  * If the message is full, send it out and reinitialize to empty
1106  */
1107  if (msg.m_nentries >= PGSTAT_NUM_TABPURGE)
1108  {
1109  len = offsetof(PgStat_MsgTabpurge, m_tableid[0])
1110  + msg.m_nentries * sizeof(Oid);
1111 
1113  msg.m_databaseid = MyDatabaseId;
1114  pgstat_send(&msg, len);
1115 
1116  msg.m_nentries = 0;
1117  }
1118  }
1119 
1120  /*
1121  * Send the rest
1122  */
1123  if (msg.m_nentries > 0)
1124  {
1125  len = offsetof(PgStat_MsgTabpurge, m_tableid[0])
1126  + msg.m_nentries * sizeof(Oid);
1127 
1129  msg.m_databaseid = MyDatabaseId;
1130  pgstat_send(&msg, len);
1131  }
1132 
1133  /* Clean up */
1134  hash_destroy(htab);
1135 
1136  /*
1137  * Now repeat the above steps for functions. However, we needn't bother
1138  * in the common case where no function stats are being collected.
1139  */
1140  if (dbentry->functions != NULL &&
1141  hash_get_num_entries(dbentry->functions) > 0)
1142  {
1144 
1146  f_msg.m_databaseid = MyDatabaseId;
1147  f_msg.m_nentries = 0;
1148 
1149  hash_seq_init(&hstat, dbentry->functions);
1150  while ((funcentry = (PgStat_StatFuncEntry *) hash_seq_search(&hstat)) != NULL)
1151  {
1152  Oid funcid = funcentry->functionid;
1153 
1155 
1156  if (hash_search(htab, (void *) &funcid, HASH_FIND, NULL) != NULL)
1157  continue;
1158 
1159  /*
1160  * Not there, so add this function's Oid to the message
1161  */
1162  f_msg.m_functionid[f_msg.m_nentries++] = funcid;
1163 
1164  /*
1165  * If the message is full, send it out and reinitialize to empty
1166  */
1167  if (f_msg.m_nentries >= PGSTAT_NUM_FUNCPURGE)
1168  {
1169  len = offsetof(PgStat_MsgFuncpurge, m_functionid[0])
1170  + f_msg.m_nentries * sizeof(Oid);
1171 
1172  pgstat_send(&f_msg, len);
1173 
1174  f_msg.m_nentries = 0;
1175  }
1176  }
1177 
1178  /*
1179  * Send the rest
1180  */
1181  if (f_msg.m_nentries > 0)
1182  {
1183  len = offsetof(PgStat_MsgFuncpurge, m_functionid[0])
1184  + f_msg.m_nentries * sizeof(Oid);
1185 
1186  pgstat_send(&f_msg, len);
1187  }
1188 
1189  hash_destroy(htab);
1190  }
1191 }
1192 
1193 
1194 /* ----------
1195  * pgstat_collect_oids() -
1196  *
1197  * Collect the OIDs of all objects listed in the specified system catalog
1198  * into a temporary hash table. Caller should hash_destroy the result
1199  * when done with it. (However, we make the table in CurrentMemoryContext
1200  * so that it will be freed properly in event of an error.)
1201  * ----------
1202  */
1203 static HTAB *
1204 pgstat_collect_oids(Oid catalogid)
1205 {
1206  HTAB *htab;
1207  HASHCTL hash_ctl;
1208  Relation rel;
1209  HeapScanDesc scan;
1210  HeapTuple tup;
1211  Snapshot snapshot;
1212 
1213  memset(&hash_ctl, 0, sizeof(hash_ctl));
1214  hash_ctl.keysize = sizeof(Oid);
1215  hash_ctl.entrysize = sizeof(Oid);
1216  hash_ctl.hcxt = CurrentMemoryContext;
1217  htab = hash_create("Temporary table of OIDs",
1219  &hash_ctl,
1221 
1222  rel = heap_open(catalogid, AccessShareLock);
1223  snapshot = RegisterSnapshot(GetLatestSnapshot());
1224  scan = heap_beginscan(rel, snapshot, 0, NULL);
1225  while ((tup = heap_getnext(scan, ForwardScanDirection)) != NULL)
1226  {
1227  Oid thisoid = HeapTupleGetOid(tup);
1228 
1230 
1231  (void) hash_search(htab, (void *) &thisoid, HASH_ENTER, NULL);
1232  }
1233  heap_endscan(scan);
1234  UnregisterSnapshot(snapshot);
1236 
1237  return htab;
1238 }
1239 
1240 
1241 /* ----------
1242  * pgstat_drop_database() -
1243  *
1244  * Tell the collector that we just dropped a database.
1245  * (If the message gets lost, we will still clean the dead DB eventually
1246  * via future invocations of pgstat_vacuum_stat().)
1247  * ----------
1248  */
1249 void
1250 pgstat_drop_database(Oid databaseid)
1251 {
1252  PgStat_MsgDropdb msg;
1253 
1255  return;
1256 
1258  msg.m_databaseid = databaseid;
1259  pgstat_send(&msg, sizeof(msg));
1260 }
1261 
1262 
1263 /* ----------
1264  * pgstat_drop_relation() -
1265  *
1266  * Tell the collector that we just dropped a relation.
1267  * (If the message gets lost, we will still clean the dead entry eventually
1268  * via future invocations of pgstat_vacuum_stat().)
1269  *
1270  * Currently not used for lack of any good place to call it; we rely
1271  * entirely on pgstat_vacuum_stat() to clean out stats for dead rels.
1272  * ----------
1273  */
1274 #ifdef NOT_USED
1275 void
1276 pgstat_drop_relation(Oid relid)
1277 {
1278  PgStat_MsgTabpurge msg;
1279  int len;
1280 
1282  return;
1283 
1284  msg.m_tableid[0] = relid;
1285  msg.m_nentries = 1;
1286 
1287  len = offsetof(PgStat_MsgTabpurge, m_tableid[0]) + sizeof(Oid);
1288 
1290  msg.m_databaseid = MyDatabaseId;
1291  pgstat_send(&msg, len);
1292 }
1293 #endif /* NOT_USED */
1294 
1295 
1296 /* ----------
1297  * pgstat_reset_counters() -
1298  *
1299  * Tell the statistics collector to reset counters for our database.
1300  *
1301  * Permission checking for this function is managed through the normal
1302  * GRANT system.
1303  * ----------
1304  */
1305 void
1306 pgstat_reset_counters(void)
1307 {
1309 
1311  return;
1312 
1314  msg.m_databaseid = MyDatabaseId;
1315  pgstat_send(&msg, sizeof(msg));
1316 }
1317 
1318 /* ----------
1319  * pgstat_reset_shared_counters() -
1320  *
1321  * Tell the statistics collector to reset cluster-wide shared counters.
1322  *
1323  * Permission checking for this function is managed through the normal
1324  * GRANT system.
1325  * ----------
1326  */
1327 void
1328 pgstat_reset_shared_counters(const char *target)
1329 {
1331 
1333  return;
1334 
1335  if (strcmp(target, "archiver") == 0)
1337  else if (strcmp(target, "bgwriter") == 0)
1339  else
1340  ereport(ERROR,
1341  (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
1342  errmsg("unrecognized reset target: \"%s\"", target),
1343  errhint("Target must be \"archiver\" or \"bgwriter\".")));
1344 
1346  pgstat_send(&msg, sizeof(msg));
1347 }
1348 
1349 /* ----------
1350  * pgstat_reset_single_counter() -
1351  *
1352  * Tell the statistics collector to reset a single counter.
1353  *
1354  * Permission checking for this function is managed through the normal
1355  * GRANT system.
1356  * ----------
1357  */
1358 void
1359 pgstat_reset_single_counter(Oid objoid, PgStat_Single_Reset_Type type)
1360 {
1362 
1364  return;
1365 
1367  msg.m_databaseid = MyDatabaseId;
1368  msg.m_resettype = type;
1369  msg.m_objectid = objoid;
1370 
1371  pgstat_send(&msg, sizeof(msg));
1372 }
1373 
1374 /* ----------
1375  * pgstat_report_autovac() -
1376  *
1377  * Called from autovacuum.c to report startup of an autovacuum process.
1378  * We are called before InitPostgres is done, so can't rely on MyDatabaseId;
1379  * the db OID must be passed in, instead.
1380  * ----------
1381  */
1382 void
1383 pgstat_report_autovac(Oid dboid)
1384 {
1386 
1388  return;
1389 
1391  msg.m_databaseid = dboid;
1393 
1394  pgstat_send(&msg, sizeof(msg));
1395 }
1396 
1397 
1398 /* ---------
1399  * pgstat_report_vacuum() -
1400  *
1401  * Tell the collector about the table we just vacuumed.
1402  * ---------
1403  */
1404 void
1405 pgstat_report_vacuum(Oid tableoid, bool shared,
1406  PgStat_Counter livetuples, PgStat_Counter deadtuples)
1407 {
1408  PgStat_MsgVacuum msg;
1409 
1411  return;
1412 
1414  msg.m_databaseid = shared ? InvalidOid : MyDatabaseId;
1415  msg.m_tableoid = tableoid;
1418  msg.m_live_tuples = livetuples;
1419  msg.m_dead_tuples = deadtuples;
1420  pgstat_send(&msg, sizeof(msg));
1421 }
1422 
1423 /* --------
1424  * pgstat_report_analyze() -
1425  *
1426  * Tell the collector about the table we just analyzed.
1427  *
1428  * Caller must provide new live- and dead-tuples estimates, as well as a
1429  * flag indicating whether to reset the changes_since_analyze counter.
1430  * --------
1431  */
1432 void
1433 pgstat_report_analyze(Relation rel,
1434  PgStat_Counter livetuples, PgStat_Counter deadtuples,
1435  bool resetcounter)
1436 {
1437  PgStat_MsgAnalyze msg;
1438 
1440  return;
1441 
1442  /*
1443  * Unlike VACUUM, ANALYZE might be running inside a transaction that has
1444  * already inserted and/or deleted rows in the target table. ANALYZE will
1445  * have counted such rows as live or dead respectively. Because we will
1446  * report our counts of such rows at transaction end, we should subtract
1447  * off these counts from what we send to the collector now, else they'll
1448  * be double-counted after commit. (This approach also ensures that the
1449  * collector ends up with the right numbers if we abort instead of
1450  * committing.)
1451  */
1452  if (rel->pgstat_info != NULL)
1453  {
1455 
1456  for (trans = rel->pgstat_info->trans; trans; trans = trans->upper)
1457  {
1458  livetuples -= trans->tuples_inserted - trans->tuples_deleted;
1459  deadtuples -= trans->tuples_updated + trans->tuples_deleted;
1460  }
1461  /* count stuff inserted by already-aborted subxacts, too */
1462  deadtuples -= rel->pgstat_info->t_counts.t_delta_dead_tuples;
1463  /* Since ANALYZE's counts are estimates, we could have underflowed */
1464  livetuples = Max(livetuples, 0);
1465  deadtuples = Max(deadtuples, 0);
1466  }
1467 
1469  msg.m_databaseid = rel->rd_rel->relisshared ? InvalidOid : MyDatabaseId;
1470  msg.m_tableoid = RelationGetRelid(rel);
1472  msg.m_resetcounter = resetcounter;
1474  msg.m_live_tuples = livetuples;
1475  msg.m_dead_tuples = deadtuples;
1476  pgstat_send(&msg, sizeof(msg));
1477 }
1478 
1479 /* --------
1480  * pgstat_report_recovery_conflict() -
1481  *
1482  * Tell the collector about a Hot Standby recovery conflict.
1483  * --------
1484  */
1485 void
1486 pgstat_report_recovery_conflict(int reason)
1487 {
1489 
1491  return;
1492 
1494  msg.m_databaseid = MyDatabaseId;
1495  msg.m_reason = reason;
1496  pgstat_send(&msg, sizeof(msg));
1497 }
1498 
1499 /* --------
1500  * pgstat_report_deadlock() -
1501  *
1502  * Tell the collector about a deadlock detected.
1503  * --------
1504  */
1505 void
1506 pgstat_report_deadlock(void)
1507 {
1508  PgStat_MsgDeadlock msg;
1509 
1511  return;
1512 
1514  msg.m_databaseid = MyDatabaseId;
1515  pgstat_send(&msg, sizeof(msg));
1516 }
1517 
1518 /* --------
1519  * pgstat_report_tempfile() -
1520  *
1521  * Tell the collector about a temporary file.
1522  * --------
1523  */
1524 void
1525 pgstat_report_tempfile(size_t filesize)
1526 {
1527  PgStat_MsgTempFile msg;
1528 
1530  return;
1531 
1533  msg.m_databaseid = MyDatabaseId;
1534  msg.m_filesize = filesize;
1535  pgstat_send(&msg, sizeof(msg));
1536 }
1537 
1538 
1539 /* ----------
1540  * pgstat_ping() -
1541  *
1542  * Send some junk data to the collector to increase traffic.
1543  * ----------
1544  */
1545 void
1546 pgstat_ping(void)
1547 {
1548  PgStat_MsgDummy msg;
1549 
1551  return;
1552 
1554  pgstat_send(&msg, sizeof(msg));
1555 }
1556 
1557 /* ----------
1558  * pgstat_send_inquiry() -
1559  *
1560  * Notify collector that we need fresh data.
1561  * ----------
1562  */
1563 static void
1564 pgstat_send_inquiry(TimestampTz clock_time, TimestampTz cutoff_time, Oid databaseid)
1565 {
1566  PgStat_MsgInquiry msg;
1567 
1569  msg.clock_time = clock_time;
1570  msg.cutoff_time = cutoff_time;
1571  msg.databaseid = databaseid;
1572  pgstat_send(&msg, sizeof(msg));
1573 }
1574 
1575 
1576 /*
1577  * Initialize function call usage data.
1578  * Called by the executor before invoking a function.
1579  */
1580 void
1581 pgstat_init_function_usage(FunctionCallInfoData *fcinfo,
1582  PgStat_FunctionCallUsage *fcu)
1583 {
1584  PgStat_BackendFunctionEntry *htabent;
1585  bool found;
1586 
1587  if (pgstat_track_functions <= fcinfo->flinfo->fn_stats)
1588  {
1589  /* stats not wanted */
1590  fcu->fs = NULL;
1591  return;
1592  }
1593 
1594  if (!pgStatFunctions)
1595  {
1596  /* First time through - initialize function stat table */
1597  HASHCTL hash_ctl;
1598 
1599  memset(&hash_ctl, 0, sizeof(hash_ctl));
1600  hash_ctl.keysize = sizeof(Oid);
1601  hash_ctl.entrysize = sizeof(PgStat_BackendFunctionEntry);
1602  pgStatFunctions = hash_create("Function stat entries",
1604  &hash_ctl,
1605  HASH_ELEM | HASH_BLOBS);
1606  }
1607 
1608  /* Get the stats entry for this function, create if necessary */
1609  htabent = hash_search(pgStatFunctions, &fcinfo->flinfo->fn_oid,
1610  HASH_ENTER, &found);
1611  if (!found)
1612  MemSet(&htabent->f_counts, 0, sizeof(PgStat_FunctionCounts));
1613 
1614  fcu->fs = &htabent->f_counts;
1615 
1616  /* save stats for this function, later used to compensate for recursion */
1617  fcu->save_f_total_time = htabent->f_counts.f_total_time;
1618 
1619  /* save current backend-wide total time */
1620  fcu->save_total = total_func_time;
1621 
1622  /* get clock time as of function start */
1624 }
1625 
1626 /*
1627  * find_funcstat_entry - find any existing PgStat_BackendFunctionEntry entry
1628  * for specified function
1629  *
1630  * If no entry, return NULL, don't create a new one
1631  */
1632 PgStat_BackendFunctionEntry *
1633 find_funcstat_entry(Oid func_id)
1634 {
1635  if (pgStatFunctions == NULL)
1636  return NULL;
1637 
1638  return (PgStat_BackendFunctionEntry *) hash_search(pgStatFunctions,
1639  (void *) &func_id,
1640  HASH_FIND, NULL);
1641 }
1642 
1643 /*
1644  * Calculate function call usage and update stat counters.
1645  * Called by the executor after invoking a function.
1646  *
1647  * In the case of a set-returning function that runs in value-per-call mode,
1648  * we will see multiple pgstat_init_function_usage/pgstat_end_function_usage
1649  * calls for what the user considers a single call of the function. The
1650  * finalize flag should be TRUE on the last call.
1651  */
1652 void
1653 pgstat_end_function_usage(PgStat_FunctionCallUsage *fcu, bool finalize)
1654 {
1655  PgStat_FunctionCounts *fs = fcu->fs;
1656  instr_time f_total;
1657  instr_time f_others;
1658  instr_time f_self;
1659 
1660  /* stats not wanted? */
1661  if (fs == NULL)
1662  return;
1663 
1664  /* total elapsed time in this function call */
1665  INSTR_TIME_SET_CURRENT(f_total);
1666  INSTR_TIME_SUBTRACT(f_total, fcu->f_start);
1667 
1668  /* self usage: elapsed minus anything already charged to other calls */
1669  f_others = total_func_time;
1670  INSTR_TIME_SUBTRACT(f_others, fcu->save_total);
1671  f_self = f_total;
1672  INSTR_TIME_SUBTRACT(f_self, f_others);
1673 
1674  /* update backend-wide total time */
1676 
1677  /*
1678  * Compute the new f_total_time as the total elapsed time added to the
1679  * pre-call value of f_total_time. This is necessary to avoid
1680  * double-counting any time taken by recursive calls of myself. (We do
1681  * not need any similar kluge for self time, since that already excludes
1682  * any recursive calls.)
1683  */
1684  INSTR_TIME_ADD(f_total, fcu->save_f_total_time);
1685 
1686  /* update counters in function stats table */
1687  if (finalize)
1688  fs->f_numcalls++;
1689  fs->f_total_time = f_total;
1690  INSTR_TIME_ADD(fs->f_self_time, f_self);
1691 
1692  /* indicate that we have something to send */
1693  have_function_stats = true;
1694 }
1695 
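
The usage-timing pair above separates "self" time from "total" time by keeping one backend-wide accumulator of time already charged to completed calls: whatever was charged while this call was on the stack must belong to callees, so it is subtracted from the elapsed time, and the pre-call total is added back so recursive calls are not double-counted. A compact, self-contained model of that bookkeeping (plain doubles and CLOCK_MONOTONIC instead of instr_time; all names invented, no finalize handling):

#include <stdio.h>
#include <time.h>

typedef struct FuncStats
{
    double      total_time;     /* wall-clock time, including callees */
    double      self_time;      /* time spent in the function itself */
} FuncStats;

static double charged_so_far;   /* plays the role of total_func_time */

static double
now_seconds(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec + ts.tv_nsec * 1e-9;
}

typedef struct CallUsage
{
    FuncStats  *fs;
    double      save_total;     /* fs->total_time before this call */
    double      save_charged;   /* charged_so_far before this call */
    double      start;
} CallUsage;

static void
call_begin(FuncStats *fs, CallUsage *cu)
{
    cu->fs = fs;
    cu->save_total = fs->total_time;
    cu->save_charged = charged_so_far;
    cu->start = now_seconds();
}

static void
call_end(CallUsage *cu)
{
    double      elapsed = now_seconds() - cu->start;
    double      others = charged_so_far - cu->save_charged; /* callees */
    double      self = elapsed - others;

    charged_so_far += self;
    cu->fs->total_time = cu->save_total + elapsed;  /* no recursion double-count */
    cu->fs->self_time += self;
}

int
main(void)
{
    FuncStats   outer = {0, 0}, inner = {0, 0};
    CallUsage   cu_outer, cu_inner;

    call_begin(&outer, &cu_outer);
    call_begin(&inner, &cu_inner);
    /* pretend work happens here */
    call_end(&cu_inner);
    call_end(&cu_outer);
    printf("outer self=%.9f total=%.9f\n", outer.self_time, outer.total_time);
    printf("inner self=%.9f total=%.9f\n", inner.self_time, inner.total_time);
    return 0;
}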
1696 
1697 /* ----------
1698  * pgstat_initstats() -
1699  *
1700  * Initialize a relcache entry to count access statistics.
1701  * Called whenever a relation is opened.
1702  *
1703  * We assume that a relcache entry's pgstat_info field is zeroed by
1704  * relcache.c when the relcache entry is made; thereafter it is long-lived
1705  * data. We can avoid repeated searches of the TabStatus arrays when the
1706  * same relation is touched repeatedly within a transaction.
1707  * ----------
1708  */
1709 void
1710 pgstat_initstats(Relation rel)
1711 {
1712  Oid rel_id = rel->rd_id;
1713  char relkind = rel->rd_rel->relkind;
1714 
1715  /* We only count stats for things that have storage */
1716  if (!(relkind == RELKIND_RELATION ||
1717  relkind == RELKIND_MATVIEW ||
1718  relkind == RELKIND_INDEX ||
1719  relkind == RELKIND_TOASTVALUE ||
1720  relkind == RELKIND_SEQUENCE))
1721  {
1722  rel->pgstat_info = NULL;
1723  return;
1724  }
1725 
1727  {
1728  /* We're not counting at all */
1729  rel->pgstat_info = NULL;
1730  return;
1731  }
1732 
1733  /*
1734  * If we already set up this relation in the current transaction, nothing
1735  * to do.
1736  */
1737  if (rel->pgstat_info != NULL &&
1738  rel->pgstat_info->t_id == rel_id)
1739  return;
1740 
1741  /* Else find or make the PgStat_TableStatus entry, and update link */
1742  rel->pgstat_info = get_tabstat_entry(rel_id, rel->rd_rel->relisshared);
1743 }
1744 
1745 /*
1746  * get_tabstat_entry - find or create a PgStat_TableStatus entry for rel
1747  */
1748 static PgStat_TableStatus *
1749 get_tabstat_entry(Oid rel_id, bool isshared)
1750 {
1751  TabStatHashEntry *hash_entry;
1752  PgStat_TableStatus *entry;
1753  TabStatusArray *tsa;
1754  bool found;
1755 
1756  /*
1757  * Create hash table if we don't have it already.
1758  */
1759  if (pgStatTabHash == NULL)
1760  {
1761  HASHCTL ctl;
1762 
1763  memset(&ctl, 0, sizeof(ctl));
1764  ctl.keysize = sizeof(Oid);
1765  ctl.entrysize = sizeof(TabStatHashEntry);
1766 
1767  pgStatTabHash = hash_create("pgstat TabStatusArray lookup hash table",
1769  &ctl,
1770  HASH_ELEM | HASH_BLOBS);
1771  }
1772 
1773  /*
1774  * Find an entry or create a new one.
1775  */
1776  hash_entry = hash_search(pgStatTabHash, &rel_id, HASH_ENTER, &found);
1777  if (!found)
1778  {
1779  /* initialize new entry with null pointer */
1780  hash_entry->tsa_entry = NULL;
1781  }
1782 
1783  /*
1784  * If entry is already valid, we're done.
1785  */
1786  if (hash_entry->tsa_entry)
1787  return hash_entry->tsa_entry;
1788 
1789  /*
1790  * Locate the first pgStatTabList entry with free space, making a new list
1791  * entry if needed. Note that we could get an OOM failure here, but if so
1792  * we have left the hashtable and the list in a consistent state.
1793  */
1794  if (pgStatTabList == NULL)
1795  {
1796  /* Set up first pgStatTabList entry */
1797  pgStatTabList = (TabStatusArray *)
1799  sizeof(TabStatusArray));
1800  }
1801 
1802  tsa = pgStatTabList;
1803  while (tsa->tsa_used >= TABSTAT_QUANTUM)
1804  {
1805  if (tsa->tsa_next == NULL)
1806  tsa->tsa_next = (TabStatusArray *)
1808  sizeof(TabStatusArray));
1809  tsa = tsa->tsa_next;
1810  }
1811 
1812  /*
1813  * Allocate a PgStat_TableStatus entry within this list entry. We assume
1814  * the entry was already zeroed, either at creation or after last use.
1815  */
1816  entry = &tsa->tsa_entries[tsa->tsa_used++];
1817  entry->t_id = rel_id;
1818  entry->t_shared = isshared;
1819 
1820  /*
1821  * Now we can fill the entry in pgStatTabHash.
1822  */
1823  hash_entry->tsa_entry = entry;
1824 
1825  return entry;
1826 }
1827 
1828 /*
1829  * find_tabstat_entry - find any existing PgStat_TableStatus entry for rel
1830  *
1831  * If no entry, return NULL, don't create a new one
1832  *
1833  * Note: if we got an error in the most recent execution of pgstat_report_stat,
1834  * it's possible that an entry exists but there's no hashtable entry for it.
1835  * That's okay, we'll treat this case as "doesn't exist".
1836  */
1837 PgStat_TableStatus *
1838 find_tabstat_entry(Oid rel_id)
1839 {
1840  TabStatHashEntry *hash_entry;
1841 
1842  /* If hashtable doesn't exist, there are no entries at all */
1843  if (!pgStatTabHash)
1844  return NULL;
1845 
1846  hash_entry = hash_search(pgStatTabHash, &rel_id, HASH_FIND, NULL);
1847  if (!hash_entry)
1848  return NULL;
1849 
1850  /* Note that this step could also return NULL, but that's correct */
1851  return hash_entry->tsa_entry;
1852 }
1853 
1854 /*
1855  * get_tabstat_stack_level - add a new (sub)transaction stack entry if needed
1856  */
1857 static PgStat_SubXactStatus *
1858 get_tabstat_stack_level(int nest_level)
1859 {
1860  PgStat_SubXactStatus *xact_state;
1861 
1862  xact_state = pgStatXactStack;
1863  if (xact_state == NULL || xact_state->nest_level != nest_level)
1864  {
1865  xact_state = (PgStat_SubXactStatus *)
1867  sizeof(PgStat_SubXactStatus));
1868  xact_state->nest_level = nest_level;
1869  xact_state->prev = pgStatXactStack;
1870  xact_state->first = NULL;
1871  pgStatXactStack = xact_state;
1872  }
1873  return xact_state;
1874 }
1875 
1876 /*
1877  * add_tabstat_xact_level - add a new (sub)transaction state record
1878  */
1879 static void
1880 add_tabstat_xact_level(PgStat_TableStatus *pgstat_info, int nest_level)
1881 {
1882  PgStat_SubXactStatus *xact_state;
1884 
1885  /*
1886  * If this is the first rel to be modified at the current nest level, we
1887  * first have to push a transaction stack entry.
1888  */
1889  xact_state = get_tabstat_stack_level(nest_level);
1890 
1891  /* Now make a per-table stack entry */
1892  trans = (PgStat_TableXactStatus *)
1894  sizeof(PgStat_TableXactStatus));
1895  trans->nest_level = nest_level;
1896  trans->upper = pgstat_info->trans;
1897  trans->parent = pgstat_info;
1898  trans->next = xact_state->first;
1899  xact_state->first = trans;
1900  pgstat_info->trans = trans;
1901 }
1902 
1903 /*
1904  * pgstat_count_heap_insert - count a tuple insertion of n tuples
1905  */
1906 void
1907 pgstat_count_heap_insert(Relation rel, PgStat_Counter n)
1908 {
1909  PgStat_TableStatus *pgstat_info = rel->pgstat_info;
1910 
1911  if (pgstat_info != NULL)
1912  {
1913  /* We have to log the effect at the proper transactional level */
1914  int nest_level = GetCurrentTransactionNestLevel();
1915 
1916  if (pgstat_info->trans == NULL ||
1917  pgstat_info->trans->nest_level != nest_level)
1918  add_tabstat_xact_level(pgstat_info, nest_level);
1919 
1920  pgstat_info->trans->tuples_inserted += n;
1921  }
1922 }
1923 
1924 /*
1925  * pgstat_count_heap_update - count a tuple update
1926  */
1927 void
1928 pgstat_count_heap_update(Relation rel, bool hot)
1929 {
1930  PgStat_TableStatus *pgstat_info = rel->pgstat_info;
1931 
1932  if (pgstat_info != NULL)
1933  {
1934  /* We have to log the effect at the proper transactional level */
1935  int nest_level = GetCurrentTransactionNestLevel();
1936 
1937  if (pgstat_info->trans == NULL ||
1938  pgstat_info->trans->nest_level != nest_level)
1939  add_tabstat_xact_level(pgstat_info, nest_level);
1940 
1941  pgstat_info->trans->tuples_updated++;
1942 
1943  /* t_tuples_hot_updated is nontransactional, so just advance it */
1944  if (hot)
1945  pgstat_info->t_counts.t_tuples_hot_updated++;
1946  }
1947 }
1948 
1949 /*
1950  * pgstat_count_heap_delete - count a tuple deletion
1951  */
1952 void
1953 pgstat_count_heap_delete(Relation rel)
1954 {
1955  PgStat_TableStatus *pgstat_info = rel->pgstat_info;
1956 
1957  if (pgstat_info != NULL)
1958  {
1959  /* We have to log the effect at the proper transactional level */
1960  int nest_level = GetCurrentTransactionNestLevel();
1961 
1962  if (pgstat_info->trans == NULL ||
1963  pgstat_info->trans->nest_level != nest_level)
1964  add_tabstat_xact_level(pgstat_info, nest_level);
1965 
1966  pgstat_info->trans->tuples_deleted++;
1967  }
1968 }
1969 
1970 /*
1971  * pgstat_truncate_save_counters
1972  *
1973  * Whenever a table is truncated, we save its i/u/d counters so that they can
1974  * be cleared, and if the (sub)xact that executed the truncate later aborts,
1975  * the counters can be restored to the saved (pre-truncate) values. Note we do
1976  * this on the first truncate in any particular subxact level only.
1977  */
1978 static void
1979 pgstat_truncate_save_counters(PgStat_TableXactStatus *trans)
1980 {
1981  if (!trans->truncated)
1982  {
1983  trans->inserted_pre_trunc = trans->tuples_inserted;
1984  trans->updated_pre_trunc = trans->tuples_updated;
1985  trans->deleted_pre_trunc = trans->tuples_deleted;
1986  trans->truncated = true;
1987  }
1988 }
1989 
1990 /*
1991  * pgstat_truncate_restore_counters - restore counters when a truncate aborts
1992  */
1993 static void
1994 pgstat_truncate_restore_counters(PgStat_TableXactStatus *trans)
1995 {
1996  if (trans->truncated)
1997  {
1998  trans->tuples_inserted = trans->inserted_pre_trunc;
1999  trans->tuples_updated = trans->updated_pre_trunc;
2000  trans->tuples_deleted = trans->deleted_pre_trunc;
2001  }
2002 }
2003 
2004 /*
2005  * pgstat_count_truncate - update tuple counters due to truncate
2006  */
2007 void
2008 pgstat_count_truncate(Relation rel)
2009 {
2010  PgStat_TableStatus *pgstat_info = rel->pgstat_info;
2011 
2012  if (pgstat_info != NULL)
2013  {
2014  /* We have to log the effect at the proper transactional level */
2015  int nest_level = GetCurrentTransactionNestLevel();
2016 
2017  if (pgstat_info->trans == NULL ||
2018  pgstat_info->trans->nest_level != nest_level)
2019  add_tabstat_xact_level(pgstat_info, nest_level);
2020 
2021  pgstat_truncate_save_counters(pgstat_info->trans);
2022  pgstat_info->trans->tuples_inserted = 0;
2023  pgstat_info->trans->tuples_updated = 0;
2024  pgstat_info->trans->tuples_deleted = 0;
2025  }
2026 }
2027 
2028 /*
2029  * pgstat_update_heap_dead_tuples - update dead-tuples count
2030  *
2031  * The semantics of this are that we are reporting the nontransactional
2032  * recovery of "delta" dead tuples; so t_delta_dead_tuples decreases
2033  * rather than increasing, and the change goes straight into the per-table
2034  * counter, not into transactional state.
2035  */
2036 void
2037 pgstat_update_heap_dead_tuples(Relation rel, int delta)
2038 {
2039  PgStat_TableStatus *pgstat_info = rel->pgstat_info;
2040 
2041  if (pgstat_info != NULL)
2042  pgstat_info->t_counts.t_delta_dead_tuples -= delta;
2043 }
2044 
2045 
2046 /* ----------
2047  * AtEOXact_PgStat
2048  *
2049  * Called from access/transam/xact.c at top-level transaction commit/abort.
2050  * ----------
2051  */
2052 void
2053 AtEOXact_PgStat(bool isCommit)
2054 {
2055  PgStat_SubXactStatus *xact_state;
2056 
2057  /*
2058  * Count transaction commit or abort. (We use counters, not just bools,
2059  * in case the reporting message isn't sent right away.)
2060  */
2061  if (isCommit)
2062  pgStatXactCommit++;
2063  else
2065 
2066  /*
2067  * Transfer transactional insert/update counts into the base tabstat
2068  * entries. We don't bother to free any of the transactional state, since
2069  * it's all in TopTransactionContext and will go away anyway.
2070  */
2071  xact_state = pgStatXactStack;
2072  if (xact_state != NULL)
2073  {
2075 
2076  Assert(xact_state->nest_level == 1);
2077  Assert(xact_state->prev == NULL);
2078  for (trans = xact_state->first; trans != NULL; trans = trans->next)
2079  {
2080  PgStat_TableStatus *tabstat;
2081 
2082  Assert(trans->nest_level == 1);
2083  Assert(trans->upper == NULL);
2084  tabstat = trans->parent;
2085  Assert(tabstat->trans == trans);
2086  /* restore pre-truncate stats (if any) in case of aborted xact */
2087  if (!isCommit)
2089  /* count attempted actions regardless of commit/abort */
2090  tabstat->t_counts.t_tuples_inserted += trans->tuples_inserted;
2091  tabstat->t_counts.t_tuples_updated += trans->tuples_updated;
2092  tabstat->t_counts.t_tuples_deleted += trans->tuples_deleted;
2093  if (isCommit)
2094  {
2095  tabstat->t_counts.t_truncated = trans->truncated;
2096  if (trans->truncated)
2097  {
2098  /* forget live/dead stats seen by backend thus far */
2099  tabstat->t_counts.t_delta_live_tuples = 0;
2100  tabstat->t_counts.t_delta_dead_tuples = 0;
2101  }
2102  /* insert adds a live tuple, delete removes one */
2103  tabstat->t_counts.t_delta_live_tuples +=
2104  trans->tuples_inserted - trans->tuples_deleted;
2105  /* update and delete each create a dead tuple */
2106  tabstat->t_counts.t_delta_dead_tuples +=
2107  trans->tuples_updated + trans->tuples_deleted;
2108  /* insert, update, delete each count as one change event */
2109  tabstat->t_counts.t_changed_tuples +=
2110  trans->tuples_inserted + trans->tuples_updated +
2111  trans->tuples_deleted;
2112  }
2113  else
2114  {
2115  /* inserted tuples are dead, deleted tuples are unaffected */
2116  tabstat->t_counts.t_delta_dead_tuples +=
2117  trans->tuples_inserted + trans->tuples_updated;
2118  /* an aborted xact generates no changed_tuple events */
2119  }
2120  tabstat->trans = NULL;
2121  }
2122  }
2123  pgStatXactStack = NULL;
2124 
2125  /* Make sure any stats snapshot is thrown away */
2126  pgstat_clear_snapshot();
2127 }
2128 
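The commit and abort branches above boil down to a small piece of arithmetic on the per-transaction counters. A standalone sketch of just that arithmetic (simplified field names, not the actual pgstat structs):

#include <stdbool.h>
#include <stdio.h>

typedef struct TupleDeltas
{
	long		live;			/* t_delta_live_tuples analogue */
	long		dead;			/* t_delta_dead_tuples analogue */
	long		changed;		/* t_changed_tuples analogue */
} TupleDeltas;

/* Apply one transaction's insert/update/delete counts at commit or abort. */
static void
apply_xact(TupleDeltas *d, long ins, long upd, long del, bool commit)
{
	if (commit)
	{
		d->live += ins - del;		/* inserts add, deletes remove live tuples */
		d->dead += upd + del;		/* updates and deletes leave dead tuples behind */
		d->changed += ins + upd + del;
	}
	else
	{
		/* aborted: inserted and updated rows become dead, nothing "changed" */
		d->dead += ins + upd;
	}
}

int
main(void)
{
	TupleDeltas committed = {0, 0, 0};
	TupleDeltas aborted = {0, 0, 0};

	apply_xact(&committed, 5, 2, 1, true);
	apply_xact(&aborted, 5, 2, 1, false);
	printf("commit: live=%ld dead=%ld changed=%ld\n",
		   committed.live, committed.dead, committed.changed);
	printf("abort:  live=%ld dead=%ld changed=%ld\n",
		   aborted.live, aborted.dead, aborted.changed);
	return 0;
}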
2129 /* ----------
2130  * AtEOSubXact_PgStat
2131  *
2132  * Called from access/transam/xact.c at subtransaction commit/abort.
2133  * ----------
2134  */
2135 void
2136 AtEOSubXact_PgStat(bool isCommit, int nestDepth)
2137 {
2138  PgStat_SubXactStatus *xact_state;
2139 
2140  /*
2141  * Transfer transactional insert/update counts into the next higher
2142  * subtransaction state.
2143  */
2144  xact_state = pgStatXactStack;
2145  if (xact_state != NULL &&
2146  xact_state->nest_level >= nestDepth)
2147  {
2148  PgStat_TableXactStatus *trans;
2149  PgStat_TableXactStatus *next_trans;
2150 
2151  /* delink xact_state from stack immediately to simplify reuse case */
2152  pgStatXactStack = xact_state->prev;
2153 
2154  for (trans = xact_state->first; trans != NULL; trans = next_trans)
2155  {
2156  PgStat_TableStatus *tabstat;
2157 
2158  next_trans = trans->next;
2159  Assert(trans->nest_level == nestDepth);
2160  tabstat = trans->parent;
2161  Assert(tabstat->trans == trans);
2162  if (isCommit)
2163  {
2164  if (trans->upper && trans->upper->nest_level == nestDepth - 1)
2165  {
2166  if (trans->truncated)
2167  {
2168  /* propagate the truncate status one level up */
2169  pgstat_truncate_save_counters(trans->upper);
2170  /* replace upper xact stats with ours */
2171  trans->upper->tuples_inserted = trans->tuples_inserted;
2172  trans->upper->tuples_updated = trans->tuples_updated;
2173  trans->upper->tuples_deleted = trans->tuples_deleted;
2174  }
2175  else
2176  {
2177  trans->upper->tuples_inserted += trans->tuples_inserted;
2178  trans->upper->tuples_updated += trans->tuples_updated;
2179  trans->upper->tuples_deleted += trans->tuples_deleted;
2180  }
2181  tabstat->trans = trans->upper;
2182  pfree(trans);
2183  }
2184  else
2185  {
2186  /*
2187  * When there isn't an immediate parent state, we can just
2188  * reuse the record instead of going through a
2189  * palloc/pfree pushup (this works since it's all in
2190  * TopTransactionContext anyway). We have to re-link it
2191  * into the parent level, though, and that might mean
2192  * pushing a new entry into the pgStatXactStack.
2193  */
2194  PgStat_SubXactStatus *upper_xact_state;
2195 
2196  upper_xact_state = get_tabstat_stack_level(nestDepth - 1);
2197  trans->next = upper_xact_state->first;
2198  upper_xact_state->first = trans;
2199  trans->nest_level = nestDepth - 1;
2200  }
2201  }
2202  else
2203  {
2204  /*
2205  * On abort, update top-level tabstat counts, then forget the
2206  * subtransaction
2207  */
2208 
2209  /* first restore values obliterated by truncate */
2210  pgstat_truncate_restore_counters(trans);
2211  /* count attempted actions regardless of commit/abort */
2212  tabstat->t_counts.t_tuples_inserted += trans->tuples_inserted;
2213  tabstat->t_counts.t_tuples_updated += trans->tuples_updated;
2214  tabstat->t_counts.t_tuples_deleted += trans->tuples_deleted;
2215  /* inserted tuples are dead, deleted tuples are unaffected */
2216  tabstat->t_counts.t_delta_dead_tuples +=
2217  trans->tuples_inserted + trans->tuples_updated;
2218  tabstat->trans = trans->upper;
2219  pfree(trans);
2220  }
2221  }
2222  pfree(xact_state);
2223  }
2224 }
2225 
2226 
2227 /*
2228  * AtPrepare_PgStat
2229  * Save the transactional stats state at 2PC transaction prepare.
2230  *
2231  * In this phase we just generate 2PC records for all the pending
2232  * transaction-dependent stats work.
2233  */
2234 void
2235 AtPrepare_PgStat(void)
2236 {
2237  PgStat_SubXactStatus *xact_state;
2238 
2239  xact_state = pgStatXactStack;
2240  if (xact_state != NULL)
2241  {
2242  PgStat_TableXactStatus *trans;
2243 
2244  Assert(xact_state->nest_level == 1);
2245  Assert(xact_state->prev == NULL);
2246  for (trans = xact_state->first; trans != NULL; trans = trans->next)
2247  {
2248  PgStat_TableStatus *tabstat;
2249  TwoPhasePgStatRecord record;
2250 
2251  Assert(trans->nest_level == 1);
2252  Assert(trans->upper == NULL);
2253  tabstat = trans->parent;
2254  Assert(tabstat->trans == trans);
2255 
2256  record.tuples_inserted = trans->tuples_inserted;
2257  record.tuples_updated = trans->tuples_updated;
2258  record.tuples_deleted = trans->tuples_deleted;
2259  record.inserted_pre_trunc = trans->inserted_pre_trunc;
2260  record.updated_pre_trunc = trans->updated_pre_trunc;
2261  record.deleted_pre_trunc = trans->deleted_pre_trunc;
2262  record.t_id = tabstat->t_id;
2263  record.t_shared = tabstat->t_shared;
2264  record.t_truncated = trans->truncated;
2265 
2266  RegisterTwoPhaseRecord(TWOPHASE_RM_PGSTAT_ID, 0,
2267  &record, sizeof(TwoPhasePgStatRecord));
2268  }
2269  }
2270 }
2271 
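AtPrepare_PgStat flattens each table's pending counters into a fixed-size record that travels with the two-phase state, so that COMMIT PREPARED or ROLLBACK PREPARED can replay it later, possibly in a different backend. A standalone sketch of that flatten-and-replay idea with a simplified record layout (not the real TwoPhasePgStatRecord):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* simplified stand-in for TwoPhasePgStatRecord */
typedef struct PendingRecord
{
	int64_t		tuples_inserted;
	int64_t		tuples_updated;
	int64_t		tuples_deleted;
	uint32_t	rel_id;
} PendingRecord;

int
main(void)
{
	unsigned char statefile[sizeof(PendingRecord)];	/* pretend 2PC state file */
	PendingRecord rec = {5, 2, 1, 16384};
	PendingRecord replay;

	/* PREPARE: serialize the pending counters */
	memcpy(statefile, &rec, sizeof(rec));

	/* COMMIT PREPARED (possibly after a restart): read them back and apply */
	memcpy(&replay, statefile, sizeof(replay));
	printf("rel %u: +%lld inserted, +%lld updated, +%lld deleted\n",
		   (unsigned) replay.rel_id,
		   (long long) replay.tuples_inserted,
		   (long long) replay.tuples_updated,
		   (long long) replay.tuples_deleted);
	return 0;
}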
2272 /*
2273  * PostPrepare_PgStat
2274  * Clean up after successful PREPARE.
2275  *
2276  * All we need do here is unlink the transaction stats state from the
2277  * nontransactional state. The nontransactional action counts will be
2278  * reported to the stats collector immediately, while the effects on live
2279  * and dead tuple counts are preserved in the 2PC state file.
2280  *
2281  * Note: AtEOXact_PgStat is not called during PREPARE.
2282  */
2283 void
2284 PostPrepare_PgStat(void)
2285 {
2286  PgStat_SubXactStatus *xact_state;
2287 
2288  /*
2289  * We don't bother to free any of the transactional state, since it's all
2290  * in TopTransactionContext and will go away anyway.
2291  */
2292  xact_state = pgStatXactStack;
2293  if (xact_state != NULL)
2294  {
2295  PgStat_TableXactStatus *trans;
2296 
2297  for (trans = xact_state->first; trans != NULL; trans = trans->next)
2298  {
2299  PgStat_TableStatus *tabstat;
2300 
2301  tabstat = trans->parent;
2302  tabstat->trans = NULL;
2303  }
2304  }
2305  pgStatXactStack = NULL;
2306 
2307  /* Make sure any stats snapshot is thrown away */
2308  pgstat_clear_snapshot();
2309 }
2310 
2311 /*
2312  * 2PC processing routine for COMMIT PREPARED case.
2313  *
2314  * Load the saved counts into our local pgstats state.
2315  */
2316 void
2317 pgstat_twophase_postcommit(TransactionId xid, uint16 info,
2318  void *recdata, uint32 len)
2319 {
2320  TwoPhasePgStatRecord *rec = (TwoPhasePgStatRecord *) recdata;
2321  PgStat_TableStatus *pgstat_info;
2322 
2323  /* Find or create a tabstat entry for the rel */
2324  pgstat_info = get_tabstat_entry(rec->t_id, rec->t_shared);
2325 
2326  /* Same math as in AtEOXact_PgStat, commit case */
2327  pgstat_info->t_counts.t_tuples_inserted += rec->tuples_inserted;
2328  pgstat_info->t_counts.t_tuples_updated += rec->tuples_updated;
2329  pgstat_info->t_counts.t_tuples_deleted += rec->tuples_deleted;
2330  pgstat_info->t_counts.t_truncated = rec->t_truncated;
2331  if (rec->t_truncated)
2332  {
2333  /* forget live/dead stats seen by backend thus far */
2334  pgstat_info->t_counts.t_delta_live_tuples = 0;
2335  pgstat_info->t_counts.t_delta_dead_tuples = 0;
2336  }
2337  pgstat_info->t_counts.t_delta_live_tuples +=
2338  rec->tuples_inserted - rec->tuples_deleted;
2339  pgstat_info->t_counts.t_delta_dead_tuples +=
2340  rec->tuples_updated + rec->tuples_deleted;
2341  pgstat_info->t_counts.t_changed_tuples +=
2342  rec->tuples_inserted + rec->tuples_updated +
2343  rec->tuples_deleted;
2344 }
2345 
2346 /*
2347  * 2PC processing routine for ROLLBACK PREPARED case.
2348  *
2349  * Load the saved counts into our local pgstats state, but treat them
2350  * as aborted.
2351  */
2352 void
2353 pgstat_twophase_postabort(TransactionId xid, uint16 info,
2354  void *recdata, uint32 len)
2355 {
2356  TwoPhasePgStatRecord *rec = (TwoPhasePgStatRecord *) recdata;
2357  PgStat_TableStatus *pgstat_info;
2358 
2359  /* Find or create a tabstat entry for the rel */
2360  pgstat_info = get_tabstat_entry(rec->t_id, rec->t_shared);
2361 
2362  /* Same math as in AtEOXact_PgStat, abort case */
2363  if (rec->t_truncated)
2364  {
2365  rec->tuples_inserted = rec->inserted_pre_trunc;
2366  rec->tuples_updated = rec->updated_pre_trunc;
2367  rec->tuples_deleted = rec->deleted_pre_trunc;
2368  }
2369  pgstat_info->t_counts.t_tuples_inserted += rec->tuples_inserted;
2370  pgstat_info->t_counts.t_tuples_updated += rec->tuples_updated;
2371  pgstat_info->t_counts.t_tuples_deleted += rec->tuples_deleted;
2372  pgstat_info->t_counts.t_delta_dead_tuples +=
2373  rec->tuples_inserted + rec->tuples_updated;
2374 }
2375 
2376 
2377 /* ----------
2378  * pgstat_fetch_stat_dbentry() -
2379  *
2380  * Support function for the SQL-callable pgstat* functions. Returns
2381  * the collected statistics for one database or NULL. NULL doesn't mean
2382  * that the database doesn't exist, it is just not yet known by the
2383  * collector, so the caller is better off to report ZERO instead.
2384  * ----------
2385  */
2386 PgStat_StatDBEntry *
2387 pgstat_fetch_stat_dbentry(Oid dbid)
2388 {
2389  /*
2390  * If not done for this transaction, read the statistics collector stats
2391  * file into some hash tables.
2392  */
2393  backend_read_statsfile();
2394 
2395  /*
2396  * Lookup the requested database; return NULL if not found
2397  */
2398  return (PgStat_StatDBEntry *) hash_search(pgStatDBHash,
2399  (void *) &dbid,
2400  HASH_FIND, NULL);
2401 }
2402 
2403 
2404 /* ----------
2405  * pgstat_fetch_stat_tabentry() -
2406  *
2407  * Support function for the SQL-callable pgstat* functions. Returns
2408  * the collected statistics for one table or NULL. NULL doesn't mean
2409  * that the table doesn't exist, it is just not yet known by the
2410  * collector, so the caller is better off to report ZERO instead.
2411  * ----------
2412  */
2413 PgStat_StatTabEntry *
2414 pgstat_fetch_stat_tabentry(Oid relid)
2415 {
2416  Oid dbid;
2417  PgStat_StatDBEntry *dbentry;
2418  PgStat_StatTabEntry *tabentry;
2419 
2420  /*
2421  * If not done for this transaction, read the statistics collector stats
2422  * file into some hash tables.
2423  */
2424  backend_read_statsfile();
2425 
2426  /*
2427  * Lookup our database, then look in its table hash table.
2428  */
2429  dbid = MyDatabaseId;
2430  dbentry = (PgStat_StatDBEntry *) hash_search(pgStatDBHash,
2431  (void *) &dbid,
2432  HASH_FIND, NULL);
2433  if (dbentry != NULL && dbentry->tables != NULL)
2434  {
2435  tabentry = (PgStat_StatTabEntry *) hash_search(dbentry->tables,
2436  (void *) &relid,
2437  HASH_FIND, NULL);
2438  if (tabentry)
2439  return tabentry;
2440  }
2441 
2442  /*
2443  * If we didn't find it, maybe it's a shared table.
2444  */
2445  dbid = InvalidOid;
2446  dbentry = (PgStat_StatDBEntry *) hash_search(pgStatDBHash,
2447  (void *) &dbid,
2448  HASH_FIND, NULL);
2449  if (dbentry != NULL && dbentry->tables != NULL)
2450  {
2451  tabentry = (PgStat_StatTabEntry *) hash_search(dbentry->tables,
2452  (void *) &relid,
2453  HASH_FIND, NULL);
2454  if (tabentry)
2455  return tabentry;
2456  }
2457 
2458  return NULL;
2459 }
2460 
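From backend or extension C code, usage of the fetch function above is a single lookup plus a NULL check. A hypothetical sketch (the helper name is made up; n_live_tuples is a PgStat_StatTabEntry field, and a NULL result simply means the collector has not reported this table yet):

#include "postgres.h"
#include "pgstat.h"

/* Hypothetical helper: live-tuple estimate for a relation, defaulting to 0. */
static int64
stat_live_tuples(Oid relid)
{
	PgStat_StatTabEntry *tabentry = pgstat_fetch_stat_tabentry(relid);

	/* NULL only means "no statistics yet", so report zero */
	return tabentry ? (int64) tabentry->n_live_tuples : 0;
}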
2461 
2462 /* ----------
2463  * pgstat_fetch_stat_funcentry() -
2464  *
2465  * Support function for the SQL-callable pgstat* functions. Returns
2466  * the collected statistics for one function or NULL.
2467  * ----------
2468  */
2469 PgStat_StatFuncEntry *
2470 pgstat_fetch_stat_funcentry(Oid func_id)
2471 {
2472  PgStat_StatDBEntry *dbentry;
2473  PgStat_StatFuncEntry *funcentry = NULL;
2474 
2475  /* load the stats file if needed */
2476  backend_read_statsfile();
2477 
2478  /* Lookup our database, then find the requested function. */
2479  dbentry = pgstat_fetch_stat_dbentry(MyDatabaseId);
2480  if (dbentry != NULL && dbentry->functions != NULL)
2481  {
2482  funcentry = (PgStat_StatFuncEntry *) hash_search(dbentry->functions,
2483  (void *) &func_id,
2484  HASH_FIND, NULL);
2485  }
2486 
2487  return funcentry;
2488 }
2489 
2490 
2491 /* ----------
2492  * pgstat_fetch_stat_beentry() -
2493  *
2494  * Support function for the SQL-callable pgstat* functions. Returns
2495  * our local copy of the current-activity entry for one backend.
2496  *
2497  * NB: caller is responsible for a check if the user is permitted to see
2498  * this info (especially the querystring).
2499  * ----------
2500  */
2501 PgBackendStatus *
2502 pgstat_fetch_stat_beentry(int beid)
2503 {
2504  pgstat_read_current_status();
2505 
2506  if (beid < 1 || beid > localNumBackends)
2507  return NULL;
2508 
2509  return &localBackendStatusTable[beid - 1].backendStatus;
2510 }
2511 
2512 
2513 /* ----------
2514  * pgstat_fetch_stat_local_beentry() -
2515  *
2516  * Like pgstat_fetch_stat_beentry() but with locally computed additions (like
2517  * xid and xmin values of the backend)
2518  *
2519  * NB: caller is responsible for a check if the user is permitted to see
2520  * this info (especially the querystring).
2521  * ----------
2522  */
2523 LocalPgBackendStatus *
2524 pgstat_fetch_stat_local_beentry(int beid)
2525 {
2526  pgstat_read_current_status();
2527 
2528  if (beid < 1 || beid > localNumBackends)
2529  return NULL;
2530 
2531  return &localBackendStatusTable[beid - 1];
2532 }
2533 
2534 
2535 /* ----------
2536  * pgstat_fetch_stat_numbackends() -
2537  *
2538  * Support function for the SQL-callable pgstat* functions. Returns
2539  * the maximum current backend id.
2540  * ----------
2541  */
2542 int
2543 pgstat_fetch_stat_numbackends(void)
2544 {
2545  pgstat_read_current_status();
2546 
2547  return localNumBackends;
2548 }
2549 
2550 /*
2551  * ---------
2552  * pgstat_fetch_stat_archiver() -
2553  *
2554  * Support function for the SQL-callable pgstat* functions. Returns
2555  * a pointer to the archiver statistics struct.
2556  * ---------
2557  */
2558 PgStat_ArchiverStats *
2559 pgstat_fetch_stat_archiver(void)
2560 {
2561  backend_read_statsfile();
2562 
2563  return &archiverStats;
2564 }
2565 
2566 
2567 /*
2568  * ---------
2569  * pgstat_fetch_global() -
2570  *
2571  * Support function for the SQL-callable pgstat* functions. Returns
2572  * a pointer to the global statistics struct.
2573  * ---------
2574  */
2575 PgStat_GlobalStats *
2576 pgstat_fetch_global(void)
2577 {
2578  backend_read_statsfile();
2579 
2580  return &globalStats;
2581 }
2582 
2583 
2584 /* ------------------------------------------------------------
2585  * Functions for management of the shared-memory PgBackendStatus array
2586  * ------------------------------------------------------------
2587  */
2588 
2589 static PgBackendStatus *BackendStatusArray = NULL;
2590 static PgBackendStatus *MyBEEntry = NULL;
2591 static char *BackendAppnameBuffer = NULL;
2592 static char *BackendClientHostnameBuffer = NULL;
2593 static char *BackendActivityBuffer = NULL;
2594 static Size BackendActivityBufferSize = 0;
2595 #ifdef USE_SSL
2596 static PgBackendSSLStatus *BackendSslStatusBuffer = NULL;
2597 #endif
2598 
2599 
2600 /*
2601  * Report shared-memory space needed by CreateSharedBackendStatus.
2602  */
2603 Size
2604 BackendStatusShmemSize(void)
2605 {
2606  Size size;
2607 
2608  /* BackendStatusArray: */
2609  size = mul_size(sizeof(PgBackendStatus), NumBackendStatSlots);
2610  /* BackendAppnameBuffer: */
2611  size = add_size(size,
2612  mul_size(NAMEDATALEN, NumBackendStatSlots));
2613  /* BackendClientHostnameBuffer: */
2614  size = add_size(size,
2615  mul_size(NAMEDATALEN, NumBackendStatSlots));
2616  /* BackendActivityBuffer: */
2617  size = add_size(size,
2618  mul_size(pgstat_track_activity_query_size, NumBackendStatSlots));
2619 #ifdef USE_SSL
2620  /* BackendSslStatusBuffer: */
2621  size = add_size(size,
2622  mul_size(sizeof(PgBackendSSLStatus), NumBackendStatSlots));
2623 #endif
2624  return size;
2625 }
2626 
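The sizing above relies on mul_size()/add_size(), which error out on overflow instead of silently wrapping. A standalone sketch of the same overflow-checked arithmetic in plain C (the constants are made-up stand-ins for NAMEDATALEN, NumBackendStatSlots and sizeof(PgBackendStatus)):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static size_t
mul_size_checked(size_t a, size_t b)
{
	if (b != 0 && a > SIZE_MAX / b)		/* would overflow */
	{
		fprintf(stderr, "requested size overflows size_t\n");
		exit(1);
	}
	return a * b;
}

static size_t
add_size_checked(size_t a, size_t b)
{
	if (a > SIZE_MAX - b)				/* would overflow */
	{
		fprintf(stderr, "requested size overflows size_t\n");
		exit(1);
	}
	return a + b;
}

int
main(void)
{
	size_t		slots = 100;	/* stand-in for NumBackendStatSlots */
	size_t		namelen = 64;	/* stand-in for NAMEDATALEN */
	size_t		entry = 256;	/* stand-in for sizeof(PgBackendStatus) */
	size_t		size;

	size = mul_size_checked(entry, slots);	/* the status array itself */
	size = add_size_checked(size, mul_size_checked(namelen, slots));	/* appname buffer */
	printf("total shared size: %zu bytes\n", size);
	return 0;
}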
2627 /*
2628  * Initialize the shared status array and several string buffers
2629  * during postmaster startup.
2630  */
2631 void
2632 CreateSharedBackendStatus(void)
2633 {
2634  Size size;
2635  bool found;
2636  int i;
2637  char *buffer;
2638 
2639  /* Create or attach to the shared array */
2640  size = mul_size(sizeof(PgBackendStatus), NumBackendStatSlots);
2641  BackendStatusArray = (PgBackendStatus *)
2642  ShmemInitStruct("Backend Status Array", size, &found);
2643 
2644  if (!found)
2645  {
2646  /*
2647  * We're the first - initialize.
2648  */
2649  MemSet(BackendStatusArray, 0, size);
2650  }
2651 
2652  /* Create or attach to the shared appname buffer */
2653  size = mul_size(NAMEDATALEN, MaxBackends);
2654  BackendAppnameBuffer = (char *)
2655  ShmemInitStruct("Backend Application Name Buffer", size, &found);
2656 
2657  if (!found)
2658  {
2659  MemSet(BackendAppnameBuffer, 0, size);
2660 
2661  /* Initialize st_appname pointers. */
2662  buffer = BackendAppnameBuffer;
2663  for (i = 0; i < NumBackendStatSlots; i++)
2664  {
2665  BackendStatusArray[i].st_appname = buffer;
2666  buffer += NAMEDATALEN;
2667  }
2668  }
2669 
2670  /* Create or attach to the shared client hostname buffer */
2671  size = mul_size(NAMEDATALEN, MaxBackends);
2672  BackendClientHostnameBuffer = (char *)
2673  ShmemInitStruct("Backend Client Host Name Buffer", size, &found);
2674 
2675  if (!found)
2676  {
2677  MemSet(BackendClientHostnameBuffer, 0, size);
2678 
2679  /* Initialize st_clienthostname pointers. */
2680  buffer = BackendClientHostnameBuffer;
2681  for (i = 0; i < NumBackendStatSlots; i++)
2682  {
2683  BackendStatusArray[i].st_clienthostname = buffer;
2684  buffer += NAMEDATALEN;
2685  }
2686  }
2687 
2688  /* Create or attach to the shared activity buffer */
2689  BackendActivityBufferSize = mul_size(pgstat_track_activity_query_size,
2690  NumBackendStatSlots);
2691  BackendActivityBuffer = (char *)
2692  ShmemInitStruct("Backend Activity Buffer",
2693  BackendActivityBufferSize,
2694  &found);
2695 
2696  if (!found)
2697  {
2698  MemSet(BackendActivityBuffer, 0, size);
2699 
2700  /* Initialize st_activity pointers. */
2701  buffer = BackendActivityBuffer;
2702  for (i = 0; i < NumBackendStatSlots; i++)
2703  {
2704  BackendStatusArray[i].st_activity_raw = buffer;
2705  buffer += pgstat_track_activity_query_size;
2706  }
2707  }
2708 
2709 #ifdef USE_SSL
2710  /* Create or attach to the shared SSL status buffer */
2711  size = mul_size(sizeof(PgBackendSSLStatus), NumBackendStatSlots);
2712  BackendSslStatusBuffer = (PgBackendSSLStatus *)
2713  ShmemInitStruct("Backend SSL Status Buffer", size, &found);
2714 
2715  if (!found)
2716  {
2717  PgBackendSSLStatus *ptr;
2718 
2719  MemSet(BackendSslStatusBuffer, 0, size);
2720 
2721  /* Initialize st_sslstatus pointers. */
2722  ptr = BackendSslStatusBuffer;
2723  for (i = 0; i < NumBackendStatSlots; i++)
2724  {
2725  BackendStatusArray[i].st_sslstatus = ptr;
2726  ptr++;
2727  }
2728  }
2729 #endif
2730 }
2731 
2732 
2733 /* ----------
2734  * pgstat_initialize() -
2735  *
2736  * Initialize pgstats state, and set up our on-proc-exit hook.
2737  * Called from InitPostgres and AuxiliaryProcessMain. For auxiliary process,
2738  * MyBackendId is invalid. Otherwise, MyBackendId must be set,
2739  * but we must not have started any transaction yet (since the
2740  * exit hook must run after the last transaction exit).
2741  * NOTE: MyDatabaseId isn't set yet; so the shutdown hook has to be careful.
2742  * ----------
2743  */
2744 void
2745 pgstat_initialize(void)
2746 {
2747  /* Initialize MyBEEntry */
2748  if (MyBackendId != InvalidBackendId)
2749  {
2750  Assert(MyBackendId >= 1 && MyBackendId <= MaxBackends);
2751  MyBEEntry = &BackendStatusArray[MyBackendId - 1];
2752  }
2753  else
2754  {
2755  /* Must be an auxiliary process */
2756  Assert(MyAuxProcType != NotAnAuxProcess);
2757 
2758  /*
2759  * Assign the MyBEEntry for an auxiliary process. Since it doesn't
2760  * have a BackendId, the slot is statically allocated based on the
2761  * auxiliary process type (MyAuxProcType). Backends use slots indexed
2762  * in the range from 1 to MaxBackends (inclusive), so we use
2763  * MaxBackends + AuxBackendType + 1 as the index of the slot for an
2764  * auxiliary process.
2765  */
2766  MyBEEntry = &BackendStatusArray[MaxBackends + MyAuxProcType];
2767  }
2768 
2769  /* Set up a process-exit hook to clean up */
2770  on_shmem_exit(pgstat_beshutdown_hook, 0);
2771 }
2772 
2773 /* ----------
2774  * pgstat_bestart() -
2775  *
2776  * Initialize this backend's entry in the PgBackendStatus array.
2777  * Called from InitPostgres.
2778  *
2779  * Apart from auxiliary processes, MyBackendId, MyDatabaseId,
2780  * session userid, and application_name must be set for a
2781  * backend (hence, this cannot be combined with pgstat_initialize).
2782  * ----------
2783  */
2784 void
2785 pgstat_bestart(void)
2786 {
2787  TimestampTz proc_start_timestamp;
2788  SockAddr clientaddr;
2789  volatile PgBackendStatus *beentry;
2790 
2791  /*
2792  * To minimize the time spent modifying the PgBackendStatus entry, fetch
2793  * all the needed data first.
2794  *
2795  * If we have a MyProcPort, use its session start time (for consistency,
2796  * and to save a kernel call).
2797  */
2798  if (MyProcPort)
2799  proc_start_timestamp = MyProcPort->SessionStartTime;
2800  else
2801  proc_start_timestamp = GetCurrentTimestamp();
2802 
2803  /*
2804  * We may not have a MyProcPort (eg, if this is the autovacuum process).
2805  * If so, use all-zeroes client address, which is dealt with specially in
2806  * pg_stat_get_backend_client_addr and pg_stat_get_backend_client_port.
2807  */
2808  if (MyProcPort)
2809  memcpy(&clientaddr, &MyProcPort->raddr, sizeof(clientaddr));
2810  else
2811  MemSet(&clientaddr, 0, sizeof(clientaddr));
2812 
2813  /*
2814  * Initialize my status entry, following the protocol of bumping
2815  * st_changecount before and after; and make sure it's even afterwards. We
2816  * use a volatile pointer here to ensure the compiler doesn't try to get
2817  * cute.
2818  */
2819  beentry = MyBEEntry;
2820 
2821  /* pgstats state must be initialized from pgstat_initialize() */
2822  Assert(beentry != NULL);
2823 
2824  if (MyBackendId != InvalidBackendId)
2825  {
2826  if (IsAutoVacuumLauncherProcess())
2827  {
2828  /* Autovacuum Launcher */
2829  beentry->st_backendType = B_AUTOVAC_LAUNCHER;
2830  }
2831  else if (IsAutoVacuumWorkerProcess())
2832  {
2833  /* Autovacuum Worker */
2834  beentry->st_backendType = B_AUTOVAC_WORKER;
2835  }
2836  else if (am_walsender)
2837  {
2838  /* Wal sender */
2839  beentry->st_backendType = B_WAL_SENDER;
2840  }
2841  else if (IsBackgroundWorker)
2842  {
2843  /* bgworker */
2844  beentry->st_backendType = B_BG_WORKER;
2845  }
2846  else
2847  {
2848  /* client-backend */
2849  beentry->st_backendType = B_BACKEND;
2850  }
2851  }
2852  else
2853  {
2854  /* Must be an auxiliary process */
2855  Assert(MyAuxProcType != NotAnAuxProcess);
2856  switch (MyAuxProcType)
2857  {
2858  case StartupProcess:
2859  beentry->st_backendType = B_STARTUP;
2860  break;
2861  case BgWriterProcess:
2862  beentry->st_backendType = B_BG_WRITER;
2863  break;
2864  case CheckpointerProcess:
2865  beentry->st_backendType = B_CHECKPOINTER;
2866  break;
2867  case WalWriterProcess:
2868  beentry->st_backendType = B_WAL_WRITER;
2869  break;
2870  case WalReceiverProcess:
2871  beentry->st_backendType = B_WAL_RECEIVER;
2872  break;
2873  default:
2874  elog(FATAL, "unrecognized process type: %d",
2875  (int) MyAuxProcType);
2876  proc_exit(1);
2877  }
2878  }
2879 
2880  do
2881  {
2882  pgstat_increment_changecount_before(beentry);
2883  } while ((beentry->st_changecount & 1) == 0);
2884 
2885  beentry->st_procpid = MyProcPid;
2886  beentry->st_proc_start_timestamp = proc_start_timestamp;
2887  beentry->st_activity_start_timestamp = 0;
2888  beentry->st_state_start_timestamp = 0;
2889  beentry->st_xact_start_timestamp = 0;
2890  beentry->st_databaseid = MyDatabaseId;
2891 
2892  /* We have userid for client-backends, wal-sender and bgworker processes */
2893  if (beentry->st_backendType == B_BACKEND
2894  || beentry->st_backendType == B_WAL_SENDER
2895  || beentry->st_backendType == B_BG_WORKER)
2896  beentry->st_userid = GetSessionUserId();
2897  else
2898  beentry->st_userid = InvalidOid;
2899 
2900  beentry->st_clientaddr = clientaddr;
2901  if (MyProcPort && MyProcPort->remote_hostname)
2902  strlcpy(beentry->st_clienthostname, MyProcPort->remote_hostname,
2903  NAMEDATALEN);
2904  else
2905  beentry->st_clienthostname[0] = '\0';
2906 #ifdef USE_SSL
2907  if (MyProcPort && MyProcPort->ssl != NULL)
2908  {
2909  beentry->st_ssl = true;
2915  }
2916  else
2917  {
2918  beentry->st_ssl = false;
2919  }
2920 #else
2921  beentry->st_ssl = false;
2922 #endif
2923  beentry->st_state = STATE_UNDEFINED;
2924  beentry->st_appname[0] = '\0';
2925  beentry->st_activity_raw[0] = '\0';
2926  /* Also make sure the last byte in each string area is always 0 */
2927  beentry->st_clienthostname[NAMEDATALEN - 1] = '\0';
2928  beentry->st_appname[NAMEDATALEN - 1] = '\0';
2932 
2933  /*
2934  * we don't zero st_progress_param here to save cycles; nobody should
2935  * examine it until st_progress_command has been set to something other
2936  * than PROGRESS_COMMAND_INVALID
2937  */
2938 
2940 
2941  /* Update app name to current GUC setting */
2942  if (application_name)
2944 }
2945 
2946 /*
2947  * Shut down a single backend's statistics reporting at process exit.
2948  *
2949  * Flush any remaining statistics counts out to the collector.
2950  * Without this, operations triggered during backend exit (such as
2951  * temp table deletions) won't be counted.
2952  *
2953  * Lastly, clear out our entry in the PgBackendStatus array.
2954  */
2955 static void
2956 pgstat_beshutdown_hook(int code, Datum arg)
2957 {
2958  volatile PgBackendStatus *beentry = MyBEEntry;
2959 
2960  /*
2961  * If we got as far as discovering our own database ID, we can report what
2962  * we did to the collector. Otherwise, we'd be sending an invalid
2963  * database ID, so forget it. (This means that accesses to pg_database
2964  * during failed backend starts might never get counted.)
2965  */
2966  if (OidIsValid(MyDatabaseId))
2967  pgstat_report_stat(true);
2968 
2969  /*
2970  * Clear my status entry, following the protocol of bumping st_changecount
2971  * before and after. We use a volatile pointer here to ensure the
2972  * compiler doesn't try to get cute.
2973  */
2974  pgstat_increment_changecount_before(beentry);
2975 
2976  beentry->st_procpid = 0; /* mark invalid */
2977 
2978  pgstat_increment_changecount_after(beentry);
2979 }
2980 
2981 
2982 /* ----------
2983  * pgstat_report_activity() -
2984  *
2985  * Called from tcop/postgres.c to report what the backend is actually doing
2986  * (but note cmd_str can be NULL for certain cases).
2987  *
2988  * All updates of the status entry follow the protocol of bumping
2989  * st_changecount before and after. We use a volatile pointer here to
2990  * ensure the compiler doesn't try to get cute.
2991  * ----------
2992  */
2993 void
2994 pgstat_report_activity(BackendState state, const char *cmd_str)
2995 {
2996  volatile PgBackendStatus *beentry = MyBEEntry;
2997  TimestampTz start_timestamp;
2998  TimestampTz current_timestamp;
2999  int len = 0;
3000 
3001  TRACE_POSTGRESQL_STATEMENT_STATUS(cmd_str);
3002 
3003  if (!beentry)
3004  return;
3005 
3006  if (!pgstat_track_activities)
3007  {
3008  if (beentry->st_state != STATE_DISABLED)
3009  {
3010  volatile PGPROC *proc = MyProc;
3011 
3012  /*
3013  * track_activities is disabled, but we last reported a
3014  * non-disabled state. As our final update, change the state and
3015  * clear fields we will not be updating anymore.
3016  */
3017  pgstat_increment_changecount_before(beentry);
3018  beentry->st_state = STATE_DISABLED;
3019  beentry->st_state_start_timestamp = 0;
3020  beentry->st_activity_raw[0] = '\0';
3021  beentry->st_activity_start_timestamp = 0;
3022  /* st_xact_start_timestamp and wait_event_info are also disabled */
3023  beentry->st_xact_start_timestamp = 0;
3024  proc->wait_event_info = 0;
3025  pgstat_increment_changecount_after(beentry);
3026  }
3027  return;
3028  }
3029 
3030  /*
3031  * To minimize the time spent modifying the entry, fetch all the needed
3032  * data first.
3033  */
3034  start_timestamp = GetCurrentStatementStartTimestamp();
3035  if (cmd_str != NULL)
3036  {
3037  /*
3038  * Compute length of to-be-stored string unaware of multi-byte
3039  * characters. For speed reasons that'll get corrected on read, rather
3040  * than computed every write.
3041  */
3042  len = Min(strlen(cmd_str), pgstat_track_activity_query_size - 1);
3043  }
3044  current_timestamp = GetCurrentTimestamp();
3045 
3046  /*
3047  * Now update the status entry
3048  */
3049  pgstat_increment_changecount_before(beentry);
3050 
3051  beentry->st_state = state;
3052  beentry->st_state_start_timestamp = current_timestamp;
3053 
3054  if (cmd_str != NULL)
3055  {
3056  memcpy((char *) beentry->st_activity_raw, cmd_str, len);
3057  beentry->st_activity_raw[len] = '\0';
3058  beentry->st_activity_start_timestamp = start_timestamp;
3059  }
3060 
3061  pgstat_increment_changecount_after(beentry);
3062 }
3063 
3064 /*-----------
3065  * pgstat_progress_start_command() -
3066  *
3067  * Set st_progress_command (and st_progress_command_target) in own backend
3068  * entry. Also, zero-initialize st_progress_param array.
3069  *-----------
3070  */
3071 void
3072 pgstat_progress_start_command(ProgressCommandType cmdtype, Oid relid)
3073 {
3074  volatile PgBackendStatus *beentry = MyBEEntry;
3075 
3076  if (!beentry || !pgstat_track_activities)
3077  return;
3078 
3079  pgstat_increment_changecount_before(beentry);
3080  beentry->st_progress_command = cmdtype;
3081  beentry->st_progress_command_target = relid;
3082  MemSet(&beentry->st_progress_param, 0, sizeof(beentry->st_progress_param));
3083  pgstat_increment_changecount_after(beentry);
3084 }
3085 
3086 /*-----------
3087  * pgstat_progress_update_param() -
3088  *
3089  * Update index'th member in st_progress_param[] of own backend entry.
3090  *-----------
3091  */
3092 void
3093 pgstat_progress_update_param(int index, int64 val)
3094 {
3095  volatile PgBackendStatus *beentry = MyBEEntry;
3096 
3097  Assert(index >= 0 && index < PGSTAT_NUM_PROGRESS_PARAM);
3098 
3099  if (!beentry || !pgstat_track_activities)
3100  return;
3101 
3102  pgstat_increment_changecount_before(beentry);
3103  beentry->st_progress_param[index] = val;
3104  pgstat_increment_changecount_after(beentry);
3105 }
3106 
3107 /*-----------
3108  * pgstat_progress_update_multi_param() -
3109  *
3110  * Update multiple members in st_progress_param[] of own backend entry.
3111  * This is atomic; readers won't see intermediate states.
3112  *-----------
3113  */
3114 void
3115 pgstat_progress_update_multi_param(int nparam, const int *index,
3116  const int64 *val)
3117 {
3118  volatile PgBackendStatus *beentry = MyBEEntry;
3119  int i;
3120 
3121  if (!beentry || !pgstat_track_activities || nparam == 0)
3122  return;
3123 
3124  pgstat_increment_changecount_before(beentry);
3125 
3126  for (i = 0; i < nparam; ++i)
3127  {
3128  Assert(index[i] >= 0 && index[i] < PGSTAT_NUM_PROGRESS_PARAM);
3129 
3130  beentry->st_progress_param[index[i]] = val[i];
3131  }
3132 
3133  pgstat_increment_changecount_after(beentry);
3134 }
3135 
3136 /*-----------
3137  * pgstat_progress_end_command() -
3138  *
3139  * Reset st_progress_command (and st_progress_command_target) in own backend
3140  * entry. This signals the end of the command.
3141  *-----------
3142  */
3143 void
3144 pgstat_progress_end_command(void)
3145 {
3146  volatile PgBackendStatus *beentry = MyBEEntry;
3147 
3148  if (!beentry)
3149  return;
3150  if (!pgstat_track_activities
3151  && beentry->st_progress_command == PROGRESS_COMMAND_INVALID)
3152  return;
3153 
3154  pgstat_increment_changecount_before(beentry);
3155  beentry->st_progress_command = PROGRESS_COMMAND_INVALID;
3156  beentry->st_progress_command_target = InvalidOid;
3157  pgstat_increment_changecount_after(beentry);
3158 }
3159 
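Seen from a command implementation, the progress API above is used as start/update/end brackets around the long-running work. A hypothetical usage sketch (the helper function, the choice of PROGRESS_COMMAND_VACUUM and parameter slot 0 are illustrative only; real callers use the PROGRESS_* constants defined for their command):

#include "postgres.h"
#include "pgstat.h"

/* Hypothetical long-running command reporting how many blocks it has handled. */
static void
scan_relation_with_progress(Oid relid, int64 total_blocks)
{
	int64		blkno;

	pgstat_progress_start_command(PROGRESS_COMMAND_VACUUM, relid);

	for (blkno = 0; blkno < total_blocks; blkno++)
	{
		/* ... do the per-block work here ... */

		/* parameter slot 0 is an arbitrary choice for this sketch */
		pgstat_progress_update_param(0, blkno + 1);
	}

	pgstat_progress_end_command();
}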
3160 /* ----------
3161  * pgstat_report_appname() -
3162  *
3163  * Called to update our application name.
3164  * ----------
3165  */
3166 void
3167 pgstat_report_appname(const char *appname)
3168 {
3169  volatile PgBackendStatus *beentry = MyBEEntry;
3170  int len;
3171 
3172  if (!beentry)
3173  return;
3174 
3175  /* This should be unnecessary if GUC did its job, but be safe */
3176  len = pg_mbcliplen(appname, strlen(appname), NAMEDATALEN - 1);
3177 
3178  /*
3179  * Update my status entry, following the protocol of bumping
3180  * st_changecount before and after. We use a volatile pointer here to
3181  * ensure the compiler doesn't try to get cute.
3182  */
3183  pgstat_increment_changecount_before(beentry);
3184 
3185  memcpy((char *) beentry->st_appname, appname, len);
3186  beentry->st_appname[len] = '\0';
3187 
3188  pgstat_increment_changecount_after(beentry);
3189 }
3190 
3191 /*
3192  * Report current transaction start timestamp as the specified value.
3193  * Zero means there is no active transaction.
3194  */
3195 void
3196 pgstat_report_xact_timestamp(TimestampTz tstamp)
3197 {
3198  volatile PgBackendStatus *beentry = MyBEEntry;
3199 
3200  if (!pgstat_track_activities || !beentry)
3201  return;
3202 
3203  /*
3204  * Update my status entry, following the protocol of bumping
3205  * st_changecount before and after. We use a volatile pointer here to
3206  * ensure the compiler doesn't try to get cute.
3207  */
3208  pgstat_increment_changecount_before(beentry);
3209  beentry->st_xact_start_timestamp = tstamp;
3210  pgstat_increment_changecount_after(beentry);
3211 }
3212 
3213 /* ----------
3214  * pgstat_read_current_status() -
3215  *
3216  * Copy the current contents of the PgBackendStatus array to local memory,
3217  * if not already done in this transaction.
3218  * ----------
3219  */
3220 static void
3221 pgstat_read_current_status(void)
3222 {
3223  volatile PgBackendStatus *beentry;
3224  LocalPgBackendStatus *localtable;
3225  LocalPgBackendStatus *localentry;
3226  char *localappname,
3227  *localactivity;
3228 #ifdef USE_SSL
3229  PgBackendSSLStatus *localsslstatus;
3230 #endif
3231  int i;
3232 
3234  if (localBackendStatusTable)
3235  return; /* already done */
3236 
3237  pgstat_setup_memcxt();
3238 
3239  localtable = (LocalPgBackendStatus *)
3240  MemoryContextAlloc(pgStatLocalContext,
3241  sizeof(LocalPgBackendStatus) * NumBackendStatSlots);
3242  localappname = (char *)
3243  MemoryContextAlloc(pgStatLocalContext,
3244  NAMEDATALEN * NumBackendStatSlots);
3245  localactivity = (char *)
3246  MemoryContextAlloc(pgStatLocalContext,
3247  pgstat_track_activity_query_size * NumBackendStatSlots);
3248 #ifdef USE_SSL
3249  localsslstatus = (PgBackendSSLStatus *)
3250  MemoryContextAlloc(pgStatLocalContext,
3251  sizeof(PgBackendSSLStatus) * NumBackendStatSlots);
3252 #endif
3253 
3254  localNumBackends = 0;
3255 
3256  beentry = BackendStatusArray;
3257  localentry = localtable;
3258  for (i = 1; i <= NumBackendStatSlots; i++)
3259  {
3260  /*
3261  * Follow the protocol of retrying if st_changecount changes while we
3262  * copy the entry, or if it's odd. (The check for odd is needed to
3263  * cover the case where we are able to completely copy the entry while
3264  * the source backend is between increment steps.) We use a volatile
3265  * pointer here to ensure the compiler doesn't try to get cute.
3266  */
3267  for (;;)
3268  {
3269  int before_changecount;
3270  int after_changecount;
3271 
3272  pgstat_save_changecount_before(beentry, before_changecount);
3273 
3274  localentry->backendStatus.st_procpid = beentry->st_procpid;
3275  if (localentry->backendStatus.st_procpid > 0)
3276  {
3277  memcpy(&localentry->backendStatus, (char *) beentry, sizeof(PgBackendStatus));
3278 
3279  /*
3280  * strcpy is safe even if the string is modified concurrently,
3281  * because there's always a \0 at the end of the buffer.
3282  */
3283  strcpy(localappname, (char *) beentry->st_appname);
3284  localentry->backendStatus.st_appname = localappname;
3285  strcpy(localactivity, (char *) beentry->st_activity_raw);
3286  localentry->backendStatus.st_activity_raw = localactivity;
3287  localentry->backendStatus.st_ssl = beentry->st_ssl;
3288 #ifdef USE_SSL
3289  if (beentry->st_ssl)
3290  {
3291  memcpy(localsslstatus, beentry->st_sslstatus, sizeof(PgBackendSSLStatus));
3292  localentry->backendStatus.st_sslstatus = localsslstatus;
3293  }
3294 #endif
3295  }
3296 
3297  pgstat_save_changecount_after(beentry, after_changecount);
3298  if (before_changecount == after_changecount &&
3299  (before_changecount & 1) == 0)
3300  break;
3301 
3302  /* Make sure we can break out of loop if stuck... */
3303  CHECK_FOR_INTERRUPTS();
3304  }
3305 
3306  beentry++;
3307  /* Only valid entries get included into the local array */
3308  if (localentry->backendStatus.st_procpid > 0)
3309  {
3310  BackendIdGetTransactionIds(i,
3311  &localentry->backend_xid,
3312  &localentry->backend_xmin);
3313 
3314  localentry++;
3315  localappname += NAMEDATALEN;
3316  localactivity += pgstat_track_activity_query_size;
3317 #ifdef USE_SSL
3318  localsslstatus++;
3319 #endif
3320  localNumBackends++;
3321  }
3322  }
3323 
3324  /* Set the pointer only after completion of a valid table */
3325  localBackendStatusTable = localtable;
3326 }
3327 
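The st_changecount protocol used throughout this array is essentially a seqlock: the writer makes the counter odd before touching its entry and even again afterwards, and a reader retries whenever the counter changed or was odd while copying. A minimal single-threaded standalone sketch of both sides (no memory barriers; the real increment/save macros hide those details):

#include <stdio.h>

typedef struct Entry
{
	int			changecount;
	int			payload;
} Entry;

static void
writer_update(Entry *e, int newval)
{
	e->changecount++;			/* now odd: an update is in progress */
	e->payload = newval;
	e->changecount++;			/* even again: the entry is consistent */
}

static int
reader_copy(const Entry *e)
{
	int			value;
	int			before,
				after;

	for (;;)
	{
		before = e->changecount;
		value = e->payload;		/* the (possibly torn) copy */
		after = e->changecount;
		if (before == after && (before & 1) == 0)
			break;				/* unchanged and the writer was not mid-update */
		/* otherwise retry */
	}
	return value;
}

int
main(void)
{
	Entry		e = {0, 0};

	writer_update(&e, 42);
	printf("reader sees %d\n", reader_copy(&e));
	return 0;
}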
3328 /* ----------
3329  * pgstat_get_wait_event_type() -
3330  *
3331  * Return a string representing the current wait event type the backend is
3332  * waiting on.
3333  */
3334 const char *
3335 pgstat_get_wait_event_type(uint32 wait_event_info)
3336 {
3337  uint32 classId;
3338  const char *event_type;
3339 
3340  /* report process as not waiting. */
3341  if (wait_event_info == 0)
3342  return NULL;
3343 
3344  classId = wait_event_info & 0xFF000000;
3345 
3346  switch (classId)
3347  {
3348  case PG_WAIT_LWLOCK:
3349  event_type = "LWLock";
3350  break;
3351  case PG_WAIT_LOCK:
3352  event_type = "Lock";
3353  break;
3354  case PG_WAIT_BUFFER_PIN:
3355  event_type = "BufferPin";
3356  break;
3357  case PG_WAIT_ACTIVITY:
3358  event_type = "Activity";
3359  break;
3360  case PG_WAIT_CLIENT:
3361  event_type = "Client";
3362  break;
3363  case PG_WAIT_EXTENSION:
3364  event_type = "Extension";
3365  break;
3366  case PG_WAIT_IPC:
3367  event_type = "IPC";
3368  break;
3369  case PG_WAIT_TIMEOUT:
3370  event_type = "Timeout";
3371  break;
3372  case PG_WAIT_IO:
3373  event_type = "IO";
3374  break;
3375  default:
3376  event_type = "???";
3377  break;
3378  }
3379 
3380  return event_type;
3381 }
3382 
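All the function above depends on is the packing of wait_event_info: the class lives in the top byte and the event in the low 16 bits. A standalone sketch of that packing and unpacking (the numeric values are illustrative, not real PG_WAIT_* or WAIT_EVENT_* codes):

#include <stdint.h>
#include <stdio.h>

#define CLASS_MASK	0xFF000000u
#define EVENT_MASK	0x0000FFFFu

int
main(void)
{
	/* illustrative values only */
	uint32_t	class_id = 0x0A000000u;
	uint16_t	event_id = 0x0007;
	uint32_t	wait_event_info = class_id | event_id;

	printf("class = 0x%08X, event = 0x%04X\n",
		   (unsigned int) (wait_event_info & CLASS_MASK),
		   (unsigned int) (wait_event_info & EVENT_MASK));
	return 0;
}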
3383 /* ----------
3384  * pgstat_get_wait_event() -
3385  *
3386  * Return a string representing the current wait event the backend is
3387  * waiting on.
3388  */
3389 const char *
3390 pgstat_get_wait_event(uint32 wait_event_info)
3391 {
3392  uint32 classId;
3393  uint16 eventId;
3394  const char *event_name;
3395 
3396  /* report process as not waiting. */
3397  if (wait_event_info == 0)
3398  return NULL;
3399 
3400  classId = wait_event_info & 0xFF000000;
3401  eventId = wait_event_info & 0x0000FFFF;
3402 
3403  switch (classId)
3404  {
3405  case PG_WAIT_LWLOCK:
3406  event_name = GetLWLockIdentifier(classId, eventId);
3407  break;
3408  case PG_WAIT_LOCK:
3409  event_name = GetLockNameFromTagType(eventId);
3410  break;
3411  case PG_WAIT_BUFFER_PIN:
3412  event_name = "BufferPin";
3413  break;
3414  case PG_WAIT_ACTIVITY:
3415  {
3416  WaitEventActivity w = (WaitEventActivity) wait_event_info;
3417 
3418  event_name = pgstat_get_wait_activity(w);
3419  break;
3420  }
3421  case PG_WAIT_CLIENT:
3422  {
3423  WaitEventClient w = (WaitEventClient) wait_event_info;
3424 
3425  event_name = pgstat_get_wait_client(w);
3426  break;
3427  }
3428  case PG_WAIT_EXTENSION:
3429  event_name = "Extension";
3430  break;
3431  case PG_WAIT_IPC:
3432  {
3433  WaitEventIPC w = (WaitEventIPC) wait_event_info;
3434 
3435  event_name = pgstat_get_wait_ipc(w);
3436  break;
3437  }
3438  case PG_WAIT_TIMEOUT:
3439  {
3440  WaitEventTimeout w = (WaitEventTimeout) wait_event_info;
3441 
3442  event_name = pgstat_get_wait_timeout(w);
3443  break;
3444  }
3445  case PG_WAIT_IO:
3446  {
3447  WaitEventIO w = (WaitEventIO) wait_event_info;
3448 
3449  event_name = pgstat_get_wait_io(w);
3450  break;
3451  }
3452  default:
3453  event_name = "unknown wait event";
3454  break;
3455  }
3456 
3457  return event_name;
3458 }
3459 
3460 /* ----------
3461  * pgstat_get_wait_activity() -
3462  *
3463  * Convert WaitEventActivity to string.
3464  * ----------
3465  */
3466 static const char *
3467 pgstat_get_wait_activity(WaitEventActivity w)
3468 {
3469  const char *event_name = "unknown wait event";
3470 
3471  switch (w)
3472  {
3474  event_name = "ArchiverMain";
3475  break;
3477  event_name = "AutoVacuumMain";
3478  break;
3480  event_name = "BgWriterHibernate";
3481  break;
3483  event_name = "BgWriterMain";
3484  break;
3486  event_name = "CheckpointerMain";
3487  break;
3489  event_name = "LogicalLauncherMain";
3490  break;
3492  event_name = "LogicalApplyMain";
3493  break;
3495  event_name = "PgStatMain";
3496  break;
3498  event_name = "RecoveryWalAll";
3499  break;
3501  event_name = "RecoveryWalStream";
3502  break;
3504  event_name = "SysLoggerMain";
3505  break;
3507  event_name = "WalReceiverMain";
3508  break;
3510  event_name = "WalSenderMain";
3511  break;
3513  event_name = "WalWriterMain";
3514  break;
3515  /* no default case, so that compiler will warn */
3516  }
3517 
3518  return event_name;
3519 }
3520 
3521 /* ----------
3522  * pgstat_get_wait_client() -
3523  *
3524  * Convert WaitEventClient to string.
3525  * ----------
3526  */
3527 static const char *
3528 pgstat_get_wait_client(WaitEventClient w)
3529 {
3530  const char *event_name = "unknown wait event";
3531 
3532  switch (w)
3533  {
3535  event_name = "ClientRead";
3536  break;
3538  event_name = "ClientWrite";
3539  break;
3541  event_name = "LibPQWalReceiverConnect";
3542  break;
3544  event_name = "LibPQWalReceiverReceive";
3545  break;
3547  event_name = "SSLOpenServer";
3548  break;
3550  event_name = "WalReceiverWaitStart";
3551  break;
3553  event_name = "WalSenderWaitForWAL";
3554  break;
3556  event_name = "WalSenderWriteData";
3557  break;
3558  /* no default case, so that compiler will warn */
3559  }
3560 
3561  return event_name;
3562 }
3563 
3564 /* ----------
3565  * pgstat_get_wait_ipc() -
3566  *
3567  * Convert WaitEventIPC to string.
3568  * ----------
3569  */
3570 static const char *
3571 pgstat_get_wait_ipc(WaitEventIPC w)
3572 {
3573  const char *event_name = "unknown wait event";
3574 
3575  switch (w)
3576  {
3578  event_name = "BgWorkerShutdown";
3579  break;
3581  event_name = "BgWorkerStartup";
3582  break;
3583  case WAIT_EVENT_BTREE_PAGE:
3584  event_name = "BtreePage";
3585  break;
3587  event_name = "ExecuteGather";
3588  break;
3590  event_name = "Hash/Batch/Allocating";
3591  break;
3593  event_name = "Hash/Batch/Electing";
3594  break;
3596  event_name = "Hash/Batch/Loading";
3597  break;
3599  event_name = "Hash/Build/Allocating";
3600  break;
3602  event_name = "Hash/Build/Electing";
3603  break;
3605  event_name = "Hash/Build/HashingInner";
3606  break;
3608  event_name = "Hash/Build/HashingOuter";
3609  break;
3611  event_name = "Hash/GrowBatches/Allocating";
3612  break;
3614  event_name = "Hash/GrowBatches/Deciding";
3615  break;
3617  event_name = "Hash/GrowBatches/Electing";
3618  break;
3620  event_name = "Hash/GrowBatches/Finishing";
3621  break;
3623  event_name = "Hash/GrowBatches/Repartitioning";
3624  break;
3626  event_name = "Hash/GrowBuckets/Allocating";
3627  break;
3629  event_name = "Hash/GrowBuckets/Electing";
3630  break;
3632  event_name = "Hash/GrowBuckets/Reinserting";
3633  break;
3635  event_name = "LogicalSyncData";
3636  break;
3638  event_name = "LogicalSyncStateChange";
3639  break;
3641  event_name = "MessageQueueInternal";
3642  break;
3644  event_name = "MessageQueuePutMessage";
3645  break;
3646  case WAIT_EVENT_MQ_RECEIVE:
3647  event_name = "MessageQueueReceive";
3648  break;
3649  case WAIT_EVENT_MQ_SEND:
3650  event_name = "MessageQueueSend";
3651  break;
3653  event_name = "ParallelFinish";
3654  break;
3656  event_name = "ParallelBitmapScan";
3657  break;
3659  event_name = "ParallelCreateIndexScan";
3660  break;
3662  event_name = "ProcArrayGroupUpdate";
3663  break;
3665  event_name = "ClogGroupUpdate";
3666  break;
3668  event_name = "ReplicationOriginDrop";
3669  break;
3671  event_name = "ReplicationSlotDrop";
3672  break;
3674  event_name = "SafeSnapshot";
3675  break;
3676  case WAIT_EVENT_SYNC_REP:
3677  event_name = "SyncRep";
3678  break;
3679  /* no default case, so that compiler will warn */
3680  }
3681 
3682  return event_name;
3683 }
3684 
3685 /* ----------
3686  * pgstat_get_wait_timeout() -
3687  *
3688  * Convert WaitEventTimeout to string.
3689  * ----------
3690  */
3691 static const char *
3692 pgstat_get_wait_timeout(WaitEventTimeout w)
3693 {
3694  const char *event_name = "unknown wait event";
3695 
3696  switch (w)
3697  {
3699  event_name = "BaseBackupThrottle";
3700  break;
3701  case WAIT_EVENT_PG_SLEEP:
3702  event_name = "PgSleep";
3703  break;
3705  event_name = "RecoveryApplyDelay";
3706  break;
3707  /* no default case, so that compiler will warn */
3708  }
3709 
3710  return event_name;
3711 }
3712 
3713 /* ----------
3714  * pgstat_get_wait_io() -
3715  *
3716  * Convert WaitEventIO to string.
3717  * ----------
3718  */
3719 static const char *
3720 pgstat_get_wait_io(WaitEventIO w)
3721 {
3722  const char *event_name = "unknown wait event";
3723 
3724  switch (w)
3725  {
3727  event_name = "BufFileRead";
3728  break;
3730  event_name = "BufFileWrite";
3731  break;
3733  event_name = "ControlFileRead";
3734  break;
3736  event_name = "ControlFileSync";
3737  break;
3739  event_name = "ControlFileSyncUpdate";
3740  break;
3742  event_name = "ControlFileWrite";
3743  break;
3745  event_name = "ControlFileWriteUpdate";
3746  break;
3748  event_name = "CopyFileRead";
3749  break;
3751  event_name = "CopyFileWrite";
3752  break;
3754  event_name = "DataFileExtend";
3755  break;
3757  event_name = "DataFileFlush";
3758  break;
3760  event_name = "DataFileImmediateSync";
3761  break;
3763  event_name = "DataFilePrefetch";
3764  break;
3766  event_name = "DataFileRead";
3767  break;
3769  event_name = "DataFileSync";
3770  break;
3772  event_name = "DataFileTruncate";
3773  break;
3775  event_name = "DataFileWrite";
3776  break;
3778  event_name = "DSMFillZeroWrite";
3779  break;
3781  event_name = "LockFileAddToDataDirRead";
3782  break;
3784  event_name = "LockFileAddToDataDirSync";
3785  break;
3787  event_name = "LockFileAddToDataDirWrite";
3788  break;
3790  event_name = "LockFileCreateRead";
3791  break;
3793  event_name = "LockFileCreateSync";
3794  break;
3796  event_name = "LockFileCreateWrite";
3797  break;
3799  event_name = "LockFileReCheckDataDirRead";
3800  break;
3802  event_name = "LogicalRewriteCheckpointSync";
3803  break;
3805  event_name = "LogicalRewriteMappingSync";
3806  break;
3808  event_name = "LogicalRewriteMappingWrite";
3809  break;
3811  event_name = "LogicalRewriteSync";
3812  break;
3814  event_name = "LogicalRewriteTruncate";
3815  break;
3817  event_name = "LogicalRewriteWrite";
3818  break;
3820  event_name = "RelationMapRead";
3821  break;
3823  event_name = "RelationMapSync";
3824  break;
3826  event_name = "RelationMapWrite";
3827  break;
3829  event_name = "ReorderBufferRead";
3830  break;
3832  event_name = "ReorderBufferWrite";
3833  break;
3835  event_name = "ReorderLogicalMappingRead";
3836  break;
3838  event_name = "ReplicationSlotRead";
3839  break;
3841  event_name = "ReplicationSlotRestoreSync";
3842  break;
3844  event_name = "ReplicationSlotSync";
3845  break;
3847  event_name = "ReplicationSlotWrite";
3848  break;
3850  event_name = "SLRUFlushSync";
3851  break;
3852  case WAIT_EVENT_SLRU_READ:
3853  event_name = "SLRURead";
3854  break;
3855  case WAIT_EVENT_SLRU_SYNC:
3856  event_name = "SLRUSync";
3857  break;
3858  case WAIT_EVENT_SLRU_WRITE:
3859  event_name = "SLRUWrite";
3860  break;
3862  event_name = "SnapbuildRead";
3863  break;
3865  event_name = "SnapbuildSync";
3866  break;
3868  event_name = "SnapbuildWrite";
3869  break;
3871  event_name = "TimelineHistoryFileSync";
3872  break;
3874  event_name = "TimelineHistoryFileWrite";
3875  break;
3877  event_name = "TimelineHistoryRead";
3878  break;
3880  event_name = "TimelineHistorySync";
3881  break;
3883  event_name = "TimelineHistoryWrite";
3884  break;
3886  event_name = "TwophaseFileRead";
3887  break;
3889  event_name = "TwophaseFileSync";
3890  break;
3892  event_name = "TwophaseFileWrite";
3893  break;
3895  event_name = "WALSenderTimelineHistoryRead";
3896  break;
3898  event_name = "WALBootstrapSync";
3899  break;
3901  event_name = "WALBootstrapWrite";
3902  break;
3904  event_name = "WALCopyRead";
3905  break;
3907  event_name = "WALCopySync";
3908  break;
3910  event_name = "WALCopyWrite";
3911  break;
3913  event_name = "WALInitSync";
3914  break;
3916  event_name = "WALInitWrite";
3917  break;
3918  case WAIT_EVENT_WAL_READ:
3919  event_name = "WALRead";
3920  break;
3922  event_name = "WALSyncMethodAssign";
3923  break;
3924  case WAIT_EVENT_WAL_WRITE:
3925  event_name = "WALWrite";
3926  break;
3927 
3928  /* no default case, so that compiler will warn */
3929  }
3930 
3931  return event_name;
3932 }
3933 
3934 
3935 /* ----------
3936  * pgstat_get_backend_current_activity() -
3937  *
3938  * Return a string representing the current activity of the backend with
3939  * the specified PID. This looks directly at the BackendStatusArray,
3940  * and so will provide current information regardless of the age of our
3941  * transaction's snapshot of the status array.
3942  *
3943  * It is the caller's responsibility to invoke this only for backends whose
3944  * state is expected to remain stable while the result is in use. The
3945  * only current use is in deadlock reporting, where we can expect that
3946  * the target backend is blocked on a lock. (There are corner cases
3947  * where the target's wait could get aborted while we are looking at it,
3948  * but the very worst consequence is to return a pointer to a string
3949  * that's been changed, so we won't worry too much.)
3950  *
3951  * Note: return strings for special cases match pg_stat_get_backend_activity.
3952  * ----------
3953  */
3954 const char *
3955 pgstat_get_backend_current_activity(int pid, bool checkUser)
3956 {
3957  PgBackendStatus *beentry;
3958  int i;
3959 
3960  beentry = BackendStatusArray;
3961  for (i = 1; i <= MaxBackends; i++)
3962  {
3963  /*
3964  * Although we expect the target backend's entry to be stable, that
3965  * doesn't imply that anyone else's is. To avoid identifying the
3966  * wrong backend, while we check for a match to the desired PID we
3967  * must follow the protocol of retrying if st_changecount changes
3968  * while we examine the entry, or if it's odd. (This might be
3969  * unnecessary, since fetching or storing an int is almost certainly
3970  * atomic, but let's play it safe.) We use a volatile pointer here to
3971  * ensure the compiler doesn't try to get cute.
3972  */
3973  volatile PgBackendStatus *vbeentry = beentry;
3974  bool found;
3975 
3976  for (;;)
3977  {
3978  int before_changecount;
3979  int after_changecount;
3980 
3981  pgstat_save_changecount_before(vbeentry, before_changecount);
3982 
3983  found = (vbeentry->st_procpid == pid);
3984 
3985  pgstat_save_changecount_after(vbeentry, after_changecount);
3986 
3987  if (before_changecount == after_changecount &&
3988  (before_changecount & 1) == 0)
3989  break;
3990 
3991  /* Make sure we can break out of loop if stuck... */
3992  CHECK_FOR_INTERRUPTS();
3993  }
3994 
3995  if (found)
3996  {
3997  /* Now it is safe to use the non-volatile pointer */
3998  if (checkUser && !superuser() && beentry->st_userid != GetUserId())
3999  return "<insufficient privilege>";
4000  else if (*(beentry->st_activity_raw) == '\0')
4001  return "<command string not enabled>";
4002  else
4003  {
4004  /* this'll leak a bit of memory, but that seems acceptable */
4005  return pgstat_clip_activity(beentry->st_activity_raw);
4006  }
4007  }
4008 
4009  beentry++;
4010  }
4011 
4012  /* If we get here, caller is in error ... */
4013  return "<backend information not available>";
4014 }
4015 
4016 /* ----------
4017  * pgstat_get_crashed_backend_activity() -
4018  *
4019  * Return a string representing the current activity of the backend with
4020  * the specified PID. Like the function above, but reads shared memory with
4021  * the expectation that it may be corrupt. On success, copy the string
4022  * into the "buffer" argument and return that pointer. On failure,
4023  * return NULL.
4024  *
4025  * This function is only intended to be used by the postmaster to report the
4026  * query that crashed a backend. In particular, no attempt is made to
4027  * follow the correct concurrency protocol when accessing the
4028  * BackendStatusArray. But that's OK, in the worst case we'll return a
4029  * corrupted message. We also must take care not to trip on ereport(ERROR).
4030  * ----------
4031  */
4032 const char *
4033 pgstat_get_crashed_backend_activity(int pid, char *buffer, int buflen)
4034 {
4035  volatile PgBackendStatus *beentry;
4036  int i;
4037 
4038  beentry = BackendStatusArray;
4039 
4040  /*
4041  * We probably shouldn't get here before shared memory has been set up,
4042  * but be safe.
4043  */
4044  if (beentry == NULL || BackendActivityBuffer == NULL)
4045  return NULL;
4046 
4047  for (i = 1; i <= MaxBackends; i++)
4048  {
4049  if (beentry->st_procpid == pid)
4050  {
4051  /* Read pointer just once, so it can't change after validation */
4052  const char *activity = beentry->st_activity_raw;
4053  const char *activity_last;
4054 
4055  /*
4056  * We mustn't access activity string before we verify that it
4057  * falls within the BackendActivityBuffer. To make sure that the
4058  * entire string including its ending is contained within the
4059  * buffer, subtract one activity length from the buffer size.
4060  */
4061  activity_last = BackendActivityBuffer + BackendActivityBufferSize
4062  - pgstat_track_activity_query_size;
4063 
4064  if (activity < BackendActivityBuffer ||
4065  activity > activity_last)
4066  return NULL;
4067 
4068  /* If no string available, no point in a report */
4069  if (activity[0] == '\0')
4070  return NULL;
4071 
4072  /*
4073  * Copy only ASCII-safe characters so we don't run into encoding
4074  * problems when reporting the message; and be sure not to run off
4075  * the end of memory. As only ASCII characters are reported, it
4076  * doesn't seem necessary to perform multibyte aware clipping.
4077  */
4078  ascii_safe_strlcpy(buffer, activity,
4079  Min(buflen, pgstat_track_activity_query_size));
4080 
4081  return buffer;
4082  }
4083 
4084  beentry++;
4085  }
4086 
4087  /* PID not found */
4088  return NULL;
4089 }
4090 
4091 const char *
4092 pgstat_get_backend_desc(BackendType backendType)
4093 {
4094  const char *backendDesc = "unknown process type";
4095 
4096  switch (backendType)
4097  {
4098  case B_AUTOVAC_LAUNCHER:
4099  backendDesc = "autovacuum launcher";
4100  break;
4101  case B_AUTOVAC_WORKER:
4102  backendDesc = "autovacuum worker";
4103  break;
4104  case B_BACKEND:
4105  backendDesc = "client backend";
4106  break;
4107  case B_BG_WORKER:
4108  backendDesc = "background worker";
4109  break;
4110  case B_BG_WRITER:
4111  backendDesc = "background writer";
4112  break;
4113  case B_CHECKPOINTER:
4114  backendDesc = "checkpointer";
4115  break;
4116  case B_STARTUP:
4117  backendDesc = "startup";
4118  break;
4119  case B_WAL_RECEIVER:
4120  backendDesc = "walreceiver";
4121  break;
4122  case B_WAL_SENDER:
4123  backendDesc = "walsender";
4124  break;
4125  case B_WAL_WRITER:
4126  backendDesc = "walwriter";
4127  break;
4128  }
4129 
4130  return backendDesc;
4131 }
4132 
4133 /* ------------------------------------------------------------
4134  * Local support functions follow
4135  * ------------------------------------------------------------
4136  */
4137 
4138 
4139 /* ----------
4140  * pgstat_setheader() -
4141  *
4142  * Set common header fields in a statistics message
4143  * ----------
4144  */
4145 static void
4146 pgstat_setheader(PgStat_MsgHdr *hdr, StatMsgType mtype)
4147 {
4148  hdr->m_type = mtype;
4149 }
4150 
4151 
4152 /* ----------
4153  * pgstat_send() -
4154  *
4155  * Send out one statistics message to the collector
4156  * ----------
4157  */
4158 static void
4159 pgstat_send(void *msg, int len)
4160 {
4161  int rc;
4162 
4163  if (pgStatSock == PGINVALID_SOCKET)
4164  return;
4165 
4166  ((PgStat_MsgHdr *) msg)->m_size = len;
4167 
4168  /* We'll retry after EINTR, but ignore all other failures */
4169  do
4170  {
4171  rc = send(pgStatSock, msg, len, 0);
4172  } while (rc < 0 && errno == EINTR);
4173 
4174 #ifdef USE_ASSERT_CHECKING
4175  /* In debug builds, log send failures ... */
4176  if (rc < 0)
4177  elog(LOG, "could not send to statistics collector: %m");
4178 #endif
4179 }
4180 
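pgstat_send() is deliberately fire-and-forget: retry only on EINTR, ignore every other failure on the already-connected, non-blocking datagram socket. A standalone sketch of that retry loop around send() on a connected UDP socket (the loopback port is arbitrary):

#include <arpa/inet.h>
#include <errno.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int
main(void)
{
	int			sock = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in addr;
	const char	msg[] = "hello collector";
	ssize_t		rc;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(40404);	/* arbitrary port for the sketch */
	addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

	if (sock < 0 || connect(sock, (struct sockaddr *) &addr, sizeof(addr)) < 0)
		return 1;

	/* retry after EINTR, but ignore all other failures */
	do
	{
		rc = send(sock, msg, sizeof(msg), 0);
	} while (rc < 0 && errno == EINTR);

	if (rc < 0)
		fprintf(stderr, "send failed: %s (ignored)\n", strerror(errno));

	close(sock);
	return 0;
}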
4181 /* ----------
4182  * pgstat_send_archiver() -
4183  *
4184  * Tell the collector about the WAL file that we successfully
4185  * archived or failed to archive.
4186  * ----------
4187  */
4188 void
4189 pgstat_send_archiver(const char *xlog, bool failed)
4190 {
4191  PgStat_MsgArchiver msg;
4192 
4193  /*
4194  * Prepare and send the message
4195  */
4196  pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_ARCHIVER);
4197  msg.m_failed = failed;
4198  StrNCpy(msg.m_xlog, xlog, sizeof(msg.m_xlog));
4199  msg.m_timestamp = GetCurrentTimestamp();
4200  pgstat_send(&msg, sizeof(msg));
4201 }
4202 
4203 /* ----------
4204  * pgstat_send_bgwriter() -
4205  *
4206  * Send bgwriter statistics to the collector
4207  * ----------
4208  */
4209 void
4210 pgstat_send_bgwriter(void)
4211 {
4212  /* We assume this initializes to zeroes */
4213  static const PgStat_MsgBgWriter all_zeroes;
4214 
4215  /*
4216  * This function can be called even if nothing at all has happened. In
4217  * this case, avoid sending a completely empty message to the stats
4218  * collector.
4219  */
4220  if (memcmp(&BgWriterStats, &all_zeroes, sizeof(PgStat_MsgBgWriter)) == 0)
4221  return;
4222 
4223  /*
4224  * Prepare and send the message
4225  */
4226  pgstat_setheader(&BgWriterStats.m_hdr, PGSTAT_MTYPE_BGWRITER);
4227  pgstat_send(&BgWriterStats, sizeof(BgWriterStats));
4228 
4229  /*
4230  * Clear out the statistics buffer, so it can be re-used.
4231  */
4232  MemSet(&BgWriterStats, 0, sizeof(BgWriterStats));
4233 }
4234 
4235 
4236 /* ----------
4237  * PgstatCollectorMain() -
4238  *
4239  * Start up the statistics collector process. This is the body of the
4240  * postmaster child process.
4241  *
4242  * The argc/argv parameters are valid only in EXEC_BACKEND case.
4243  * ----------
4244  */
4245 NON_EXEC_STATIC void
4246 PgstatCollectorMain(int argc, char *argv[])
4247 {
4248  int len;
4249  PgStat_Msg msg;
4250  int wr;
4251 
4252  /*
4253  * Ignore all signals usually bound to some action in the postmaster,
4254  * except SIGHUP and SIGQUIT. Note we don't need a SIGUSR1 handler to
4255  * support latch operations, because we only use a local latch.
4256  */
4257  pqsignal(SIGHUP, pgstat_sighup_handler);
4258  pqsignal(SIGINT, SIG_IGN);
4259  pqsignal(SIGTERM, SIG_IGN);
4260  pqsignal(SIGQUIT, pgstat_exit);
4261  pqsignal(SIGALRM, SIG_IGN);
4262  pqsignal(SIGPIPE, SIG_IGN);
4263  pqsignal(SIGUSR1, SIG_IGN);
4264  pqsignal(SIGUSR2, SIG_IGN);
4265  pqsignal(SIGCHLD, SIG_DFL);
4266  pqsignal(SIGTTIN, SIG_DFL);
4267  pqsignal(SIGTTOU, SIG_DFL);
4268  pqsignal(SIGCONT, SIG_DFL);
4269  pqsignal(SIGWINCH, SIG_DFL);
4270  PG_SETMASK(&UnBlockSig);
4271 
4272  /*
4273  * Identify myself via ps
4274  */
4275  init_ps_display("stats collector", "", "", "");
4276 
4277  /*
4278  * Read in existing stats files or initialize the stats to zero.
4279  */
4280  pgStatRunningInCollector = true;
4281  pgStatDBHash = pgstat_read_statsfiles(InvalidOid, true, true);
4282 
4283  /*
4284  * Loop to process messages until we get SIGQUIT or detect ungraceful
4285  * death of our parent postmaster.
4286  *
4287  * For performance reasons, we don't want to do ResetLatch/WaitLatch after
4288  * every message; instead, do that only after a recv() fails to obtain a
4289  * message. (This effectively means that if backends are sending us stuff
4290  * like mad, we won't notice postmaster death until things slack off a
4291  * bit; which seems fine.) To do that, we have an inner loop that
4292  * iterates as long as recv() succeeds. We do recognize got_SIGHUP inside
4293  * the inner loop, which means that such interrupts will get serviced but
4294  * the latch won't get cleared until next time there is a break in the
4295  * action.
4296  */
4297  for (;;)
4298  {
4299  /* Clear any already-pending wakeups */
4300  ResetLatch(MyLatch);
4301 
4302  /*
4303  * Quit if we get SIGQUIT from the postmaster.
4304  */
4305  if (need_exit)
4306  break;
4307 
4308  /*
4309  * Inner loop iterates as long as we keep getting messages, or until
4310  * need_exit becomes set.
4311  */
4312  while (!need_exit)
4313  {
4314  /*
4315  * Reload configuration if we got SIGHUP from the postmaster.
4316  */
4317  if (got_SIGHUP)
4318  {
4319  got_SIGHUP = false;
4320  ProcessConfigFile(PGC_SIGHUP);
4321  }
4322 
4323  /*
4324  * Write the stats file(s) if a new request has arrived that is
4325  * not satisfied by existing file(s).
4326  */
4327  if (pgstat_write_statsfile_needed())
4328  pgstat_write_statsfiles(false, false);
4329 
4330  /*
4331  * Try to receive and process a message. This will not block,
4332  * since the socket is set to non-blocking mode.
4333  *
4334  * XXX On Windows, we have to force pgwin32_recv to cooperate,
4335  * despite the previous use of pg_set_noblock() on the socket.
4336  * This is extremely broken and should be fixed someday.
4337  */
4338 #ifdef WIN32
4339  pgwin32_noblock = 1;
4340 #endif
4341 
4342  len = recv(pgStatSock, (char *) &msg,
4343  sizeof(PgStat_Msg), 0);
4344 
4345 #ifdef WIN32
4346  pgwin32_noblock = 0;
4347 #endif
4348 
4349  if (len < 0)
4350  {
4351  if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)
4352  break; /* out of inner loop */
4353  ereport(ERROR,
4354  (errcode_for_socket_access(),
4355  errmsg("could not read statistics message: %m")));
4356  }
4357 
4358  /*
4359  * We ignore messages that are smaller than our common header
4360  */
4361  if (len < sizeof(PgStat_MsgHdr))
4362  continue;
4363 
4364  /*
4365  * The received length must match the length in the header
4366  */
4367  if (msg.msg_hdr.m_size != len)
4368  continue;
4369 
4370  /*
4371  * O.K. - we accept this message. Process it.
4372  */
4373  switch (msg.msg_hdr.m_type)
4374  {
4375  case PGSTAT_MTYPE_DUMMY:
4376  break;
4377 
4378  case PGSTAT_MTYPE_INQUIRY:
4379  pgstat_recv_inquiry((PgStat_MsgInquiry *) &msg, len);
4380  break;
4381 
4382  case PGSTAT_MTYPE_TABSTAT:
4383  pgstat_recv_tabstat((PgStat_MsgTabstat *) &msg, len);
4384  break;
4385 
4386  case PGSTAT_MTYPE_TABPURGE:
4387  pgstat_recv_tabpurge((PgStat_MsgTabpurge *) &msg, len);
4388  break;
4389 
4390  case PGSTAT_MTYPE_DROPDB:
4391  pgstat_recv_dropdb((PgStat_MsgDropdb *) &msg, len);
4392  break;
4393 
4394  case PGSTAT_MTYPE_RESETCOUNTER:
4395  pgstat_recv_resetcounter((PgStat_MsgResetcounter *) &msg,
4396  len);
4397  break;
4398 
4399  case PGSTAT_MTYPE_RESETSHAREDCOUNTER:
4400  pgstat_recv_resetsharedcounter(
4401  (PgStat_MsgResetsharedcounter *) &msg,
4402  len);
4403  break;
4404 
4405  case PGSTAT_MTYPE_RESETSINGLECOUNTER:
4406  pgstat_recv_resetsinglecounter(
4407  (PgStat_MsgResetsinglecounter *) &msg,
4408  len);
4409  break;
4410 
4411  case PGSTAT_MTYPE_AUTOVAC_START:
4412  pgstat_recv_autovac((PgStat_MsgAutovacStart *) &msg, len);
4413  break;
4414 
4415  case PGSTAT_MTYPE_VACUUM:
4416  pgstat_recv_vacuum((PgStat_MsgVacuum *) &msg, len);
4417  break;
4418 
4419  case PGSTAT_MTYPE_ANALYZE:
4420  pgstat_recv_analyze((PgStat_MsgAnalyze *) &msg, len);
4421  break;
4422 
4423  case PGSTAT_MTYPE_ARCHIVER:
4424  pgstat_recv_archiver((PgStat_MsgArchiver *) &msg, len);
4425  break;
4426 
4427  case PGSTAT_MTYPE_BGWRITER:
4428  pgstat_recv_bgwriter((PgStat_MsgBgWriter *) &msg, len);
4429  break;
4430 
4431  case PGSTAT_MTYPE_FUNCSTAT:
4432  pgstat_recv_funcstat((PgStat_MsgFuncstat *) &msg, len);
4433  break;
4434 
4435  case PGSTAT_MTYPE_FUNCPURGE:
4436  pgstat_recv_funcpurge((PgStat_MsgFuncpurge *) &msg, len);
4437  break;
4438 
4439  case PGSTAT_MTYPE_RECOVERYCONFLICT:
4440  pgstat_recv_recoveryconflict((PgStat_MsgRecoveryConflict *) &msg, len);
4441  break;
4442 
4443  case PGSTAT_MTYPE_DEADLOCK:
4444  pgstat_recv_deadlock((PgStat_MsgDeadlock *) &msg, len);
4445  break;
4446 
4447  case PGSTAT_MTYPE_TEMPFILE:
4448  pgstat_recv_tempfile((PgStat_MsgTempFile *) &msg, len);
4449  break;
4450 
4451  default:
4452  break;
4453  }
4454  } /* end of inner message-processing loop */
4455 
4456  /* Sleep until there's something to do */
4457 #ifndef WIN32
4458  wr = WaitLatchOrSocket(MyLatch,
4459  WL_LATCH_SET | WL_POSTMASTER_DEATH | WL_SOCKET_READABLE,
4460  pgStatSock, -1L,
4461  WAIT_EVENT_PGSTAT_MAIN);
4462 #else
4463 
4464  /*
4465  * Windows, at least in its Windows Server 2003 R2 incarnation,
4466  * sometimes loses FD_READ events. Waking up and retrying the recv()
4467  * fixes that, so don't sleep indefinitely. This is a crock of the
4468  * first water, but until somebody wants to debug exactly what's
4469  * happening there, this is the best we can do. The two-second
4470  * timeout matches our pre-9.2 behavior, and needs to be short enough
4471  * to not provoke "using stale statistics" complaints from
4472  * backend_read_statsfile.
4473  */
4474  wr = WaitLatchOrSocket(MyLatch,
4475  WL_LATCH_SET | WL_POSTMASTER_DEATH | WL_SOCKET_READABLE | WL_TIMEOUT,
4476  pgStatSock,
4477  2 * 1000L /* msec */ ,
4478  WAIT_EVENT_PGSTAT_MAIN);
4479 #endif
4480 
4481  /*
4482  * Emergency bailout if postmaster has died. This is to avoid the
4483  * necessity for manual cleanup of all postmaster children.
4484  */
4485  if (wr & WL_POSTMASTER_DEATH)
4486  break;
4487  } /* end of outer loop */
4488 
4489  /*
4490  * Save the final stats to reuse at next startup.
4491  */
4492  pgstat_write_statsfiles(true, true);
4493 
4494  exit(0);
4495 }
4496 
4497 
4498 /* SIGQUIT signal handler for collector process */
4499 static void
4500 pgstat_exit(SIGNAL_ARGS)
4501 {
4502  int save_errno = errno;
4503 
4504  need_exit = true;
4505  SetLatch(MyLatch);
4506 
4507  errno = save_errno;
4508 }
4509 
4510 /* SIGHUP handler for collector process */
4511 static void
4512 pgstat_sighup_handler(SIGNAL_ARGS)
4513 {
4514  int save_errno = errno;
4515 
4516  got_SIGHUP = true;
4517  SetLatch(MyLatch);
4518 
4519  errno = save_errno;
4520 }
4521 
4522 /*
4523  * Subroutine to clear stats in a database entry
4524  *
4525  * Tables and functions hashes are initialized to empty.
4526  */
4527 static void
4528 reset_dbentry_counters(PgStat_StatDBEntry *dbentry)
4529 {
4530  HASHCTL hash_ctl;
4531 
4532  dbentry->n_xact_commit = 0;
4533  dbentry->n_xact_rollback = 0;
4534  dbentry->n_blocks_fetched = 0;
4535  dbentry->n_blocks_hit = 0;
4536  dbentry->n_tuples_returned = 0;
4537  dbentry->n_tuples_fetched = 0;
4538  dbentry->n_tuples_inserted = 0;
4539  dbentry->n_tuples_updated = 0;
4540  dbentry->n_tuples_deleted = 0;
4541  dbentry->last_autovac_time = 0;
4542  dbentry->n_conflict_tablespace = 0;
4543  dbentry->n_conflict_lock = 0;
4544  dbentry->n_conflict_snapshot = 0;
4545  dbentry->n_conflict_bufferpin = 0;
4546  dbentry->n_conflict_startup_deadlock = 0;
4547  dbentry->n_temp_files = 0;
4548  dbentry->n_temp_bytes = 0;
4549  dbentry->n_deadlocks = 0;
4550  dbentry->n_block_read_time = 0;
4551  dbentry->n_block_write_time = 0;
4552 
4553  dbentry->stat_reset_timestamp = GetCurrentTimestamp();
4554  dbentry->stats_timestamp = 0;
4555 
4556  memset(&hash_ctl, 0, sizeof(hash_ctl));
4557  hash_ctl.keysize = sizeof(Oid);
4558  hash_ctl.entrysize = sizeof(PgStat_StatTabEntry);
4559  dbentry->tables = hash_create("Per-database table",
4560  PGSTAT_TAB_HASH_SIZE,
4561  &hash_ctl,
4562  HASH_ELEM | HASH_BLOBS);
4563 
4564  hash_ctl.keysize = sizeof(Oid);
4565  hash_ctl.entrysize = sizeof(PgStat_StatFuncEntry);
4566  dbentry->functions = hash_create("Per-database function",
4567  PGSTAT_FUNCTION_HASH_SIZE,
4568  &hash_ctl,
4569  HASH_ELEM | HASH_BLOBS);
4570 }
4571 
4572 /*
4573  * Lookup the hash table entry for the specified database. If no hash
4574  * table entry exists, initialize it, if the create parameter is true.
4575  * Else, return NULL.
4576  */
4577 static PgStat_StatDBEntry *
4578 pgstat_get_db_entry(Oid databaseid, bool create)
4579 {
4580  PgStat_StatDBEntry *result;
4581  bool found;
4582  HASHACTION action = (create ? HASH_ENTER : HASH_FIND);
4583 
4584  /* Lookup or create the hash table entry for this database */
4585  result = (PgStat_StatDBEntry *) hash_search(pgStatDBHash,
4586  &databaseid,
4587  action, &found);
4588 
4589  if (!create && !found)
4590  return NULL;
4591 
4592  /*
4593  * If not found, initialize the new one. This creates empty hash tables
4594  * for tables and functions, too.
4595  */
4596  if (!found)
4597  reset_dbentry_counters(result);
4598 
4599  return result;
4600 }
4601 
4602 
4603 /*
4604  * Lookup the hash table entry for the specified table. If no hash
4605  * table entry exists, initialize it, if the create parameter is true.
4606  * Else, return NULL.
4607  */
4608 static PgStat_StatTabEntry *
4609 pgstat_get_tab_entry(PgStat_StatDBEntry *dbentry, Oid tableoid, bool create)
4610 {
4611  PgStat_StatTabEntry *result;
4612  bool found;
4613  HASHACTION action = (create ? HASH_ENTER : HASH_FIND);
4614 
4615  /* Lookup or create the hash table entry for this table */
4616  result = (PgStat_StatTabEntry *) hash_search(dbentry->tables,
4617  &tableoid,
4618  action, &found);
4619 
4620  if (!create && !found)
4621  return NULL;
4622 
4623  /* If not found, initialize the new one. */
4624  if (!found)
4625  {
4626  result->numscans = 0;
4627  result->tuples_returned = 0;
4628  result->tuples_fetched = 0;
4629  result->tuples_inserted = 0;
4630  result->tuples_updated = 0;
4631  result->tuples_deleted = 0;
4632  result->tuples_hot_updated = 0;
4633  result->n_live_tuples = 0;
4634  result->n_dead_tuples = 0;
4635  result->changes_since_analyze = 0;
4636  result->blocks_fetched = 0;
4637  result->blocks_hit = 0;
4638  result->vacuum_timestamp = 0;
4639  result->vacuum_count = 0;
4640  result->autovac_vacuum_timestamp = 0;
4641  result->autovac_vacuum_count = 0;
4642  result->analyze_timestamp = 0;
4643  result->analyze_count = 0;
4644  result->autovac_analyze_timestamp = 0;
4645  result->autovac_analyze_count = 0;
4646  }
4647 
4648  return result;
4649 }
4650 
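/*
 * Illustrative sketch, not part of pgstat.c: how the collector's message
 * handlers typically use the two lookup helpers above.  The handler name and
 * the counter being bumped are hypothetical; the real handlers appear later
 * in this file.
 */
static void
example_recv_handler(Oid databaseid, Oid tableoid)
{
	PgStat_StatDBEntry *dbentry = pgstat_get_db_entry(databaseid, true);
	PgStat_StatTabEntry *tabentry = pgstat_get_tab_entry(dbentry, tableoid, true);

	/* with create = true, both entries now exist and start out zeroed */
	tabentry->vacuum_count++;
}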
4651 
4652 /* ----------
4653  * pgstat_write_statsfiles() -
4654  * Write the global statistics file, as well as requested DB files.
4655  *
4656  * 'permanent' specifies writing to the permanent files not temporary ones.
4657  * When true (happens only when the collector is shutting down), also remove
4658  * the temporary files so that backends starting up under a new postmaster
4659  * can't read old data before the new collector is ready.
4660  *
4661  * When 'allDbs' is false, only the requested databases (listed in
4662  * pending_write_requests) will be written; otherwise, all databases
4663  * will be written.
4664  * ----------
4665  */
4666 static void
4667 pgstat_write_statsfiles(bool permanent, bool allDbs)
4668 {
4669  HASH_SEQ_STATUS hstat;
4670  PgStat_StatDBEntry *dbentry;
4671  FILE *fpout;
4672  int32 format_id;
4673  const char *tmpfile = permanent ? PGSTAT_STAT_PERMANENT_TMPFILE : pgstat_stat_tmpname;
4674  const char *statfile = permanent ? PGSTAT_STAT_PERMANENT_FILENAME : pgstat_stat_filename;
4675  int rc;
4676 
4677  elog(DEBUG2, "writing stats file \"%s\"", statfile);
4678 
4679  /*
4680  * Open the statistics temp file to write out the current values.
4681  */
4682  fpout = AllocateFile(tmpfile, PG_BINARY_W);
4683  if (fpout == NULL)
4684  {
4685  ereport(LOG,
4686  (errcode_for_file_access(),
4687  errmsg("could not open temporary statistics file \"%s\": %m",
4688  tmpfile)));
4689  return;
4690  }
4691 
4692  /*
4693  * Set the timestamp of the stats file.
4694  */
4695  globalStats.stats_timestamp = GetCurrentTimestamp();
4696 
4697  /*
4698  * Write the file header --- currently just a format ID.
4699  */
4700  format_id = PGSTAT_FILE_FORMAT_ID;
4701  rc = fwrite(&format_id, sizeof(format_id), 1, fpout);
4702  (void) rc; /* we'll check for error with ferror */
4703 
4704  /*
4705  * Write global stats struct
4706  */
4707  rc = fwrite(&globalStats, sizeof(globalStats), 1, fpout);
4708  (void) rc; /* we'll check for error with ferror */
4709 
4710  /*
4711  * Write archiver stats struct
4712  */
4713  rc = fwrite(&archiverStats, sizeof(archiverStats), 1, fpout);
4714  (void) rc; /* we'll check for error with ferror */
4715 
4716  /*
4717  * Walk through the database table.
4718  */
4719  hash_seq_init(&hstat, pgStatDBHash);
4720  while ((dbentry = (PgStat_StatDBEntry *) hash_seq_search(&hstat)) != NULL)
4721  {
4722  /*
4723  * Write out the table and function stats for this DB into the
4724  * appropriate per-DB stat file, if required.
4725  */
4726  if (allDbs || pgstat_db_requested(dbentry->databaseid))
4727  {
4728  /* Make DB's timestamp consistent with the global stats */
4729  dbentry->stats_timestamp = globalStats.stats_timestamp;
4730 
4731  pgstat_write_db_statsfile(dbentry, permanent);
4732  }
4733 
4734  /*
4735  * Write out the DB entry. We don't write the tables or functions
4736  * pointers, since they're of no use to any other process.
4737  */
4738  fputc('D', fpout);
4739  rc = fwrite(dbentry, offsetof(PgStat_StatDBEntry, tables), 1, fpout);
4740  (void) rc; /* we'll check for error with ferror */
4741  }
4742 
4743  /*
4744  * No more output to be done. Close the temp file and replace the old
4745  * pgstat.stat with it. The ferror() check replaces testing for error
4746  * after each individual fputc or fwrite above.
4747  */
4748  fputc('E', fpout);
4749 
4750  if (ferror(fpout))
4751  {
4752  ereport(LOG,
4753  (errcode_for_file_access(),
4754  errmsg("could not write temporary statistics file \"%s\": %m",
4755  tmpfile)));
4756  FreeFile(fpout);
4757  unlink(tmpfile);
4758  }
4759  else if (FreeFile(fpout) < 0)
4760  {
4761  ereport(LOG,
4762  (errcode_for_file_access(),
4763  errmsg("could not close temporary statistics file \"%s\": %m",
4764  tmpfile)));
4765  unlink(tmpfile);
4766  }
4767  else if (rename(tmpfile, statfile) < 0)
4768  {
4769  ereport(LOG,
4770  (errcode_for_file_access(),
4771  errmsg("could not rename temporary statistics file \"%s\" to \"%s\": %m",
4772  tmpfile, statfile)));
4773  unlink(tmpfile);
4774  }
4775 
4776  if (permanent)
4777  unlink(pgstat_stat_filename);
4778 
4779  /*
4780  * Now throw away the list of requests. Note that requests sent after we
4781  * started the write are still waiting on the network socket.
4782  */
4783  list_free(pending_write_requests);
4784  pending_write_requests = NIL;
4785 }
4786 
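/*
 * Illustrative sketch, not part of pgstat.c: the write-temp-then-rename
 * pattern that pgstat_write_statsfiles() above and pgstat_write_db_statsfile()
 * below both follow.  Because rename() replaces the target atomically, readers
 * either see the previous stats file or a complete new one, never a partially
 * written file.  The function name and single-buffer payload are
 * simplifications for illustration.
 */
static void
example_write_stats_atomically(const char *tmpfile, const char *statfile,
							   const void *payload, size_t len)
{
	FILE	   *fpout = AllocateFile(tmpfile, PG_BINARY_W);

	if (fpout == NULL)
		return;					/* real code ereport()s and gives up */

	fwrite(payload, len, 1, fpout); /* errors caught via ferror() below */

	if (ferror(fpout))
	{
		FreeFile(fpout);
		unlink(tmpfile);		/* bad write: discard the temp file */
	}
	else if (FreeFile(fpout) < 0 || rename(tmpfile, statfile) < 0)
		unlink(tmpfile);		/* close/rename failed: keep the old stats file */
}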
4787 /*
4788  * return the filename for a DB stat file; filename is the output buffer,
4789  * of length len.
4790  */
4791 static void
4792 get_dbstat_filename(bool permanent, bool tempname, Oid databaseid,
4793  char *filename, int len)
4794 {
4795  int printed;
4796 
4797  /* NB -- pgstat_reset_remove_files knows about the pattern this uses */
4798  printed = snprintf(filename, len, "%s/db_%u.%s",
4799  permanent ? PGSTAT_STAT_PERMANENT_DIRECTORY :
4800  pgstat_stat_directory,
4801  databaseid,
4802  tempname ? "tmp" : "stat");
4803  if (printed > len)
4804  elog(ERROR, "overlength pgstat path");
4805 }
4806 
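/*
 * Usage sketch, not part of pgstat.c: with default settings the path built
 * above is "pg_stat_tmp/db_<oid>.tmp" or ".stat" for the temporary files and
 * "pg_stat/db_<oid>.stat" for the permanent copy written at shutdown.  The
 * OID below is a made-up example.
 */
static void
example_dbstat_paths(void)
{
	char		path[MAXPGPATH];

	get_dbstat_filename(false, true, 16384, path, MAXPGPATH);
	/* e.g. "pg_stat_tmp/db_16384.tmp" when pgstat_stat_directory is default */

	get_dbstat_filename(true, false, 16384, path, MAXPGPATH);
	/* "pg_stat/db_16384.stat" (PGSTAT_STAT_PERMANENT_DIRECTORY) */
}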
4807 /* ----------
4808  * pgstat_write_db_statsfile() -
4809  * Write the stat file for a single database.
4810  *
4811  * If writing to the permanent file (happens when the collector is
4812  * shutting down only), remove the temporary file so that backends
4813  * starting up under a new postmaster can't read the old data before
4814  * the new collector is ready.
4815  * ----------
4816  */
4817 static void
4818 pgstat_write_db_statsfile(PgStat_StatDBEntry *dbentry, bool permanent)
4819 {
4820  HASH_SEQ_STATUS tstat;
4821  HASH_SEQ_STATUS fstat;
4822  PgStat_StatTabEntry *tabentry;
4823  PgStat_StatFuncEntry *funcentry;
4824  FILE *fpout;
4825  int32 format_id;
4826  Oid dbid = dbentry->databaseid;
4827  int rc;
4828  char tmpfile[MAXPGPATH];
4829  char statfile[MAXPGPATH];
4830 
4831  get_dbstat_filename(permanent, true, dbid, tmpfile, MAXPGPATH);
4832  get_dbstat_filename(permanent, false, dbid, statfile, MAXPGPATH);
4833 
4834  elog(DEBUG2, "writing stats file \"%s\"", statfile);
4835 
4836  /*
4837  * Open the statistics temp file to write out the current values.
4838  */
4839  fpout = AllocateFile(tmpfile, PG_BINARY_W);
4840  if (fpout == NULL)
4841  {
4842  ereport(LOG,
4843  (errcode_for_file_access(),
4844  errmsg("could not open temporary statistics file \"%s\": %m",
4845  tmpfile)));
4846  return;
4847  }
4848 
4849  /*
4850  * Write the file header --- currently just a format ID.
4851  */
4852  format_id = PGSTAT_FILE_FORMAT_ID;
4853  rc = fwrite(&format_id, sizeof(format_id), 1, fpout);
4854  (void) rc; /* we'll check for error with ferror */
4855 
4856  /*
4857  * Walk through the database's access stats per table.
4858  */
4859  hash_seq_init(&tstat, dbentry->tables);
4860  while ((tabentry = (PgStat_StatTabEntry *) hash_seq_search(&tstat)) != NULL)
4861  {
4862  fputc('T', fpout);
4863  rc = fwrite(tabentry, sizeof(PgStat_StatTabEntry), 1, fpout);
4864  (void) rc; /* we'll check for error with ferror */
4865  }
4866 
4867  /*
4868  * Walk through the database's function stats table.
4869  */
4870  hash_seq_init(&fstat, dbentry->functions);
4871  while ((funcentry = (PgStat_StatFuncEntry *) hash_seq_search(&fstat)) != NULL)
4872  {
4873  fputc('F', fpout);
4874  rc = fwrite(funcentry, sizeof(PgStat_StatFuncEntry), 1, fpout);
4875  (void) rc; /* we'll check for error with ferror */
4876  }
4877 
4878  /*
4879  * No more output to be done. Close the temp file and replace the old
4880  * pgstat.stat with it. The ferror() check replaces testing for error
4881  * after each individual fputc or fwrite above.
4882  */
4883  fputc('E', fpout);
4884 
4885  if (ferror(fpout))
4886  {
4887  ereport(LOG,
4888  (errcode_for_file_access(),
4889  errmsg("could not write temporary statistics file \"%s\": %m",
4890  tmpfile)));
4891  FreeFile(fpout);
4892  unlink(tmpfile);
4893  }
4894  else if (FreeFile(fpout) < 0)
4895  {
4896  ereport(LOG,
4897  (errcode_for_file_access(),
4898  errmsg("could not close temporary statistics file \"%s\": %m",
4899  tmpfile)));
4900  unlink(tmpfile);
4901  }
4902  else if (rename(tmpfile, statfile) < 0)
4903  {
4904  ereport(LOG,
4905  (errcode_for_file_access(),
4906  errmsg("could not rename temporary statistics file \"%s\" to \"%s\": %m",
4907  tmpfile, statfile)));
4908  unlink(tmpfile);
4909  }
4910 
4911  if (permanent)
4912  {
4913  get_dbstat_filename(false, false, dbid, statfile, MAXPGPATH);
4914 
4915  elog(DEBUG2, "removing temporary stats file \"%s\"", statfile);
4916  unlink(statfile);
4917  }
4918 }
4919 
4920 /* ----------
4921  * pgstat_read_statsfiles() -
4922  *
4923  * Reads in some existing statistics collector files and returns the
4924  * databases hash table that is the top level of the data.
4925  *
4926  * If 'onlydb' is not InvalidOid, it means we only want data for that DB
4927  * plus the shared catalogs ("DB 0"). We'll still populate the DB hash
4928  * table for all databases, but we don't bother even creating table/function
4929  * hash tables for other databases.
4930  *
4931  * 'permanent' specifies reading from the permanent files not temporary ones.
4932  * When true (happens only when the collector is starting up), remove the
4933  * files after reading; the in-memory status is now authoritative, and the
4934  * files would be out of date in case somebody else reads them.
4935  *
4936  * If a 'deep' read is requested, table/function stats are read, otherwise
4937  * the table/function hash tables remain empty.
4938  * ----------
4939  */
4940 static HTAB *
4941 pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep)
4942 {
4943  PgStat_StatDBEntry *dbentry;
4944  PgStat_StatDBEntry dbbuf;
4945  HASHCTL hash_ctl;
4946  HTAB *dbhash;
4947  FILE *fpin;
4948  int32 format_id;
4949  bool found;
4950  const char *statfile = permanent ? PGSTAT_STAT_PERMANENT_FILENAME : pgstat_stat_filename;
4951 
4952  /*
4953  * The tables will live in pgStatLocalContext.
4954  */
4955  pgstat_setup_memcxt();
4956 
4957  /*
4958  * Create the DB hashtable
4959  */
4960  memset(&hash_ctl, 0, sizeof(hash_ctl));
4961  hash_ctl.keysize = sizeof(Oid);
4962  hash_ctl.entrysize = sizeof(PgStat_StatDBEntry);
4963  hash_ctl.hcxt = pgStatLocalContext;
4964  dbhash = hash_create("Databases hash", PGSTAT_DB_HASH_SIZE, &hash_ctl,
4965  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
4966 
4967  /*
4968  * Clear out global and archiver statistics so they start from zero in
4969  * case we can't load an existing statsfile.
4970  */
4971  memset(&globalStats, 0, sizeof(globalStats));
4972  memset(&archiverStats, 0, sizeof(archiverStats));
4973 
4974  /*
4975  * Set the current timestamp (will be kept only in case we can't load an
4976  * existing statsfile).
4977  */
4978  globalStats.stat_reset_timestamp = GetCurrentTimestamp();
4979  archiverStats.stat_reset_timestamp = globalStats.stat_reset_timestamp;
4980 
4981  /*
4982  * Try to open the stats file. If it doesn't exist, the backends simply
4983  * return zero for anything and the collector simply starts from scratch
4984  * with empty counters.
4985  *
4986  * ENOENT is a possibility if the stats collector is not running or has
4987  * not yet written the stats file the first time. Any other failure
4988  * condition is suspicious.
4989  */
4990  if ((fpin = AllocateFile(statfile, PG_BINARY_R)) == NULL)
4991  {
4992  if (errno != ENOENT)
4993  ereport(pgStatRunningInCollector ? LOG : WARNING,
4994  (errcode_for_file_access(),
4995  errmsg("could not open statistics file \"%s\": %m",
4996  statfile)));
4997  return dbhash;
4998  }
4999 
5000  /*
5001  * Verify it's of the expected format.
5002  */
5003  if (fread(&format_id, 1, sizeof(format_id), fpin) != sizeof(format_id) ||
5004  format_id != PGSTAT_FILE_FORMAT_ID)
5005  {
5006  ereport(pgStatRunningInCollector ? LOG : WARNING,
5007  (errmsg("corrupted statistics file \"%s\"", statfile)));
5008  goto done;
5009  }
5010 
5011  /*
5012  * Read global stats struct
5013  */
5014  if (fread(&globalStats, 1, sizeof(globalStats), fpin) != sizeof(globalStats))
5015  {
5016  ereport(pgStatRunningInCollector ? LOG : WARNING,
5017  (errmsg("corrupted statistics file \"%s\"", statfile)));
5018  memset(&globalStats, 0, sizeof(globalStats));
5019  goto done;
5020  }
5021 
5022  /*
5023  * In the collector, disregard the timestamp we read from the permanent
5024  * stats file; we should be willing to write a temp stats file immediately
5025  * upon the first request from any backend. This only matters if the old
5026  * file's timestamp is less than PGSTAT_STAT_INTERVAL ago, but that's not
5027  * an unusual scenario.
5028  */
5029  if (pgStatRunningInCollector)
5030  globalStats.stats_timestamp = 0;
5031 
5032  /*
5033  * Read archiver stats struct
5034  */
5035  if (fread(&archiverStats, 1, sizeof(archiverStats), fpin) != sizeof(archiverStats))
5036  {
5037  ereport(pgStatRunningInCollector ? LOG : WARNING,
5038  (errmsg("corrupted statistics file \"%s\"", statfile)));
5039  memset(&archiverStats, 0, sizeof(archiverStats));
5040  goto done;
5041  }
5042 
5043  /*
5044  * We found an existing collector stats file. Read it and put all the
5045  * hashtable entries into place.
5046  */
5047  for (;;)
5048  {
5049  switch (fgetc(fpin))
5050  {
5051  /*
5052  * 'D' A PgStat_StatDBEntry struct describing a database
5053  * follows.
5054  */
5055  case 'D':
5056  if (fread(&dbbuf, 1, offsetof(PgStat_StatDBEntry, tables),
5057  fpin) != offsetof(PgStat_StatDBEntry, tables))
5058  {
5059  ereport(pgStatRunningInCollector ? LOG : WARNING,
5060  (errmsg("corrupted statistics file \"%s\"",
5061  statfile)));
5062  goto done;
5063  }
5064 
5065  /*
5066  * Add to the DB hash
5067  */
5068  dbentry = (PgStat_StatDBEntry *) hash_search(dbhash,
5069  (void *) &dbbuf.databaseid,
5070  HASH_ENTER,
5071  &found);
5072  if (found)
5073  {
5074  ereport(pgStatRunningInCollector ? LOG : WARNING,
5075  (errmsg("corrupted statistics file \"%s\"",
5076  statfile)));
5077  goto done;
5078  }
5079 
5080  memcpy(dbentry, &dbbuf, sizeof(PgStat_StatDBEntry));
5081  dbentry->tables = NULL;
5082  dbentry->functions = NULL;
5083 
5084  /*
5085  * In the collector, disregard the timestamp we read from the
5086  * permanent stats file; we should be willing to write a temp
5087  * stats file immediately upon the first request from any
5088  * backend.
5089  */
5090  if (pgStatRunningInCollector)
5091  dbentry->stats_timestamp = 0;
5092 
5093  /*
5094  * Don't create tables/functions hashtables for uninteresting
5095  * databases.
5096  */
5097  if (onlydb != InvalidOid)
5098  {
5099  if (dbbuf.databaseid != onlydb &&
5100  dbbuf.databaseid != InvalidOid)
5101  break;
5102  }
5103 
5104  memset(&hash_ctl, 0, sizeof(hash_ctl));
5105  hash_ctl.keysize = sizeof(Oid);
5106  hash_ctl.entrysize = sizeof(PgStat_StatTabEntry);
5107  hash_ctl.hcxt = pgStatLocalContext;
5108  dbentry->tables = hash_create("Per-database table",
5109  PGSTAT_TAB_HASH_SIZE,
5110  &hash_ctl,
5111  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
5112 
5113  hash_ctl.keysize = sizeof(Oid);
5114  hash_ctl.entrysize = sizeof(PgStat_StatFuncEntry);
5115  hash_ctl.hcxt = pgStatLocalContext;
5116  dbentry->functions = hash_create("Per-database function",
5117  PGSTAT_FUNCTION_HASH_SIZE,
5118  &hash_ctl,
5119  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
5120 
5121  /*
5122  * If requested, read the data from the database-specific
5123  * file. Otherwise we just leave the hashtables empty.
5124  */
5125  if (deep)
5126  pgstat_read_db_statsfile(dbbuf.databaseid,
5127  dbentry->tables,
5128  dbentry->functions,
5129  permanent);
5130 
5131  break;
5132 
5133  case 'E':
5134  goto done;
5135 
5136  default:
5137  ereport(pgStatRunningInCollector ? LOG : WARNING,
5138  (errmsg("corrupted statistics file \"%s\"",
5139  statfile)));
5140  goto done;
5141  }
5142  }
5143 
5144 done:
5145  FreeFile(fpin);
5146 
5147  /* If requested to read the permanent file, also get rid of it. */
5148  if (permanent)
5149  {
5150  elog(DEBUG2, "removing permanent stats file \"%s\"", statfile);
5151  unlink(statfile);
5152  }
5153 
5154  return dbhash;
5155 }
5156 
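/*
 * Summary sketch, not part of pgstat.c, of the on-disk layout that the reader
 * above and the writers earlier in this file agree on:
 *
 *	global stats file (pgstat_stat_filename / PGSTAT_STAT_PERMANENT_FILENAME):
 *		int32 format_id (PGSTAT_FILE_FORMAT_ID)
 *		PgStat_GlobalStats
 *		PgStat_ArchiverStats
 *		zero or more 'D' records, each offsetof(PgStat_StatDBEntry, tables) bytes
 *		'E' end-of-file marker
 *
 *	per-database file (see pgstat_read_db_statsfile below):
 *		int32 format_id
 *		zero or more 'T' + PgStat_StatTabEntry records
 *		zero or more 'F' + PgStat_StatFuncEntry records
 *		'E' end-of-file marker
 */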
5157 
5158 /* ----------
5159  * pgstat_read_db_statsfile() -
5160  *
5161  * Reads in the existing statistics collector file for the given database,
5162  * filling the passed-in tables and functions hash tables.
5163  *
5164  * As in pgstat_read_statsfiles, if the permanent file is requested, it is
5165  * removed after reading.
5166  *
5167  * Note: this code has the ability to skip storing per-table or per-function
5168  * data, if NULL is passed for the corresponding hashtable. That's not used
5169  * at the moment though.
5170  * ----------
5171  */
5172 static void
5173 pgstat_read_db_statsfile(Oid databaseid, HTAB *tabhash, HTAB *funchash,
5174  bool permanent)
5175 {
5176  PgStat_StatTabEntry *tabentry;
5177  PgStat_StatTabEntry tabbuf;
5178  PgStat_StatFuncEntry funcbuf;
5179  PgStat_StatFuncEntry *funcentry;
5180  FILE *fpin;
5181  int32 format_id;
5182  bool found;
5183  char statfile[MAXPGPATH];
5184 
5185  get_dbstat_filename(permanent, false, databaseid, statfile, MAXPGPATH);
5186 
5187  /*
5188  * Try to open the stats file. If it doesn't exist, the backends simply
5189  * return zero for anything and the collector simply starts from scratch
5190  * with empty counters.
5191  *
5192  * ENOENT is a possibility if the stats collector is not running or has
5193  * not yet written the stats file the first time. Any other failure
5194  * condition is suspicious.
5195  */
5196  if ((fpin = AllocateFile(statfile, PG_BINARY_R)) == NULL)
5197  {
5198  if (errno != ENOENT)
5199  ereport(pgStatRunningInCollector ? LOG : WARNING,
5200  (errcode_for_file_access(),
5201  errmsg("could not open statistics file \"%s\": %m",
5202  statfile)));
5203  return;
5204  }
5205 
5206  /*
5207  * Verify it's of the expected format.
5208  */
5209  if (fread(&format_id, 1, sizeof(format_id), fpin) != sizeof(format_id) ||
5210  format_id != PGSTAT_FILE_FORMAT_ID)
5211  {
5212  ereport(pgStatRunningInCollector ? LOG : WARNING,
5213  (errmsg("corrupted statistics file \"%s\"", statfile)));
5214  goto done;
5215  }
5216 
5217  /*
5218  * We found an existing collector stats file. Read it and put all the
5219  * hashtable entries into place.
5220  */
5221  for (;;)
5222  {
5223  switch (fgetc(fpin))
5224  {
5225  /*
5226  * 'T' A PgStat_StatTabEntry follows.
5227  */
5228  case 'T':
5229  if (fread(&tabbuf, 1, sizeof(PgStat_StatTabEntry),
5230  fpin) != sizeof(PgStat_StatTabEntry))
5231  {
5232  ereport(pgStatRunningInCollector ? LOG : WARNING,
5233  (errmsg("corrupted statistics file \"%s\"",
5234  statfile)));
5235  goto done;
5236  }
5237 
5238  /*
5239  * Skip if table data not wanted.
5240  */
5241  if (tabhash == NULL)
5242  break;
5243 
5244  tabentry = (PgStat_StatTabEntry *) hash_search(tabhash,
5245  (void *) &tabbuf.tableid,
5246  HASH_ENTER, &found);
5247 
5248  if (found)
5249  {
5250  ereport(pgStatRunningInCollector ? LOG : WARNING,
5251  (errmsg("corrupted statistics file \"%s\"",
5252  statfile)));
5253  goto done;
5254  }
5255 
5256  memcpy(tabentry, &tabbuf, sizeof(tabbuf));
5257  break;
5258 
5259  /*
5260  * 'F' A PgStat_StatFuncEntry follows.
5261  */
5262  case 'F':
5263  if (fread(&funcbuf, 1, sizeof(PgStat_StatFuncEntry),
5264  fpin) != sizeof(PgStat_StatFuncEntry))
5265  {
5266  ereport(pgStatRunningInCollector ? LOG : WARNING,
5267  (errmsg("corrupted statistics file \"%s\"",
5268  statfile)));
5269  goto done;
5270  }
5271 
5272  /*
5273  * Skip if function data not wanted.
5274  */
5275  if (funchash == NULL)
5276  break;
5277 
5278  funcentry = (PgStat_StatFuncEntry *) hash_search(funchash,
5279  (void *) &funcbuf.functionid,
5280  HASH_ENTER, &found);
5281 
5282  if (found)
5283  {
5284  ereport(pgStatRunningInCollector ? LOG : WARNING,
5285  (errmsg("corrupted statistics file \"%s\"",
5286  statfile)));
5287  goto done;
5288  }
5289 
5290  memcpy(funcentry, &funcbuf, sizeof(funcbuf));
5291  break;
5292 
5293  /*
5294  * 'E' The EOF marker of a complete stats file.
5295  */
5296  case 'E':
5297  goto done;
5298 
5299  default:
5300  ereport(pgStatRunningInCollector ? LOG : WARNING,
5301  (errmsg("corrupted statistics file \"%s\"",
5302  statfile)));
5303  goto done;
5304  }
5305  }
5306 
5307 done:
5308  FreeFile(fpin);
5309 
5310  if (permanent)
5311  {
5312  elog(DEBUG2, "removing permanent stats file \"%s\"", statfile);
5313  unlink(statfile);
5314  }
5315 }
5316 
5317 /* ----------
5318  * pgstat_read_db_statsfile_timestamp() -
5319  *
5320  * Attempt to determine the timestamp of the last db statfile write.
5321  * Returns true if successful; the timestamp is stored in *ts.
5322  *
5323  * This needs to be careful about handling databases for which no stats file
5324  * exists, such as databases without a stat entry or those not yet written:
5325  *
5326  * - if there's a database entry in the global file, return the corresponding
5327  * stats_timestamp value.
5328  *
5329  * - if there's no db stat entry (e.g. for a new or inactive database),
5330  * there's no stats_timestamp value, but also nothing to write so we return
5331  * the timestamp of the global statfile.
5332  * ----------
5333  */
5334 static bool
5335 pgstat_read_db_statsfile_timestamp(Oid databaseid, bool permanent,
5336  TimestampTz *ts)
5337 {
5338  PgStat_StatDBEntry dbentry;
5339  PgStat_GlobalStats myGlobalStats;
5340  PgStat_ArchiverStats myArchiverStats;
5341  FILE *fpin;
5342  int32 format_id;
5343  const char *statfile = permanent ? PGSTAT_STAT_PERMANENT_FILENAME : pgstat_stat_filename;
5344 
5345  /*
5346  * Try to open the stats file. As above, anything but ENOENT is worthy of
5347  * complaining about.
5348  */
5349  if ((fpin = AllocateFile(statfile, PG_BINARY_R)) == NULL)
5350  {
5351  if (errno != ENOENT)
5352  ereport(pgStatRunningInCollector ? LOG : WARNING,
5353  (errcode_for_file_access(),
5354  errmsg("could not open statistics file \"%s\": %m",
5355  statfile)));
5356  return false;
5357  }
5358 
5359  /*
5360  * Verify it's of the expected format.
5361  */
5362  if (fread(&format_id, 1, sizeof(format_id), fpin) != sizeof(format_id) ||
5363  format_id != PGSTAT_FILE_FORMAT_ID)
5364  {
5365  ereport(pgStatRunningInCollector ? LOG : WARNING,
5366  (errmsg("corrupted statistics file \"%s\"", statfile)));
5367  FreeFile(fpin);
5368  return false;
5369  }
5370 
5371  /*
5372  * Read global stats struct
5373  */
5374  if (fread(&myGlobalStats, 1, sizeof(myGlobalStats),
5375  fpin) != sizeof(myGlobalStats))
5376  {
5377  ereport(pgStatRunningInCollector ? LOG : WARNING,
5378  (errmsg("corrupted statistics file \"%s\"", statfile)));
5379  FreeFile(fpin);
5380  return false;
5381  }
5382 
5383  /*
5384  * Read archiver stats struct
5385  */
5386  if (fread(&myArchiverStats, 1, sizeof(myArchiverStats),
5387  fpin) != sizeof(myArchiverStats))
5388  {
5389  ereport(pgStatRunningInCollector ? LOG : WARNING,
5390  (errmsg("corrupted statistics file \"%s\"", statfile)));
5391  FreeFile(fpin);
5392  return false;
5393  }
5394 
5395  /* By default, we're going to return the timestamp of the global file. */
5396  *ts = myGlobalStats.stats_timestamp;
5397 
5398  /*
5399  * We found an existing collector stats file. Read it and look for a
5400  * record for the requested database. If found, use its timestamp.
5401  */
5402  for (;;)
5403  {
5404  switch (fgetc(fpin))
5405  {
5406  /*
5407  * 'D' A PgStat_StatDBEntry struct describing a database
5408  * follows.
5409  */
5410  case 'D':
5411  if (fread(&dbentry, 1, offsetof(PgStat_StatDBEntry, tables),
5412  fpin) != offsetof(PgStat_StatDBEntry, tables))
5413  {
5414  ereport(pgStatRunningInCollector ? LOG : WARNING,
5415  (errmsg("corrupted statistics file \"%s\"",
5416  statfile)));
5417  goto done;
5418  }
5419 
5420  /*
5421  * If this is the DB we're looking for, save its timestamp and
5422  * we're done.
5423  */
5424  if (dbentry.databaseid == databaseid)
5425  {
5426  *ts = dbentry.stats_timestamp;
5427  goto done;
5428  }
5429 
5430  break;
5431 
5432  case 'E':
5433  goto done;
5434 
5435  default:
5436  ereport(pgStatRunningInCollector ? LOG : WARNING,
5437  (errmsg("corrupted statistics file \"%s\"",
5438  statfile)));
5439  goto done;
5440  }
5441  }
5442 
5443 done:
5444  FreeFile(fpin);
5445  return true;
5446 }
5447 
5448 /*
5449  * If not already done, read the statistics collector stats file into
5450  * some hash tables. The results will be kept until pgstat_clear_snapshot()
5451  * is called (typically, at end of transaction).
5452  */
5453 static void
5454 backend_read_statsfile(void)
5455 {
5456  TimestampTz min_ts = 0;
5457  TimestampTz ref_ts = 0;
5458  Oid inquiry_db;
5459  int count;
5460 
5461  /* already read it? */
5462  if (pgStatDBHash)
5463  return;
5464  Assert(!pgStatRunningInCollector);
5465 
5466  /*
5467  * In a normal backend, we check staleness of the data for our own DB, and
5468  * so we send MyDatabaseId in inquiry messages. In the autovac launcher,
5469  * check staleness of the shared-catalog data, and send InvalidOid in
5470  * inquiry messages so as not to force writing unnecessary data.
5471  */
5472  if (IsAutoVacuumLauncherProcess())
5473  inquiry_db = InvalidOid;
5474  else
5475  inquiry_db = MyDatabaseId;
5476 
5477  /*
5478  * Loop until fresh enough stats file is available or we ran out of time.
5479  * The stats inquiry message is sent repeatedly in case collector drops
5480  * it; but not every single time, as that just swamps the collector.
5481  */
5482  for (count = 0; count < PGSTAT_POLL_LOOP_COUNT; count++)
5483  {
5484  bool ok;
5485  TimestampTz file_ts = 0;
5486  TimestampTz cur_ts;
5487 
5488  CHECK_FOR_INTERRUPTS();
5489 
5490  ok = pgstat_read_db_statsfile_timestamp(inquiry_db, false, &file_ts);
5491 
5492  cur_ts = GetCurrentTimestamp();
5493  /* Calculate min acceptable timestamp, if we didn't already */
5494  if (count == 0 || cur_ts < ref_ts)
5495  {
5496  /*
5497  * We set the minimum acceptable timestamp to PGSTAT_STAT_INTERVAL
5498  * msec before now. This indirectly ensures that the collector
5499  * needn't write the file more often than PGSTAT_STAT_INTERVAL. In
5500  * an autovacuum worker, however, we want a lower delay to avoid
5501  * using stale data, so we use PGSTAT_RETRY_DELAY (since the
5502  * number of workers is low, this shouldn't be a problem).
5503  *
5504  * We don't recompute min_ts after sleeping, except in the
5505  * unlikely case that cur_ts went backwards. So we might end up
5506  * accepting a file a bit older than PGSTAT_STAT_INTERVAL. In
5507  * practice that shouldn't happen, though, as long as the sleep
5508  * time is less than PGSTAT_STAT_INTERVAL; and we don't want to
5509  * tell the collector that our cutoff time is less than what we'd
5510  * actually accept.
5511  */
5512  ref_ts = cur_ts;
5513  if (IsAutoVacuumWorkerProcess())
5514  min_ts = TimestampTzPlusMilliseconds(ref_ts,
5515  -PGSTAT_RETRY_DELAY);
5516  else
5517  min_ts = TimestampTzPlusMilliseconds(ref_ts,
5518  -PGSTAT_STAT_INTERVAL);
5519  }
5520 
5521  /*
5522  * If the file timestamp is actually newer than cur_ts, we must have
5523  * had a clock glitch (system time went backwards) or there is clock
5524  * skew between our processor and the stats collector's processor.
5525  * Accept the file, but send an inquiry message anyway to make
5526  * pgstat_recv_inquiry do a sanity check on the collector's time.
5527  */
5528  if (ok && file_ts > cur_ts)
5529  {
5530  /*
5531  * A small amount of clock skew between processors isn't terribly
5532  * surprising, but a large difference is worth logging. We
5533  * arbitrarily define "large" as 1000 msec.
5534  */
5535  if (file_ts >= TimestampTzPlusMilliseconds(cur_ts, 1000))
5536  {
5537  char *filetime;
5538  char *mytime;
5539 
5540  /* Copy because timestamptz_to_str returns a static buffer */
5541  filetime = pstrdup(timestamptz_to_str(file_ts));
5542  mytime = pstrdup(timestamptz_to_str(cur_ts));
5543  elog(LOG, "stats collector's time %s is later than backend local time %s",
5544  filetime, mytime);
5545  pfree(filetime);
5546  pfree(mytime);
5547  }
5548 
5549  pgstat_send_inquiry(cur_ts, min_ts, inquiry_db);
5550  break;
5551  }
5552 
5553  /* Normal acceptance case: file is not older than cutoff time */
5554  if (ok && file_ts >= min_ts)
5555  break;
5556 
5557  /* Not there or too old, so kick the collector and wait a bit */
5558  if ((count % PGSTAT_INQ_LOOP_COUNT) == 0)
5559  pgstat_send_inquiry(cur_ts, min_ts, inquiry_db);
5560 
5561  pg_usleep(PGSTAT_RETRY_DELAY * 1000L);
5562  }
5563 
5564  if (count >= PGSTAT_POLL_LOOP_COUNT)
5565  ereport(LOG,
5566  (errmsg("using stale statistics instead of current ones "
5567  "because stats collector is not responding")));
5568 
5569  /*
5570  * Autovacuum launcher wants stats about all databases, but a shallow read
5571  * is sufficient. Regular backends want a deep read for just the tables
5572  * they can see (MyDatabaseId + shared catalogs).
5573  */
5574  if (IsAutoVacuumLauncherProcess())
5575  pgStatDBHash = pgstat_read_statsfiles(InvalidOid, false, false);
5576  else
5577  pgStatDBHash = pgstat_read_statsfiles(MyDatabaseId, false, true);
5578 }
5579 
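/*
 * Illustrative sketch, not part of pgstat.c: the staleness cutoff computed in
 * backend_read_statsfile() above, pulled out as a standalone helper.  A
 * regular backend accepts a stats file up to PGSTAT_STAT_INTERVAL msec old,
 * while an autovacuum worker only accepts one up to PGSTAT_RETRY_DELAY msec
 * old.  The helper name is hypothetical.
 */
static TimestampTz
example_min_acceptable_timestamp(TimestampTz ref_ts, bool autovac_worker)
{
	int			max_age_ms = autovac_worker ? PGSTAT_RETRY_DELAY : PGSTAT_STAT_INTERVAL;

	return TimestampTzPlusMilliseconds(ref_ts, -max_age_ms);
}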
5580 
5581 /* ----------
5582  * pgstat_setup_memcxt() -
5583  *
5584  * Create pgStatLocalContext, if not already done.
5585  * ----------
5586  */
5587 static void
5588 pgstat_setup_memcxt(void)
5589 {
5590  if (!pgStatLocalContext)
5591  pgStatLocalContext = AllocSetContextCreate(TopMemoryContext,
5592  "Statistics snapshot",
5593  ALLOCSET_SMALL_SIZES);
5594 }
5595 
5596 
5597 /* ----------
5598  * pgstat_clear_snapshot() -
5599  *
5600  * Discard any data collected in the current transaction. Any subsequent
5601  * request will cause new snapshots to be read.
5602  *
5603  * This is also invoked during transaction commit or abort to discard
5604  * the no-longer-wanted snapshot.
5605  * ----------
5606  */
5607 void
5608 pgstat_clear_snapshot(void)
5609 {
5610  /* Release memory, if any was allocated */
5611  if (pgStatLocalContext)
5612  MemoryContextDelete(pgStatLocalContext);
5613 
5614  /* Reset variables */
5615  pgStatLocalContext = NULL;
5616  pgStatDBHash = NULL;
5617  localBackendStatusTable = NULL;
5618  localNumBackends = 0;
5619 }
5620 
5621 
5622 /* ----------
5623  * pgstat_recv_inquiry() -
5624  *
5625  * Process stat inquiry requests.
5626  * ----------
5627  */
5628 static void
5629 pgstat_recv_inquiry(PgStat_MsgInquiry *msg, int len)
5630 {
5631  PgStat_StatDBEntry *dbentry;
5632 
5633  elog(DEBUG2, "received inquiry for database %u", msg->databaseid);
5634 
5635  /*
5636  * If there's already a write request for this DB, there's nothing to do.
5637  *
5638  * Note that if a request is found, we return early and skip the below
5639  * check for clock skew. This is okay, since the only way for a DB
5640  * request to be present in the list is that we have been here since the
5641  * last write round. It seems sufficient to check for clock skew once per
5642  * write round.
5643  */
5644  if (list_member_oid(pending_write_requests, msg->databaseid))
5645  return;
5646 
5647  /*
5648  * Check to see if we last wrote this database at a time >= the requested
5649  * cutoff time. If so, this is a stale request that was generated before
5650  * we updated the DB file, and we don't need to do so again.
5651  *
5652  * If the requestor's local clock time is older than stats_timestamp, we
5653  * should suspect a clock glitch, ie system time going backwards; though
5654  * the more likely explanation is just delayed message receipt. It is
5655  * worth expending a GetCurrentTimestamp call to be sure, since a large
5656  * retreat in the system clock reading could otherwise cause us to neglect
5657  * to update the stats file for a long time.
5658  */
5659  dbentry = pgstat_get_db_entry(msg->databaseid, false);
5660  if (dbentry == NULL)
5661  {
5662  /*
5663  * We have no data for this DB. Enter a write request anyway so that
5664  * the global stats will get updated. This is needed to prevent
5665  * backend_read_statsfile from waiting for data that we cannot supply,
5666  * in the case of a new DB that nobody has yet reported any stats for.
5667  * See the behavior of pgstat_read_db_statsfile_timestamp.
5668  */
5669  }
5670  else if (msg->clock_time < dbentry->stats_timestamp)
5671  {
5672  TimestampTz cur_ts = GetCurrentTimestamp();
5673 
5674  if (cur_ts < dbentry->stats_timestamp)
5675  {
5676  /*
5677  * Sure enough, time went backwards. Force a new stats file write
5678  * to get back in sync; but first, log a complaint.
5679  */
5680  char *writetime;
5681  char *mytime;
5682 
5683  /* Copy because timestamptz_to_str returns a static buffer */
5684  writetime = pstrdup(timestamptz_to_str(dbentry->stats_timestamp));
5685  mytime = pstrdup(timestamptz_to_str(cur_ts));
5686  elog(LOG,
5687  "stats_timestamp %s is later than collector's time %s for database %u",
5688  writetime, mytime, dbentry->databaseid);
5689  pfree(writetime);
5690  pfree(mytime);
5691  }
5692  else
5693  {
5694  /*
5695  * Nope, it's just an old request. Assuming msg's clock_time is
5696  * >= its cutoff_time, it must be stale, so we can ignore it.
5697  */
5698  return;
5699  }
5700  }
5701  else if (msg->cutoff_time <= dbentry->stats_timestamp)
5702  {
5703  /* Stale request, ignore it */
5704  return;
5705  }
5706 
5707  /*
5708  * We need to write this DB, so create a request.
5709  */
5710  pending_write_requests = lappend_oid(pending_write_requests,
5711  msg->databaseid);
5712 }
5713 
5714 
5715 /* ----------
5716  * pgstat_recv_tabstat() -
5717  *
5718  * Count what the backend has done.
5719  * ----------
5720  */
5721 static void
5722 pgstat_recv_tabstat(PgStat_MsgTabstat *msg, int len)
5723 {
5724  PgStat_StatDBEntry *dbentry;
5725  PgStat_StatTabEntry *tabentry;
5726  int i;
5727  bool found;
5728 
5729  dbentry = pgstat_get_db_entry(msg->m_databaseid, true);
5730 
5731  /*
5732  * Update database-wide stats.
5733  */
5734  dbentry->n_xact_commit += (PgStat_Counter) (msg->m_xact_commit);
5735  dbentry->n_xact_rollback += (PgStat_Counter) (msg->m_xact_rollback);
5736  dbentry->n_block_read_time += msg->m_block_read_time;
5737  dbentry->n_block_write_time += msg->m_block_write_time;
5738 
5739  /*
5740  * Process all table entries in the message.
5741  */
5742  for (i = 0; i < msg->m_nentries; i++)
5743  {
5744  PgStat_TableEntry *tabmsg = &(msg->m_entry[i]);
5745 
5746  tabentry = (PgStat_StatTabEntry *) hash_search(dbentry->tables,
5747  (void *) &(tabmsg->t_id),
5748  HASH_ENTER, &found);
5749 
5750  if (!found)
5751  {
5752  /*
5753  * If it's a new table entry, initialize counters to the values we
5754  * just got.
5755  */
5756  tabentry->numscans = tabmsg->t_counts.t_numscans;
5757  tabentry->tuples_returned = tabmsg->t_counts.t_tuples_returned;
5758  tabentry->tuples_fetched = tabmsg->t_counts.t_tuples_fetched;
5759  tabentry->tuples_inserted = tabmsg->t_counts.t_tuples_inserted;
5760  tabentry->tuples_updated = tabmsg->t_counts.t_tuples_updated;
5761  tabentry->tuples_deleted = tabmsg->t_counts.t_tuples_deleted;
5762  tabentry->tuples_hot_updated = tabmsg->t_counts.t_tuples_hot_updated;
5763  tabentry->n_live_tuples = tabmsg->t_counts.t_delta_live_tuples;
5764  tabentry->n_dead_tuples = tabmsg->t_counts.t_delta_dead_tuples;
5765  tabentry->changes_since_analyze = tabmsg->t_counts.t_changed_tuples;
5766  tabentry->blocks_fetched = tabmsg->t_counts.t_blocks_fetched;
5767  tabentry->blocks_hit = tabmsg->t_counts.t_blocks_hit;
5768 
5769  tabentry->vacuum_timestamp = 0;
5770  tabentry->vacuum_count = 0;
5771  tabentry->autovac_vacuum_timestamp = 0;
5772  tabentry->autovac_vacuum_count = 0;
5773  tabentry->analyze_timestamp = 0;
5774  tabentry->analyze_count = 0;
5775  tabentry->autovac_analyze_timestamp = 0;
5776  tabentry->autovac_analyze_count = 0;
5777  }
5778  else
5779  {
5780  /*
5781  * Otherwise add the values to the existing entry.
5782  */
5783  tabentry->numscans += tabmsg->t_counts.t_numscans;
5784  tabentry->tuples_returned += tabmsg->t_counts.t_tuples_returned;
5785  tabentry->tuples_fetched += tabmsg->t_counts.t_tuples_fetched;
5786  tabentry->tuples_inserted += tabmsg->t_counts.t_tuples_inserted;
5787  tabentry->tuples_updated += tabmsg->t_counts.t_tuples_updated;
5788  tabentry->tuples_deleted += tabmsg->t_counts.t_tuples_deleted;
5789  tabentry->tuples_hot_updated += tabmsg->t_counts.t_tuples_hot_updated;
5790  /* If table was truncated, first reset the live/dead counters */
5791  if (tabmsg->t_counts.t_truncated)
5792  {
5793  tabentry->n_live_tuples = 0;
5794  tabentry->n_dead_tuples = 0;
5795  }
5796  tabentry->n_live_tuples += tabmsg->t_counts.t_delta_live_tuples;
5797  tabentry->n_dead_tuples += tabmsg->t_counts.t_delta_dead_tuples;
5798  tabentry->changes_since_analyze += tabmsg->t_counts.t_changed_tuples;
5799  tabentry->blocks_fetched += tabmsg->t_counts.t_blocks_fetched;
5800  tabentry->blocks_hit += tabmsg->t_counts.t_blocks_hit;
5801  }
5802 
5803  /* Clamp n_live_tuples in case of negative delta_live_tuples */
5804  tabentry->n_live_tuples = Max(tabentry->n_live_tuples, 0);
5805  /* Likewise for n_dead_tuples */
5806  tabentry->n_dead_tuples = Max(tabentry->n_dead_tuples, 0);
5807 
5808  /*
5809  * Add per-table stats to the per-database entry, too.
5810  */
5811  dbentry->n_tuples_returned += tabmsg->t_counts.t_tuples_returned;
5812  dbentry->n_tuples_fetched += tabmsg->t_counts.t_tuples_fetched;
5813  dbentry->n_tuples_inserted += tabmsg->t_counts.t_tuples_inserted;
5814  dbentry->n_tuples_updated += tabmsg->t_counts.t_tuples_updated;
5815  dbentry->n_tuples_deleted += tabmsg->t_counts.t_tuples_deleted;
5816  dbentry->n_blocks_fetched += tabmsg->t_counts.t_blocks_fetched;
5817  dbentry->n_blocks_hit += tabmsg->t_counts.t_blocks_hit;
5818  }
5819 }
5820 
5821 
5822 /* ----------
5823  * pgstat_recv_tabpurge() -
5824  *
5825  * Arrange for dead table removal.
5826  * ----------
5827  */
5828 static void
5829 pgstat_recv_tabpurge(PgStat_MsgTabpurge *msg, int len)
5830 {
5831  PgStat_StatDBEntry *dbentry;
5832  int i;
5833 
5834  dbentry = pgstat_get_db_entry(msg->m_databaseid, false);
5835 
5836  /*
5837  * No need to purge if we don't even know the database.
5838  */
5839  if (!dbentry || !dbentry->tables)
5840  return;
5841 
5842  /*
5843  * Process all table entries in the message.
5844  */
5845  for (i = 0; i < msg->m_nentries; i++)
5846  {
5847  /* Remove from hashtable if present; we don't care if it's not. */
5848  (void) hash_search(dbentry->tables,
5849  (void *) &(msg->m_tableid[i]),
5850  HASH_REMOVE, NULL);
5851  }
5852 }
5853 
5854 
5855 /* ----------
5856  * pgstat_recv_dropdb() -
5857  *
5858  * Arrange for dead database removal
5859  * ----------
5860  */
5861 static void
5862 pgstat_recv_dropdb(PgStat_MsgDropdb *msg, int len)
5863 {
5864  Oid dbid = msg->m_databaseid;
5865  PgStat_StatDBEntry *dbentry;
5866 
5867  /*
5868  * Lookup the database in the hashtable.
5869  */
5870  dbentry = pgstat_get_db_entry(dbid, false);
5871 
5872  /*
5873  * If found, remove it (along with the db statfile).
5874  */
5875  if (dbentry)
5876  {
5877  char statfile[MAXPGPATH];
5878 
5879  get_dbstat_filename(false, false, dbid, statfile, MAXPGPATH);
5880 
5881  elog(DEBUG2, "removing stats file \"%s\"", statfile);
5882  unlink(statfile);
5883 
5884  if (dbentry->tables != NULL)
5885