/*-------------------------------------------------------------------------
 *
 * portalmem.c
 *	  backend portal memory management
 *
 * Portals are objects representing the execution state of a query.
 * This module provides memory management services for portals, but it
 * doesn't actually run the executor for them.
 *
 *
 * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/utils/mmgr/portalmem.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/xact.h"
#include "catalog/pg_type.h"
#include "commands/portalcmds.h"
#include "funcapi.h"
#include "miscadmin.h"
#include "storage/ipc.h"
#include "utils/builtins.h"
#include "utils/memutils.h"
#include "utils/snapmgr.h"
#include "utils/timestamp.h"

/*
 * Estimate of the maximum number of open portals a user would have,
 * used in initially sizing the PortalHashTable in EnablePortalManager().
 * Since the hash table can expand, there's no need to make this overly
 * generous, and keeping it small avoids unnecessary overhead in the
 * hash_seq_search() calls executed during transaction end.
 */
#define PORTALS_PER_USER	16


/* ----------------
 *		Global state
 * ----------------
 */

#define MAX_PORTALNAME_LEN		NAMEDATALEN

typedef struct portalhashent
{
	char		portalname[MAX_PORTALNAME_LEN];
	Portal		portal;
} PortalHashEnt;

static HTAB *PortalHashTable = NULL;

#define PortalHashTableLookup(NAME, PORTAL) \
do { \
	PortalHashEnt *hentry; \
	\
	hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
										   (NAME), HASH_FIND, NULL); \
	if (hentry) \
		PORTAL = hentry->portal; \
	else \
		PORTAL = NULL; \
} while(0)

#define PortalHashTableInsert(PORTAL, NAME) \
do { \
	PortalHashEnt *hentry; bool found; \
	\
	hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
										   (NAME), HASH_ENTER, &found); \
	if (found) \
		elog(ERROR, "duplicate portal name"); \
	hentry->portal = PORTAL; \
	/* To avoid duplicate storage, make PORTAL->name point to htab entry */ \
	PORTAL->name = hentry->portalname; \
} while(0)

#define PortalHashTableDelete(PORTAL) \
do { \
	PortalHashEnt *hentry; \
	\
	hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
										   PORTAL->name, HASH_REMOVE, NULL); \
	if (hentry == NULL) \
		elog(WARNING, "trying to delete portal name that does not exist"); \
} while(0)

static MemoryContext TopPortalContext = NULL;


/* ----------------------------------------------------------------
 *				   public portal interface functions
 * ----------------------------------------------------------------
 */

/*
 * EnablePortalManager
 *		Enables the portal management module at backend startup.
 */
void
EnablePortalManager(void)
{
	HASHCTL		ctl;

	Assert(TopPortalContext == NULL);

	TopPortalContext = AllocSetContextCreate(TopMemoryContext,
											 "TopPortalContext",
											 ALLOCSET_DEFAULT_SIZES);

	ctl.keysize = MAX_PORTALNAME_LEN;
	ctl.entrysize = sizeof(PortalHashEnt);

	/*
	 * use PORTALS_PER_USER as a guess of how many hash table entries to
	 * create, initially
	 */
	PortalHashTable = hash_create("Portals", PORTALS_PER_USER,
								  &ctl, HASH_ELEM | HASH_STRINGS);
}

/*
 * GetPortalByName
 *		Returns a portal given a portal name, or NULL if name not found.
 */
Portal
GetPortalByName(const char *name)
{
	Portal		portal;

	if (PointerIsValid(name))
		PortalHashTableLookup(name, portal);
	else
		portal = NULL;

	return portal;
}

/*
 * PortalGetPrimaryStmt
 *		Get the "primary" stmt within a portal, ie, the one marked canSetTag.
 *
 * Returns NULL if no such stmt.  If multiple PlannedStmt structs within the
 * portal are marked canSetTag, returns the first one.  Neither of these
 * cases should occur in present usages of this function.
 */
PlannedStmt *
PortalGetPrimaryStmt(Portal portal)
{
	ListCell   *lc;

	foreach(lc, portal->stmts)
	{
		PlannedStmt *stmt = lfirst_node(PlannedStmt, lc);

		if (stmt->canSetTag)
			return stmt;
	}
	return NULL;
}

/*
 * CreatePortal
 *		Returns a new portal given a name.
 *
 * allowDup: if true, automatically drop any pre-existing portal of the
 * same name (if false, an error is raised).
 *
 * dupSilent: if true, don't even emit a WARNING.
 */
Portal
CreatePortal(const char *name, bool allowDup, bool dupSilent)
{
	Portal		portal;

	Assert(PointerIsValid(name));

	portal = GetPortalByName(name);
	if (PortalIsValid(portal))
	{
		if (!allowDup)
			ereport(ERROR,
					(errcode(ERRCODE_DUPLICATE_CURSOR),
					 errmsg("cursor \"%s\" already exists", name)));
		if (!dupSilent)
			ereport(WARNING,
					(errcode(ERRCODE_DUPLICATE_CURSOR),
					 errmsg("closing existing cursor \"%s\"",
							name)));
		PortalDrop(portal, false);
	}

	/* make new portal structure */
	portal = (Portal) MemoryContextAllocZero(TopPortalContext, sizeof *portal);

	/* initialize portal context; typically it won't store much */
	portal->portalContext = AllocSetContextCreate(TopPortalContext,
												  "PortalContext",
												  ALLOCSET_SMALL_SIZES);

	/* create a resource owner for the portal */
	portal->resowner = ResourceOwnerCreate(CurTransactionResourceOwner,
										   "Portal");

	/* initialize portal fields that don't start off zero */
	portal->status = PORTAL_NEW;
	portal->cleanup = PortalCleanup;
	portal->createSubid = GetCurrentSubTransactionId();
	portal->activeSubid = portal->createSubid;
	portal->createLevel = GetCurrentTransactionNestLevel();
	portal->strategy = PORTAL_MULTI_QUERY;
	portal->cursorOptions = CURSOR_OPT_NO_SCROLL;
	portal->atStart = true;
	portal->atEnd = true;		/* disallow fetches until query is set */
	portal->visible = true;
	portal->creation_time = GetCurrentStatementStartTimestamp();

	/* put portal in table (sets portal->name) */
	PortalHashTableInsert(portal, name);

	/* for named portals reuse portal->name copy */
	MemoryContextSetIdentifier(portal->portalContext, portal->name[0] ? portal->name : "<unnamed>");

	return portal;
}

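/*
 * A minimal sketch of the create/lookup/drop cycle, assuming a caller along
 * the lines of SPI or the cursor commands (real callers also define and run
 * the portal's query in between, with full error handling):
 *
 *		Portal		portal;
 *
 *		portal = CreatePortal("my_cursor", false, false);
 *		Assert(GetPortalByName("my_cursor") == portal);
 *		...
 *		PortalDrop(portal, false);
 */
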
/*
 * CreateNewPortal
 *		Create a new portal, assigning it a random nonconflicting name.
 */
Portal
CreateNewPortal(void)
{
	static unsigned int unnamed_portal_count = 0;

	char		portalname[MAX_PORTALNAME_LEN];

	/* Select a nonconflicting name */
	for (;;)
	{
		unnamed_portal_count++;
		sprintf(portalname, "<unnamed portal %u>", unnamed_portal_count);
		if (GetPortalByName(portalname) == NULL)
			break;
	}

	return CreatePortal(portalname, false, false);
}

/*
 * PortalDefineQuery
 *		A simple subroutine to establish a portal's query.
 *
 * Notes: as of PG 8.4, caller MUST supply a sourceText string; it is not
 * allowed anymore to pass NULL.  (If you really don't have source text,
 * you can pass a constant string, perhaps "(query not available)".)
 *
 * commandTag shall be CMDTAG_UNKNOWN if and only if the original query
 * string (before rewriting) was an empty string.
 *
 * If cplan is provided, then it is a cached plan containing the stmts, and
 * the caller must have done GetCachedPlan(), causing a refcount increment.
 * The refcount will be released when the portal is destroyed.
 *
 * If cplan is NULL, then it is the caller's responsibility to ensure that
 * the passed plan trees have adequate lifetime.  Typically this is done by
 * copying them into the portal's context.
 *
 * The caller is also responsible for ensuring that the passed prepStmtName
 * (if not NULL) and sourceText have adequate lifetime.
 *
 * NB: this function mustn't do much beyond storing the passed values; in
 * particular don't do anything that risks elog(ERROR).  If that were to
 * happen here before storing the cplan reference, we'd leak the plancache
 * refcount that the caller is trying to hand off to us.
 */
void
PortalDefineQuery(Portal portal,
				  const char *prepStmtName,
				  const char *sourceText,
				  CommandTag commandTag,
				  List *stmts,
				  CachedPlan *cplan)
{
	Assert(PortalIsValid(portal));
	Assert(portal->status == PORTAL_NEW);

	Assert(sourceText != NULL);
	Assert(commandTag != CMDTAG_UNKNOWN || stmts == NIL);

	portal->prepStmtName = prepStmtName;
	portal->sourceText = sourceText;
	portal->qc.commandTag = commandTag;
	portal->qc.nprocessed = 0;
	portal->commandTag = commandTag;
	portal->stmts = stmts;
	portal->cplan = cplan;
	portal->status = PORTAL_DEFINED;
}

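/*
 * A minimal sketch of the intended calling sequence, assuming a caller along
 * the lines of exec_simple_query() or SPI (variable names here are
 * illustrative only):
 *
 *		portal = CreatePortal(name, false, false);
 *		PortalDefineQuery(portal, NULL, query_string, commandTag,
 *						  plan_list, cplan);
 *		... then PortalStart()/PortalRun() in pquery.c drive execution ...
 *
 * The hand-off discipline matters: when cplan is non-NULL its refcount was
 * bumped by GetCachedPlan(), so nothing may fail between that call and the
 * store of portal->cplan above, per the NB in the header comment.
 */
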
/*
 * PortalReleaseCachedPlan
 *		Release a portal's reference to its cached plan, if any.
 */
static void
PortalReleaseCachedPlan(Portal portal)
{
	if (portal->cplan)
	{
		ReleaseCachedPlan(portal->cplan, NULL);
		portal->cplan = NULL;

		/*
		 * We must also clear portal->stmts which is now a dangling reference
		 * to the cached plan's plan list.  This protects any code that might
		 * try to examine the Portal later.
		 */
		portal->stmts = NIL;
	}
}

/*
 * PortalCreateHoldStore
 *		Create the tuplestore for a portal.
 */
void
PortalCreateHoldStore(Portal portal)
{
	MemoryContext oldcxt;

	Assert(portal->holdContext == NULL);
	Assert(portal->holdStore == NULL);
	Assert(portal->holdSnapshot == NULL);

	/*
	 * Create the memory context that is used for storage of the tuple set.
	 * Note this is NOT a child of the portal's portalContext.
	 */
	portal->holdContext =
		AllocSetContextCreate(TopPortalContext,
							  "PortalHoldContext",
							  ALLOCSET_DEFAULT_SIZES);

	/*
	 * Create the tuple store, selecting cross-transaction temp files, and
	 * enabling random access only if cursor requires scrolling.
	 *
	 * XXX: Should maintenance_work_mem be used for the portal size?
	 */
	oldcxt = MemoryContextSwitchTo(portal->holdContext);

	portal->holdStore =
		tuplestore_begin_heap(portal->cursorOptions & CURSOR_OPT_SCROLL,
							  true, work_mem);

	MemoryContextSwitchTo(oldcxt);
}

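/*
 * For clarity, the tuplestore_begin_heap() call above with its arguments
 * annotated (its signature is randomAccess, interXact, maxKBytes):
 *
 *		tuplestore_begin_heap(portal->cursorOptions & CURSOR_OPT_SCROLL,
 *							  true,			-- interXact: survive commit
 *							  work_mem);	-- spill-to-disk threshold, in kB
 *
 * Because interXact is true, the store's temp files outlive the creating
 * transaction and must be freed explicitly; PortalDrop does that via
 * tuplestore_end().
 */
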
/*
 * PinPortal
 *		Protect a portal from dropping.
 *
 * A pinned portal is still unpinned and dropped at transaction or
 * subtransaction abort.
 */
void
PinPortal(Portal portal)
{
	if (portal->portalPinned)
		elog(ERROR, "portal already pinned");

	portal->portalPinned = true;
}

void
UnpinPortal(Portal portal)
{
	if (!portal->portalPinned)
		elog(ERROR, "portal not pinned");

	portal->portalPinned = false;
}

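/*
 * A minimal usage sketch, assuming a caller like PL/pgSQL's FOR-over-cursor
 * code that must keep a portal alive across user code:
 *
 *		PinPortal(portal);
 *		... run user code that must not be able to drop the portal ...
 *		UnpinPortal(portal);
 *
 * The pin is not an abort-proof guarantee: AtCleanup_Portals and
 * AtSubCleanup_Portals forcibly unpin before dropping.
 */
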
/*
 * MarkPortalActive
 *		Transition a portal from READY to ACTIVE state.
 *
 * NOTE: never set portal->status = PORTAL_ACTIVE directly; call this instead.
 */
void
MarkPortalActive(Portal portal)
{
	/* For safety, this is a runtime test not just an Assert */
	if (portal->status != PORTAL_READY)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("portal \"%s\" cannot be run", portal->name)));
	/* Perform the state transition */
	portal->status = PORTAL_ACTIVE;
	portal->activeSubid = GetCurrentSubTransactionId();
}

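/*
 * Summary of the state machine enforced by the Mark* functions (a sketch
 * drawn from this file; PortalStatus is declared in portal.h and
 * PortalStart lives in pquery.c):
 *
 *		PORTAL_NEW     --PortalDefineQuery-->   PORTAL_DEFINED
 *		PORTAL_DEFINED --PortalStart------->    PORTAL_READY
 *		PORTAL_READY   --MarkPortalActive-->    PORTAL_ACTIVE
 *		PORTAL_ACTIVE  --MarkPortalDone---->    PORTAL_DONE
 *		(any state but DONE) --MarkPortalFailed--> PORTAL_FAILED
 *
 * PortalDrop then disposes of the portal in any state except ACTIVE.
 */
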
/*
 * MarkPortalDone
 *		Transition a portal from ACTIVE to DONE state.
 *
 * NOTE: never set portal->status = PORTAL_DONE directly; call this instead.
 */
void
MarkPortalDone(Portal portal)
{
	/* Perform the state transition */
	Assert(portal->status == PORTAL_ACTIVE);
	portal->status = PORTAL_DONE;

	/*
	 * Allow portalcmds.c to clean up the state it knows about.  We might as
	 * well do that now, since the portal can't be executed any more.
	 *
	 * In some cases involving execution of a ROLLBACK command in an already
	 * aborted transaction, this is necessary, or we'd reach AtCleanup_Portals
	 * with the cleanup hook still unexecuted.
	 */
	if (PointerIsValid(portal->cleanup))
	{
		portal->cleanup(portal);
		portal->cleanup = NULL;
	}
}

/*
 * MarkPortalFailed
 *		Transition a portal into FAILED state.
 *
 * NOTE: never set portal->status = PORTAL_FAILED directly; call this instead.
 */
void
MarkPortalFailed(Portal portal)
{
	/* Perform the state transition */
	Assert(portal->status != PORTAL_DONE);
	portal->status = PORTAL_FAILED;

	/*
	 * Allow portalcmds.c to clean up the state it knows about.  We might as
	 * well do that now, since the portal can't be executed any more.
	 *
	 * In some cases involving cleanup of an already aborted transaction, this
	 * is necessary, or we'd reach AtCleanup_Portals with the cleanup hook
	 * still unexecuted.
	 */
	if (PointerIsValid(portal->cleanup))
	{
		portal->cleanup(portal);
		portal->cleanup = NULL;
	}
}

/*
 * PortalDrop
 *		Destroy the portal.
 */
void
PortalDrop(Portal portal, bool isTopCommit)
{
	Assert(PortalIsValid(portal));

	/*
	 * Don't allow dropping a pinned portal, it's still needed by whoever
	 * pinned it.
	 */
	if (portal->portalPinned)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_CURSOR_STATE),
				 errmsg("cannot drop pinned portal \"%s\"", portal->name)));

	/*
	 * Not sure if the PORTAL_ACTIVE case can validly happen or not...
	 */
	if (portal->status == PORTAL_ACTIVE)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_CURSOR_STATE),
				 errmsg("cannot drop active portal \"%s\"", portal->name)));

	/*
	 * Allow portalcmds.c to clean up the state it knows about, in particular
	 * shutting down the executor if still active.  This step potentially runs
	 * user-defined code so failure has to be expected.  It's the cleanup
	 * hook's responsibility to not try to do that more than once, in the case
	 * that failure occurs and then we come back to drop the portal again
	 * during transaction abort.
	 *
	 * Note: in most paths of control, this will have been done already in
	 * MarkPortalDone or MarkPortalFailed.  We're just making sure.
	 */
	if (PointerIsValid(portal->cleanup))
	{
		portal->cleanup(portal);
		portal->cleanup = NULL;
	}

	/* There shouldn't be an active snapshot anymore, except after error */
	Assert(portal->portalSnapshot == NULL || !isTopCommit);

	/*
	 * Remove portal from hash table.  Because we do this here, we will not
	 * come back to try to remove the portal again if there's any error in the
	 * subsequent steps.  Better to leak a little memory than to get into an
	 * infinite error-recovery loop.
	 */
	PortalHashTableDelete(portal);

	/* drop cached plan reference, if any */
	PortalReleaseCachedPlan(portal);

	/*
	 * If portal has a snapshot protecting its data, release that.  This needs
	 * a little care since the registration will be attached to the portal's
	 * resowner; if the portal failed, we will already have released the
	 * resowner (and the snapshot) during transaction abort.
	 */
	if (portal->holdSnapshot)
	{
		if (portal->resowner)
			UnregisterSnapshotFromOwner(portal->holdSnapshot,
										portal->resowner);
		portal->holdSnapshot = NULL;
	}

	/*
	 * Release any resources still attached to the portal.  There are several
	 * cases being covered here:
	 *
	 * Top transaction commit (indicated by isTopCommit): normally we should
	 * do nothing here and let the regular end-of-transaction resource
	 * releasing mechanism handle these resources too.  However, if we have a
	 * FAILED portal (eg, a cursor that got an error), we'd better clean up
	 * its resources to avoid resource-leakage warning messages.
	 *
	 * Sub transaction commit: never comes here at all, since we don't kill
	 * any portals in AtSubCommit_Portals().
	 *
	 * Main or sub transaction abort: we will do nothing here because
	 * portal->resowner was already set NULL; the resources were already
	 * cleaned up in transaction abort.
	 *
	 * Ordinary portal drop: must release resources.  However, if the portal
	 * is not FAILED then we do not release its locks.  The locks become the
	 * responsibility of the transaction's ResourceOwner (since it is the
	 * parent of the portal's owner) and will be released when the transaction
	 * eventually ends.
	 */
	if (portal->resowner &&
		(!isTopCommit || portal->status == PORTAL_FAILED))
	{
		bool		isCommit = (portal->status != PORTAL_FAILED);

		ResourceOwnerRelease(portal->resowner,
							 RESOURCE_RELEASE_BEFORE_LOCKS,
							 isCommit, false);
		ResourceOwnerRelease(portal->resowner,
							 RESOURCE_RELEASE_LOCKS,
							 isCommit, false);
		ResourceOwnerRelease(portal->resowner,
							 RESOURCE_RELEASE_AFTER_LOCKS,
							 isCommit, false);
		ResourceOwnerDelete(portal->resowner);
	}
	portal->resowner = NULL;

	/*
	 * Delete tuplestore if present.  We should do this even under error
	 * conditions; since the tuplestore would have been using cross-
	 * transaction storage, its temp files need to be explicitly deleted.
	 */
	if (portal->holdStore)
	{
		MemoryContext oldcontext;

		oldcontext = MemoryContextSwitchTo(portal->holdContext);
		tuplestore_end(portal->holdStore);
		MemoryContextSwitchTo(oldcontext);
		portal->holdStore = NULL;
	}

	/* delete tuplestore storage, if any */
	if (portal->holdContext)
		MemoryContextDelete(portal->holdContext);

	/* release subsidiary storage */
	MemoryContextDelete(portal->portalContext);

	/* release portal struct (it's in TopPortalContext) */
	pfree(portal);
}

/*
 * Delete all declared cursors.
 *
 * Used by commands: CLOSE ALL, DISCARD ALL
 */
void
PortalHashTableDeleteAll(void)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	if (PortalHashTable == NULL)
		return;

	hash_seq_init(&status, PortalHashTable);
	while ((hentry = hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		/* Can't close the active portal (the one running the command) */
		if (portal->status == PORTAL_ACTIVE)
			continue;

		PortalDrop(portal, false);

		/* Restart the iteration in case that led to other drops */
		hash_seq_term(&status);
		hash_seq_init(&status, PortalHashTable);
	}
}

/*
 * "Hold" a portal.  Prepare it for access by later transactions.
 */
static void
HoldPortal(Portal portal)
{
	/*
	 * Note that PersistHoldablePortal() must release all resources used by
	 * the portal that are local to the creating transaction.
	 */
	PortalCreateHoldStore(portal);
	PersistHoldablePortal(portal);

	/* drop cached plan reference, if any */
	PortalReleaseCachedPlan(portal);

	/*
	 * Any resources belonging to the portal will be released in the upcoming
	 * transaction-wide cleanup; the portal will no longer have its own
	 * resources.
	 */
	portal->resowner = NULL;

	/*
	 * Having successfully exported the holdable cursor, mark it as not
	 * belonging to this transaction.
	 */
	portal->createSubid = InvalidSubTransactionId;
	portal->activeSubid = InvalidSubTransactionId;
	portal->createLevel = 0;
}

/*
 * Pre-commit processing for portals.
 *
 * Holdable cursors created in this transaction need to be converted to
 * materialized form, since we are going to close down the executor and
 * release locks.  Non-holdable portals created in this transaction are
 * simply removed.  Portals remaining from prior transactions should be
 * left untouched.
 *
 * Returns true if any portals changed state (possibly causing user-defined
 * code to be run), false if not.
 */
bool
PreCommit_Portals(bool isPrepare)
{
	bool		result = false;
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		/*
		 * There should be no pinned portals anymore.  Complain if someone
		 * leaked one.  Auto-held portals are allowed; we assume that whoever
		 * pinned them is managing them.
		 */
		if (portal->portalPinned && !portal->autoHeld)
			elog(ERROR, "cannot commit while a portal is pinned");

		/*
		 * Do not touch active portals --- this can only happen in the case of
		 * a multi-transaction utility command, such as VACUUM, or a commit in
		 * a procedure.
		 *
		 * Note however that any resource owner attached to such a portal is
		 * still going to go away, so don't leave a dangling pointer.  Also
		 * unregister any snapshots held by the portal, mainly to avoid
		 * snapshot leak warnings from ResourceOwnerRelease().
		 */
		if (portal->status == PORTAL_ACTIVE)
		{
			if (portal->holdSnapshot)
			{
				if (portal->resowner)
					UnregisterSnapshotFromOwner(portal->holdSnapshot,
												portal->resowner);
				portal->holdSnapshot = NULL;
			}
			portal->resowner = NULL;
			/* Clear portalSnapshot too, for cleanliness */
			portal->portalSnapshot = NULL;
			continue;
		}

		/* Is it a holdable portal created in the current xact? */
		if ((portal->cursorOptions & CURSOR_OPT_HOLD) &&
			portal->createSubid != InvalidSubTransactionId &&
			portal->status == PORTAL_READY)
		{
			/*
			 * We are exiting the transaction that created a holdable cursor.
			 * Instead of dropping the portal, prepare it for access by later
			 * transactions.
			 *
			 * However, if this is PREPARE TRANSACTION rather than COMMIT,
			 * refuse PREPARE, because the semantics seem pretty unclear.
			 */
			if (isPrepare)
				ereport(ERROR,
						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
						 errmsg("cannot PREPARE a transaction that has created a cursor WITH HOLD")));

			HoldPortal(portal);

			/* Report we changed state */
			result = true;
		}
		else if (portal->createSubid == InvalidSubTransactionId)
		{
			/*
			 * Do nothing to cursors held over from a previous transaction
			 * (including ones we just froze in a previous cycle of this loop)
			 */
			continue;
		}
		else
		{
			/* Zap all non-holdable portals */
			PortalDrop(portal, true);

			/* Report we changed state */
			result = true;
		}

		/*
		 * After either freezing or dropping a portal, we have to restart the
		 * iteration, because we could have invoked user-defined code that
		 * caused a drop of the next portal in the hash chain.
		 */
		hash_seq_term(&status);
		hash_seq_init(&status, PortalHashTable);
	}

	return result;
}

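/*
 * The holdable-cursor branch above is what makes this SQL sequence work
 * (an illustration; after COMMIT the FETCH reads the materialized
 * holdStore rather than a live executor):
 *
 *		BEGIN;
 *		DECLARE c CURSOR WITH HOLD FOR SELECT * FROM tab;
 *		COMMIT;		-- PreCommit_Portals runs HoldPortal(c) here
 *		FETCH 10 FROM c;
 */
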
/*
 * Abort processing for portals.
 *
 * At this point we run the cleanup hook if present, but we can't release the
 * portal's memory until the cleanup call.
 */
void
AtAbort_Portals(void)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		/*
		 * When elog(FATAL) is in progress, we need to set the active portal
		 * to failed, so that PortalCleanup() doesn't run the executor
		 * shutdown.
		 */
		if (portal->status == PORTAL_ACTIVE && shmem_exit_inprogress)
			MarkPortalFailed(portal);

		/*
		 * Do nothing else to cursors held over from a previous transaction.
		 */
		if (portal->createSubid == InvalidSubTransactionId)
			continue;

		/*
		 * Do nothing to auto-held cursors.  This is similar to the case of a
		 * cursor from a previous transaction, but it could also be that the
		 * cursor was auto-held in this transaction, so it wants to live on.
		 */
		if (portal->autoHeld)
			continue;

		/*
		 * If it was created in the current transaction, we can't do normal
		 * shutdown on a READY portal either; it might refer to objects
		 * created in the failed transaction.  See comments in
		 * AtSubAbort_Portals.
		 */
		if (portal->status == PORTAL_READY)
			MarkPortalFailed(portal);

		/*
		 * Allow portalcmds.c to clean up the state it knows about, if we
		 * haven't already.
		 */
		if (PointerIsValid(portal->cleanup))
		{
			portal->cleanup(portal);
			portal->cleanup = NULL;
		}

		/* drop cached plan reference, if any */
		PortalReleaseCachedPlan(portal);

		/*
		 * Any resources belonging to the portal will be released in the
		 * upcoming transaction-wide cleanup; they will be gone before we run
		 * PortalDrop.
		 */
		portal->resowner = NULL;

		/*
		 * Although we can't delete the portal data structure proper, we can
		 * release any memory in subsidiary contexts, such as executor state.
		 * The cleanup hook was the last thing that might have needed data
		 * there.  But leave active portals alone.
		 */
		if (portal->status != PORTAL_ACTIVE)
			MemoryContextDeleteChildren(portal->portalContext);
	}
}

/*
 * Post-abort cleanup for portals.
 *
 * Delete all portals not held over from prior transactions.
 */
void
AtCleanup_Portals(void)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		/*
		 * Do not touch active portals --- this can only happen in the case of
		 * a multi-transaction command.
		 */
		if (portal->status == PORTAL_ACTIVE)
			continue;

		/*
		 * Do nothing to cursors held over from a previous transaction or
		 * auto-held ones.
		 */
		if (portal->createSubid == InvalidSubTransactionId || portal->autoHeld)
		{
			Assert(portal->status != PORTAL_ACTIVE);
			Assert(portal->resowner == NULL);
			continue;
		}

		/*
		 * If a portal is still pinned, forcibly unpin it.  PortalDrop will
		 * not let us drop the portal otherwise.  Whoever pinned the portal
		 * was interrupted by the abort too and won't try to use it anymore.
		 */
		if (portal->portalPinned)
			portal->portalPinned = false;

		/*
		 * We had better not call any user-defined code during cleanup, so if
		 * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
		 */
		if (PointerIsValid(portal->cleanup))
		{
			elog(WARNING, "skipping cleanup for portal \"%s\"", portal->name);
			portal->cleanup = NULL;
		}

		/* Zap it. */
		PortalDrop(portal, false);
	}
}

/*
 * Portal-related cleanup when we return to the main loop on error.
 *
 * This is different from the cleanup at transaction abort.  Auto-held portals
 * are cleaned up on error but not on transaction abort.
 */
void
PortalErrorCleanup(void)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		if (portal->autoHeld)
		{
			portal->portalPinned = false;
			PortalDrop(portal, false);
		}
	}
}

/*
 * Pre-subcommit processing for portals.
 *
 * Reassign portals created or used in the current subtransaction to the
 * parent subtransaction.
 */
void
AtSubCommit_Portals(SubTransactionId mySubid,
					SubTransactionId parentSubid,
					int parentLevel,
					ResourceOwner parentXactOwner)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		if (portal->createSubid == mySubid)
		{
			portal->createSubid = parentSubid;
			portal->createLevel = parentLevel;
			if (portal->resowner)
				ResourceOwnerNewParent(portal->resowner, parentXactOwner);
		}
		if (portal->activeSubid == mySubid)
			portal->activeSubid = parentSubid;
	}
}

/*
 * Subtransaction abort handling for portals.
 *
 * Deactivate portals created or used during the failed subtransaction.
 * Note that per AtSubCommit_Portals, this will catch portals created/used
 * in descendants of the subtransaction too.
 *
 * We don't destroy any portals here; that's done in AtSubCleanup_Portals.
 */
void
AtSubAbort_Portals(SubTransactionId mySubid,
				   SubTransactionId parentSubid,
				   ResourceOwner myXactOwner,
				   ResourceOwner parentXactOwner)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		/* Was it created in this subtransaction? */
		if (portal->createSubid != mySubid)
		{
			/* No, but maybe it was used in this subtransaction? */
			if (portal->activeSubid == mySubid)
			{
				/* Maintain activeSubid until the portal is removed */
				portal->activeSubid = parentSubid;

				/*
				 * A MarkPortalActive() caller ran an upper-level portal in
				 * this subtransaction and left the portal ACTIVE.  This can't
				 * happen, but force the portal into FAILED state for the same
				 * reasons discussed below.
				 *
				 * We assume we can get away without forcing upper-level READY
				 * portals to fail, even if they were run and then suspended.
				 * In theory a suspended upper-level portal could have
				 * acquired some references to objects that are about to be
				 * destroyed, but there should be sufficient defenses against
				 * such cases: the portal's original query cannot contain such
				 * references, and any references within, say, cached plans of
				 * PL/pgSQL functions are not from active queries and should
				 * be protected by revalidation logic.
				 */
				if (portal->status == PORTAL_ACTIVE)
					MarkPortalFailed(portal);

				/*
				 * Also, if we failed it during the current subtransaction
				 * (either just above, or earlier), reattach its resource
				 * owner to the current subtransaction's resource owner, so
				 * that any resources it still holds will be released while
				 * cleaning up this subtransaction.  This prevents some corner
				 * cases wherein we might get Asserts or worse while cleaning
				 * up objects created during the current subtransaction
				 * (because they're still referenced within this portal).
				 */
				if (portal->status == PORTAL_FAILED && portal->resowner)
				{
					ResourceOwnerNewParent(portal->resowner, myXactOwner);
					portal->resowner = NULL;
				}
			}
			/* Done if it wasn't created in this subtransaction */
			continue;
		}

		/*
		 * Force any live portals of my own subtransaction into FAILED state.
		 * We have to do this because they might refer to objects created or
		 * changed in the failed subtransaction, leading to crashes within
		 * ExecutorEnd when portalcmds.c tries to close down the portal.
		 * Currently, every MarkPortalActive() caller ensures it updates the
		 * portal status again before relinquishing control, so ACTIVE can't
		 * happen here.  If it does happen, dispose the portal like existing
		 * MarkPortalActive() callers would.
		 */
		if (portal->status == PORTAL_READY ||
			portal->status == PORTAL_ACTIVE)
			MarkPortalFailed(portal);

		/*
		 * Allow portalcmds.c to clean up the state it knows about, if we
		 * haven't already.
		 */
		if (PointerIsValid(portal->cleanup))
		{
			portal->cleanup(portal);
			portal->cleanup = NULL;
		}

		/* drop cached plan reference, if any */
		PortalReleaseCachedPlan(portal);

		/*
		 * Any resources belonging to the portal will be released in the
		 * upcoming transaction-wide cleanup; they will be gone before we run
		 * PortalDrop.
		 */
		portal->resowner = NULL;

		/*
		 * Although we can't delete the portal data structure proper, we can
		 * release any memory in subsidiary contexts, such as executor state.
		 * The cleanup hook was the last thing that might have needed data
		 * there.
		 */
		MemoryContextDeleteChildren(portal->portalContext);
	}
}

/*
 * Post-subabort cleanup for portals.
 *
 * Drop all portals created in the failed subtransaction (but note that
 * we will not drop any that were reassigned to the parent above).
 */
void
AtSubCleanup_Portals(SubTransactionId mySubid)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		if (portal->createSubid != mySubid)
			continue;

		/*
		 * If a portal is still pinned, forcibly unpin it.  PortalDrop will
		 * not let us drop the portal otherwise.  Whoever pinned the portal
		 * was interrupted by the abort too and won't try to use it anymore.
		 */
		if (portal->portalPinned)
			portal->portalPinned = false;

		/*
		 * We had better not call any user-defined code during cleanup, so if
		 * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
		 */
		if (PointerIsValid(portal->cleanup))
		{
			elog(WARNING, "skipping cleanup for portal \"%s\"", portal->name);
			portal->cleanup = NULL;
		}

		/* Zap it. */
		PortalDrop(portal, false);
	}
}

/* Find all available cursors */
Datum
pg_cursor(PG_FUNCTION_ARGS)
{
	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
	HASH_SEQ_STATUS hash_seq;
	PortalHashEnt *hentry;

	/*
	 * We put all the tuples into a tuplestore in one scan of the hashtable.
	 * This avoids any issue of the hashtable possibly changing between calls.
	 */
	InitMaterializedSRF(fcinfo, 0);

	hash_seq_init(&hash_seq, PortalHashTable);
	while ((hentry = hash_seq_search(&hash_seq)) != NULL)
	{
		Portal		portal = hentry->portal;
		Datum		values[6];
		bool		nulls[6] = {0};

		/* report only "visible" entries */
		if (!portal->visible)
			continue;

		values[0] = CStringGetTextDatum(portal->name);
		values[1] = CStringGetTextDatum(portal->sourceText);
		values[2] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_HOLD);
		values[3] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_BINARY);
		values[4] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_SCROLL);
		values[5] = TimestampTzGetDatum(portal->creation_time);

		tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);
	}

	return (Datum) 0;
}

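/*
 * pg_cursor() is exposed at the SQL level as the pg_cursors system view,
 * so the six columns built above can be inspected with, e.g.:
 *
 *		SELECT name, statement, is_holdable FROM pg_cursors;
 */
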
bool
ThereAreNoReadyPortals(void)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		if (portal->status == PORTAL_READY)
			return false;
	}

	return true;
}

/*
 * Hold all pinned portals.
 *
 * When initiating a COMMIT or ROLLBACK inside a procedure, this must be
 * called to protect internally-generated cursors from being dropped during
 * the transaction shutdown.  Currently, SPI calls this automatically; PLs
 * that initiate COMMIT or ROLLBACK some other way are on the hook to do it
 * themselves.  (Note that we couldn't do this in, say, AtAbort_Portals
 * because we need to run user-defined code while persisting a portal.
 * It's too late to do that once transaction abort has started.)
 *
 * We protect such portals by converting them to held cursors.  We mark them
 * as "auto-held" so that exception exit knows to clean them up.  (In normal,
 * non-exception code paths, the PL needs to clean such portals itself, since
 * transaction end won't do it anymore; but that should be normal practice
 * anyway.)
 */
void
HoldPinnedPortals(void)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		if (portal->portalPinned && !portal->autoHeld)
		{
			/*
			 * Doing transaction control, especially abort, inside a cursor
			 * loop that is not read-only, for example using UPDATE ...
			 * RETURNING, has weird semantics issues.  Also, this
			 * implementation wouldn't work, because such portals cannot be
			 * held.  (The core grammar enforces that only SELECT statements
			 * can drive a cursor, but for example PL/pgSQL does not restrict
			 * it.)
			 */
			if (portal->strategy != PORTAL_ONE_SELECT)
				ereport(ERROR,
						(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
						 errmsg("cannot perform transaction commands inside a cursor loop that is not read-only")));

			/* Verify it's in a suitable state to be held */
			if (portal->status != PORTAL_READY)
				elog(ERROR, "pinned portal is not ready to be auto-held");

			HoldPortal(portal);
			portal->autoHeld = true;
		}
	}
}

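/*
 * Sketch of the expected ordering when a procedure commits, as driven by
 * SPI's commit path (simplified; see spi.c for the authoritative sequence):
 *
 *		HoldPinnedPortals();		-- runs user code, so do it first
 *		ForgetPortalSnapshots();	-- after failure-prone steps are done
 *		CommitTransactionCommand();
 */
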
/*
 * Drop the outer active snapshots for all portals, so that no snapshots
 * remain active.
 *
 * Like HoldPinnedPortals, this must be called when initiating a COMMIT or
 * ROLLBACK inside a procedure.  This has to be separate from that since it
 * should not be run until we're done with steps that are likely to fail.
 *
 * It's tempting to fold this into PreCommit_Portals, but to do so, we'd
 * need to clean up snapshot management in VACUUM and perhaps other places.
 */
void
ForgetPortalSnapshots(void)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;
	int			numPortalSnaps = 0;
	int			numActiveSnaps = 0;

	/* First, scan PortalHashTable and clear portalSnapshot fields */
	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		if (portal->portalSnapshot != NULL)
		{
			portal->portalSnapshot = NULL;
			numPortalSnaps++;
		}
		/* portal->holdSnapshot will be cleaned up in PreCommit_Portals */
	}

	/*
	 * Now, pop all the active snapshots, which should be just those that were
	 * portal snapshots.  Ideally we'd drive this directly off the portal
	 * scan, but there's no good way to visit the portals in the correct
	 * order.  So just cross-check after the fact.
	 */
	while (ActiveSnapshotSet())
	{
		PopActiveSnapshot();
		numActiveSnaps++;
	}

	if (numPortalSnaps != numActiveSnaps)
		elog(ERROR, "portal snapshots (%d) did not account for all active snapshots (%d)",
			 numPortalSnaps, numActiveSnaps);
}
Definition: xact.c:864