execMain.c
1 /*-------------------------------------------------------------------------
2  *
3  * execMain.c
4  * top level executor interface routines
5  *
6  * INTERFACE ROUTINES
7  * ExecutorStart()
8  * ExecutorRun()
9  * ExecutorFinish()
10  * ExecutorEnd()
11  *
12  * These four procedures are the external interface to the executor.
13  * In each case, the query descriptor is required as an argument.
14  *
15  * ExecutorStart must be called at the beginning of execution of any
16  * query plan and ExecutorEnd must always be called at the end of
17  * execution of a plan (unless it is aborted due to error).
18  *
19  * ExecutorRun accepts direction and count arguments that specify whether
20  * the plan is to be executed forwards or backwards, and for how many tuples.
21  * In some cases ExecutorRun may be called multiple times to process all
22  * the tuples for a plan. It is also acceptable to stop short of executing
23  * the whole plan (but only if it is a SELECT).
24  *
25  * ExecutorFinish must be called after the final ExecutorRun call and
26  * before ExecutorEnd. This can be omitted only in case of EXPLAIN,
27  * which should also omit ExecutorRun.
28  *
29  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
30  * Portions Copyright (c) 1994, Regents of the University of California
31  *
32  *
33  * IDENTIFICATION
34  * src/backend/executor/execMain.c
35  *
36  *-------------------------------------------------------------------------
37  */
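/*
 * Illustrative sketch (not part of execMain.c): the canonical calling
 * sequence, roughly as callers such as the traffic cop (pquery.c) use it.
 * Assumes the usual includes (executor/executor.h), that "queryDesc" was
 * built with CreateQueryDesc(), and that a suitable snapshot is already
 * active; the function name is hypothetical and error handling is omitted.
 */
static void
example_run_query_to_completion(QueryDesc *queryDesc)
{
    ExecutorStart(queryDesc, 0);        /* eflags = 0: plain execution */
    ExecutorRun(queryDesc, ForwardScanDirection, 0L, true);    /* count = 0: no limit */
    ExecutorFinish(queryDesc);          /* fire queued AFTER triggers, etc. */
    ExecutorEnd(queryDesc);             /* release executor resources */
}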
38 #include "postgres.h"
39 
40 #include "access/htup_details.h"
41 #include "access/sysattr.h"
42 #include "access/transam.h"
43 #include "access/xact.h"
44 #include "catalog/namespace.h"
45 #include "catalog/partition.h"
46 #include "catalog/pg_publication.h"
47 #include "commands/matview.h"
48 #include "commands/trigger.h"
49 #include "executor/execdebug.h"
50 #include "foreign/fdwapi.h"
51 #include "mb/pg_wchar.h"
52 #include "miscadmin.h"
53 #include "optimizer/clauses.h"
54 #include "parser/parsetree.h"
55 #include "rewrite/rewriteManip.h"
56 #include "storage/bufmgr.h"
57 #include "storage/lmgr.h"
58 #include "tcop/utility.h"
59 #include "utils/acl.h"
60 #include "utils/lsyscache.h"
61 #include "utils/memutils.h"
62 #include "utils/rls.h"
63 #include "utils/ruleutils.h"
64 #include "utils/snapmgr.h"
65 #include "utils/tqual.h"
66 
67 
68 /* Hooks for plugins to get control in ExecutorStart/Run/Finish/End */
69 ExecutorStart_hook_type ExecutorStart_hook = NULL;
70 ExecutorRun_hook_type ExecutorRun_hook = NULL;
71 ExecutorFinish_hook_type ExecutorFinish_hook = NULL;
72 ExecutorEnd_hook_type ExecutorEnd_hook = NULL;
73 
74 /* Hook for plugin to get control in ExecCheckRTPerms() */
75 ExecutorCheckPerms_hook_type ExecutorCheckPerms_hook = NULL;
76 
77 /* decls for local routines only used within this module */
78 static void InitPlan(QueryDesc *queryDesc, int eflags);
79 static void CheckValidRowMarkRel(Relation rel, RowMarkType markType);
80 static void ExecPostprocessPlan(EState *estate);
81 static void ExecEndPlan(PlanState *planstate, EState *estate);
82 static void ExecutePlan(EState *estate, PlanState *planstate,
83  bool use_parallel_mode,
84  CmdType operation,
85  bool sendTuples,
86  uint64 numberTuples,
87  ScanDirection direction,
88  DestReceiver *dest,
89  bool execute_once);
90 static bool ExecCheckRTEPerms(RangeTblEntry *rte);
91 static bool ExecCheckRTEPermsModified(Oid relOid, Oid userid,
92  Bitmapset *modifiedCols,
93  AclMode requiredPerms);
94 static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
95 static char *ExecBuildSlotValueDescription(Oid reloid,
96  TupleTableSlot *slot,
97  TupleDesc tupdesc,
98  Bitmapset *modifiedCols,
99  int maxfieldlen);
100 static char *ExecBuildSlotPartitionKeyDescription(Relation rel,
101  Datum *values,
102  bool *isnull,
103  int maxfieldlen);
104 static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate,
105  Plan *planTree);
106 static void ExecPartitionCheck(ResultRelInfo *resultRelInfo,
107  TupleTableSlot *slot, EState *estate);
108 
109 /*
110  * Note that GetUpdatedColumns() also exists in commands/trigger.c. There does
111  * not appear to be any good header to put it into, given the structures that
112  * it uses, so we let them be duplicated. Be sure to update both if one needs
113  * to be changed, however.
114  */
115 #define GetInsertedColumns(relinfo, estate) \
116  (rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->insertedCols)
117 #define GetUpdatedColumns(relinfo, estate) \
118  (rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->updatedCols)
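/*
 * Illustrative sketch (not part of execMain.c): how code elsewhere might test
 * whether a particular attribute was named as an UPDATE target.  Column
 * numbers in these bitmapsets are offset by
 * FirstLowInvalidHeapAttributeNumber; the function name is hypothetical.
 */
static bool
example_column_was_updated(ResultRelInfo *relinfo, EState *estate, AttrNumber attnum)
{
    Bitmapset  *updatedCols = GetUpdatedColumns(relinfo, estate);

    return bms_is_member(attnum - FirstLowInvalidHeapAttributeNumber, updatedCols);
}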
119 
120 /* end of local decls */
121 
122 
123 /* ----------------------------------------------------------------
124  * ExecutorStart
125  *
126  * This routine must be called at the beginning of any execution of any
127  * query plan
128  *
129  * Takes a QueryDesc previously created by CreateQueryDesc (which is separate
130  * only because some places use QueryDescs for utility commands). The tupDesc
131  * field of the QueryDesc is filled in to describe the tuples that will be
132  * returned, and the internal fields (estate and planstate) are set up.
133  *
134  * eflags contains flag bits as described in executor.h.
135  *
136  * NB: the CurrentMemoryContext when this is called will become the parent
137  * of the per-query context used for this Executor invocation.
138  *
139  * We provide a function hook variable that lets loadable plugins
140  * get control when ExecutorStart is called. Such a plugin would
141  * normally call standard_ExecutorStart().
142  *
143  * ----------------------------------------------------------------
144  */
145 void
146 ExecutorStart(QueryDesc *queryDesc, int eflags)
147 {
148  if (ExecutorStart_hook)
149  (*ExecutorStart_hook) (queryDesc, eflags);
150  else
151  standard_ExecutorStart(queryDesc, eflags);
152 }
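/*
 * Illustrative sketch (not part of execMain.c): how a loadable module
 * typically installs an ExecutorStart hook, chaining to any previously
 * installed hook.  The pattern follows contrib modules such as auto_explain;
 * the "example_" names are hypothetical.
 */
static ExecutorStart_hook_type prev_ExecutorStart = NULL;

static void
example_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
    elog(DEBUG1, "starting query: %s", queryDesc->sourceText);

    if (prev_ExecutorStart)
        prev_ExecutorStart(queryDesc, eflags);
    else
        standard_ExecutorStart(queryDesc, eflags);
}

/*
 * In the module's _PG_init():
 *     prev_ExecutorStart = ExecutorStart_hook;
 *     ExecutorStart_hook = example_ExecutorStart;
 */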
153 
154 void
155 standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
156 {
157  EState *estate;
158  MemoryContext oldcontext;
159 
160  /* sanity checks: queryDesc must not be started already */
161  Assert(queryDesc != NULL);
162  Assert(queryDesc->estate == NULL);
163 
164  /*
165  * If the transaction is read-only, we need to check if any writes are
166  * planned to non-temporary tables. EXPLAIN is considered read-only.
167  *
168  * Don't allow writes in parallel mode. Supporting UPDATE and DELETE
169  * would require (a) storing the combocid hash in shared memory, rather
170  * than synchronizing it just once at the start of parallelism, and (b) an
171  * alternative to heap_update()'s reliance on xmax for mutual exclusion.
172  * INSERT may have no such troubles, but we forbid it to simplify the
173  * checks.
174  *
175  * We have lower-level defenses in CommandCounterIncrement and elsewhere
176  * against performing unsafe operations in parallel mode, but this gives a
177  * more user-friendly error message.
178  */
179  if ((XactReadOnly || IsInParallelMode()) &&
180  !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
181  ExecCheckXactReadOnly(queryDesc->plannedstmt);
182 
183  /*
184  * Build EState, switch into per-query memory context for startup.
185  */
186  estate = CreateExecutorState();
187  queryDesc->estate = estate;
188 
189  oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
190 
191  /*
192  * Fill in external parameters, if any, from queryDesc; and allocate
193  * workspace for internal parameters
194  */
195  estate->es_param_list_info = queryDesc->params;
196 
197  if (queryDesc->plannedstmt->nParamExec > 0)
198  estate->es_param_exec_vals = (ParamExecData *)
199  palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData));
200 
201  estate->es_sourceText = queryDesc->sourceText;
202 
203  /*
204  * Fill in the query environment, if any, from queryDesc.
205  */
206  estate->es_queryEnv = queryDesc->queryEnv;
207 
208  /*
209  * If non-read-only query, set the command ID to mark output tuples with
210  */
211  switch (queryDesc->operation)
212  {
213  case CMD_SELECT:
214 
215  /*
216  * SELECT FOR [KEY] UPDATE/SHARE and modifying CTEs need to mark
217  * tuples
218  */
219  if (queryDesc->plannedstmt->rowMarks != NIL ||
220  queryDesc->plannedstmt->hasModifyingCTE)
221  estate->es_output_cid = GetCurrentCommandId(true);
222 
223  /*
224  * A SELECT without modifying CTEs can't possibly queue triggers,
225  * so force skip-triggers mode. This is just a marginal efficiency
226  * hack, since AfterTriggerBeginQuery/AfterTriggerEndQuery aren't
227  * all that expensive, but we might as well do it.
228  */
229  if (!queryDesc->plannedstmt->hasModifyingCTE)
230  eflags |= EXEC_FLAG_SKIP_TRIGGERS;
231  break;
232 
233  case CMD_INSERT:
234  case CMD_DELETE:
235  case CMD_UPDATE:
236  estate->es_output_cid = GetCurrentCommandId(true);
237  break;
238 
239  default:
240  elog(ERROR, "unrecognized operation code: %d",
241  (int) queryDesc->operation);
242  break;
243  }
244 
245  /*
246  * Copy other important information into the EState
247  */
248  estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
249  estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
250  estate->es_top_eflags = eflags;
251  estate->es_instrument = queryDesc->instrument_options;
252 
253  /*
254  * Initialize the plan state tree
255  */
256  InitPlan(queryDesc, eflags);
257 
258  /*
259  * Set up an AFTER-trigger statement context, unless told not to, or
260  * unless it's EXPLAIN-only mode (when ExecutorFinish won't be called).
261  */
262  if (!(eflags & (EXEC_FLAG_SKIP_TRIGGERS | EXEC_FLAG_EXPLAIN_ONLY)))
263  AfterTriggerBeginQuery();
264 
265  MemoryContextSwitchTo(oldcontext);
266 }
267 
268 /* ----------------------------------------------------------------
269  * ExecutorRun
270  *
271  * This is the main routine of the executor module. It accepts
272  * the query descriptor from the traffic cop and executes the
273  * query plan.
274  *
275  * ExecutorStart must have been called already.
276  *
277  * If direction is NoMovementScanDirection then nothing is done
278  * except to start up/shut down the destination. Otherwise,
279  * we retrieve up to 'count' tuples in the specified direction.
280  *
281  * Note: count = 0 is interpreted as no portal limit, i.e., run to
282  * completion. Also note that the count limit is only applied to
283  * retrieved tuples, not for instance to those inserted/updated/deleted
284  * by a ModifyTable plan node.
285  *
286  * There is no return value, but output tuples (if any) are sent to
287  * the destination receiver specified in the QueryDesc; and the number
288  * of tuples processed at the top level can be found in
289  * estate->es_processed.
290  *
291  * We provide a function hook variable that lets loadable plugins
292  * get control when ExecutorRun is called. Such a plugin would
293  * normally call standard_ExecutorRun().
294  *
295  * ----------------------------------------------------------------
296  */
297 void
298 ExecutorRun(QueryDesc *queryDesc,
299  ScanDirection direction, uint64 count,
300  bool execute_once)
301 {
302  if (ExecutorRun_hook)
303  (*ExecutorRun_hook) (queryDesc, direction, count, execute_once);
304  else
305  standard_ExecutorRun(queryDesc, direction, count, execute_once);
306 }
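/*
 * Illustrative sketch (not part of execMain.c): retrieving tuples in batches,
 * as a cursor-style caller might.  Because the plan is run more than once,
 * execute_once must be false.  The batch size and function name are
 * hypothetical.
 */
static void
example_fetch_in_batches(QueryDesc *queryDesc)
{
    for (;;)
    {
        ExecutorRun(queryDesc, ForwardScanDirection, 10L, false);

        /* es_processed is reset per call; fewer than 10 means the plan is done */
        if (queryDesc->estate->es_processed < 10)
            break;
    }
}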
307 
308 void
309 standard_ExecutorRun(QueryDesc *queryDesc,
310  ScanDirection direction, uint64 count, bool execute_once)
311 {
312  EState *estate;
313  CmdType operation;
314  DestReceiver *dest;
315  bool sendTuples;
316  MemoryContext oldcontext;
317 
318  /* sanity checks */
319  Assert(queryDesc != NULL);
320 
321  estate = queryDesc->estate;
322 
323  Assert(estate != NULL);
324  Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
325 
326  /*
327  * Switch into per-query memory context
328  */
329  oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
330 
331  /* Allow instrumentation of Executor overall runtime */
332  if (queryDesc->totaltime)
333  InstrStartNode(queryDesc->totaltime);
334 
335  /*
336  * extract information from the query descriptor.
337  */
338  operation = queryDesc->operation;
339  dest = queryDesc->dest;
340 
341  /*
342  * startup tuple receiver, if we will be emitting tuples
343  */
344  estate->es_processed = 0;
345  estate->es_lastoid = InvalidOid;
346 
347  sendTuples = (operation == CMD_SELECT ||
348  queryDesc->plannedstmt->hasReturning);
349 
350  if (sendTuples)
351  (*dest->rStartup) (dest, operation, queryDesc->tupDesc);
352 
353  /*
354  * run plan
355  */
356  if (!ScanDirectionIsNoMovement(direction))
357  {
358  if (execute_once && queryDesc->already_executed)
359  elog(ERROR, "can't re-execute query flagged for single execution");
360  queryDesc->already_executed = true;
361 
362  ExecutePlan(estate,
363  queryDesc->planstate,
364  queryDesc->plannedstmt->parallelModeNeeded,
365  operation,
366  sendTuples,
367  count,
368  direction,
369  dest,
370  execute_once);
371  }
372 
373  /*
374  * shutdown tuple receiver, if we started it
375  */
376  if (sendTuples)
377  (*dest->rShutdown) (dest);
378 
379  if (queryDesc->totaltime)
380  InstrStopNode(queryDesc->totaltime, estate->es_processed);
381 
382  MemoryContextSwitchTo(oldcontext);
383 }
384 
385 /* ----------------------------------------------------------------
386  * ExecutorFinish
387  *
388  * This routine must be called after the last ExecutorRun call.
389  * It performs cleanup such as firing AFTER triggers. It is
390  * separate from ExecutorEnd because EXPLAIN ANALYZE needs to
391  * include these actions in the total runtime.
392  *
393  * We provide a function hook variable that lets loadable plugins
394  * get control when ExecutorFinish is called. Such a plugin would
395  * normally call standard_ExecutorFinish().
396  *
397  * ----------------------------------------------------------------
398  */
399 void
400 ExecutorFinish(QueryDesc *queryDesc)
401 {
402  if (ExecutorFinish_hook)
403  (*ExecutorFinish_hook) (queryDesc);
404  else
405  standard_ExecutorFinish(queryDesc);
406 }
407 
408 void
409 standard_ExecutorFinish(QueryDesc *queryDesc)
410 {
411  EState *estate;
412  MemoryContext oldcontext;
413 
414  /* sanity checks */
415  Assert(queryDesc != NULL);
416 
417  estate = queryDesc->estate;
418 
419  Assert(estate != NULL);
420  Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
421 
422  /* This should be run once and only once per Executor instance */
423  Assert(!estate->es_finished);
424 
425  /* Switch into per-query memory context */
426  oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
427 
428  /* Allow instrumentation of Executor overall runtime */
429  if (queryDesc->totaltime)
430  InstrStartNode(queryDesc->totaltime);
431 
432  /* Run ModifyTable nodes to completion */
433  ExecPostprocessPlan(estate);
434 
435  /* Execute queued AFTER triggers, unless told not to */
436  if (!(estate->es_top_eflags & EXEC_FLAG_SKIP_TRIGGERS))
437  AfterTriggerEndQuery(estate);
438 
439  if (queryDesc->totaltime)
440  InstrStopNode(queryDesc->totaltime, 0);
441 
442  MemoryContextSwitchTo(oldcontext);
443 
444  estate->es_finished = true;
445 }
446 
447 /* ----------------------------------------------------------------
448  * ExecutorEnd
449  *
450  * This routine must be called at the end of execution of any
451  * query plan
452  *
453  * We provide a function hook variable that lets loadable plugins
454  * get control when ExecutorEnd is called. Such a plugin would
455  * normally call standard_ExecutorEnd().
456  *
457  * ----------------------------------------------------------------
458  */
459 void
460 ExecutorEnd(QueryDesc *queryDesc)
461 {
462  if (ExecutorEnd_hook)
463  (*ExecutorEnd_hook) (queryDesc);
464  else
465  standard_ExecutorEnd(queryDesc);
466 }
467 
468 void
469 standard_ExecutorEnd(QueryDesc *queryDesc)
470 {
471  EState *estate;
472  MemoryContext oldcontext;
473 
474  /* sanity checks */
475  Assert(queryDesc != NULL);
476 
477  estate = queryDesc->estate;
478 
479  Assert(estate != NULL);
480 
481  /*
482  * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This
483  * Assert is needed because ExecutorFinish is new as of 9.1, and callers
484  * might forget to call it.
485  */
486  Assert(estate->es_finished ||
487  (estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
488 
489  /*
490  * Switch into per-query memory context to run ExecEndPlan
491  */
492  oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
493 
494  ExecEndPlan(queryDesc->planstate, estate);
495 
496  /* do away with our snapshots */
497  UnregisterSnapshot(estate->es_snapshot);
498  UnregisterSnapshot(estate->es_crosscheck_snapshot);
499 
500  /*
501  * Must switch out of context before destroying it
502  */
503  MemoryContextSwitchTo(oldcontext);
504 
505  /*
506  * Release EState and per-query memory context. This should release
507  * everything the executor has allocated.
508  */
509  FreeExecutorState(estate);
510 
511  /* Reset queryDesc fields that no longer point to anything */
512  queryDesc->tupDesc = NULL;
513  queryDesc->estate = NULL;
514  queryDesc->planstate = NULL;
515  queryDesc->totaltime = NULL;
516 }
517 
518 /* ----------------------------------------------------------------
519  * ExecutorRewind
520  *
521  * This routine may be called on an open queryDesc to rewind it
522  * to the start.
523  * ----------------------------------------------------------------
524  */
525 void
526 ExecutorRewind(QueryDesc *queryDesc)
527 {
528  EState *estate;
529  MemoryContext oldcontext;
530 
531  /* sanity checks */
532  Assert(queryDesc != NULL);
533 
534  estate = queryDesc->estate;
535 
536  Assert(estate != NULL);
537 
538  /* It's probably not sensible to rescan updating queries */
539  Assert(queryDesc->operation == CMD_SELECT);
540 
541  /*
542  * Switch into per-query memory context
543  */
544  oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
545 
546  /*
547  * rescan plan
548  */
549  ExecReScan(queryDesc->planstate);
550 
551  MemoryContextSwitchTo(oldcontext);
552 }
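/*
 * Illustrative sketch (not part of execMain.c): rewinding an already-started
 * SELECT and running it again from the top.  This is only legal if the
 * earlier ExecutorRun calls were made with execute_once = false; the function
 * name is hypothetical.
 */
static void
example_rerun_from_start(QueryDesc *queryDesc)
{
    ExecutorRewind(queryDesc);
    ExecutorRun(queryDesc, ForwardScanDirection, 0L, false);
}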
553 
554 
555 /*
556  * ExecCheckRTPerms
557  * Check access permissions for all relations listed in a range table.
558  *
559  * Returns true if permissions are adequate. Otherwise, throws an appropriate
560  * error if ereport_on_violation is true, or simply returns false otherwise.
561  *
562  * Note that this does NOT address row level security policies (aka: RLS). If
563  * rows will be returned to the user as a result of this permission check
564  * passing, then RLS also needs to be consulted (and check_enable_rls()).
565  *
566  * See rewrite/rowsecurity.c.
567  */
568 bool
569 ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation)
570 {
571  ListCell *l;
572  bool result = true;
573 
574  foreach(l, rangeTable)
575  {
576  RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
577 
578  result = ExecCheckRTEPerms(rte);
579  if (!result)
580  {
581  Assert(rte->rtekind == RTE_RELATION);
582  if (ereport_on_violation)
583  aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
584  get_rel_name(rte->relid));
585  return false;
586  }
587  }
588 
589  if (ExecutorCheckPerms_hook)
590  result = (*ExecutorCheckPerms_hook) (rangeTable,
591  ereport_on_violation);
592  return result;
593 }
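/*
 * Illustrative sketch (not part of execMain.c): a module (for example a
 * mandatory-access-control provider such as sepgsql) can install
 * ExecutorCheckPerms_hook to apply extra checks after the standard ACL checks
 * pass.  The "example_" names are hypothetical.
 */
static bool
example_ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation)
{
    ListCell   *l;

    foreach(l, rangeTable)
    {
        RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);

        if (rte->rtekind != RTE_RELATION || rte->requiredPerms == 0)
            continue;

        /* apply module-specific policy to rte->relid here ... */
    }
    return true;                /* true means permissions are adequate */
}

/* installed from _PG_init(): ExecutorCheckPerms_hook = example_ExecCheckRTPerms; */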
594 
595 /*
596  * ExecCheckRTEPerms
597  * Check access permissions for a single RTE.
598  */
599 static bool
600 ExecCheckRTEPerms(RangeTblEntry *rte)
601 {
602  AclMode requiredPerms;
603  AclMode relPerms;
604  AclMode remainingPerms;
605  Oid relOid;
606  Oid userid;
607 
608  /*
609  * Only plain-relation RTEs need to be checked here. Function RTEs are
610  * checked when the function is prepared for execution. Join, subquery,
611  * and special RTEs need no checks.
612  */
613  if (rte->rtekind != RTE_RELATION)
614  return true;
615 
616  /*
617  * No work if requiredPerms is empty.
618  */
619  requiredPerms = rte->requiredPerms;
620  if (requiredPerms == 0)
621  return true;
622 
623  relOid = rte->relid;
624 
625  /*
626  * userid to check as: current user unless we have a setuid indication.
627  *
628  * Note: GetUserId() is presently fast enough that there's no harm in
629  * calling it separately for each RTE. If that stops being true, we could
630  * call it once in ExecCheckRTPerms and pass the userid down from there.
631  * But for now, no need for the extra clutter.
632  */
633  userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
634 
635  /*
636  * We must have *all* the requiredPerms bits, but some of the bits can be
637  * satisfied from column-level rather than relation-level permissions.
638  * First, remove any bits that are satisfied by relation permissions.
639  */
640  relPerms = pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL);
641  remainingPerms = requiredPerms & ~relPerms;
642  if (remainingPerms != 0)
643  {
644  int col = -1;
645 
646  /*
647  * If we lack any permissions that exist only as relation permissions,
648  * we can fail straight away.
649  */
650  if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))
651  return false;
652 
653  /*
654  * Check to see if we have the needed privileges at column level.
655  *
656  * Note: failures just report a table-level error; it would be nicer
657  * to report a column-level error if we have some but not all of the
658  * column privileges.
659  */
660  if (remainingPerms & ACL_SELECT)
661  {
662  /*
663  * When the query doesn't explicitly reference any columns (for
664  * example, SELECT COUNT(*) FROM table), allow the query if we
665  * have SELECT on any column of the rel, as per SQL spec.
666  */
667  if (bms_is_empty(rte->selectedCols))
668  {
669  if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
670  ACLMASK_ANY) != ACLCHECK_OK)
671  return false;
672  }
673 
674  while ((col = bms_next_member(rte->selectedCols, col)) >= 0)
675  {
676  /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
677  AttrNumber attno = col + FirstLowInvalidHeapAttributeNumber;
678 
679  if (attno == InvalidAttrNumber)
680  {
681  /* Whole-row reference, must have priv on all cols */
682  if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
683  ACLMASK_ALL) != ACLCHECK_OK)
684  return false;
685  }
686  else
687  {
688  if (pg_attribute_aclcheck(relOid, attno, userid,
689  ACL_SELECT) != ACLCHECK_OK)
690  return false;
691  }
692  }
693  }
694 
695  /*
696  * Basically the same for the mod columns, for both INSERT and UPDATE
697  * privilege as specified by remainingPerms.
698  */
699  if (remainingPerms & ACL_INSERT && !ExecCheckRTEPermsModified(relOid,
700  userid,
701  rte->insertedCols,
702  ACL_INSERT))
703  return false;
704 
705  if (remainingPerms & ACL_UPDATE && !ExecCheckRTEPermsModified(relOid,
706  userid,
707  rte->updatedCols,
708  ACL_UPDATE))
709  return false;
710  }
711  return true;
712 }
713 
714 /*
715  * ExecCheckRTEPermsModified
716  * Check INSERT or UPDATE access permissions for a single RTE (these
717  * are processed uniformly).
718  */
719 static bool
720 ExecCheckRTEPermsModified(Oid relOid, Oid userid, Bitmapset *modifiedCols,
721  AclMode requiredPerms)
722 {
723  int col = -1;
724 
725  /*
726  * When the query doesn't explicitly update any columns, allow the query
727  * if we have permission on any column of the rel. This is to handle
728  * SELECT FOR UPDATE as well as possible corner cases in UPDATE.
729  */
730  if (bms_is_empty(modifiedCols))
731  {
732  if (pg_attribute_aclcheck_all(relOid, userid, requiredPerms,
733  ACLMASK_ANY) != ACLCHECK_OK)
734  return false;
735  }
736 
737  while ((col = bms_next_member(modifiedCols, col)) >= 0)
738  {
739  /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
740  AttrNumber attno = col + FirstLowInvalidHeapAttributeNumber;
741 
742  if (attno == InvalidAttrNumber)
743  {
744  /* whole-row reference can't happen here */
745  elog(ERROR, "whole-row update is not implemented");
746  }
747  else
748  {
749  if (pg_attribute_aclcheck(relOid, attno, userid,
750  requiredPerms) != ACLCHECK_OK)
751  return false;
752  }
753  }
754  return true;
755 }
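/*
 * Illustrative sketch (not part of execMain.c): the column bitmapsets checked
 * above store attribute numbers offset by FirstLowInvalidHeapAttributeNumber,
 * so that system columns and whole-row references (InvalidAttrNumber) can be
 * represented.  Converting in both directions looks like this; the function
 * name is hypothetical.
 */
static void
example_column_bitmap_offsets(void)
{
    AttrNumber  attno = 3;      /* some user column */
    Bitmapset  *cols = NULL;
    int         col;

    /* add the column to a set */
    cols = bms_add_member(cols, attno - FirstLowInvalidHeapAttributeNumber);

    /* iterate over the set, converting back to attribute numbers */
    col = -1;
    while ((col = bms_next_member(cols, col)) >= 0)
        attno = col + FirstLowInvalidHeapAttributeNumber;
}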
756 
757 /*
758  * Check that the query does not imply any writes to non-temp tables;
759  * unless we're in parallel mode, in which case don't even allow writes
760  * to temp tables.
761  *
762  * Note: in a Hot Standby slave this would need to reject writes to temp
763  * tables just as we do in parallel mode; but an HS slave can't have created
764  * any temp tables in the first place, so no need to check that.
765  */
766 static void
767 ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
768 {
769  ListCell *l;
770 
771  /*
772  * Fail if write permissions are requested in parallel mode for table
773  * (temp or non-temp), otherwise fail for any non-temp table.
774  */
775  foreach(l, plannedstmt->rtable)
776  {
777  RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
778 
779  if (rte->rtekind != RTE_RELATION)
780  continue;
781 
782  if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
783  continue;
784 
785  if (isTempNamespace(get_rel_namespace(rte->relid)))
786  continue;
787 
788  PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));
789  }
790 
791  if (plannedstmt->commandType != CMD_SELECT || plannedstmt->hasModifyingCTE)
792  PreventCommandIfParallelMode(CreateCommandTag((Node *) plannedstmt));
793 }
794 
795 
796 /* ----------------------------------------------------------------
797  * InitPlan
798  *
799  * Initializes the query plan: open files, allocate storage
800  * and start up the rule manager
801  * ----------------------------------------------------------------
802  */
803 static void
804 InitPlan(QueryDesc *queryDesc, int eflags)
805 {
806  CmdType operation = queryDesc->operation;
807  PlannedStmt *plannedstmt = queryDesc->plannedstmt;
808  Plan *plan = plannedstmt->planTree;
809  List *rangeTable = plannedstmt->rtable;
810  EState *estate = queryDesc->estate;
811  PlanState *planstate;
812  TupleDesc tupType;
813  ListCell *l;
814  int i;
815 
816  /*
817  * Do permissions checks
818  */
819  ExecCheckRTPerms(rangeTable, true);
820 
821  /*
822  * initialize the node's execution state
823  */
824  estate->es_range_table = rangeTable;
825  estate->es_plannedstmt = plannedstmt;
826 
827  /*
828  * initialize result relation stuff, and open/lock the result rels.
829  *
830  * We must do this before initializing the plan tree, else we might try to
831  * do a lock upgrade if a result rel is also a source rel.
832  */
833  if (plannedstmt->resultRelations)
834  {
835  List *resultRelations = plannedstmt->resultRelations;
836  int numResultRelations = list_length(resultRelations);
837  ResultRelInfo *resultRelInfos;
838  ResultRelInfo *resultRelInfo;
839 
840  resultRelInfos = (ResultRelInfo *)
841  palloc(numResultRelations * sizeof(ResultRelInfo));
842  resultRelInfo = resultRelInfos;
843  foreach(l, resultRelations)
844  {
845  Index resultRelationIndex = lfirst_int(l);
846  Oid resultRelationOid;
847  Relation resultRelation;
848 
849  resultRelationOid = getrelid(resultRelationIndex, rangeTable);
850  resultRelation = heap_open(resultRelationOid, RowExclusiveLock);
851 
852  InitResultRelInfo(resultRelInfo,
853  resultRelation,
854  resultRelationIndex,
855  NULL,
856  estate->es_instrument);
857  resultRelInfo++;
858  }
859  estate->es_result_relations = resultRelInfos;
860  estate->es_num_result_relations = numResultRelations;
861  /* es_result_relation_info is NULL except when within ModifyTable */
862  estate->es_result_relation_info = NULL;
863 
864  /*
865  * In the partitioned result relation case, lock the non-leaf result
866  * relations too. A subset of these are the roots of respective
867  * partitioned tables, for which we also allocate ResultRelInfos.
868  */
869  estate->es_root_result_relations = NULL;
870  estate->es_num_root_result_relations = 0;
871  if (plannedstmt->nonleafResultRelations)
872  {
873  int num_roots = list_length(plannedstmt->rootResultRelations);
874 
875  /*
876  * Firstly, build ResultRelInfos for all the partitioned table
877  * roots, because we will need them to fire the statement-level
878  * triggers, if any.
879  */
880  resultRelInfos = (ResultRelInfo *)
881  palloc(num_roots * sizeof(ResultRelInfo));
882  resultRelInfo = resultRelInfos;
883  foreach(l, plannedstmt->rootResultRelations)
884  {
885  Index resultRelIndex = lfirst_int(l);
886  Oid resultRelOid;
887  Relation resultRelDesc;
888 
889  resultRelOid = getrelid(resultRelIndex, rangeTable);
890  resultRelDesc = heap_open(resultRelOid, RowExclusiveLock);
891  InitResultRelInfo(resultRelInfo,
892  resultRelDesc,
893  lfirst_int(l),
894  NULL,
895  estate->es_instrument);
896  resultRelInfo++;
897  }
898 
899  estate->es_root_result_relations = resultRelInfos;
900  estate->es_num_root_result_relations = num_roots;
901 
902  /* Simply lock the rest of them. */
903  foreach(l, plannedstmt->nonleafResultRelations)
904  {
905  Index resultRelIndex = lfirst_int(l);
906 
907  /* We locked the roots above. */
908  if (!list_member_int(plannedstmt->rootResultRelations,
909  resultRelIndex))
910  LockRelationOid(getrelid(resultRelIndex, rangeTable),
911  RowExclusiveLock);
912  }
913  }
914  }
915  else
916  {
917  /*
918  * if no result relation, then set state appropriately
919  */
920  estate->es_result_relations = NULL;
921  estate->es_num_result_relations = 0;
922  estate->es_result_relation_info = NULL;
923  estate->es_root_result_relations = NULL;
924  estate->es_num_root_result_relations = 0;
925  }
926 
927  /*
928  * Similarly, we have to lock relations selected FOR [KEY] UPDATE/SHARE
929  * before we initialize the plan tree, else we'd be risking lock upgrades.
930  * While we are at it, build the ExecRowMark list. Any partitioned child
931  * tables are ignored here (because isParent=true) and will be locked by
932  * the first Append or MergeAppend node that references them. (Note that
933  * the RowMarks corresponding to partitioned child tables are present in
934  * the same list as the rest, i.e., plannedstmt->rowMarks.)
935  */
936  estate->es_rowMarks = NIL;
937  foreach(l, plannedstmt->rowMarks)
938  {
939  PlanRowMark *rc = (PlanRowMark *) lfirst(l);
940  Oid relid;
941  Relation relation;
942  ExecRowMark *erm;
943 
944  /* ignore "parent" rowmarks; they are irrelevant at runtime */
945  if (rc->isParent)
946  continue;
947 
948  /* get relation's OID (will produce InvalidOid if subquery) */
949  relid = getrelid(rc->rti, rangeTable);
950 
951  /*
952  * If you change the conditions under which rel locks are acquired
953  * here, be sure to adjust ExecOpenScanRelation to match.
954  */
955  switch (rc->markType)
956  {
957  case ROW_MARK_EXCLUSIVE:
958  case ROW_MARK_NOKEYEXCLUSIVE:
959  case ROW_MARK_SHARE:
960  case ROW_MARK_KEYSHARE:
961  relation = heap_open(relid, RowShareLock);
962  break;
963  case ROW_MARK_REFERENCE:
964  relation = heap_open(relid, AccessShareLock);
965  break;
966  case ROW_MARK_COPY:
967  /* no physical table access is required */
968  relation = NULL;
969  break;
970  default:
971  elog(ERROR, "unrecognized markType: %d", rc->markType);
972  relation = NULL; /* keep compiler quiet */
973  break;
974  }
975 
976  /* Check that relation is a legal target for marking */
977  if (relation)
978  CheckValidRowMarkRel(relation, rc->markType);
979 
980  erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
981  erm->relation = relation;
982  erm->relid = relid;
983  erm->rti = rc->rti;
984  erm->prti = rc->prti;
985  erm->rowmarkId = rc->rowmarkId;
986  erm->markType = rc->markType;
987  erm->strength = rc->strength;
988  erm->waitPolicy = rc->waitPolicy;
989  erm->ermActive = false;
990  ItemPointerSetInvalid(&(erm->curCtid));
991  erm->ermExtra = NULL;
992  estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
993  }
994 
995  /*
996  * Initialize the executor's tuple table to empty.
997  */
998  estate->es_tupleTable = NIL;
999  estate->es_trig_tuple_slot = NULL;
1000  estate->es_trig_oldtup_slot = NULL;
1001  estate->es_trig_newtup_slot = NULL;
1002 
1003  /* mark EvalPlanQual not active */
1004  estate->es_epqTuple = NULL;
1005  estate->es_epqTupleSet = NULL;
1006  estate->es_epqScanDone = NULL;
1007 
1008  /*
1009  * Initialize private state information for each SubPlan. We must do this
1010  * before running ExecInitNode on the main query tree, since
1011  * ExecInitSubPlan expects to be able to find these entries.
1012  */
1013  Assert(estate->es_subplanstates == NIL);
1014  i = 1; /* subplan indices count from 1 */
1015  foreach(l, plannedstmt->subplans)
1016  {
1017  Plan *subplan = (Plan *) lfirst(l);
1018  PlanState *subplanstate;
1019  int sp_eflags;
1020 
1021  /*
1022  * A subplan will never need to do BACKWARD scan nor MARK/RESTORE. If
1023  * it is a parameterless subplan (not initplan), we suggest that it be
1024  * prepared to handle REWIND efficiently; otherwise there is no need.
1025  */
1026  sp_eflags = eflags
1027  & (EXEC_FLAG_EXPLAIN_ONLY | EXEC_FLAG_WITH_NO_DATA);
1028  if (bms_is_member(i, plannedstmt->rewindPlanIDs))
1029  sp_eflags |= EXEC_FLAG_REWIND;
1030 
1031  subplanstate = ExecInitNode(subplan, estate, sp_eflags);
1032 
1033  estate->es_subplanstates = lappend(estate->es_subplanstates,
1034  subplanstate);
1035 
1036  i++;
1037  }
1038 
1039  /*
1040  * Initialize the private state information for all the nodes in the query
1041  * tree. This opens files, allocates storage and leaves us ready to start
1042  * processing tuples.
1043  */
1044  planstate = ExecInitNode(plan, estate, eflags);
1045 
1046  /*
1047  * Get the tuple descriptor describing the type of tuples to return.
1048  */
1049  tupType = ExecGetResultType(planstate);
1050 
1051  /*
1052  * Initialize the junk filter if needed. SELECT queries need a filter if
1053  * there are any junk attrs in the top-level tlist.
1054  */
1055  if (operation == CMD_SELECT)
1056  {
1057  bool junk_filter_needed = false;
1058  ListCell *tlist;
1059 
1060  foreach(tlist, plan->targetlist)
1061  {
1062  TargetEntry *tle = (TargetEntry *) lfirst(tlist);
1063 
1064  if (tle->resjunk)
1065  {
1066  junk_filter_needed = true;
1067  break;
1068  }
1069  }
1070 
1071  if (junk_filter_needed)
1072  {
1073  JunkFilter *j;
1074 
1075  j = ExecInitJunkFilter(planstate->plan->targetlist,
1076  tupType->tdhasoid,
1077  ExecInitExtraTupleSlot(estate));
1078  estate->es_junkFilter = j;
1079 
1080  /* Want to return the cleaned tuple type */
1081  tupType = j->jf_cleanTupType;
1082  }
1083  }
1084 
1085  queryDesc->tupDesc = tupType;
1086  queryDesc->planstate = planstate;
1087 }
1088 
1089 /*
1090  * Check that a proposed result relation is a legal target for the operation
1091  *
1092  * Generally the parser and/or planner should have noticed any such mistake
1093  * already, but let's make sure.
1094  *
1095  * Note: when changing this function, you probably also need to look at
1096  * CheckValidRowMarkRel.
1097  */
1098 void
1099 CheckValidResultRel(Relation resultRel, CmdType operation)
1100 {
1101  TriggerDesc *trigDesc = resultRel->trigdesc;
1102  FdwRoutine *fdwroutine;
1103 
1104  switch (resultRel->rd_rel->relkind)
1105  {
1106  case RELKIND_RELATION:
1107  case RELKIND_PARTITIONED_TABLE:
1108  CheckCmdReplicaIdentity(resultRel, operation);
1109  break;
1110  case RELKIND_SEQUENCE:
1111  ereport(ERROR,
1112  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1113  errmsg("cannot change sequence \"%s\"",
1114  RelationGetRelationName(resultRel))));
1115  break;
1116  case RELKIND_TOASTVALUE:
1117  ereport(ERROR,
1118  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1119  errmsg("cannot change TOAST relation \"%s\"",
1120  RelationGetRelationName(resultRel))));
1121  break;
1122  case RELKIND_VIEW:
1123 
1124  /*
1125  * Okay only if there's a suitable INSTEAD OF trigger. Messages
1126  * here should match rewriteHandler.c's rewriteTargetView, except
1127  * that we omit errdetail because we haven't got the information
1128  * handy (and given that we really shouldn't get here anyway, it's
1129  * not worth great exertion to get).
1130  */
1131  switch (operation)
1132  {
1133  case CMD_INSERT:
1134  if (!trigDesc || !trigDesc->trig_insert_instead_row)
1135  ereport(ERROR,
1136  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1137  errmsg("cannot insert into view \"%s\"",
1138  RelationGetRelationName(resultRel)),
1139  errhint("To enable inserting into the view, provide an INSTEAD OF INSERT trigger or an unconditional ON INSERT DO INSTEAD rule.")));
1140  break;
1141  case CMD_UPDATE:
1142  if (!trigDesc || !trigDesc->trig_update_instead_row)
1143  ereport(ERROR,
1144  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1145  errmsg("cannot update view \"%s\"",
1146  RelationGetRelationName(resultRel)),
1147  errhint("To enable updating the view, provide an INSTEAD OF UPDATE trigger or an unconditional ON UPDATE DO INSTEAD rule.")));
1148  break;
1149  case CMD_DELETE:
1150  if (!trigDesc || !trigDesc->trig_delete_instead_row)
1151  ereport(ERROR,
1152  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1153  errmsg("cannot delete from view \"%s\"",
1154  RelationGetRelationName(resultRel)),
1155  errhint("To enable deleting from the view, provide an INSTEAD OF DELETE trigger or an unconditional ON DELETE DO INSTEAD rule.")));
1156  break;
1157  default:
1158  elog(ERROR, "unrecognized CmdType: %d", (int) operation);
1159  break;
1160  }
1161  break;
1162  case RELKIND_MATVIEW:
1163  if (!MatViewIncrementalMaintenanceIsEnabled())
1164  ereport(ERROR,
1165  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1166  errmsg("cannot change materialized view \"%s\"",
1167  RelationGetRelationName(resultRel))));
1168  break;
1169  case RELKIND_FOREIGN_TABLE:
1170  /* Okay only if the FDW supports it */
1171  fdwroutine = GetFdwRoutineForRelation(resultRel, false);
1172  switch (operation)
1173  {
1174  case CMD_INSERT:
1175  if (fdwroutine->ExecForeignInsert == NULL)
1176  ereport(ERROR,
1177  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1178  errmsg("cannot insert into foreign table \"%s\"",
1179  RelationGetRelationName(resultRel))));
1180  if (fdwroutine->IsForeignRelUpdatable != NULL &&
1181  (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_INSERT)) == 0)
1182  ereport(ERROR,
1183  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1184  errmsg("foreign table \"%s\" does not allow inserts",
1185  RelationGetRelationName(resultRel))));
1186  break;
1187  case CMD_UPDATE:
1188  if (fdwroutine->ExecForeignUpdate == NULL)
1189  ereport(ERROR,
1190  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1191  errmsg("cannot update foreign table \"%s\"",
1192  RelationGetRelationName(resultRel))));
1193  if (fdwroutine->IsForeignRelUpdatable != NULL &&
1194  (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_UPDATE)) == 0)
1195  ereport(ERROR,
1196  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1197  errmsg("foreign table \"%s\" does not allow updates",
1198  RelationGetRelationName(resultRel))));
1199  break;
1200  case CMD_DELETE:
1201  if (fdwroutine->ExecForeignDelete == NULL)
1202  ereport(ERROR,
1203  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1204  errmsg("cannot delete from foreign table \"%s\"",
1205  RelationGetRelationName(resultRel))));
1206  if (fdwroutine->IsForeignRelUpdatable != NULL &&
1207  (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_DELETE)) == 0)
1208  ereport(ERROR,
1209  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1210  errmsg("foreign table \"%s\" does not allow deletes",
1211  RelationGetRelationName(resultRel))));
1212  break;
1213  default:
1214  elog(ERROR, "unrecognized CmdType: %d", (int) operation);
1215  break;
1216  }
1217  break;
1218  default:
1219  ereport(ERROR,
1220  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1221  errmsg("cannot change relation \"%s\"",
1222  RelationGetRelationName(resultRel))));
1223  break;
1224  }
1225 }
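/*
 * Illustrative sketch (not part of execMain.c): the foreign-table checks
 * above pass when the FDW's handler fills in the corresponding FdwRoutine
 * callbacks.  A minimal, hypothetical fragment of such a handler:
 */
static int
example_is_foreign_rel_updatable(Relation rel)
{
    /* report support for INSERT and UPDATE, but not DELETE */
    return (1 << CMD_INSERT) | (1 << CMD_UPDATE);
}

static FdwRoutine *
example_fdw_routine(void)
{
    FdwRoutine *routine = makeNode(FdwRoutine);

    routine->IsForeignRelUpdatable = example_is_foreign_rel_updatable;
    /* ExecForeignInsert, ExecForeignUpdate, ExecForeignDelete would be set here too */
    return routine;
}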
1226 
1227 /*
1228  * Check that a proposed rowmark target relation is a legal target
1229  *
1230  * In most cases parser and/or planner should have noticed this already, but
1231  * they don't cover all cases.
1232  */
1233 static void
1234 CheckValidRowMarkRel(Relation rel, RowMarkType markType)
1235 {
1236  FdwRoutine *fdwroutine;
1237 
1238  switch (rel->rd_rel->relkind)
1239  {
1240  case RELKIND_RELATION:
1241  case RELKIND_PARTITIONED_TABLE:
1242  /* OK */
1243  break;
1244  case RELKIND_SEQUENCE:
1245  /* Must disallow this because we don't vacuum sequences */
1246  ereport(ERROR,
1247  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1248  errmsg("cannot lock rows in sequence \"%s\"",
1249  RelationGetRelationName(rel))));
1250  break;
1251  case RELKIND_TOASTVALUE:
1252  /* We could allow this, but there seems no good reason to */
1253  ereport(ERROR,
1254  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1255  errmsg("cannot lock rows in TOAST relation \"%s\"",
1256  RelationGetRelationName(rel))));
1257  break;
1258  case RELKIND_VIEW:
1259  /* Should not get here; planner should have expanded the view */
1260  ereport(ERROR,
1261  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1262  errmsg("cannot lock rows in view \"%s\"",
1263  RelationGetRelationName(rel))));
1264  break;
1265  case RELKIND_MATVIEW:
1266  /* Allow referencing a matview, but not actual locking clauses */
1267  if (markType != ROW_MARK_REFERENCE)
1268  ereport(ERROR,
1269  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1270  errmsg("cannot lock rows in materialized view \"%s\"",
1271  RelationGetRelationName(rel))));
1272  break;
1273  case RELKIND_FOREIGN_TABLE:
1274  /* Okay only if the FDW supports it */
1275  fdwroutine = GetFdwRoutineForRelation(rel, false);
1276  if (fdwroutine->RefetchForeignRow == NULL)
1277  ereport(ERROR,
1278  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1279  errmsg("cannot lock rows in foreign table \"%s\"",
1280  RelationGetRelationName(rel))));
1281  break;
1282  default:
1283  ereport(ERROR,
1284  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1285  errmsg("cannot lock rows in relation \"%s\"",
1286  RelationGetRelationName(rel))));
1287  break;
1288  }
1289 }
1290 
1291 /*
1292  * Initialize ResultRelInfo data for one result relation
1293  *
1294  * Caution: before Postgres 9.1, this function included the relkind checking
1295  * that's now in CheckValidResultRel, and it also did ExecOpenIndices if
1296  * appropriate. Be sure callers cover those needs.
1297  */
1298 void
1299 InitResultRelInfo(ResultRelInfo *resultRelInfo,
1300  Relation resultRelationDesc,
1301  Index resultRelationIndex,
1302  Relation partition_root,
1303  int instrument_options)
1304 {
1305  List *partition_check = NIL;
1306 
1307  MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
1308  resultRelInfo->type = T_ResultRelInfo;
1309  resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
1310  resultRelInfo->ri_RelationDesc = resultRelationDesc;
1311  resultRelInfo->ri_NumIndices = 0;
1312  resultRelInfo->ri_IndexRelationDescs = NULL;
1313  resultRelInfo->ri_IndexRelationInfo = NULL;
1314  /* make a copy so as not to depend on relcache info not changing... */
1315  resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
1316  if (resultRelInfo->ri_TrigDesc)
1317  {
1318  int n = resultRelInfo->ri_TrigDesc->numtriggers;
1319 
1320  resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
1321  palloc0(n * sizeof(FmgrInfo));
1322  resultRelInfo->ri_TrigWhenExprs = (ExprState **)
1323  palloc0(n * sizeof(ExprState *));
1324  if (instrument_options)
1325  resultRelInfo->ri_TrigInstrument = InstrAlloc(n, instrument_options);
1326  }
1327  else
1328  {
1329  resultRelInfo->ri_TrigFunctions = NULL;
1330  resultRelInfo->ri_TrigWhenExprs = NULL;
1331  resultRelInfo->ri_TrigInstrument = NULL;
1332  }
1333  if (resultRelationDesc->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
1334  resultRelInfo->ri_FdwRoutine = GetFdwRoutineForRelation(resultRelationDesc, true);
1335  else
1336  resultRelInfo->ri_FdwRoutine = NULL;
1337  resultRelInfo->ri_FdwState = NULL;
1338  resultRelInfo->ri_usesFdwDirectModify = false;
1339  resultRelInfo->ri_ConstraintExprs = NULL;
1340  resultRelInfo->ri_junkFilter = NULL;
1341  resultRelInfo->ri_projectReturning = NULL;
1342 
1343  /*
1344  * Partition constraint, which also includes the partition constraint of
1345  * all the ancestors that are partitions. Note that it will be checked
1346  * even in the case of tuple-routing where this table is the target leaf
1347  * partition, if there are any BR triggers defined on the table. Although
1348  * tuple-routing implicitly preserves the partition constraint of the
1349  * target partition for a given row, the BR triggers may change the row
1350  * such that the constraint is no longer satisfied, which we must fail for
1351  * by checking it explicitly.
1352  *
1353  * If this is a partitioned table, the partition constraint (if any) of a
1354  * given row will be checked just before performing tuple-routing.
1355  */
1356  partition_check = RelationGetPartitionQual(resultRelationDesc);
1357 
1358  resultRelInfo->ri_PartitionCheck = partition_check;
1359  resultRelInfo->ri_PartitionRoot = partition_root;
1360 }
1361 
1362 /*
1363  * ExecGetTriggerResultRel
1364  *
1365  * Get a ResultRelInfo for a trigger target relation. Most of the time,
1366  * triggers are fired on one of the result relations of the query, and so
1367  * we can just return a member of the es_result_relations array. (Note: in
1368  * self-join situations there might be multiple members with the same OID;
1369  * if so it doesn't matter which one we pick.) However, it is sometimes
1370  * necessary to fire triggers on other relations; this happens mainly when an
1371  * RI update trigger queues additional triggers on other relations, which will
1372  * be processed in the context of the outer query. For efficiency's sake,
1373  * we want to have a ResultRelInfo for those triggers too; that can avoid
1374  * repeated re-opening of the relation. (It also provides a way for EXPLAIN
1375  * ANALYZE to report the runtimes of such triggers.) So we make additional
1376  * ResultRelInfo's as needed, and save them in es_trig_target_relations.
1377  */
1378 ResultRelInfo *
1379 ExecGetTriggerResultRel(EState *estate, Oid relid)
1380 {
1381  ResultRelInfo *rInfo;
1382  int nr;
1383  ListCell *l;
1384  Relation rel;
1385  MemoryContext oldcontext;
1386 
1387  /* First, search through the query result relations */
1388  rInfo = estate->es_result_relations;
1389  nr = estate->es_num_result_relations;
1390  while (nr > 0)
1391  {
1392  if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1393  return rInfo;
1394  rInfo++;
1395  nr--;
1396  }
1397  /* Nope, but maybe we already made an extra ResultRelInfo for it */
1398  foreach(l, estate->es_trig_target_relations)
1399  {
1400  rInfo = (ResultRelInfo *) lfirst(l);
1401  if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1402  return rInfo;
1403  }
1404  /* Nope, so we need a new one */
1405 
1406  /*
1407  * Open the target relation's relcache entry. We assume that an
1408  * appropriate lock is still held by the backend from whenever the trigger
1409  * event got queued, so we need take no new lock here. Also, we need not
1410  * recheck the relkind, so no need for CheckValidResultRel.
1411  */
1412  rel = heap_open(relid, NoLock);
1413 
1414  /*
1415  * Make the new entry in the right context.
1416  */
1417  oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
1418  rInfo = makeNode(ResultRelInfo);
1419  InitResultRelInfo(rInfo,
1420  rel,
1421  0, /* dummy rangetable index */
1422  NULL,
1423  estate->es_instrument);
1424  estate->es_trig_target_relations =
1425  lappend(estate->es_trig_target_relations, rInfo);
1426  MemoryContextSwitchTo(oldcontext);
1427 
1428  /*
1429  * Currently, we don't need any index information in ResultRelInfos used
1430  * only for triggers, so no need to call ExecOpenIndices.
1431  */
1432 
1433  return rInfo;
1434 }
1435 
1436 /*
1437  * Close any relations that have been opened by ExecGetTriggerResultRel().
1438  */
1439 void
1440 ExecCleanUpTriggerState(EState *estate)
1441 {
1442  ListCell *l;
1443 
1444  foreach(l, estate->es_trig_target_relations)
1445  {
1446  ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
1447 
1448  /* Close indices and then the relation itself */
1449  ExecCloseIndices(resultRelInfo);
1450  heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1451  }
1452 }
1453 
1454 /*
1455  * ExecContextForcesOids
1456  *
1457  * This is pretty grotty: when doing INSERT, UPDATE, or CREATE TABLE AS,
1458  * we need to ensure that result tuples have space for an OID iff they are
1459  * going to be stored into a relation that has OIDs. In other contexts
1460  * we are free to choose whether to leave space for OIDs in result tuples
1461  * (we generally don't want to, but we do if a physical-tlist optimization
1462  * is possible). This routine checks the plan context and returns TRUE if the
1463  * choice is forced, FALSE if the choice is not forced. In the TRUE case,
1464  * *hasoids is set to the required value.
1465  *
1466  * One reason this is ugly is that all plan nodes in the plan tree will emit
1467  * tuples with space for an OID, though we really only need the topmost node
1468  * to do so. However, node types like Sort don't project new tuples but just
1469  * return their inputs, and in those cases the requirement propagates down
1470  * to the input node. Eventually we might make this code smart enough to
1471  * recognize how far down the requirement really goes, but for now we just
1472  * make all plan nodes do the same thing if the top level forces the choice.
1473  *
1474  * We assume that if we are generating tuples for INSERT or UPDATE,
1475  * estate->es_result_relation_info is already set up to describe the target
1476  * relation. Note that in an UPDATE that spans an inheritance tree, some of
1477  * the target relations may have OIDs and some not. We have to make the
1478  * decisions on a per-relation basis as we initialize each of the subplans of
1479  * the ModifyTable node, so ModifyTable has to set es_result_relation_info
1480  * while initializing each subplan.
1481  *
1482  * CREATE TABLE AS is even uglier, because we don't have the target relation's
1483  * descriptor available when this code runs; we have to look aside at the
1484  * flags passed to ExecutorStart().
1485  */
1486 bool
1487 ExecContextForcesOids(PlanState *planstate, bool *hasoids)
1488 {
1489  ResultRelInfo *ri = planstate->state->es_result_relation_info;
1490 
1491  if (ri != NULL)
1492  {
1493  Relation rel = ri->ri_RelationDesc;
1494 
1495  if (rel != NULL)
1496  {
1497  *hasoids = rel->rd_rel->relhasoids;
1498  return true;
1499  }
1500  }
1501 
1502  if (planstate->state->es_top_eflags & EXEC_FLAG_WITH_OIDS)
1503  {
1504  *hasoids = true;
1505  return true;
1506  }
1507  if (planstate->state->es_top_eflags & EXEC_FLAG_WITHOUT_OIDS)
1508  {
1509  *hasoids = false;
1510  return true;
1511  }
1512 
1513  return false;
1514 }
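/*
 * Illustrative sketch (not part of execMain.c): the usual call pattern when a
 * plan node decides whether its result tuples need OID space; the function
 * name is hypothetical.
 */
static bool
example_decide_hasoids(PlanState *planstate)
{
    bool        hasoids;

    if (!ExecContextForcesOids(planstate, &hasoids))
        hasoids = false;        /* choice not forced: default to no OID space */
    return hasoids;
}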
1515 
1516 /* ----------------------------------------------------------------
1517  * ExecPostprocessPlan
1518  *
1519  * Give plan nodes a final chance to execute before shutdown
1520  * ----------------------------------------------------------------
1521  */
1522 static void
1523 ExecPostprocessPlan(EState *estate)
1524 {
1525  ListCell *lc;
1526 
1527  /*
1528  * Make sure nodes run forward.
1529  */
1530  estate->es_direction = ForwardScanDirection;
1531 
1532  /*
1533  * Run any secondary ModifyTable nodes to completion, in case the main
1534  * query did not fetch all rows from them. (We do this to ensure that
1535  * such nodes have predictable results.)
1536  */
1537  foreach(lc, estate->es_auxmodifytables)
1538  {
1539  PlanState *ps = (PlanState *) lfirst(lc);
1540 
1541  for (;;)
1542  {
1543  TupleTableSlot *slot;
1544 
1545  /* Reset the per-output-tuple exprcontext each time */
1546  ResetPerTupleExprContext(estate);
1547 
1548  slot = ExecProcNode(ps);
1549 
1550  if (TupIsNull(slot))
1551  break;
1552  }
1553  }
1554 }
1555 
1556 /* ----------------------------------------------------------------
1557  * ExecEndPlan
1558  *
1559  * Cleans up the query plan -- closes files and frees up storage
1560  *
1561  * NOTE: we are no longer very worried about freeing storage per se
1562  * in this code; FreeExecutorState should be guaranteed to release all
1563  * memory that needs to be released. What we are worried about doing
1564  * is closing relations and dropping buffer pins. Thus, for example,
1565  * tuple tables must be cleared or dropped to ensure pins are released.
1566  * ----------------------------------------------------------------
1567  */
1568 static void
1569 ExecEndPlan(PlanState *planstate, EState *estate)
1570 {
1571  ResultRelInfo *resultRelInfo;
1572  int i;
1573  ListCell *l;
1574 
1575  /*
1576  * shut down the node-type-specific query processing
1577  */
1578  ExecEndNode(planstate);
1579 
1580  /*
1581  * for subplans too
1582  */
1583  foreach(l, estate->es_subplanstates)
1584  {
1585  PlanState *subplanstate = (PlanState *) lfirst(l);
1586 
1587  ExecEndNode(subplanstate);
1588  }
1589 
1590  /*
1591  * destroy the executor's tuple table. Actually we only care about
1592  * releasing buffer pins and tupdesc refcounts; there's no need to pfree
1593  * the TupleTableSlots, since the containing memory context is about to go
1594  * away anyway.
1595  */
1596  ExecResetTupleTable(estate->es_tupleTable, false);
1597 
1598  /*
1599  * close the result relation(s) if any, but hold locks until xact commit.
1600  */
1601  resultRelInfo = estate->es_result_relations;
1602  for (i = estate->es_num_result_relations; i > 0; i--)
1603  {
1604  /* Close indices and then the relation itself */
1605  ExecCloseIndices(resultRelInfo);
1606  heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1607  resultRelInfo++;
1608  }
1609 
1610  /* Close the root target relation(s). */
1611  resultRelInfo = estate->es_root_result_relations;
1612  for (i = estate->es_num_root_result_relations; i > 0; i--)
1613  {
1614  heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1615  resultRelInfo++;
1616  }
1617 
1618  /* likewise close any trigger target relations */
1619  ExecCleanUpTriggerState(estate);
1620 
1621  /*
1622  * close any relations selected FOR [KEY] UPDATE/SHARE, again keeping
1623  * locks
1624  */
1625  foreach(l, estate->es_rowMarks)
1626  {
1627  ExecRowMark *erm = (ExecRowMark *) lfirst(l);
1628 
1629  if (erm->relation)
1630  heap_close(erm->relation, NoLock);
1631  }
1632 }
1633 
1634 /* ----------------------------------------------------------------
1635  * ExecutePlan
1636  *
1637  * Processes the query plan until we have retrieved 'numberTuples' tuples,
1638  * moving in the specified direction.
1639  *
1640  * Runs to completion if numberTuples is 0
1641  *
1642  * Note: the ctid attribute is a 'junk' attribute that is removed before the
1643  * user can see it
1644  * ----------------------------------------------------------------
1645  */
1646 static void
1647 ExecutePlan(EState *estate,
1648  PlanState *planstate,
1649  bool use_parallel_mode,
1650  CmdType operation,
1651  bool sendTuples,
1652  uint64 numberTuples,
1653  ScanDirection direction,
1654  DestReceiver *dest,
1655  bool execute_once)
1656 {
1657  TupleTableSlot *slot;
1658  uint64 current_tuple_count;
1659 
1660  /*
1661  * initialize local variables
1662  */
1663  current_tuple_count = 0;
1664 
1665  /*
1666  * Set the direction.
1667  */
1668  estate->es_direction = direction;
1669 
1670  /*
1671  * If the plan might potentially be executed multiple times, we must force
1672  * it to run without parallelism, because we might exit early. Also
1673  * disable parallelism when writing into a relation, because no database
1674  * changes are allowed in parallel mode.
1675  */
1676  if (!execute_once || dest->mydest == DestIntoRel)
1677  use_parallel_mode = false;
1678 
1679  if (use_parallel_mode)
1680  EnterParallelMode();
1681 
1682  /*
1683  * Loop until we've processed the proper number of tuples from the plan.
1684  */
1685  for (;;)
1686  {
1687  /* Reset the per-output-tuple exprcontext */
1688  ResetPerTupleExprContext(estate);
1689 
1690  /*
1691  * Execute the plan and obtain a tuple
1692  */
1693  slot = ExecProcNode(planstate);
1694 
1695  /*
1696  * if the tuple is null, then we assume there is nothing more to
1697  * process so we just end the loop...
1698  */
1699  if (TupIsNull(slot))
1700  {
1701  /* Allow nodes to release or shut down resources. */
1702  (void) ExecShutdownNode(planstate);
1703  break;
1704  }
1705 
1706  /*
1707  * If we have a junk filter, then project a new tuple with the junk
1708  * removed.
1709  *
1710  * Store this new "clean" tuple in the junkfilter's resultSlot.
1711  * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
1712  * because that tuple slot has the wrong descriptor.)
1713  */
1714  if (estate->es_junkFilter != NULL)
1715  slot = ExecFilterJunk(estate->es_junkFilter, slot);
1716 
1717  /*
1718  * If we are supposed to send the tuple somewhere, do so. (In
1719  * practice, this is probably always the case at this point.)
1720  */
1721  if (sendTuples)
1722  {
1723  /*
1724  * If we are not able to send the tuple, we assume the destination
1725  * has closed and no more tuples can be sent. If that's the case,
1726  * end the loop.
1727  */
1728  if (!((*dest->receiveSlot) (slot, dest)))
1729  break;
1730  }
1731 
1732  /*
1733  * Count tuples processed, if this is a SELECT. (For other operation
1734  * types, the ModifyTable plan node must count the appropriate
1735  * events.)
1736  */
1737  if (operation == CMD_SELECT)
1738  (estate->es_processed)++;
1739 
1740  /*
1741  * check our tuple count. If we've processed the proper number then
1742  * quit, else loop again and process more tuples. Zero numberTuples
1743  * means no limit.
1744  */
1745  current_tuple_count++;
1746  if (numberTuples && numberTuples == current_tuple_count)
1747  {
1748  /* Allow nodes to release or shut down resources. */
1749  (void) ExecShutdownNode(planstate);
1750  break;
1751  }
1752  }
1753 
1754  if (use_parallel_mode)
1755  ExitParallelMode();
1756 }
1757 
1758 
1759 /*
1760  * ExecRelCheck --- check that tuple meets constraints for result relation
1761  *
1762  * Returns NULL if OK, else name of failed check constraint
1763  */
1764 static const char *
1765 ExecRelCheck(ResultRelInfo *resultRelInfo,
1766  TupleTableSlot *slot, EState *estate)
1767 {
1768  Relation rel = resultRelInfo->ri_RelationDesc;
1769  int ncheck = rel->rd_att->constr->num_check;
1770  ConstrCheck *check = rel->rd_att->constr->check;
1771  ExprContext *econtext;
1772  MemoryContext oldContext;
1773  int i;
1774 
1775  /*
1776  * If first time through for this result relation, build expression
1777  * nodetrees for rel's constraint expressions. Keep them in the per-query
1778  * memory context so they'll survive throughout the query.
1779  */
1780  if (resultRelInfo->ri_ConstraintExprs == NULL)
1781  {
1782  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1783  resultRelInfo->ri_ConstraintExprs =
1784  (ExprState **) palloc(ncheck * sizeof(ExprState *));
1785  for (i = 0; i < ncheck; i++)
1786  {
1787  Expr *checkconstr;
1788 
1789  checkconstr = stringToNode(check[i].ccbin);
1790  resultRelInfo->ri_ConstraintExprs[i] =
1791  ExecPrepareExpr(checkconstr, estate);
1792  }
1793  MemoryContextSwitchTo(oldContext);
1794  }
1795 
1796  /*
1797  * We will use the EState's per-tuple context for evaluating constraint
1798  * expressions (creating it if it's not already there).
1799  */
1800  econtext = GetPerTupleExprContext(estate);
1801 
1802  /* Arrange for econtext's scan tuple to be the tuple under test */
1803  econtext->ecxt_scantuple = slot;
1804 
1805  /* And evaluate the constraints */
1806  for (i = 0; i < ncheck; i++)
1807  {
1808  ExprState *checkconstr = resultRelInfo->ri_ConstraintExprs[i];
1809 
1810  /*
1811  * NOTE: SQL specifies that a NULL result from a constraint expression
1812  * is not to be treated as a failure. Therefore, use ExecCheck not
1813  * ExecQual.
1814  */
1815  if (!ExecCheck(checkconstr, econtext))
1816  return check[i].ccname;
1817  }
1818 
1819  /* NULL result means no error */
1820  return NULL;
1821 }
1822 
1823 /*
1824  * ExecPartitionCheck --- check that tuple meets the partition constraint.
1825  */
1826 static void
 1827 ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
 1828  EState *estate)
1829 {
1830  Relation rel = resultRelInfo->ri_RelationDesc;
1831  TupleDesc tupdesc = RelationGetDescr(rel);
1832  Bitmapset *modifiedCols;
1833  Bitmapset *insertedCols;
1834  Bitmapset *updatedCols;
1835  ExprContext *econtext;
1836 
1837  /*
1838  * If first time through, build expression state tree for the partition
 1839  * check expression. Keep it in the per-query memory context so it will
1840  * survive throughout the query.
1841  */
1842  if (resultRelInfo->ri_PartitionCheckExpr == NULL)
1843  {
1844  List *qual = resultRelInfo->ri_PartitionCheck;
1845 
1846  resultRelInfo->ri_PartitionCheckExpr = ExecPrepareCheck(qual, estate);
1847  }
1848 
1849  /*
1850  * We will use the EState's per-tuple context for evaluating constraint
1851  * expressions (creating it if it's not already there).
1852  */
1853  econtext = GetPerTupleExprContext(estate);
1854 
1855  /* Arrange for econtext's scan tuple to be the tuple under test */
1856  econtext->ecxt_scantuple = slot;
1857 
1858  /*
1859  * As in case of the catalogued constraints, we treat a NULL result as
1860  * success here, not a failure.
1861  */
1862  if (!ExecCheck(resultRelInfo->ri_PartitionCheckExpr, econtext))
1863  {
1864  char *val_desc;
1865  Relation orig_rel = rel;
1866 
1867  /* See the comment above. */
1868  if (resultRelInfo->ri_PartitionRoot)
1869  {
1870  HeapTuple tuple = ExecFetchSlotTuple(slot);
1871  TupleDesc old_tupdesc = RelationGetDescr(rel);
1872  TupleConversionMap *map;
1873 
1874  rel = resultRelInfo->ri_PartitionRoot;
1875  tupdesc = RelationGetDescr(rel);
1876  /* a reverse map */
1877  map = convert_tuples_by_name(old_tupdesc, tupdesc,
1878  gettext_noop("could not convert row type"));
1879  if (map != NULL)
1880  {
1881  tuple = do_convert_tuple(tuple, map);
1882  ExecStoreTuple(tuple, slot, InvalidBuffer, false);
1883  }
1884  }
1885 
1886  insertedCols = GetInsertedColumns(resultRelInfo, estate);
1887  updatedCols = GetUpdatedColumns(resultRelInfo, estate);
1888  modifiedCols = bms_union(insertedCols, updatedCols);
 1889  val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
 1890  slot,
1891  tupdesc,
1892  modifiedCols,
1893  64);
1894  ereport(ERROR,
1895  (errcode(ERRCODE_CHECK_VIOLATION),
1896  errmsg("new row for relation \"%s\" violates partition constraint",
1897  RelationGetRelationName(orig_rel)),
1898  val_desc ? errdetail("Failing row contains %s.", val_desc) : 0));
1899  }
1900 }
1901 
1902 /*
1903  * ExecConstraints - check constraints of the tuple in 'slot'
1904  *
1905  * This checks the traditional NOT NULL and check constraints, as well as
1906  * the partition constraint, if any.
1907  *
1908  * Note: 'slot' contains the tuple to check the constraints of, which may
1909  * have been converted from the original input tuple after tuple routing.
1910  * 'resultRelInfo' is the original result relation, before tuple routing.
1911  */
1912 void
1914  TupleTableSlot *slot, EState *estate)
1915 {
1916  Relation rel = resultRelInfo->ri_RelationDesc;
1917  TupleDesc tupdesc = RelationGetDescr(rel);
1918  TupleConstr *constr = tupdesc->constr;
1919  Bitmapset *modifiedCols;
1920  Bitmapset *insertedCols;
1921  Bitmapset *updatedCols;
1922 
1923  Assert(constr || resultRelInfo->ri_PartitionCheck);
1924 
1925  if (constr && constr->has_not_null)
1926  {
1927  int natts = tupdesc->natts;
1928  int attrChk;
1929 
1930  for (attrChk = 1; attrChk <= natts; attrChk++)
1931  {
1932  if (tupdesc->attrs[attrChk - 1]->attnotnull &&
1933  slot_attisnull(slot, attrChk))
1934  {
1935  char *val_desc;
1936  Relation orig_rel = rel;
1937  TupleDesc orig_tupdesc = RelationGetDescr(rel);
1938 
1939  /*
1940  * If the tuple has been routed, it's been converted to the
1941  * partition's rowtype, which might differ from the root
1942  * table's. We must convert it back to the root table's
 1943  * rowtype so that the val_desc shown in the error message matches the
1944  * input tuple.
1945  */
1946  if (resultRelInfo->ri_PartitionRoot)
1947  {
1948  HeapTuple tuple = ExecFetchSlotTuple(slot);
1949  TupleConversionMap *map;
1950 
1951  rel = resultRelInfo->ri_PartitionRoot;
1952  tupdesc = RelationGetDescr(rel);
1953  /* a reverse map */
1954  map = convert_tuples_by_name(orig_tupdesc, tupdesc,
1955  gettext_noop("could not convert row type"));
1956  if (map != NULL)
1957  {
1958  tuple = do_convert_tuple(tuple, map);
1959  ExecStoreTuple(tuple, slot, InvalidBuffer, false);
1960  }
1961  }
1962 
1963  insertedCols = GetInsertedColumns(resultRelInfo, estate);
1964  updatedCols = GetUpdatedColumns(resultRelInfo, estate);
1965  modifiedCols = bms_union(insertedCols, updatedCols);
 1966  val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
 1967  slot,
1968  tupdesc,
1969  modifiedCols,
1970  64);
1971 
1972  ereport(ERROR,
1973  (errcode(ERRCODE_NOT_NULL_VIOLATION),
1974  errmsg("null value in column \"%s\" violates not-null constraint",
1975  NameStr(orig_tupdesc->attrs[attrChk - 1]->attname)),
1976  val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
1977  errtablecol(orig_rel, attrChk)));
1978  }
1979  }
1980  }
1981 
1982  if (constr && constr->num_check > 0)
1983  {
1984  const char *failed;
1985 
1986  if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
1987  {
1988  char *val_desc;
1989  Relation orig_rel = rel;
1990 
1991  /* See the comment above. */
1992  if (resultRelInfo->ri_PartitionRoot)
1993  {
1994  HeapTuple tuple = ExecFetchSlotTuple(slot);
1995  TupleDesc old_tupdesc = RelationGetDescr(rel);
1996  TupleConversionMap *map;
1997 
1998  rel = resultRelInfo->ri_PartitionRoot;
1999  tupdesc = RelationGetDescr(rel);
2000  /* a reverse map */
2001  map = convert_tuples_by_name(old_tupdesc, tupdesc,
2002  gettext_noop("could not convert row type"));
2003  if (map != NULL)
2004  {
2005  tuple = do_convert_tuple(tuple, map);
2006  ExecStoreTuple(tuple, slot, InvalidBuffer, false);
2007  }
2008  }
2009 
2010  insertedCols = GetInsertedColumns(resultRelInfo, estate);
2011  updatedCols = GetUpdatedColumns(resultRelInfo, estate);
2012  modifiedCols = bms_union(insertedCols, updatedCols);
 2013  val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
 2014  slot,
2015  tupdesc,
2016  modifiedCols,
2017  64);
2018  ereport(ERROR,
2019  (errcode(ERRCODE_CHECK_VIOLATION),
2020  errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
2021  RelationGetRelationName(orig_rel), failed),
2022  val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
2023  errtableconstraint(orig_rel, failed)));
2024  }
2025  }
2026 
2027  if (resultRelInfo->ri_PartitionCheck)
2028  ExecPartitionCheck(resultRelInfo, slot, estate);
2029 }
2030 
2031 
2032 /*
2033  * ExecWithCheckOptions -- check that tuple satisfies any WITH CHECK OPTIONs
2034  * of the specified kind.
2035  *
2036  * Note that this needs to be called multiple times to ensure that all kinds of
2037  * WITH CHECK OPTIONs are handled (both those from views which have the WITH
2038  * CHECK OPTION set and from row level security policies). See ExecInsert()
2039  * and ExecUpdate().
2040  */
2041 void
2043  TupleTableSlot *slot, EState *estate)
2044 {
2045  Relation rel = resultRelInfo->ri_RelationDesc;
2046  TupleDesc tupdesc = RelationGetDescr(rel);
2047  ExprContext *econtext;
2048  ListCell *l1,
2049  *l2;
2050 
2051  /*
2052  * We will use the EState's per-tuple context for evaluating constraint
2053  * expressions (creating it if it's not already there).
2054  */
2055  econtext = GetPerTupleExprContext(estate);
2056 
2057  /* Arrange for econtext's scan tuple to be the tuple under test */
2058  econtext->ecxt_scantuple = slot;
2059 
2060  /* Check each of the constraints */
2061  forboth(l1, resultRelInfo->ri_WithCheckOptions,
2062  l2, resultRelInfo->ri_WithCheckOptionExprs)
2063  {
2064  WithCheckOption *wco = (WithCheckOption *) lfirst(l1);
2065  ExprState *wcoExpr = (ExprState *) lfirst(l2);
2066 
2067  /*
2068  * Skip any WCOs which are not the kind we are looking for at this
2069  * time.
2070  */
2071  if (wco->kind != kind)
2072  continue;
2073 
2074  /*
2075  * WITH CHECK OPTION checks are intended to ensure that the new tuple
2076  * is visible (in the case of a view) or that it passes the
2077  * 'with-check' policy (in the case of row security). If the qual
2078  * evaluates to NULL or FALSE, then the new tuple won't be included in
2079  * the view or doesn't pass the 'with-check' policy for the table.
2080  */
2081  if (!ExecQual(wcoExpr, econtext))
2082  {
2083  char *val_desc;
2084  Bitmapset *modifiedCols;
2085  Bitmapset *insertedCols;
2086  Bitmapset *updatedCols;
2087 
2088  switch (wco->kind)
2089  {
2090  /*
2091  * For WITH CHECK OPTIONs coming from views, we might be
2092  * able to provide the details on the row, depending on
2093  * the permissions on the relation (that is, if the user
2094  * could view it directly anyway). For RLS violations, we
2095  * don't include the data since we don't know if the user
2096  * should be able to view the tuple as that depends on the
2097  * USING policy.
2098  */
2099  case WCO_VIEW_CHECK:
2100  insertedCols = GetInsertedColumns(resultRelInfo, estate);
2101  updatedCols = GetUpdatedColumns(resultRelInfo, estate);
2102  modifiedCols = bms_union(insertedCols, updatedCols);
 2103  val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
 2104  slot,
2105  tupdesc,
2106  modifiedCols,
2107  64);
2108 
2109  ereport(ERROR,
2110  (errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION),
2111  errmsg("new row violates check option for view \"%s\"",
2112  wco->relname),
2113  val_desc ? errdetail("Failing row contains %s.",
2114  val_desc) : 0));
2115  break;
2116  case WCO_RLS_INSERT_CHECK:
2117  case WCO_RLS_UPDATE_CHECK:
2118  if (wco->polname != NULL)
2119  ereport(ERROR,
2120  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2121  errmsg("new row violates row-level security policy \"%s\" for table \"%s\"",
2122  wco->polname, wco->relname)));
2123  else
2124  ereport(ERROR,
2125  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2126  errmsg("new row violates row-level security policy for table \"%s\"",
2127  wco->relname)));
2128  break;
 2129  case WCO_RLS_CONFLICT_CHECK:
 2130  if (wco->polname != NULL)
2131  ereport(ERROR,
2132  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2133  errmsg("new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
2134  wco->polname, wco->relname)));
2135  else
2136  ereport(ERROR,
2137  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2138  errmsg("new row violates row-level security policy (USING expression) for table \"%s\"",
2139  wco->relname)));
2140  break;
2141  default:
2142  elog(ERROR, "unrecognized WCO kind: %u", wco->kind);
2143  break;
2144  }
2145  }
2146  }
2147 }
2148 
2149 /*
2150  * ExecBuildSlotValueDescription -- construct a string representing a tuple
2151  *
2152  * This is intentionally very similar to BuildIndexValueDescription, but
2153  * unlike that function, we truncate long field values (to at most maxfieldlen
2154  * bytes). That seems necessary here since heap field values could be very
2155  * long, whereas index entries typically aren't so wide.
2156  *
2157  * Also, unlike the case with index entries, we need to be prepared to ignore
2158  * dropped columns. We used to use the slot's tuple descriptor to decode the
2159  * data, but the slot's descriptor doesn't identify dropped columns, so we
2160  * now need to be passed the relation's descriptor.
2161  *
2162  * Note that, like BuildIndexValueDescription, if the user does not have
2163  * permission to view any of the columns involved, a NULL is returned. Unlike
2164  * BuildIndexValueDescription, if the user has access to view a subset of the
 2165  * columns involved, that subset will be returned with a key identifying which
2166  * columns they are.
2167  */
2168 static char *
2170  TupleTableSlot *slot,
2171  TupleDesc tupdesc,
2172  Bitmapset *modifiedCols,
2173  int maxfieldlen)
2174 {
 2175  StringInfoData buf;
 2176  StringInfoData collist;
2177  bool write_comma = false;
2178  bool write_comma_collist = false;
2179  int i;
2180  AclResult aclresult;
2181  bool table_perm = false;
2182  bool any_perm = false;
2183 
2184  /*
2185  * Check if RLS is enabled and should be active for the relation; if so,
2186  * then don't return anything. Otherwise, go through normal permission
2187  * checks.
2188  */
2189  if (check_enable_rls(reloid, InvalidOid, true) == RLS_ENABLED)
2190  return NULL;
2191 
2192  initStringInfo(&buf);
2193 
2194  appendStringInfoChar(&buf, '(');
2195 
2196  /*
2197  * Check if the user has permissions to see the row. Table-level SELECT
2198  * allows access to all columns. If the user does not have table-level
2199  * SELECT then we check each column and include those the user has SELECT
2200  * rights on. Additionally, we always include columns the user provided
2201  * data for.
2202  */
2203  aclresult = pg_class_aclcheck(reloid, GetUserId(), ACL_SELECT);
2204  if (aclresult != ACLCHECK_OK)
2205  {
2206  /* Set up the buffer for the column list */
2207  initStringInfo(&collist);
2208  appendStringInfoChar(&collist, '(');
2209  }
2210  else
2211  table_perm = any_perm = true;
2212 
2213  /* Make sure the tuple is fully deconstructed */
2214  slot_getallattrs(slot);
2215 
2216  for (i = 0; i < tupdesc->natts; i++)
2217  {
2218  bool column_perm = false;
2219  char *val;
2220  int vallen;
2221 
2222  /* ignore dropped columns */
2223  if (tupdesc->attrs[i]->attisdropped)
2224  continue;
2225 
2226  if (!table_perm)
2227  {
2228  /*
2229  * No table-level SELECT, so need to make sure they either have
2230  * SELECT rights on the column or that they have provided the data
2231  * for the column. If not, omit this column from the error
2232  * message.
2233  */
2234  aclresult = pg_attribute_aclcheck(reloid, tupdesc->attrs[i]->attnum,
2235  GetUserId(), ACL_SELECT);
2236  if (bms_is_member(tupdesc->attrs[i]->attnum - FirstLowInvalidHeapAttributeNumber,
2237  modifiedCols) || aclresult == ACLCHECK_OK)
2238  {
2239  column_perm = any_perm = true;
2240 
2241  if (write_comma_collist)
2242  appendStringInfoString(&collist, ", ");
2243  else
2244  write_comma_collist = true;
2245 
2246  appendStringInfoString(&collist, NameStr(tupdesc->attrs[i]->attname));
2247  }
2248  }
2249 
2250  if (table_perm || column_perm)
2251  {
2252  if (slot->tts_isnull[i])
2253  val = "null";
2254  else
2255  {
2256  Oid foutoid;
2257  bool typisvarlena;
2258 
2259  getTypeOutputInfo(tupdesc->attrs[i]->atttypid,
2260  &foutoid, &typisvarlena);
2261  val = OidOutputFunctionCall(foutoid, slot->tts_values[i]);
2262  }
2263 
2264  if (write_comma)
2265  appendStringInfoString(&buf, ", ");
2266  else
2267  write_comma = true;
2268 
2269  /* truncate if needed */
2270  vallen = strlen(val);
2271  if (vallen <= maxfieldlen)
2272  appendStringInfoString(&buf, val);
2273  else
2274  {
2275  vallen = pg_mbcliplen(val, vallen, maxfieldlen);
2276  appendBinaryStringInfo(&buf, val, vallen);
2277  appendStringInfoString(&buf, "...");
2278  }
2279  }
2280  }
2281 
2282  /* If we end up with zero columns being returned, then return NULL. */
2283  if (!any_perm)
2284  return NULL;
2285 
2286  appendStringInfoChar(&buf, ')');
2287 
2288  if (!table_perm)
2289  {
2290  appendStringInfoString(&collist, ") = ");
2291  appendStringInfoString(&collist, buf.data);
2292 
2293  return collist.data;
2294  }
2295 
2296  return buf.data;
2297 }
2298 
2299 
2300 /*
2301  * ExecUpdateLockMode -- find the appropriate UPDATE tuple lock mode for a
2302  * given ResultRelInfo
2303  */
 2304 LockTupleMode
 2305 ExecUpdateLockMode(EState *estate, ResultRelInfo *relinfo)
 2306 {
2307  Bitmapset *keyCols;
2308  Bitmapset *updatedCols;
2309 
2310  /*
2311  * Compute lock mode to use. If columns that are part of the key have not
2312  * been modified, then we can use a weaker lock, allowing for better
2313  * concurrency.
2314  */
2315  updatedCols = GetUpdatedColumns(relinfo, estate);
2316  keyCols = RelationGetIndexAttrBitmap(relinfo->ri_RelationDesc,
 2317  INDEX_ATTR_BITMAP_KEY);
 2318 
2319  if (bms_overlap(keyCols, updatedCols))
2320  return LockTupleExclusive;
2321 
2322  return LockTupleNoKeyExclusive;
2323 }
2324 
2325 /*
2326  * ExecFindRowMark -- find the ExecRowMark struct for given rangetable index
2327  *
2328  * If no such struct, either return NULL or throw error depending on missing_ok
2329  */
2330 ExecRowMark *
2331 ExecFindRowMark(EState *estate, Index rti, bool missing_ok)
2332 {
2333  ListCell *lc;
2334 
2335  foreach(lc, estate->es_rowMarks)
2336  {
2337  ExecRowMark *erm = (ExecRowMark *) lfirst(lc);
2338 
2339  if (erm->rti == rti)
2340  return erm;
2341  }
2342  if (!missing_ok)
2343  elog(ERROR, "failed to find ExecRowMark for rangetable index %u", rti);
2344  return NULL;
2345 }
2346 
2347 /*
2348  * ExecBuildAuxRowMark -- create an ExecAuxRowMark struct
2349  *
2350  * Inputs are the underlying ExecRowMark struct and the targetlist of the
2351  * input plan node (not planstate node!). We need the latter to find out
2352  * the column numbers of the resjunk columns.
2353  */
 2354 ExecAuxRowMark *
 2355 ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
 2356 {
2357  ExecAuxRowMark *aerm = (ExecAuxRowMark *) palloc0(sizeof(ExecAuxRowMark));
2358  char resname[32];
2359 
2360  aerm->rowmark = erm;
2361 
2362  /* Look up the resjunk columns associated with this rowmark */
2363  if (erm->markType != ROW_MARK_COPY)
2364  {
2365  /* need ctid for all methods other than COPY */
2366  snprintf(resname, sizeof(resname), "ctid%u", erm->rowmarkId);
2367  aerm->ctidAttNo = ExecFindJunkAttributeInTlist(targetlist,
2368  resname);
2369  if (!AttributeNumberIsValid(aerm->ctidAttNo))
2370  elog(ERROR, "could not find junk %s column", resname);
2371  }
2372  else
2373  {
2374  /* need wholerow if COPY */
2375  snprintf(resname, sizeof(resname), "wholerow%u", erm->rowmarkId);
2376  aerm->wholeAttNo = ExecFindJunkAttributeInTlist(targetlist,
2377  resname);
2378  if (!AttributeNumberIsValid(aerm->wholeAttNo))
2379  elog(ERROR, "could not find junk %s column", resname);
2380  }
2381 
2382  /* if child rel, need tableoid */
2383  if (erm->rti != erm->prti)
2384  {
2385  snprintf(resname, sizeof(resname), "tableoid%u", erm->rowmarkId);
2386  aerm->toidAttNo = ExecFindJunkAttributeInTlist(targetlist,
2387  resname);
2388  if (!AttributeNumberIsValid(aerm->toidAttNo))
2389  elog(ERROR, "could not find junk %s column", resname);
2390  }
2391 
2392  return aerm;
2393 }
2394 
2395 
2396 /*
2397  * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
2398  * process the updated version under READ COMMITTED rules.
2399  *
2400  * See backend/executor/README for some info about how this works.
2401  */
2402 
2403 
2404 /*
2405  * Check a modified tuple to see if we want to process its updated version
2406  * under READ COMMITTED rules.
2407  *
2408  * estate - outer executor state data
2409  * epqstate - state for EvalPlanQual rechecking
2410  * relation - table containing tuple
2411  * rti - rangetable index of table containing tuple
2412  * lockmode - requested tuple lock mode
2413  * *tid - t_ctid from the outdated tuple (ie, next updated version)
2414  * priorXmax - t_xmax from the outdated tuple
2415  *
2416  * *tid is also an output parameter: it's modified to hold the TID of the
2417  * latest version of the tuple (note this may be changed even on failure)
2418  *
2419  * Returns a slot containing the new candidate update/delete tuple, or
2420  * NULL if we determine we shouldn't process the row.
2421  *
2422  * Note: properly, lockmode should be declared as enum LockTupleMode,
2423  * but we use "int" to avoid having to include heapam.h in executor.h.
2424  */
 2425 TupleTableSlot *
 2426 EvalPlanQual(EState *estate, EPQState *epqstate,
2427  Relation relation, Index rti, int lockmode,
2428  ItemPointer tid, TransactionId priorXmax)
2429 {
2430  TupleTableSlot *slot;
2431  HeapTuple copyTuple;
2432 
2433  Assert(rti > 0);
2434 
2435  /*
2436  * Get and lock the updated version of the row; if fail, return NULL.
2437  */
2438  copyTuple = EvalPlanQualFetch(estate, relation, lockmode, LockWaitBlock,
2439  tid, priorXmax);
2440 
2441  if (copyTuple == NULL)
2442  return NULL;
2443 
2444  /*
2445  * For UPDATE/DELETE we have to return tid of actual row we're executing
2446  * PQ for.
2447  */
2448  *tid = copyTuple->t_self;
2449 
2450  /*
2451  * Need to run a recheck subquery. Initialize or reinitialize EPQ state.
2452  */
2453  EvalPlanQualBegin(epqstate, estate);
2454 
2455  /*
2456  * Free old test tuple, if any, and store new tuple where relation's scan
2457  * node will see it
2458  */
2459  EvalPlanQualSetTuple(epqstate, rti, copyTuple);
2460 
2461  /*
2462  * Fetch any non-locked source rows
2463  */
2464  EvalPlanQualFetchRowMarks(epqstate);
2465 
2466  /*
2467  * Run the EPQ query. We assume it will return at most one tuple.
2468  */
2469  slot = EvalPlanQualNext(epqstate);
2470 
2471  /*
2472  * If we got a tuple, force the slot to materialize the tuple so that it
2473  * is not dependent on any local state in the EPQ query (in particular,
2474  * it's highly likely that the slot contains references to any pass-by-ref
2475  * datums that may be present in copyTuple). As with the next step, this
2476  * is to guard against early re-use of the EPQ query.
2477  */
2478  if (!TupIsNull(slot))
2479  (void) ExecMaterializeSlot(slot);
2480 
2481  /*
2482  * Clear out the test tuple. This is needed in case the EPQ query is
2483  * re-used to test a tuple for a different relation. (Not clear that can
2484  * really happen, but let's be safe.)
2485  */
2486  EvalPlanQualSetTuple(epqstate, rti, NULL);
2487 
2488  return slot;
2489 }
2490 
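A minimal caller sketch, not part of execMain.c: it shows the EvalPlanQual() call pattern an UPDATE/DELETE path might use after a concurrent update is detected under READ COMMITTED. The helper name and argument plumbing are invented for illustration; only EvalPlanQual() and the executor types come from the code above.

/*
 * Hypothetical helper (illustration only): re-fetch the newest row version
 * and re-run the plan quals against it.  Returns NULL if the row no longer
 * qualifies and should be skipped.
 */
static TupleTableSlot *
recheck_after_concurrent_update(EState *estate, EPQState *epqstate,
                                ResultRelInfo *rri,
                                HeapUpdateFailureData *hufd)
{
    return EvalPlanQual(estate, epqstate,
                        rri->ri_RelationDesc,
                        rri->ri_RangeTableIndex,
                        LockTupleExclusive,
                        &hufd->ctid,
                        hufd->xmax);
}

If a non-NULL slot comes back, a caller would typically re-project it through its junk filter and retry the operation on the row version now identified by hufd->ctid.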
2491 /*
2492  * Fetch a copy of the newest version of an outdated tuple
2493  *
2494  * estate - executor state data
2495  * relation - table containing tuple
2496  * lockmode - requested tuple lock mode
2497  * wait_policy - requested lock wait policy
2498  * *tid - t_ctid from the outdated tuple (ie, next updated version)
2499  * priorXmax - t_xmax from the outdated tuple
2500  *
2501  * Returns a palloc'd copy of the newest tuple version, or NULL if we find
2502  * that there is no newest version (ie, the row was deleted not updated).
2503  * We also return NULL if the tuple is locked and the wait policy is to skip
2504  * such tuples.
2505  *
2506  * If successful, we have locked the newest tuple version, so caller does not
2507  * need to worry about it changing anymore.
2508  *
2509  * Note: properly, lockmode should be declared as enum LockTupleMode,
2510  * but we use "int" to avoid having to include heapam.h in executor.h.
2511  */
2512 HeapTuple
2513 EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
2514  LockWaitPolicy wait_policy,
2515  ItemPointer tid, TransactionId priorXmax)
2516 {
2517  HeapTuple copyTuple = NULL;
2518  HeapTupleData tuple;
2519  SnapshotData SnapshotDirty;
2520 
2521  /*
2522  * fetch target tuple
2523  *
2524  * Loop here to deal with updated or busy tuples
2525  */
2526  InitDirtySnapshot(SnapshotDirty);
2527  tuple.t_self = *tid;
2528  for (;;)
2529  {
2530  Buffer buffer;
2531 
2532  if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
2533  {
2534  HTSU_Result test;
2535  HeapUpdateFailureData hufd;
2536 
2537  /*
2538  * If xmin isn't what we're expecting, the slot must have been
2539  * recycled and reused for an unrelated tuple. This implies that
2540  * the latest version of the row was deleted, so we need do
2541  * nothing. (Should be safe to examine xmin without getting
2542  * buffer's content lock. We assume reading a TransactionId to be
2543  * atomic, and Xmin never changes in an existing tuple, except to
2544  * invalid or frozen, and neither of those can match priorXmax.)
2545  */
 2546  if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
 2547  priorXmax))
2548  {
2549  ReleaseBuffer(buffer);
2550  return NULL;
2551  }
2552 
2553  /* otherwise xmin should not be dirty... */
2554  if (TransactionIdIsValid(SnapshotDirty.xmin))
2555  elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
2556 
2557  /*
2558  * If tuple is being updated by other transaction then we have to
2559  * wait for its commit/abort, or die trying.
2560  */
2561  if (TransactionIdIsValid(SnapshotDirty.xmax))
2562  {
2563  ReleaseBuffer(buffer);
2564  switch (wait_policy)
2565  {
2566  case LockWaitBlock:
2567  XactLockTableWait(SnapshotDirty.xmax,
2568  relation, &tuple.t_self,
 2569  XLTW_FetchUpdated);
 2570  break;
2571  case LockWaitSkip:
2572  if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
2573  return NULL; /* skip instead of waiting */
2574  break;
2575  case LockWaitError:
2576  if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
2577  ereport(ERROR,
2578  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
2579  errmsg("could not obtain lock on row in relation \"%s\"",
2580  RelationGetRelationName(relation))));
2581  break;
2582  }
2583  continue; /* loop back to repeat heap_fetch */
2584  }
2585 
2586  /*
2587  * If tuple was inserted by our own transaction, we have to check
2588  * cmin against es_output_cid: cmin >= current CID means our
2589  * command cannot see the tuple, so we should ignore it. Otherwise
2590  * heap_lock_tuple() will throw an error, and so would any later
2591  * attempt to update or delete the tuple. (We need not check cmax
2592  * because HeapTupleSatisfiesDirty will consider a tuple deleted
2593  * by our transaction dead, regardless of cmax.) We just checked
2594  * that priorXmax == xmin, so we can test that variable instead of
2595  * doing HeapTupleHeaderGetXmin again.
2596  */
2597  if (TransactionIdIsCurrentTransactionId(priorXmax) &&
2598  HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
2599  {
2600  ReleaseBuffer(buffer);
2601  return NULL;
2602  }
2603 
2604  /*
2605  * This is a live tuple, so now try to lock it.
2606  */
2607  test = heap_lock_tuple(relation, &tuple,
2608  estate->es_output_cid,
2609  lockmode, wait_policy,
2610  false, &buffer, &hufd);
2611  /* We now have two pins on the buffer, get rid of one */
2612  ReleaseBuffer(buffer);
2613 
2614  switch (test)
2615  {
2616  case HeapTupleSelfUpdated:
2617 
2618  /*
2619  * The target tuple was already updated or deleted by the
2620  * current command, or by a later command in the current
2621  * transaction. We *must* ignore the tuple in the former
2622  * case, so as to avoid the "Halloween problem" of
2623  * repeated update attempts. In the latter case it might
2624  * be sensible to fetch the updated tuple instead, but
2625  * doing so would require changing heap_update and
2626  * heap_delete to not complain about updating "invisible"
2627  * tuples, which seems pretty scary (heap_lock_tuple will
2628  * not complain, but few callers expect
2629  * HeapTupleInvisible, and we're not one of them). So for
2630  * now, treat the tuple as deleted and do not process.
2631  */
2632  ReleaseBuffer(buffer);
2633  return NULL;
2634 
2635  case HeapTupleMayBeUpdated:
2636  /* successfully locked */
2637  break;
2638 
2639  case HeapTupleUpdated:
2640  ReleaseBuffer(buffer);
 2641  if (IsolationUsesXactSnapshot())
 2642  ereport(ERROR,
2643  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2644  errmsg("could not serialize access due to concurrent update")));
2645 
2646  /* Should not encounter speculative tuple on recheck */
 2647  Assert(!HeapTupleHeaderIsSpeculative(tuple.t_data));
 2648  if (!ItemPointerEquals(&hufd.ctid, &tuple.t_self))
2649  {
2650  /* it was updated, so look at the updated version */
2651  tuple.t_self = hufd.ctid;
2652  /* updated row should have xmin matching this xmax */
2653  priorXmax = hufd.xmax;
2654  continue;
2655  }
2656  /* tuple was deleted, so give up */
2657  return NULL;
2658 
2659  case HeapTupleWouldBlock:
2660  ReleaseBuffer(buffer);
2661  return NULL;
2662 
2663  case HeapTupleInvisible:
2664  elog(ERROR, "attempted to lock invisible tuple");
2665 
2666  default:
2667  ReleaseBuffer(buffer);
2668  elog(ERROR, "unrecognized heap_lock_tuple status: %u",
2669  test);
2670  return NULL; /* keep compiler quiet */
2671  }
2672 
2673  /*
2674  * We got tuple - now copy it for use by recheck query.
2675  */
2676  copyTuple = heap_copytuple(&tuple);
2677  ReleaseBuffer(buffer);
2678  break;
2679  }
2680 
2681  /*
2682  * If the referenced slot was actually empty, the latest version of
2683  * the row must have been deleted, so we need do nothing.
2684  */
2685  if (tuple.t_data == NULL)
2686  {
2687  ReleaseBuffer(buffer);
2688  return NULL;
2689  }
2690 
2691  /*
2692  * As above, if xmin isn't what we're expecting, do nothing.
2693  */
 2694  if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
 2695  priorXmax))
2696  {
2697  ReleaseBuffer(buffer);
2698  return NULL;
2699  }
2700 
2701  /*
2702  * If we get here, the tuple was found but failed SnapshotDirty.
2703  * Assuming the xmin is either a committed xact or our own xact (as it
2704  * certainly should be if we're trying to modify the tuple), this must
2705  * mean that the row was updated or deleted by either a committed xact
2706  * or our own xact. If it was deleted, we can ignore it; if it was
2707  * updated then chain up to the next version and repeat the whole
2708  * process.
2709  *
2710  * As above, it should be safe to examine xmax and t_ctid without the
2711  * buffer content lock, because they can't be changing.
2712  */
2713  if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
2714  {
2715  /* deleted, so forget about it */
2716  ReleaseBuffer(buffer);
2717  return NULL;
2718  }
2719 
2720  /* updated, so look at the updated row */
2721  tuple.t_self = tuple.t_data->t_ctid;
2722  /* updated row should have xmin matching this xmax */
2723  priorXmax = HeapTupleHeaderGetUpdateXid(tuple.t_data);
2724  ReleaseBuffer(buffer);
2725  /* loop back to fetch next in chain */
2726  }
2727 
2728  /*
2729  * Return the copied tuple
2730  */
2731  return copyTuple;
2732 }
2733 
2734 /*
2735  * EvalPlanQualInit -- initialize during creation of a plan state node
2736  * that might need to invoke EPQ processing.
2737  *
2738  * Note: subplan/auxrowmarks can be NULL/NIL if they will be set later
2739  * with EvalPlanQualSetPlan.
2740  */
2741 void
2742 EvalPlanQualInit(EPQState *epqstate, EState *estate,
2743  Plan *subplan, List *auxrowmarks, int epqParam)
2744 {
2745  /* Mark the EPQ state inactive */
2746  epqstate->estate = NULL;
2747  epqstate->planstate = NULL;
2748  epqstate->origslot = NULL;
2749  /* ... and remember data that EvalPlanQualBegin will need */
2750  epqstate->plan = subplan;
2751  epqstate->arowMarks = auxrowmarks;
2752  epqstate->epqParam = epqParam;
2753 }
2754 
2755 /*
2756  * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
2757  *
2758  * We need this so that ModifyTable can deal with multiple subplans.
2759  */
2760 void
2761 EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan, List *auxrowmarks)
2762 {
2763  /* If we have a live EPQ query, shut it down */
2764  EvalPlanQualEnd(epqstate);
2765  /* And set/change the plan pointer */
2766  epqstate->plan = subplan;
2767  /* The rowmarks depend on the plan, too */
2768  epqstate->arowMarks = auxrowmarks;
2769 }
2770 
2771 /*
2772  * Install one test tuple into EPQ state, or clear test tuple if tuple == NULL
2773  *
2774  * NB: passed tuple must be palloc'd; it may get freed later
2775  */
2776 void
 2777 EvalPlanQualSetTuple(EPQState *epqstate, Index rti, HeapTuple tuple)
 2778 {
2779  EState *estate = epqstate->estate;
2780 
2781  Assert(rti > 0);
2782 
2783  /*
2784  * free old test tuple, if any, and store new tuple where relation's scan
2785  * node will see it
2786  */
2787  if (estate->es_epqTuple[rti - 1] != NULL)
2788  heap_freetuple(estate->es_epqTuple[rti - 1]);
2789  estate->es_epqTuple[rti - 1] = tuple;
2790  estate->es_epqTupleSet[rti - 1] = true;
2791 }
2792 
2793 /*
2794  * Fetch back the current test tuple (if any) for the specified RTI
2795  */
2796 HeapTuple
 2797 EvalPlanQualGetTuple(EPQState *epqstate, Index rti)
 2798 {
2799  EState *estate = epqstate->estate;
2800 
2801  Assert(rti > 0);
2802 
2803  return estate->es_epqTuple[rti - 1];
2804 }
2805 
2806 /*
2807  * Fetch the current row values for any non-locked relations that need
2808  * to be scanned by an EvalPlanQual operation. origslot must have been set
2809  * to contain the current result row (top-level row) that we need to recheck.
2810  */
2811 void
 2812 EvalPlanQualFetchRowMarks(EPQState *epqstate)
 2813 {
2814  ListCell *l;
2815 
2816  Assert(epqstate->origslot != NULL);
2817 
2818  foreach(l, epqstate->arowMarks)
2819  {
2820  ExecAuxRowMark *aerm = (ExecAuxRowMark *) lfirst(l);
2821  ExecRowMark *erm = aerm->rowmark;
2822  Datum datum;
2823  bool isNull;
2824  HeapTupleData tuple;
2825 
 2826  if (RowMarkRequiresRowShareLock(erm->markType))
 2827  elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");
2828 
2829  /* clear any leftover test tuple for this rel */
2830  EvalPlanQualSetTuple(epqstate, erm->rti, NULL);
2831 
2832  /* if child rel, must check whether it produced this row */
2833  if (erm->rti != erm->prti)
2834  {
2835  Oid tableoid;
2836 
2837  datum = ExecGetJunkAttribute(epqstate->origslot,
2838  aerm->toidAttNo,
2839  &isNull);
2840  /* non-locked rels could be on the inside of outer joins */
2841  if (isNull)
2842  continue;
2843  tableoid = DatumGetObjectId(datum);
2844 
2845  Assert(OidIsValid(erm->relid));
2846  if (tableoid != erm->relid)
2847  {
2848  /* this child is inactive right now */
2849  continue;
2850  }
2851  }
2852 
2853  if (erm->markType == ROW_MARK_REFERENCE)
2854  {
2855  HeapTuple copyTuple;
2856 
2857  Assert(erm->relation != NULL);
2858 
2859  /* fetch the tuple's ctid */
2860  datum = ExecGetJunkAttribute(epqstate->origslot,
2861  aerm->ctidAttNo,
2862  &isNull);
2863  /* non-locked rels could be on the inside of outer joins */
2864  if (isNull)
2865  continue;
2866 
2867  /* fetch requests on foreign tables must be passed to their FDW */
2868  if (erm->relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
2869  {
2870  FdwRoutine *fdwroutine;
2871  bool updated = false;
2872 
2873  fdwroutine = GetFdwRoutineForRelation(erm->relation, false);
2874  /* this should have been checked already, but let's be safe */
2875  if (fdwroutine->RefetchForeignRow == NULL)
2876  ereport(ERROR,
2877  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2878  errmsg("cannot lock rows in foreign table \"%s\"",
 2879  RelationGetRelationName(erm->relation))));
 2880  copyTuple = fdwroutine->RefetchForeignRow(epqstate->estate,
2881  erm,
2882  datum,
2883  &updated);
2884  if (copyTuple == NULL)
2885  elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
2886 
2887  /*
2888  * Ideally we'd insist on updated == false here, but that
2889  * assumes that FDWs can track that exactly, which they might
2890  * not be able to. So just ignore the flag.
2891  */
2892  }
2893  else
2894  {
2895  /* ordinary table, fetch the tuple */
2896  Buffer buffer;
2897 
2898  tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
2899  if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
2900  false, NULL))
2901  elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
2902 
2903  /* successful, copy tuple */
2904  copyTuple = heap_copytuple(&tuple);
2905  ReleaseBuffer(buffer);
2906  }
2907 
2908  /* store tuple */
2909  EvalPlanQualSetTuple(epqstate, erm->rti, copyTuple);
2910  }
2911  else
2912  {
2913  HeapTupleHeader td;
2914 
2915  Assert(erm->markType == ROW_MARK_COPY);
2916 
2917  /* fetch the whole-row Var for the relation */
2918  datum = ExecGetJunkAttribute(epqstate->origslot,
2919  aerm->wholeAttNo,
2920  &isNull);
2921  /* non-locked rels could be on the inside of outer joins */
2922  if (isNull)
2923  continue;
2924  td = DatumGetHeapTupleHeader(datum);
2925 
2926  /* build a temporary HeapTuple control structure */
 2927  tuple.t_len = HeapTupleHeaderGetDatumLength(td);
 2928  tuple.t_data = td;
2929  /* relation might be a foreign table, if so provide tableoid */
2930  tuple.t_tableOid = erm->relid;
2931  /* also copy t_ctid in case there's valid data there */
2932  tuple.t_self = td->t_ctid;
2933 
2934  /* copy and store tuple */
2935  EvalPlanQualSetTuple(epqstate, erm->rti,
2936  heap_copytuple(&tuple));
2937  }
2938  }
2939 }
2940 
2941 /*
2942  * Fetch the next row (if any) from EvalPlanQual testing
2943  *
2944  * (In practice, there should never be more than one row...)
2945  */
 2946 TupleTableSlot *
 2947 EvalPlanQualNext(EPQState *epqstate)
 2948 {
2949  MemoryContext oldcontext;
2950  TupleTableSlot *slot;
2951 
2952  oldcontext = MemoryContextSwitchTo(epqstate->estate->es_query_cxt);
2953  slot = ExecProcNode(epqstate->planstate);
2954  MemoryContextSwitchTo(oldcontext);
2955 
2956  return slot;
2957 }
2958 
2959 /*
2960  * Initialize or reset an EvalPlanQual state tree
2961  */
2962 void
2963 EvalPlanQualBegin(EPQState *epqstate, EState *parentestate)
2964 {
2965  EState *estate = epqstate->estate;
2966 
2967  if (estate == NULL)
2968  {
2969  /* First time through, so create a child EState */
2970  EvalPlanQualStart(epqstate, parentestate, epqstate->plan);
2971  }
2972  else
2973  {
2974  /*
2975  * We already have a suitable child EPQ tree, so just reset it.
2976  */
2977  int rtsize = list_length(parentestate->es_range_table);
2978  PlanState *planstate = epqstate->planstate;
2979 
2980  MemSet(estate->es_epqScanDone, 0, rtsize * sizeof(bool));
2981 
2982  /* Recopy current values of parent parameters */
2983  if (parentestate->es_plannedstmt->nParamExec > 0)
2984  {
2985  int i = parentestate->es_plannedstmt->nParamExec;
2986 
2987  while (--i >= 0)
2988  {
2989  /* copy value if any, but not execPlan link */
2990  estate->es_param_exec_vals[i].value =
2991  parentestate->es_param_exec_vals[i].value;
2992  estate->es_param_exec_vals[i].isnull =
2993  parentestate->es_param_exec_vals[i].isnull;
2994  }
2995  }
2996 
2997  /*
2998  * Mark child plan tree as needing rescan at all scan nodes. The
2999  * first ExecProcNode will take care of actually doing the rescan.
3000  */
3001  planstate->chgParam = bms_add_member(planstate->chgParam,
3002  epqstate->epqParam);
3003  }
3004 }
3005 
3006 /*
3007  * Start execution of an EvalPlanQual plan tree.
3008  *
3009  * This is a cut-down version of ExecutorStart(): we copy some state from
3010  * the top-level estate rather than initializing it fresh.
3011  */
3012 static void
3013 EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
3014 {
3015  EState *estate;
3016  int rtsize;
3017  MemoryContext oldcontext;
3018  ListCell *l;
3019 
3020  rtsize = list_length(parentestate->es_range_table);
3021 
3022  epqstate->estate = estate = CreateExecutorState();
3023 
3024  oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
3025 
3026  /*
3027  * Child EPQ EStates share the parent's copy of unchanging state such as
3028  * the snapshot, rangetable, result-rel info, and external Param info.
3029  * They need their own copies of local state, including a tuple table,
3030  * es_param_exec_vals, etc.
3031  *
3032  * The ResultRelInfo array management is trickier than it looks. We
3033  * create a fresh array for the child but copy all the content from the
3034  * parent. This is because it's okay for the child to share any
3035  * per-relation state the parent has already created --- but if the child
3036  * sets up any ResultRelInfo fields, such as its own junkfilter, that
3037  * state must *not* propagate back to the parent. (For one thing, the
3038  * pointed-to data is in a memory context that won't last long enough.)
3039  */
 3040  estate->es_direction = ForwardScanDirection;
 3041  estate->es_snapshot = parentestate->es_snapshot;
3042  estate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
3043  estate->es_range_table = parentestate->es_range_table;
3044  estate->es_plannedstmt = parentestate->es_plannedstmt;
3045  estate->es_junkFilter = parentestate->es_junkFilter;
3046  estate->es_output_cid = parentestate->es_output_cid;
3047  if (parentestate->es_num_result_relations > 0)
3048  {
3049  int numResultRelations = parentestate->es_num_result_relations;
3050  ResultRelInfo *resultRelInfos;
3051 
3052  resultRelInfos = (ResultRelInfo *)
3053  palloc(numResultRelations * sizeof(ResultRelInfo));
3054  memcpy(resultRelInfos, parentestate->es_result_relations,
3055  numResultRelations * sizeof(ResultRelInfo));
3056  estate->es_result_relations = resultRelInfos;
3057  estate->es_num_result_relations = numResultRelations;
3058  }
3059  /* es_result_relation_info must NOT be copied */
3060  /* es_trig_target_relations must NOT be copied */
3061  estate->es_rowMarks = parentestate->es_rowMarks;
3062  estate->es_top_eflags = parentestate->es_top_eflags;
3063  estate->es_instrument = parentestate->es_instrument;
3064  /* es_auxmodifytables must NOT be copied */
3065 
3066  /*
3067  * The external param list is simply shared from parent. The internal
3068  * param workspace has to be local state, but we copy the initial values
3069  * from the parent, so as to have access to any param values that were
3070  * already set from other parts of the parent's plan tree.
3071  */
3072  estate->es_param_list_info = parentestate->es_param_list_info;
3073  if (parentestate->es_plannedstmt->nParamExec > 0)
3074  {
3075  int i = parentestate->es_plannedstmt->nParamExec;
3076 
3077  estate->es_param_exec_vals = (ParamExecData *)
3078  palloc0(i * sizeof(ParamExecData));
3079  while (--i >= 0)
3080  {
3081  /* copy value if any, but not execPlan link */
3082  estate->es_param_exec_vals[i].value =
3083  parentestate->es_param_exec_vals[i].value;
3084  estate->es_param_exec_vals[i].isnull =
3085  parentestate->es_param_exec_vals[i].isnull;
3086  }
3087  }
3088 
3089  /*
3090  * Each EState must have its own es_epqScanDone state, but if we have
3091  * nested EPQ checks they should share es_epqTuple arrays. This allows
3092  * sub-rechecks to inherit the values being examined by an outer recheck.
3093  */
3094  estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
3095  if (parentestate->es_epqTuple != NULL)
3096  {
3097  estate->es_epqTuple = parentestate->es_epqTuple;
3098  estate->es_epqTupleSet = parentestate->es_epqTupleSet;
3099  }
3100  else
3101  {
3102  estate->es_epqTuple = (HeapTuple *)
3103  palloc0(rtsize * sizeof(HeapTuple));
3104  estate->es_epqTupleSet = (bool *)
3105  palloc0(rtsize * sizeof(bool));
3106  }
3107 
3108  /*
3109  * Each estate also has its own tuple table.
3110  */
3111  estate->es_tupleTable = NIL;
3112 
3113  /*
3114  * Initialize private state information for each SubPlan. We must do this
3115  * before running ExecInitNode on the main query tree, since
3116  * ExecInitSubPlan expects to be able to find these entries. Some of the
3117  * SubPlans might not be used in the part of the plan tree we intend to
3118  * run, but since it's not easy to tell which, we just initialize them
3119  * all.
3120  */
3121  Assert(estate->es_subplanstates == NIL);
3122  foreach(l, parentestate->es_plannedstmt->subplans)
3123  {
3124  Plan *subplan = (Plan *) lfirst(l);
3125  PlanState *subplanstate;
3126 
3127  subplanstate = ExecInitNode(subplan, estate, 0);
3128  estate->es_subplanstates = lappend(estate->es_subplanstates,
3129  subplanstate);
3130  }
3131 
3132  /*
3133  * Initialize the private state information for all the nodes in the part
3134  * of the plan tree we need to run. This opens files, allocates storage
3135  * and leaves us ready to start processing tuples.
3136  */
3137  epqstate->planstate = ExecInitNode(planTree, estate, 0);
3138 
3139  MemoryContextSwitchTo(oldcontext);
3140 }
3141 
3142 /*
3143  * EvalPlanQualEnd -- shut down at termination of parent plan state node,
3144  * or if we are done with the current EPQ child.
3145  *
3146  * This is a cut-down version of ExecutorEnd(); basically we want to do most
3147  * of the normal cleanup, but *not* close result relations (which we are
3148  * just sharing from the outer query). We do, however, have to close any
3149  * trigger target relations that got opened, since those are not shared.
3150  * (There probably shouldn't be any of the latter, but just in case...)
3151  */
3152 void
 3153 EvalPlanQualEnd(EPQState *epqstate)
 3154 {
3155  EState *estate = epqstate->estate;
3156  MemoryContext oldcontext;
3157  ListCell *l;
3158 
3159  if (estate == NULL)
3160  return; /* idle, so nothing to do */
3161 
3162  oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
3163 
3164  ExecEndNode(epqstate->planstate);
3165 
3166  foreach(l, estate->es_subplanstates)
3167  {
3168  PlanState *subplanstate = (PlanState *) lfirst(l);
3169 
3170  ExecEndNode(subplanstate);
3171  }
3172 
3173  /* throw away the per-estate tuple table */
3174  ExecResetTupleTable(estate->es_tupleTable, false);
3175 
3176  /* close any trigger target relations attached to this EState */
3177  ExecCleanUpTriggerState(estate);
3178 
3179  MemoryContextSwitchTo(oldcontext);
3180 
3181  FreeExecutorState(estate);
3182 
3183  /* Mark EPQState idle */
3184  epqstate->estate = NULL;
3185  epqstate->planstate = NULL;
3186  epqstate->origslot = NULL;
3187 }
3188 
3189 /*
3190  * ExecSetupPartitionTupleRouting - set up information needed during
3191  * tuple routing for partitioned tables
3192  *
3193  * Output arguments:
3194  * 'pd' receives an array of PartitionDispatch objects with one entry for
3195  * every partitioned table in the partition tree
3196  * 'partitions' receives an array of ResultRelInfo objects with one entry for
3197  * every leaf partition in the partition tree
3198  * 'tup_conv_maps' receives an array of TupleConversionMap objects with one
3199  * entry for every leaf partition (required to convert input tuple based
3200  * on the root table's rowtype to a leaf partition's rowtype after tuple
 3201  * routing is done)
3202  * 'partition_tuple_slot' receives a standalone TupleTableSlot to be used
3203  * to manipulate any given leaf partition's rowtype after that partition
3204  * is chosen by tuple-routing.
3205  * 'num_parted' receives the number of partitioned tables in the partition
3206  * tree (= the number of entries in the 'pd' output array)
3207  * 'num_partitions' receives the number of leaf partitions in the partition
3208  * tree (= the number of entries in the 'partitions' and 'tup_conv_maps'
 3209  * output arrays)
3210  *
3211  * Note that all the relations in the partition tree are locked using the
3212  * RowExclusiveLock mode upon return from this function.
3213  */
3214 void
 3215 ExecSetupPartitionTupleRouting(Relation rel,
 3216  PartitionDispatch **pd,
3217  ResultRelInfo **partitions,
3218  TupleConversionMap ***tup_conv_maps,
3219  TupleTableSlot **partition_tuple_slot,
3220  int *num_parted, int *num_partitions)
3221 {
3222  TupleDesc tupDesc = RelationGetDescr(rel);
3223  List *leaf_parts;
3224  ListCell *cell;
3225  int i;
3226  ResultRelInfo *leaf_part_rri;
3227 
3228  /* Get the tuple-routing information and lock partitions */
3229  *pd = RelationGetPartitionDispatchInfo(rel, RowExclusiveLock, num_parted,
3230  &leaf_parts);
3231  *num_partitions = list_length(leaf_parts);
3232  *partitions = (ResultRelInfo *) palloc(*num_partitions *
3233  sizeof(ResultRelInfo));
3234  *tup_conv_maps = (TupleConversionMap **) palloc0(*num_partitions *
3235  sizeof(TupleConversionMap *));
3236 
3237  /*
3238  * Initialize an empty slot that will be used to manipulate tuples of any
3239  * given partition's rowtype. It is attached to the caller-specified node
3240  * (such as ModifyTableState) and released when the node finishes
3241  * processing.
3242  */
3243  *partition_tuple_slot = MakeTupleTableSlot();
3244 
3245  leaf_part_rri = *partitions;
3246  i = 0;
3247  foreach(cell, leaf_parts)
3248  {
3249  Relation partrel;
3250  TupleDesc part_tupdesc;
3251 
3252  /*
3253  * We locked all the partitions above including the leaf partitions.
 3254  * Note that each of the relations in *partitions is eventually
3255  * closed by the caller.
3256  */
3257  partrel = heap_open(lfirst_oid(cell), NoLock);
3258  part_tupdesc = RelationGetDescr(partrel);
3259 
3260  /*
3261  * Verify result relation is a valid target for the current operation.
3262  */
3263  CheckValidResultRel(partrel, CMD_INSERT);
3264 
3265  /*
3266  * Save a tuple conversion map to convert a tuple routed to this
3267  * partition from the parent's type to the partition's.
3268  */
3269  (*tup_conv_maps)[i] = convert_tuples_by_name(tupDesc, part_tupdesc,
3270  gettext_noop("could not convert row type"));
3271 
3272  InitResultRelInfo(leaf_part_rri,
3273  partrel,
3274  1, /* dummy */
3275  rel,
3276  0);
3277 
3278  /*
3279  * Open partition indices (remember we do not support ON CONFLICT in
3280  * case of partitioned tables, so we do not need support information
3281  * for speculative insertion)
3282  */
3283  if (leaf_part_rri->ri_RelationDesc->rd_rel->relhasindex &&
3284  leaf_part_rri->ri_IndexRelationDescs == NULL)
3285  ExecOpenIndices(leaf_part_rri, false);
3286 
3287  leaf_part_rri++;
3288  i++;
3289  }
3290 }
3291 
3292 /*
3293  * ExecFindPartition -- Find a leaf partition in the partition tree rooted
3294  * at parent, for the heap tuple contained in *slot
3295  *
3296  * estate must be non-NULL; we'll need it to compute any expressions in the
3297  * partition key(s)
3298  *
3299  * If no leaf partition is found, this routine errors out with the appropriate
3300  * error message, else it returns the leaf partition sequence number returned
3301  * by get_partition_for_tuple() unchanged.
3302  */
3303 int
 3304 ExecFindPartition(ResultRelInfo *resultRelInfo, PartitionDispatch *pd,
 3305  TupleTableSlot *slot, EState *estate)
3306 {
3307  int result;
3308  PartitionDispatchData *failed_at;
3309  TupleTableSlot *failed_slot;
3310 
3311  /*
3312  * First check the root table's partition constraint, if any. No point in
 3313  * routing the tuple if it doesn't belong in the root table itself.
3314  */
3315  if (resultRelInfo->ri_PartitionCheck)
3316  ExecPartitionCheck(resultRelInfo, slot, estate);
3317 
3318  result = get_partition_for_tuple(pd, slot, estate,
3319  &failed_at, &failed_slot);
3320  if (result < 0)
3321  {
3322  Relation failed_rel;
3323  Datum key_values[PARTITION_MAX_KEYS];
3324  bool key_isnull[PARTITION_MAX_KEYS];
3325  char *val_desc;
3326  ExprContext *ecxt = GetPerTupleExprContext(estate);
3327 
3328  failed_rel = failed_at->reldesc;
3329  ecxt->ecxt_scantuple = failed_slot;
3330  FormPartitionKeyDatum(failed_at, failed_slot, estate,
3331  key_values, key_isnull);
3332  val_desc = ExecBuildSlotPartitionKeyDescription(failed_rel,
3333  key_values,
3334  key_isnull,
3335  64);
3336  Assert(OidIsValid(RelationGetRelid(failed_rel)));
3337  ereport(ERROR,
3338  (errcode(ERRCODE_CHECK_VIOLATION),
3339  errmsg("no partition of relation \"%s\" found for row",
3340  RelationGetRelationName(failed_rel)),
3341  val_desc ? errdetail("Partition key of the failing row contains %s.", val_desc) : 0));
3342  }
3343 
3344  return result;
3345 }
3346 
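A hedged sketch, not part of execMain.c: it shows how a caller might combine the arrays filled in by ExecSetupPartitionTupleRouting() with ExecFindPartition() to route one tuple, converting it to the chosen leaf partition's rowtype when needed. All local names are invented; do_convert_tuple() and ExecStoreTuple() appear in the code above, and ExecSetSlotDescriptor() is the standard executor routine for retargeting a slot's tuple descriptor.

/*
 * Hypothetical helper (illustration only): route one tuple and return the
 * leaf partition's ResultRelInfo.
 */
static ResultRelInfo *
route_tuple_to_partition(ResultRelInfo *rootRelInfo,
                         PartitionDispatch *pd,
                         ResultRelInfo *partitions,
                         TupleConversionMap **tup_conv_maps,
                         TupleTableSlot *partition_tuple_slot,
                         HeapTuple *tuple, TupleTableSlot **slot,
                         EState *estate)
{
    /* Errors out with "no partition ... found for row" if nothing matches. */
    int         leaf_index = ExecFindPartition(rootRelInfo, pd, *slot, estate);
    ResultRelInfo *leaf_rri = &partitions[leaf_index];

    if (tup_conv_maps[leaf_index] != NULL)
    {
        /* Convert to the leaf rowtype and park it in the dedicated slot. */
        *tuple = do_convert_tuple(*tuple, tup_conv_maps[leaf_index]);
        *slot = partition_tuple_slot;
        ExecSetSlotDescriptor(*slot,
                              RelationGetDescr(leaf_rri->ri_RelationDesc));
        ExecStoreTuple(*tuple, *slot, InvalidBuffer, true);
    }
    return leaf_rri;
}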
3347 /*
 3348  * ExecBuildSlotPartitionKeyDescription
3349  *
3350  * This works very much like BuildIndexValueDescription() and is currently
3351  * used for building error messages when ExecFindPartition() fails to find
 3352  * a partition for a row.
3353  */
3354 static char *
 3355 ExecBuildSlotPartitionKeyDescription(Relation rel,
 3356  Datum *values,
3357  bool *isnull,
3358  int maxfieldlen)
3359 {
 3360  StringInfoData buf;
 3361  PartitionKey key = RelationGetPartitionKey(rel);
 3362  int partnatts = get_partition_natts(key);
3363  int i;
3364  Oid relid = RelationGetRelid(rel);
3365  AclResult aclresult;
3366 
3367  if (check_enable_rls(relid, InvalidOid, true) == RLS_ENABLED)
3368  return NULL;
3369 
3370  /* If the user has table-level access, just go build the description. */
3371  aclresult = pg_class_aclcheck(relid, GetUserId(), ACL_SELECT);
3372  if (aclresult != ACLCHECK_OK)
3373  {
3374  /*
3375  * Step through the columns of the partition key and make sure the
3376  * user has SELECT rights on all of them.
3377  */
3378  for (i = 0; i < partnatts; i++)
3379  {
3380  AttrNumber attnum = get_partition_col_attnum(key, i);
3381 
3382  /*
3383  * If this partition key column is an expression, we return no
3384  * detail rather than try to figure out what column(s) the
3385  * expression includes and if the user has SELECT rights on them.
3386  */
3387  if (attnum == InvalidAttrNumber ||
3388  pg_attribute_aclcheck(relid, attnum, GetUserId(),
3389  ACL_SELECT) != ACLCHECK_OK)
3390  return NULL;
3391  }
3392  }
3393 
3394  initStringInfo(&buf);
3395  appendStringInfo(&buf, "(%s) = (",
3396  pg_get_partkeydef_columns(relid, true));
3397 
3398  for (i = 0; i < partnatts; i++)
3399  {
3400  char *val;
3401  int vallen;
3402 
3403  if (isnull[i])
3404  val = "null";
3405  else
3406  {
3407  Oid foutoid;
3408  bool typisvarlena;
3409 
 3410  getTypeOutputInfo(get_partition_col_typid(key, i),
 3411  &foutoid, &typisvarlena);
3412  val = OidOutputFunctionCall(foutoid, values[i]);
3413  }
3414 
3415  if (i > 0)
3416  appendStringInfoString(&buf, ", ");
3417 
3418  /* truncate if needed */
3419  vallen = strlen(val);
3420  if (vallen <= maxfieldlen)
3421  appendStringInfoString(&buf, val);
3422  else
3423  {
3424  vallen = pg_mbcliplen(val, vallen, maxfieldlen);
3425  appendBinaryStringInfo(&buf, val, vallen);
3426  appendStringInfoString(&buf, "...");
3427  }
3428  }
3429 
3430  appendStringInfoChar(&buf, ')');
3431 
3432  return buf.data;
3433 }
return result
Definition: formatting.c:1633
#define EXEC_FLAG_WITH_NO_DATA
Definition: executor.h:65
int snprintf(char *str, size_t count, const char *fmt,...) pg_attribute_printf(3
#define FirstLowInvalidHeapAttributeNumber
Definition: sysattr.h:28
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
void EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan, List *auxrowmarks)
Definition: execMain.c:2761
AclMode requiredPerms
Definition: parsenodes.h:1039
#define heap_close(r, l)
Definition: heapam.h:97
LockClauseStrength strength
Definition: execnodes.h:537
EState * state
Definition: execnodes.h:834
List * es_range_table
Definition: execnodes.h:431
Form_pg_class rd_rel
Definition: rel.h:114
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1372
unsigned int Oid
Definition: postgres_ext.h:31
static bool ExecQual(ExprState *state, ExprContext *econtext)
Definition: executor.h:346
Index rowmarkId
Definition: plannodes.h:1006
#define ScanDirectionIsNoMovement(direction)
Definition: sdir.h:48
LockWaitPolicy waitPolicy
Definition: plannodes.h:1010
static void CheckValidRowMarkRel(Relation rel, RowMarkType markType)
Definition: execMain.c:1234
AclMode pg_class_aclmask(Oid table_oid, Oid roleid, AclMode mask, AclMaskHow how)
Definition: aclchk.c:3622
#define OidIsValid(objectId)
Definition: c.h:538
ExprState * ExecPrepareExpr(Expr *node, EState *estate)
Definition: execExpr.c:437
#define DatumGetHeapTupleHeader(X)
Definition: fmgr.h:259
#define RowMarkRequiresRowShareLock(marktype)
Definition: plannodes.h:960
int natts
Definition: tupdesc.h:73
PlanState * planstate
Definition: execnodes.h:896
ScanDirection es_direction
Definition: execnodes.h:428
void standard_ExecutorFinish(QueryDesc *queryDesc)
Definition: execMain.c:409
void EvalPlanQualBegin(EPQState *epqstate, EState *parentestate)
Definition: execMain.c:2963
Index ri_RangeTableIndex
Definition: execnodes.h:351
TupleTableSlot * EvalPlanQualNext(EPQState *epqstate)
Definition: execMain.c:2947
struct Plan * planTree
Definition: plannodes.h:61
bool ConditionalXactLockTableWait(TransactionId xid)
Definition: lmgr.c:607
Snapshot snapshot
Definition: execdesc.h:39
int instrument_options
Definition: execdesc.h:44
void ExecOpenIndices(ResultRelInfo *resultRelInfo, bool speculative)
Definition: execIndexing.c:149
void EvalPlanQualEnd(EPQState *epqstate)
Definition: execMain.c:3153
ItemPointerData * ItemPointer
Definition: itemptr.h:49
ExecRowMark * rowmark
Definition: execnodes.h:558
ItemPointerData curCtid
Definition: execnodes.h:540
ExecutorStart_hook_type ExecutorStart_hook
Definition: execMain.c:69
HeapTupleHeader t_data
Definition: htup.h:67
void(* ExecutorStart_hook_type)(QueryDesc *queryDesc, int eflags)
Definition: executor.h:69
List * ri_WithCheckOptionExprs
Definition: execnodes.h:390
void ExecutorEnd(QueryDesc *queryDesc)
Definition: execMain.c:460
LockTupleMode
Definition: heapam.h:38
bool trig_insert_instead_row
Definition: reltrigger.h:57
void FreeExecutorState(EState *estate)
Definition: execUtils.c:178
static int get_partition_natts(PartitionKey key)
Definition: rel.h:596
static int16 get_partition_col_attnum(PartitionKey key, int col)
Definition: rel.h:611
#define GetPerTupleExprContext(estate)
Definition: executor.h:456
int errtableconstraint(Relation rel, const char *conname)
Definition: relcache.c:5297
uint32 AclMode
Definition: parsenodes.h:70
const char * es_sourceText
Definition: execnodes.h:433
int nParamExec
Definition: plannodes.h:92
Bitmapset * selectedCols
Definition: parsenodes.h:1041
ParamExecData * es_param_exec_vals
Definition: execnodes.h:463
QueryEnvironment * queryEnv
Definition: execdesc.h:43
MemoryContext es_query_cxt
Definition: execnodes.h:468
void ExecSetupPartitionTupleRouting(Relation rel, PartitionDispatch **pd, ResultRelInfo **partitions, TupleConversionMap ***tup_conv_maps, TupleTableSlot **partition_tuple_slot, int *num_parted, int *num_partitions)
Definition: execMain.c:3215
bool IsInParallelMode(void)
Definition: xact.c:913
void appendStringInfo(StringInfo str, const char *fmt,...)
Definition: stringinfo.c:78
bool resjunk
Definition: primnodes.h:1375
#define EXEC_FLAG_WITHOUT_OIDS
Definition: executor.h:64
#define ERROR
Definition: elog.h:43
PlanState * planstate
Definition: execdesc.h:49
NodeTag type
Definition: execnodes.h:348
ExecutorRun_hook_type ExecutorRun_hook
Definition: execMain.c:70
ExecutorEnd_hook_type ExecutorEnd_hook
Definition: execMain.c:72
#define lfirst_int(lc)
Definition: pg_list.h:107
void ExecutorRun(QueryDesc *queryDesc, ScanDirection direction, uint64 count, bool execute_once)
Definition: execMain.c:298
bool isnull
Definition: params.h:101
void InstrStartNode(Instrumentation *instr)
Definition: instrument.c:63
#define InitDirtySnapshot(snapshotdata)
Definition: tqual.h:100
ItemPointerData t_ctid
Definition: htup_details.h:150
static char * ExecBuildSlotValueDescription(Oid reloid, TupleTableSlot *slot, TupleDesc tupdesc, Bitmapset *modifiedCols, int maxfieldlen)
Definition: execMain.c:2169
static char * ExecBuildSlotPartitionKeyDescription(Relation rel, Datum *values, bool *isnull, int maxfieldlen)
Definition: execMain.c:3355
ItemPointerData t_self
Definition: htup.h:65
int pg_mbcliplen(const char *mbstr, int len, int limit)
Definition: mbutils.c:831
List * arowMarks
Definition: execnodes.h:899
TriggerDesc * trigdesc
Definition: rel.h:120
bool ri_usesFdwDirectModify
Definition: execnodes.h:384
void ExitParallelMode(void)
Definition: xact.c:893
ExprState ** ri_TrigWhenExprs
Definition: execnodes.h:372
bool list_member_int(const List *list, int datum)
Definition: list.c:485
void appendStringInfoString(StringInfo str, const char *s)
Definition: stringinfo.c:157
uint32 t_len
Definition: htup.h:64
Index rti
Definition: execnodes.h:533
void FormPartitionKeyDatum(PartitionDispatch pd, TupleTableSlot *slot, EState *estate, Datum *values, bool *isnull)
Definition: partition.c:1877
#define NoLock
Definition: lockdefs.h:34
static char * buf
Definition: pg_test_fsync.c:66
void * ermExtra
Definition: execnodes.h:541
bool * tts_isnull
Definition: tuptable.h:126
TupleDesc jf_cleanTupType
Definition: execnodes.h:333
AttrNumber wholeAttNo
Definition: execnodes.h:561
void aclcheck_error(AclResult aclerr, AclObjectKind objectkind, const char *objectname)
Definition: aclchk.c:3399
ResultRelInfo * es_result_relations
Definition: execnodes.h:441
bool hasReturning
Definition: plannodes.h:49
static bool ExecCheckRTEPermsModified(Oid relOid, Oid userid, Bitmapset *modifiedCols, AclMode requiredPerms)
Definition: execMain.c:720
#define RowExclusiveLock
Definition: lockdefs.h:38
Index prti
Definition: execnodes.h:534
JunkFilter * ExecInitJunkFilter(List *targetList, bool hasoid, TupleTableSlot *slot)
Definition: execJunk.c:61
int errdetail(const char *fmt,...)
Definition: elog.c:873
ScanDirection
Definition: sdir.h:22
ParamListInfo params
Definition: execdesc.h:42
List * rootResultRelations
Definition: plannodes.h:79
HeapTuple EvalPlanQualGetTuple(EPQState *epqstate, Index rti)
Definition: execMain.c:2797
TupleTableSlot * es_trig_oldtup_slot
Definition: execnodes.h:458
HTSU_Result
Definition: snapshot.h:119
#define RelationGetRelationName(relation)
Definition: rel.h:436
static bool ExecCheckRTEPerms(RangeTblEntry *rte)
Definition: execMain.c:600
ProjectionInfo * ri_projectReturning
Definition: execnodes.h:399
#define TupIsNull(slot)
Definition: tuptable.h:138
void CheckValidResultRel(Relation resultRel, CmdType operation)
Definition: execMain.c:1099
struct FdwRoutine * ri_FdwRoutine
Definition: execnodes.h:378
bool isTempNamespace(Oid namespaceId)
Definition: namespace.c:3118
Oid t_tableOid
Definition: htup.h:66
TransactionId xmax
Definition: snapshot.h:67
#define RELKIND_FOREIGN_TABLE
Definition: pg_class.h:167
TransactionId xmin
Definition: snapshot.h:66
int es_instrument
Definition: execnodes.h:478
void CheckCmdReplicaIdentity(Relation rel, CmdType cmd)
TupleTableSlot * es_trig_tuple_slot
Definition: execnodes.h:457
WCOKind
Definition: parsenodes.h:1097
TupleTableSlot * origslot
Definition: execnodes.h:897
#define EXEC_FLAG_REWIND
Definition: executor.h:59
#define ereport(elevel, rest)
Definition: elog.h:122
ExprState ** ri_ConstraintExprs
Definition: execnodes.h:393
void slot_getallattrs(TupleTableSlot *slot)
Definition: heaptuple.c:1237
const char * CreateCommandTag(Node *parsetree)
Definition: utility.c:2036
TriggerDesc * ri_TrigDesc
Definition: execnodes.h:366
static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
Definition: execMain.c:3013
Index rowmarkId
Definition: execnodes.h:535
void ExecutorFinish(QueryDesc *queryDesc)
Definition: execMain.c:400
ExecutorCheckPerms_hook_type ExecutorCheckPerms_hook
Definition: execMain.c:75
EState * CreateExecutorState(void)
Definition: execUtils.c:80
Bitmapset * chgParam
Definition: execnodes.h:856
bool has_not_null
Definition: tupdesc.h:43
TupleConversionMap * convert_tuples_by_name(TupleDesc indesc, TupleDesc outdesc, const char *msg)
Definition: tupconvert.c:205
void UnregisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:905
List * lappend(List *list, void *datum)
Definition: list.c:128
QueryEnvironment * es_queryEnv
Definition: execnodes.h:465
bool trig_update_instead_row
Definition: reltrigger.h:62
ResultRelInfo * ExecGetTriggerResultRel(EState *estate, Oid relid)
Definition: execMain.c:1379
bool bms_is_empty(const Bitmapset *a)
Definition: bitmapset.c:663
void appendStringInfoChar(StringInfo str, char ch)
Definition: stringinfo.c:169
void initStringInfo(StringInfo str)
Definition: stringinfo.c:46
int es_num_root_result_relations
Definition: execnodes.h:453
TupleDesc tupDesc
Definition: execdesc.h:47
TransactionId xmax
Definition: heapam.h:71
CmdType operation
Definition: execdesc.h:36
int numtriggers
Definition: reltrigger.h:49
static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
Definition: execMain.c:767
#define AttributeNumberIsValid(attributeNumber)
Definition: attnum.h:34
#define ACL_UPDATE
Definition: parsenodes.h:74
#define RELKIND_PARTITIONED_TABLE
Definition: pg_class.h:168
void ExecutorRewind(QueryDesc *queryDesc)
Definition: execMain.c:526
List * es_trig_target_relations
Definition: execnodes.h:456
bool trig_delete_instead_row
Definition: reltrigger.h:67
Plan * plan
Definition: execnodes.h:898
List * RelationGetPartitionQual(Relation rel)
Definition: partition.c:964
List * es_tupleTable
Definition: execnodes.h:470
#define RowShareLock
Definition: lockdefs.h:37
void ExecResetTupleTable(List *tupleTable, bool shouldFree)
Definition: execTuples.c:156
void * palloc0(Size size)
Definition: mcxt.c:878
List * es_auxmodifytables
Definition: execnodes.h:485
#define RELKIND_TOASTVALUE
Definition: pg_class.h:163
AclResult
Definition: acl.h:170
static Oid get_partition_col_typid(PartitionKey key, int col)
Definition: rel.h:617
uintptr_t Datum
Definition: postgres.h:372
TupleTableSlot * ExecFilterJunk(JunkFilter *junkfilter, TupleTableSlot *slot)
Definition: execJunk.c:262
CmdType commandType
Definition: plannodes.h:45
#define ACL_SELECT
Definition: parsenodes.h:73
#define SnapshotAny
Definition: tqual.h:28
List * ri_WithCheckOptions
Definition: execnodes.h:387
bool already_executed
Definition: execdesc.h:52
int ExecFindPartition(ResultRelInfo *resultRelInfo, PartitionDispatch *pd, TupleTableSlot *slot, EState *estate)
Definition: execMain.c:3304
Snapshot crosscheck_snapshot
Definition: execdesc.h:40
Relation heap_open(Oid relationId, LOCKMODE lockmode)
Definition: heapam.c:1284
int es_num_result_relations
Definition: execnodes.h:442
List * ri_PartitionCheck
Definition: execnodes.h:408
unsigned int Index
Definition: c.h:365
List * rowMarks
Definition: plannodes.h:86
TupleDesc rd_att
Definition: rel.h:115
void EvalPlanQualInit(EPQState *epqstate, EState *estate, Plan *subplan, List *auxrowmarks, int epqParam)
Definition: execMain.c:2742
Plan * plan
Definition: execnodes.h:832
static const char * ExecRelCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate)
Definition: execMain.c:1765
#define InvalidOid
Definition: postgres_ext.h:36
bool es_finished
Definition: execnodes.h:479
void * ri_FdwState
Definition: execnodes.h:381
Bitmapset * updatedCols
Definition: parsenodes.h:1043
bool XactReadOnly
Definition: xact.c:77
ExecForeignUpdate_function ExecForeignUpdate
Definition: fdwapi.h:198
bool slot_attisnull(TupleTableSlot *slot, int attnum)
Definition: heaptuple.c:1328
void AfterTriggerBeginQuery(void)
Definition: trigger.c:4167
int check_enable_rls(Oid relid, Oid checkAsUser, bool noError)
Definition: rls.c:53
struct Instrumentation * totaltime
Definition: execdesc.h:55
#define makeNode(_type_)
Definition: nodes.h:557
List * subplans
Definition: plannodes.h:81
void XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid, XLTW_Oper oper)
Definition: lmgr.c:554
#define NULL
Definition: c.h:229
TriggerDesc * CopyTriggerDesc(TriggerDesc *trigdesc)
Definition: trigger.c:1852
#define Assert(condition)
Definition: c.h:675
#define lfirst(lc)
Definition: pg_list.h:106
Bitmapset * rewindPlanIDs
Definition: plannodes.h:84
int errtablecol(Relation rel, int attnum)
Definition: relcache.c:5260
LockTupleMode ExecUpdateLockMode(EState *estate, ResultRelInfo *relinfo)
Definition: execMain.c:2305
bool hasModifyingCTE
Definition: plannodes.h:51
static void ExecEndPlan(PlanState *planstate, EState *estate)
Definition: execMain.c:1569
#define ACL_INSERT
Definition: parsenodes.h:72
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:214
RowMarkType markType
Definition: execnodes.h:536
uint64 es_processed
Definition: execnodes.h:474
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:307
LockClauseStrength strength
Definition: plannodes.h:1009
AclResult pg_attribute_aclcheck_all(Oid table_oid, Oid roleid, AclMode mode, AclMaskHow how)
Definition: aclchk.c:4337
TupleConstr * constr
Definition: tupdesc.h:76
Bitmapset * bms_union(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:218
static int list_length(const List *l)
Definition: pg_list.h:89
RowMarkType
Definition: plannodes.h:950
TupleTableSlot * ecxt_scantuple
Definition: execnodes.h:197
void EnterParallelMode(void)
Definition: xact.c:880
bool * es_epqTupleSet
Definition: execnodes.h:504
List * es_subplanstates
Definition: execnodes.h:483
AttrNumber toidAttNo
Definition: execnodes.h:560
List * es_rowMarks
Definition: execnodes.h:472
List * rtable
Definition: plannodes.h:63
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:698
HeapTuple ExecMaterializeSlot(TupleTableSlot *slot)
Definition: execTuples.c:725
bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
Definition: itemptr.c:29
#define RelationGetPartitionKey(relation)
Definition: rel.h:584
#define EXEC_FLAG_SKIP_TRIGGERS
Definition: executor.h:62
TupleDesc ExecGetResultType(PlanState *planstate)
Definition: execUtils.c:469
HeapTuple do_convert_tuple(HeapTuple tuple, TupleConversionMap *map)
Definition: tupconvert.c:343
void PreventCommandIfReadOnly(const char *cmdname)
Definition: utility.c:236
#define InvalidAttrNumber
Definition: attnum.h:23
List * targetlist
Definition: plannodes.h:144
HeapTuple ExecFetchSlotTuple(TupleTableSlot *slot)
Definition: execTuples.c:618
AclResult pg_class_aclcheck(Oid table_oid, Oid roleid, AclMode mode)
Definition: aclchk.c:4422
void ExecCleanUpTriggerState(EState *estate)
Definition: execMain.c:1440
#define DatumGetPointer(X)
Definition: postgres.h:555
const char * sourceText
Definition: execdesc.h:38
RTEKind rtekind
Definition: parsenodes.h:936
bool bms_overlap(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:443
PartitionDispatch * RelationGetPartitionDispatchInfo(Relation rel, int lockmode, int *num_parted, List **leaf_part_oids)
Definition: partition.c:1026
static Datum values[MAXATTR]
Definition: bootstrap.c:163
DestReceiver * dest
Definition: execdesc.h:41
#define ItemPointerSetInvalid(pointer)
Definition: itemptr.h:150
void AfterTriggerEndQuery(EState *estate)
Definition: trigger.c:4187
char * OidOutputFunctionCall(Oid functionId, Datum val)
Definition: fmgr.c:1747
void * palloc(Size size)
Definition: mcxt.c:849
int errmsg(const char *fmt,...)
Definition: elog.c:797
FdwRoutine * GetFdwRoutineForRelation(Relation relation, bool makecopy)
Definition: foreign.c:395
#define EXEC_FLAG_WITH_OIDS
Definition: executor.h:63
#define getrelid(rangeindex, rangetable)
Definition: parsetree.h:41
int es_top_eflags
Definition: execnodes.h:477
List * resultRelations
Definition: plannodes.h:66
bool parallelModeNeeded
Definition: plannodes.h:59
Datum ExecGetJunkAttribute(TupleTableSlot *slot, AttrNumber attno, bool *isNull)
Definition: execJunk.c:248
#define RELKIND_VIEW
Definition: pg_class.h:164
ResultRelInfo * es_root_result_relations
Definition: execnodes.h:452
IndexInfo ** ri_IndexRelationInfo
Definition: execnodes.h:363
Bitmapset * insertedCols
Definition: parsenodes.h:1042
int i
Bitmapset * RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind)
Definition: relcache.c:4847
bool * es_epqScanDone
Definition: execnodes.h:505
IsForeignRelUpdatable_function IsForeignRelUpdatable
Definition: fdwapi.h:201
bool ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation)
Definition: execMain.c:569
#define NameStr(name)
Definition: c.h:499
bool(* receiveSlot)(TupleTableSlot *slot, DestReceiver *self)
Definition: dest.h:118
Datum value
Definition: params.h:100
int epqParam
Definition: execnodes.h:900
bool ExecCheck(ExprState *state, ExprContext *econtext)
Definition: execExpr.c:544
ParamListInfo es_param_list_info
Definition: execnodes.h:462
uint16 num_check
Definition: tupdesc.h:42
ExecutorFinish_hook_type ExecutorFinish_hook
Definition: execMain.c:71
bool isParent
Definition: plannodes.h:1011
bool ExecContextForcesOids(PlanState *planstate, bool *hasoids)
Definition: execMain.c:1487
ItemPointerData ctid
Definition: heapam.h:70
CommandId GetCurrentCommandId(bool used)
Definition: xact.c:687
#define elog
Definition: elog.h:219
PlannedStmt * plannedstmt
Definition: execdesc.h:37
#define TransactionIdIsValid(xid)
Definition: transam.h:41
void LockRelationOid(Oid relid, LOCKMODE lockmode)
Definition: lmgr.c:105
EState * estate
Definition: execnodes.h:895
#define RELKIND_RELATION
Definition: pg_class.h:160
PlanState * ExecInitNode(Plan *node, EState *estate, int eflags)
Definition: execProcnode.c:140
void(* ExecutorRun_hook_type)(QueryDesc *queryDesc, ScanDirection direction, uint64 count, bool execute_once)
Definition: executor.h:73
#define RELKIND_SEQUENCE
Definition: pg_class.h:162
Definition: pg_list.h:45
char * get_rel_name(Oid relid)
Definition: lsyscache.c:1726
int Buffer
Definition: buf.h:23
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:420
#define EXEC_FLAG_EXPLAIN_ONLY
Definition: executor.h:58
int16 AttrNumber
Definition: attnum.h:21
#define RelationGetRelid(relation)
Definition: rel.h:416
LockWaitPolicy
Definition: lockoptions.h:36
void appendBinaryStringInfo(StringInfo str, const char *data, int datalen)
Definition: stringinfo.c:208
long val
Definition: informix.c:689
static void InitPlan(QueryDesc *queryDesc, int eflags)
Definition: execMain.c:804
CmdType
Definition: nodes.h:649
void ExecCloseIndices(ResultRelInfo *resultRelInfo)
Definition: execIndexing.c:224
AttrNumber ctidAttNo
Definition: execnodes.h:559
RelationPtr ri_IndexRelationDescs
Definition: execnodes.h:360
ExecAuxRowMark * ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
Definition: execMain.c:2355
#define GetInsertedColumns(relinfo, estate)
Definition: execMain.c:115
FmgrInfo * ri_TrigFunctions
Definition: execnodes.h:369
#define lfirst_oid(lc)
Definition: pg_list.h:108
RefetchForeignRow_function RefetchForeignRow
Definition: fdwapi.h:209
ExecRowMark * ExecFindRowMark(EState *estate, Index rti, bool missing_ok)
Definition: execMain.c:2331
static void ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate)
Definition: execMain.c:1827
static void ExecutePlan(EState *estate, PlanState *planstate, bool use_parallel_mode, CmdType operation, bool sendTuples, uint64 numberTuples, ScanDirection direction, DestReceiver *dest, bool execute_once)
Definition: execMain.c:1647
#define HeapTupleHeaderGetDatumLength(tup)
Definition: htup_details.h:439
ResultRelInfo * es_result_relation_info
Definition: execnodes.h:443