1 /*-------------------------------------------------------------------------
2  *
3  * pgoutput.c
4  * Logical Replication output plugin
5  *
6  * Copyright (c) 2012-2022, PostgreSQL Global Development Group
7  *
8  * IDENTIFICATION
9  * src/backend/replication/pgoutput/pgoutput.c
10  *
11  *-------------------------------------------------------------------------
12  */
13 #include "postgres.h"
14 
15 #include "access/tupconvert.h"
16 #include "catalog/partition.h"
17 #include "catalog/pg_publication.h"
18 #include "catalog/pg_publication_rel.h"
19 #include "catalog/pg_subscription.h"
20 #include "commands/defrem.h"
21 #include "executor/executor.h"
22 #include "fmgr.h"
23 #include "nodes/makefuncs.h"
24 #include "optimizer/optimizer.h"
25 #include "replication/logical.h"
26 #include "replication/logicalproto.h"
27 #include "replication/origin.h"
28 #include "replication/pgoutput.h"
29 #include "utils/builtins.h"
30 #include "utils/inval.h"
31 #include "utils/lsyscache.h"
32 #include "utils/memutils.h"
33 #include "utils/rel.h"
34 #include "utils/syscache.h"
35 #include "utils/varlena.h"
36 
37 PG_MODULE_MAGIC;
38 
39 static void pgoutput_startup(LogicalDecodingContext *ctx,
40  OutputPluginOptions *opt, bool is_init);
41 static void pgoutput_shutdown(LogicalDecodingContext *ctx);
42 static void pgoutput_begin_txn(LogicalDecodingContext *ctx,
43  ReorderBufferTXN *txn);
44 static void pgoutput_commit_txn(LogicalDecodingContext *ctx,
45  ReorderBufferTXN *txn, XLogRecPtr commit_lsn);
46 static void pgoutput_change(LogicalDecodingContext *ctx,
47  ReorderBufferTXN *txn, Relation relation,
48  ReorderBufferChange *change);
49 static void pgoutput_truncate(LogicalDecodingContext *ctx,
50  ReorderBufferTXN *txn, int nrelations, Relation relations[],
51  ReorderBufferChange *change);
52 static void pgoutput_message(LogicalDecodingContext *ctx,
53  ReorderBufferTXN *txn, XLogRecPtr message_lsn,
54  bool transactional, const char *prefix,
55  Size sz, const char *message);
56 static bool pgoutput_origin_filter(LogicalDecodingContext *ctx,
57  RepOriginId origin_id);
58 static void pgoutput_begin_prepare_txn(LogicalDecodingContext *ctx,
59  ReorderBufferTXN *txn);
60 static void pgoutput_prepare_txn(LogicalDecodingContext *ctx,
61  ReorderBufferTXN *txn, XLogRecPtr prepare_lsn);
62 static void pgoutput_commit_prepared_txn(LogicalDecodingContext *ctx,
63  ReorderBufferTXN *txn, XLogRecPtr commit_lsn);
64 static void pgoutput_rollback_prepared_txn(LogicalDecodingContext *ctx,
65  ReorderBufferTXN *txn,
66  XLogRecPtr prepare_end_lsn,
67  TimestampTz prepare_time);
68 static void pgoutput_stream_start(struct LogicalDecodingContext *ctx,
69  ReorderBufferTXN *txn);
70 static void pgoutput_stream_stop(struct LogicalDecodingContext *ctx,
71  ReorderBufferTXN *txn);
72 static void pgoutput_stream_abort(struct LogicalDecodingContext *ctx,
73  ReorderBufferTXN *txn,
74  XLogRecPtr abort_lsn);
75 static void pgoutput_stream_commit(struct LogicalDecodingContext *ctx,
76  ReorderBufferTXN *txn,
77  XLogRecPtr commit_lsn);
78 static void pgoutput_stream_prepare_txn(LogicalDecodingContext *ctx,
79  ReorderBufferTXN *txn, XLogRecPtr prepare_lsn);
80 
81 static bool publications_valid;
82 static bool in_streaming;
83 static bool publish_no_origin;
84 
85 static List *LoadPublications(List *pubnames);
86 static void publication_invalidation_cb(Datum arg, int cacheid,
87  uint32 hashvalue);
88 static void send_relation_and_attrs(Relation relation, TransactionId xid,
89  LogicalDecodingContext *ctx,
90  Bitmapset *columns);
91 static void send_repl_origin(LogicalDecodingContext *ctx,
92  RepOriginId origin_id, XLogRecPtr origin_lsn,
93  bool send_origin);
94 static void update_replication_progress(LogicalDecodingContext *ctx,
95  bool skipped_xact);
96 
97 /*
98  * Only 3 publication actions are used for row filtering ("insert", "update",
99  * "delete"). See RelationSyncEntry.exprstate[].
100  */
101 enum RowFilterPubAction
102 {
103  PUBACTION_INSERT,
104  PUBACTION_UPDATE,
105  PUBACTION_DELETE
106 };
107 
108 #define NUM_ROWFILTER_PUBACTIONS (PUBACTION_DELETE+1)
109 
110 /*
111  * Entry in the map used to remember which relation schemas we sent.
112  *
113  * The schema_sent flag determines if the current schema record for the
114  * relation (and for its ancestor if publish_as_relid is set) was already
115  * sent to the subscriber (in which case we don't need to send it again).
116  *
117  * The schema cache on downstream is however updated only at commit time,
118  * and with streamed transactions the commit order may be different from
119  * the order the transactions are sent in. Also, the (sub) transactions
120  * might get aborted so we need to send the schema for each (sub) transaction
121  * so that we don't lose the schema information on abort. For handling this,
122  * we maintain the list of xids (streamed_txns) for which we have already sent
123  * the schema.
124  *
125  * For partitions, 'pubactions' considers not only the table's own
126  * publications, but also those of all of its ancestors.
127  */
128 typedef struct RelationSyncEntry
129 {
130  Oid relid; /* relation oid */
131 
132  bool replicate_valid; /* overall validity flag for entry */
133 
134  bool schema_sent;
135  List *streamed_txns; /* streamed toplevel transactions with this
136  * schema */
137 
138  /* are we publishing this rel? */
139  PublicationActions pubactions;
140 
141  /*
142  * ExprState array for row filter. Different publication actions don't
143  * always allow multiple expressions to be combined into one, because
144  * updates and deletes require every column referenced in the expression to
145  * be part of the replica identity index, whereas inserts have no such
146  * restriction, so there is one ExprState per publication action.
147  */
148  ExprState *exprstate[NUM_ROWFILTER_PUBACTIONS];
149  EState *estate; /* executor state used for row filter */
150  TupleTableSlot *new_slot; /* slot for storing new tuple */
151  TupleTableSlot *old_slot; /* slot for storing old tuple */
152 
153  /*
154  * OID of the relation to publish changes as. For a partition, this may
155  * be set to one of its ancestors whose schema will be used when
156  * replicating changes, if publish_via_partition_root is set for the
157  * publication.
158  */
159  Oid publish_as_relid;
160 
161  /*
162  * Map used when replicating using an ancestor's schema to convert tuples
163  * from partition's type to the ancestor's; NULL if publish_as_relid is
164  * same as 'relid' or if unnecessary due to partition and the ancestor
165  * having identical TupleDesc.
166  */
167  AttrMap *attrmap;
168 
169  /*
170  * Columns included in the publication, or NULL if all columns are
171  * included implicitly. Note that the attnums in this bitmap are not
172  * shifted by FirstLowInvalidHeapAttributeNumber.
173  */
174  Bitmapset *columns;
175 
176  /*
177  * Private context to store additional data for this entry - state for the
178  * row filter expressions, column list, etc.
179  */
180  MemoryContext entry_cxt;
181 } RelationSyncEntry;
182 
183 /*
184  * Maintain a per-transaction level variable to track whether the transaction
185  * has sent BEGIN. BEGIN is only sent when the first change in a transaction
186  * is processed. This makes it possible to skip sending a pair of BEGIN/COMMIT
187  * messages for empty transactions which saves network bandwidth.
188  *
189  * This optimization is not used for prepared transactions because if the
190  * WALSender restarts after prepare of a transaction and before commit prepared
191  * of the same transaction then we won't be able to figure out if we have
192  * skipped sending BEGIN/PREPARE of a transaction as it was empty. This is
193  * because we would have lost the in-memory txndata information that was
194  * present prior to the restart. This will result in sending a spurious
195  * COMMIT PREPARED without a corresponding prepared transaction at the
196  * downstream which would lead to an error when it tries to process it.
197  *
198  * XXX We could achieve this optimization by changing protocol to send
199  * additional information so that downstream can detect that the corresponding
200  * prepare has not been sent. However, adding such a check for every
201  * transaction in the downstream could be costly so we might want to do it
202  * optionally.
203  *
204  * We also don't have this optimization for streamed transactions because
205  * they can contain prepared transactions.
206  */
207 typedef struct PGOutputTxnData
208 {
209  bool sent_begin_txn; /* flag indicating whether BEGIN has been sent */
210 } PGOutputTxnData;
211 
212 /* Map used to remember which relation schemas we sent. */
213 static HTAB *RelationSyncCache = NULL;
214 
215 static void init_rel_sync_cache(MemoryContext cachectx);
216 static void cleanup_rel_sync_cache(TransactionId xid, bool is_commit);
217 static RelationSyncEntry *get_rel_sync_entry(PGOutputData *data,
218  Relation relation);
219 static void rel_sync_cache_relation_cb(Datum arg, Oid relid);
220 static void rel_sync_cache_publication_cb(Datum arg, int cacheid,
221  uint32 hashvalue);
222 static void set_schema_sent_in_streamed_txn(RelationSyncEntry *entry,
223  TransactionId xid);
224 static bool get_schema_sent_in_streamed_txn(RelationSyncEntry *entry,
225  TransactionId xid);
226 static void init_tuple_slot(PGOutputData *data, Relation relation,
227  RelationSyncEntry *entry);
228 
229 /* row filter routines */
230 static EState *create_estate_for_relation(Relation rel);
231 static void pgoutput_row_filter_init(PGOutputData *data,
232  List *publications,
233  RelationSyncEntry *entry);
234 static bool pgoutput_row_filter_exec_expr(ExprState *state,
235  ExprContext *econtext);
236 static bool pgoutput_row_filter(Relation relation, TupleTableSlot *old_slot,
237  TupleTableSlot **new_slot_ptr,
238  RelationSyncEntry *entry,
239  ReorderBufferChangeType *action);
240 
241 /* column list routines */
242 static void pgoutput_column_list_init(PGOutputData *data,
243  List *publications,
244  RelationSyncEntry *entry);
245 
246 /*
247  * Specify output plugin callbacks
248  */
249 void
250 _PG_output_plugin_init(OutputPluginCallbacks *cb)
251 {
252  AssertVariableIsOfType(&_PG_output_plugin_init, LogicalOutputPluginInit);
253 
254  cb->startup_cb = pgoutput_startup;
255  cb->begin_cb = pgoutput_begin_txn;
256  cb->change_cb = pgoutput_change;
257  cb->truncate_cb = pgoutput_truncate;
258  cb->message_cb = pgoutput_message;
259  cb->commit_cb = pgoutput_commit_txn;
260 
261  cb->begin_prepare_cb = pgoutput_begin_prepare_txn;
262  cb->prepare_cb = pgoutput_prepare_txn;
263  cb->commit_prepared_cb = pgoutput_commit_prepared_txn;
264  cb->rollback_prepared_cb = pgoutput_rollback_prepared_txn;
265  cb->filter_by_origin_cb = pgoutput_origin_filter;
266  cb->shutdown_cb = pgoutput_shutdown;
267 
268  /* transaction streaming */
269  cb->stream_start_cb = pgoutput_stream_start;
270  cb->stream_stop_cb = pgoutput_stream_stop;
271  cb->stream_abort_cb = pgoutput_stream_abort;
272  cb->stream_commit_cb = pgoutput_stream_commit;
273  cb->stream_change_cb = pgoutput_change;
274  cb->stream_message_cb = pgoutput_message;
275  cb->stream_truncate_cb = pgoutput_truncate;
276  /* transaction streaming - two-phase commit */
277  cb->stream_prepare_cb = pgoutput_stream_prepare_txn;
278 }
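For comparison, the registration above wires up every optional callback that pgoutput supports. A bare-bones output plugin only has to provide begin, change and commit callbacks. The sketch below is a separate, hypothetical extension (names such as min_begin are made up, not part of pgoutput.c); its callbacks just emit short text messages through the plugin's output buffer.

/* minimal_output.c - hypothetical, minimal logical decoding output plugin */
#include "postgres.h"

#include "lib/stringinfo.h"
#include "replication/logical.h"
#include "replication/output_plugin.h"
#include "utils/rel.h"

PG_MODULE_MAGIC;

static void min_begin(LogicalDecodingContext *ctx, ReorderBufferTXN *txn);
static void min_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
                       Relation relation, ReorderBufferChange *change);
static void min_commit(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
                       XLogRecPtr commit_lsn);

void
_PG_output_plugin_init(OutputPluginCallbacks *cb)
{
    /* begin_cb, change_cb and commit_cb are the only mandatory callbacks */
    cb->begin_cb = min_begin;
    cb->change_cb = min_change;
    cb->commit_cb = min_commit;
}

static void
min_begin(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
{
    OutputPluginPrepareWrite(ctx, true);
    appendStringInfo(ctx->out, "BEGIN %u", txn->xid);
    OutputPluginWrite(ctx, true);
}

static void
min_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
           Relation relation, ReorderBufferChange *change)
{
    OutputPluginPrepareWrite(ctx, true);
    appendStringInfo(ctx->out, "change on \"%s\"",
                     RelationGetRelationName(relation));
    OutputPluginWrite(ctx, true);
}

static void
min_commit(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
           XLogRecPtr commit_lsn)
{
    OutputPluginPrepareWrite(ctx, true);
    appendStringInfoString(ctx->out, "COMMIT");
    OutputPluginWrite(ctx, true);
}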
279 
280 static void
281 parse_output_parameters(List *options, PGOutputData *data)
282 {
283  ListCell *lc;
284  bool protocol_version_given = false;
285  bool publication_names_given = false;
286  bool binary_option_given = false;
287  bool messages_option_given = false;
288  bool streaming_given = false;
289  bool two_phase_option_given = false;
290  bool origin_option_given = false;
291 
292  data->binary = false;
293  data->streaming = false;
294  data->messages = false;
295  data->two_phase = false;
296 
297  foreach(lc, options)
298  {
299  DefElem *defel = (DefElem *) lfirst(lc);
300 
301  Assert(defel->arg == NULL || IsA(defel->arg, String));
302 
303  /* Check each param, whether or not we recognize it */
304  if (strcmp(defel->defname, "proto_version") == 0)
305  {
306  unsigned long parsed;
307  char *endptr;
308 
309  if (protocol_version_given)
310  ereport(ERROR,
311  (errcode(ERRCODE_SYNTAX_ERROR),
312  errmsg("conflicting or redundant options")));
313  protocol_version_given = true;
314 
315  errno = 0;
316  parsed = strtoul(strVal(defel->arg), &endptr, 10);
317  if (errno != 0 || *endptr != '\0')
318  ereport(ERROR,
319  (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
320  errmsg("invalid proto_version")));
321 
322  if (parsed > PG_UINT32_MAX)
323  ereport(ERROR,
324  (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
325  errmsg("proto_version \"%s\" out of range",
326  strVal(defel->arg))));
327 
328  data->protocol_version = (uint32) parsed;
329  }
330  else if (strcmp(defel->defname, "publication_names") == 0)
331  {
332  if (publication_names_given)
333  ereport(ERROR,
334  (errcode(ERRCODE_SYNTAX_ERROR),
335  errmsg("conflicting or redundant options")));
336  publication_names_given = true;
337 
338  if (!SplitIdentifierString(strVal(defel->arg), ',',
339  &data->publication_names))
340  ereport(ERROR,
341  (errcode(ERRCODE_INVALID_NAME),
342  errmsg("invalid publication_names syntax")));
343  }
344  else if (strcmp(defel->defname, "binary") == 0)
345  {
346  if (binary_option_given)
347  ereport(ERROR,
348  (errcode(ERRCODE_SYNTAX_ERROR),
349  errmsg("conflicting or redundant options")));
350  binary_option_given = true;
351 
352  data->binary = defGetBoolean(defel);
353  }
354  else if (strcmp(defel->defname, "messages") == 0)
355  {
356  if (messages_option_given)
357  ereport(ERROR,
358  (errcode(ERRCODE_SYNTAX_ERROR),
359  errmsg("conflicting or redundant options")));
360  messages_option_given = true;
361 
362  data->messages = defGetBoolean(defel);
363  }
364  else if (strcmp(defel->defname, "streaming") == 0)
365  {
366  if (streaming_given)
367  ereport(ERROR,
368  (errcode(ERRCODE_SYNTAX_ERROR),
369  errmsg("conflicting or redundant options")));
370  streaming_given = true;
371 
372  data->streaming = defGetBoolean(defel);
373  }
374  else if (strcmp(defel->defname, "two_phase") == 0)
375  {
376  if (two_phase_option_given)
377  ereport(ERROR,
378  (errcode(ERRCODE_SYNTAX_ERROR),
379  errmsg("conflicting or redundant options")));
380  two_phase_option_given = true;
381 
382  data->two_phase = defGetBoolean(defel);
383  }
384  else if (strcmp(defel->defname, "origin") == 0)
385  {
386  if (origin_option_given)
387  ereport(ERROR,
388  errcode(ERRCODE_SYNTAX_ERROR),
389  errmsg("conflicting or redundant options"));
390  origin_option_given = true;
391 
392  data->origin = defGetString(defel);
393  if (pg_strcasecmp(data->origin, LOGICALREP_ORIGIN_NONE) == 0)
394  publish_no_origin = true;
395  else if (pg_strcasecmp(data->origin, LOGICALREP_ORIGIN_ANY) == 0)
396  publish_no_origin = false;
397  else
398  ereport(ERROR,
399  errcode(ERRCODE_INVALID_PARAMETER_VALUE),
400  errmsg("unrecognized origin value: \"%s\"", data->origin));
401  }
402  else
403  elog(ERROR, "unrecognized pgoutput option: %s", defel->defname);
404  }
405 }
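The options parsed above are supplied by the client when it starts decoding a slot that uses pgoutput. Below is a hedged sketch of driving this from C with libpq through the SQL interface; the slot name sub_slot and the publication mypub are assumptions, and proto_version '3' is simply one value inside the range that pgoutput_startup() accepts.

/* peek_changes.c - sketch: pass pgoutput options through the SQL interface */
#include <stdio.h>
#include <stdlib.h>
#include <libpq-fe.h>

int
main(void)
{
    /*
     * Assumes a slot created with
     *   SELECT pg_create_logical_replication_slot('sub_slot', 'pgoutput');
     * and an existing publication "mypub".
     */
    PGconn     *conn = PQconnectdb("dbname=postgres");
    PGresult   *res;

    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        PQfinish(conn);
        return EXIT_FAILURE;
    }

    /* The option names match the ones handled in parse_output_parameters(). */
    res = PQexec(conn,
                 "SELECT count(*) FROM pg_logical_slot_peek_binary_changes("
                 "'sub_slot', NULL, NULL, "
                 "'proto_version', '3', "
                 "'publication_names', 'mypub', "
                 "'messages', 'true')");

    if (PQresultStatus(res) == PGRES_TUPLES_OK)
        printf("pending pgoutput messages: %s\n", PQgetvalue(res, 0, 0));
    else
        fprintf(stderr, "query failed: %s", PQerrorMessage(conn));

    PQclear(res);
    PQfinish(conn);
    return EXIT_SUCCESS;
}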
406 
407 /*
408  * Initialize this plugin
409  */
410 static void
411 pgoutput_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt,
412  bool is_init)
413 {
414  PGOutputData *data = palloc0(sizeof(PGOutputData));
415 
416  /* Create our memory context for private allocations. */
417  data->context = AllocSetContextCreate(ctx->context,
418  "logical replication output context",
419  ALLOCSET_DEFAULT_SIZES);
420 
421  data->cachectx = AllocSetContextCreate(ctx->context,
422  "logical replication cache context",
423  ALLOCSET_DEFAULT_SIZES);
424 
425  ctx->output_plugin_private = data;
426 
427  /* This plugin uses binary protocol. */
428  opt->output_type = OUTPUT_PLUGIN_BINARY_OUTPUT;
429 
430  /*
431  * This is replication start and not slot initialization.
432  *
433  * Parse and validate options passed by the client.
434  */
435  if (!is_init)
436  {
437  /* Parse the params and ERROR if we see any we don't recognize */
438  parse_output_parameters(ctx->output_plugin_options, data);
439 
440  /* Check if we support requested protocol */
441  if (data->protocol_version > LOGICALREP_PROTO_MAX_VERSION_NUM)
442  ereport(ERROR,
443  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
444  errmsg("client sent proto_version=%d but server only supports protocol %d or lower",
445  data->protocol_version, LOGICALREP_PROTO_MAX_VERSION_NUM)));
446 
447  if (data->protocol_version < LOGICALREP_PROTO_MIN_VERSION_NUM)
448  ereport(ERROR,
449  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
450  errmsg("client sent proto_version=%d but server only supports protocol %d or higher",
451  data->protocol_version, LOGICALREP_PROTO_MIN_VERSION_NUM)));
452 
453  if (data->publication_names == NIL)
454  ereport(ERROR,
455  (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
456  errmsg("publication_names parameter missing")));
457 
458  /*
459  * Decide whether to enable streaming. It is disabled by default, in
460  * which case we just update the flag in decoding context. Otherwise
461  * we only allow it with sufficient version of the protocol, and when
462  * the output plugin supports it.
463  */
464  if (!data->streaming)
465  ctx->streaming = false;
466  else if (data->protocol_version < LOGICALREP_PROTO_STREAM_VERSION_NUM)
467  ereport(ERROR,
468  (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
469  errmsg("requested proto_version=%d does not support streaming, need %d or higher",
470  data->protocol_version, LOGICALREP_PROTO_STREAM_VERSION_NUM)));
471  else if (!ctx->streaming)
472  ereport(ERROR,
473  (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
474  errmsg("streaming requested, but not supported by output plugin")));
475 
476  /* Also remember we're currently not streaming any transaction. */
477  in_streaming = false;
478 
479  /*
480  * Here, we just check whether the two-phase option is passed by
481  * plugin and decide whether to enable it at later point of time. It
482  * remains enabled if the previous start-up has done so. But we only
483  * allow the option to be passed in with sufficient version of the
484  * protocol, and when the output plugin supports it.
485  */
486  if (!data->two_phase)
487  ctx->twophase_opt_given = false;
488  else if (data->protocol_version < LOGICALREP_PROTO_TWOPHASE_VERSION_NUM)
489  ereport(ERROR,
490  (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
491  errmsg("requested proto_version=%d does not support two-phase commit, need %d or higher",
492  data->protocol_version, LOGICALREP_PROTO_TWOPHASE_VERSION_NUM)));
493  else if (!ctx->twophase)
494  ereport(ERROR,
495  (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
496  errmsg("two-phase commit requested, but not supported by output plugin")));
497  else
498  ctx->twophase_opt_given = true;
499 
500  /* Init publication state. */
501  data->publications = NIL;
502  publications_valid = false;
503  CacheRegisterSyscacheCallback(PUBLICATIONOID,
504  publication_invalidation_cb,
505  (Datum) 0);
506 
507  /* Initialize relation schema cache. */
508  init_rel_sync_cache(CacheMemoryContext);
509  }
510  else
511  {
512  /*
513  * Disable the streaming and prepared transactions during the slot
514  * initialization mode.
515  */
516  ctx->streaming = false;
517  ctx->twophase = false;
518  }
519 }
520 
521 /*
522  * BEGIN callback.
523  *
524  * Don't send the BEGIN message here; instead, postpone it until the first
525  * change. In logical replication, a common scenario is to replicate a set of
526  * tables (instead of all tables), and transactions whose changes were only on
527  * tables that are not published will produce empty transactions. These
528  * empty transactions will send BEGIN and COMMIT messages to subscribers,
529  * using bandwidth on something with little/no use for logical replication.
530  */
531 static void
532 pgoutput_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
533 {
534  PGOutputTxnData *txndata = MemoryContextAllocZero(ctx->context,
535  sizeof(PGOutputTxnData));
536 
537  txn->output_plugin_private = txndata;
538 }
539 
540 /*
541  * Send BEGIN.
542  *
543  * This is called while processing the first change of the transaction.
544  */
545 static void
546 pgoutput_send_begin(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
547 {
548  bool send_replication_origin = txn->origin_id != InvalidRepOriginId;
549  PGOutputTxnData *txndata = (PGOutputTxnData *) txn->output_plugin_private;
550 
551  Assert(txndata);
552  Assert(!txndata->sent_begin_txn);
553 
554  OutputPluginPrepareWrite(ctx, !send_replication_origin);
555  logicalrep_write_begin(ctx->out, txn);
556  txndata->sent_begin_txn = true;
557 
558  send_repl_origin(ctx, txn->origin_id, txn->origin_lsn,
559  send_replication_origin);
560 
561  OutputPluginWrite(ctx, true);
562 }
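Every data-emitting callback below guards its output with the same check before writing anything, which is what makes the empty-transaction optimization described above work. A condensed sketch of that guard (not a separate helper in this file, just the recurring pattern):

    /* sketch of the guard used by pgoutput_change(), pgoutput_truncate()
     * and transactional pgoutput_message() before any payload is written */
    PGOutputTxnData *txndata = (PGOutputTxnData *) txn->output_plugin_private;

    if (txndata && !txndata->sent_begin_txn)
        pgoutput_send_begin(ctx, txn);   /* BEGIN goes out at most once */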
563 
564 /*
565  * COMMIT callback
566  */
567 static void
568 pgoutput_commit_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
569  XLogRecPtr commit_lsn)
570 {
571  PGOutputTxnData *txndata = (PGOutputTxnData *) txn->output_plugin_private;
572  bool sent_begin_txn;
573 
574  Assert(txndata);
575 
576  /*
577  * We don't need to send the commit message unless some relevant change
578  * from this transaction has been sent to the downstream.
579  */
580  sent_begin_txn = txndata->sent_begin_txn;
581  update_replication_progress(ctx, !sent_begin_txn);
582  pfree(txndata);
583  txn->output_plugin_private = NULL;
584 
585  if (!sent_begin_txn)
586  {
587  elog(DEBUG1, "skipped replication of an empty transaction with XID: %u", txn->xid);
588  return;
589  }
590 
591  OutputPluginPrepareWrite(ctx, true);
592  logicalrep_write_commit(ctx->out, txn, commit_lsn);
593  OutputPluginWrite(ctx, true);
594 }
595 
596 /*
597  * BEGIN PREPARE callback
598  */
599 static void
600 pgoutput_begin_prepare_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
601 {
602  bool send_replication_origin = txn->origin_id != InvalidRepOriginId;
603 
604  OutputPluginPrepareWrite(ctx, !send_replication_origin);
605  logicalrep_write_begin_prepare(ctx->out, txn);
606 
607  send_repl_origin(ctx, txn->origin_id, txn->origin_lsn,
608  send_replication_origin);
609 
610  OutputPluginWrite(ctx, true);
611 }
612 
613 /*
614  * PREPARE callback
615  */
616 static void
617 pgoutput_prepare_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
618  XLogRecPtr prepare_lsn)
619 {
620  update_replication_progress(ctx, false);
621 
622  OutputPluginPrepareWrite(ctx, true);
623  logicalrep_write_prepare(ctx->out, txn, prepare_lsn);
624  OutputPluginWrite(ctx, true);
625 }
626 
627 /*
628  * COMMIT PREPARED callback
629  */
630 static void
631 pgoutput_commit_prepared_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
632  XLogRecPtr commit_lsn)
633 {
634  update_replication_progress(ctx, false);
635 
636  OutputPluginPrepareWrite(ctx, true);
637  logicalrep_write_commit_prepared(ctx->out, txn, commit_lsn);
638  OutputPluginWrite(ctx, true);
639 }
640 
641 /*
642  * ROLLBACK PREPARED callback
643  */
644 static void
645 pgoutput_rollback_prepared_txn(LogicalDecodingContext *ctx,
646  ReorderBufferTXN *txn,
647  XLogRecPtr prepare_end_lsn,
648  TimestampTz prepare_time)
649 {
650  update_replication_progress(ctx, false);
651 
652  OutputPluginPrepareWrite(ctx, true);
653  logicalrep_write_rollback_prepared(ctx->out, txn, prepare_end_lsn,
654  prepare_time);
655  OutputPluginWrite(ctx, true);
656 }
657 
658 /*
659  * Write the current schema of the relation and its ancestor (if any) if not
660  * done yet.
661  */
662 static void
663 maybe_send_schema(LogicalDecodingContext *ctx,
664  ReorderBufferChange *change,
665  Relation relation, RelationSyncEntry *relentry)
666 {
667  bool schema_sent;
668  TransactionId xid = InvalidTransactionId;
669  TransactionId topxid = InvalidTransactionId;
670 
671  /*
672  * Remember XID of the (sub)transaction for the change. We don't care if
673  * it's top-level transaction or not (we have already sent that XID in
674  * start of the current streaming block).
675  *
676  * If we're not in a streaming block, just use InvalidTransactionId and
677  * the write methods will not include it.
678  */
679  if (in_streaming)
680  xid = change->txn->xid;
681 
682  if (change->txn->toptxn)
683  topxid = change->txn->toptxn->xid;
684  else
685  topxid = xid;
686 
687  /*
688  * Do we need to send the schema? We do track streamed transactions
689  * separately, because those may be applied later (and the regular
690  * transactions won't see their effects until then) and in an order that
691  * we don't know at this point.
692  *
693  * XXX There is a scope of optimization here. Currently, we always send
694  * the schema first time in a streaming transaction but we can probably
695  * avoid that by checking 'relentry->schema_sent' flag. However, before
696  * doing that we need to study its impact on the case where we have a mix
697  * of streaming and non-streaming transactions.
698  */
699  if (in_streaming)
700  schema_sent = get_schema_sent_in_streamed_txn(relentry, topxid);
701  else
702  schema_sent = relentry->schema_sent;
703 
704  /* Nothing to do if we already sent the schema. */
705  if (schema_sent)
706  return;
707 
708  /*
709  * Send the schema. If the changes will be published using an ancestor's
710  * schema, not the relation's own, send that ancestor's schema before
711  * sending relation's own (XXX - maybe sending only the former suffices?).
712  */
713  if (relentry->publish_as_relid != RelationGetRelid(relation))
714  {
715  Relation ancestor = RelationIdGetRelation(relentry->publish_as_relid);
716 
717  send_relation_and_attrs(ancestor, xid, ctx, relentry->columns);
718  RelationClose(ancestor);
719  }
720 
721  send_relation_and_attrs(relation, xid, ctx, relentry->columns);
722 
723  if (in_streaming)
724  set_schema_sent_in_streamed_txn(relentry, topxid);
725  else
726  relentry->schema_sent = true;
727 }
728 
729 /*
730  * Sends a relation
731  */
732 static void
733 send_relation_and_attrs(Relation relation, TransactionId xid,
734  LogicalDecodingContext *ctx,
735  Bitmapset *columns)
736 {
737  TupleDesc desc = RelationGetDescr(relation);
738  int i;
739 
740  /*
741  * Write out type info if needed. We do that only for user-created types.
742  * We use FirstGenbkiObjectId as the cutoff, so that we only consider
743  * objects with hand-assigned OIDs to be "built in", not for instance any
744  * function or type defined in the information_schema. This is important
745  * because only hand-assigned OIDs can be expected to remain stable across
746  * major versions.
747  */
748  for (i = 0; i < desc->natts; i++)
749  {
750  Form_pg_attribute att = TupleDescAttr(desc, i);
751 
752  if (att->attisdropped || att->attgenerated)
753  continue;
754 
755  if (att->atttypid < FirstGenbkiObjectId)
756  continue;
757 
758  /* Skip this attribute if it's not present in the column list */
759  if (columns != NULL && !bms_is_member(att->attnum, columns))
760  continue;
761 
762  OutputPluginPrepareWrite(ctx, false);
763  logicalrep_write_typ(ctx->out, xid, att->atttypid);
764  OutputPluginWrite(ctx, false);
765  }
766 
767  OutputPluginPrepareWrite(ctx, false);
768  logicalrep_write_rel(ctx->out, xid, relation, columns);
769  OutputPluginWrite(ctx, false);
770 }
771 
772 /*
773  * Executor state preparation for evaluation of row filter expressions for the
774  * specified relation.
775  */
776 static EState *
777 create_estate_for_relation(Relation rel)
778 {
779  EState *estate;
780  RangeTblEntry *rte;
781 
782  estate = CreateExecutorState();
783 
784  rte = makeNode(RangeTblEntry);
785  rte->rtekind = RTE_RELATION;
786  rte->relid = RelationGetRelid(rel);
787  rte->relkind = rel->rd_rel->relkind;
788  rte->rellockmode = AccessShareLock;
789  ExecInitRangeTable(estate, list_make1(rte));
790 
791  estate->es_output_cid = GetCurrentCommandId(false);
792 
793  return estate;
794 }
795 
796 /*
797  * Evaluates row filter.
798  *
799  * If the row filter evaluates to NULL, it is taken as false i.e. the change
800  * isn't replicated.
801  */
802 static bool
803 pgoutput_row_filter_exec_expr(ExprState *state, ExprContext *econtext)
804 {
805  Datum ret;
806  bool isnull;
807 
808  Assert(state != NULL);
809 
810  ret = ExecEvalExprSwitchContext(state, econtext, &isnull);
811 
812  elog(DEBUG3, "row filter evaluates to %s (isnull: %s)",
813  isnull ? "false" : DatumGetBool(ret) ? "true" : "false",
814  isnull ? "true" : "false");
815 
816  if (isnull)
817  return false;
818 
819  return DatumGetBool(ret);
820 }
821 
822 /*
823  * Make sure the per-entry memory context exists.
824  */
825 static void
826 pgoutput_ensure_entry_cxt(PGOutputData *data, RelationSyncEntry *entry)
827 {
828  Relation relation;
829 
830  /* The context may already exist, in which case bail out. */
831  if (entry->entry_cxt)
832  return;
833 
834  relation = RelationIdGetRelation(entry->publish_as_relid);
835 
836  entry->entry_cxt = AllocSetContextCreate(data->cachectx,
837  "entry private context",
838  ALLOCSET_SMALL_SIZES);
839 
840  MemoryContextCopyAndSetIdentifier(entry->entry_cxt,
841  RelationGetRelationName(relation));
842 }
843 
844 /*
845  * Initialize the row filter.
846  */
847 static void
848 pgoutput_row_filter_init(PGOutputData *data, List *publications,
849  RelationSyncEntry *entry)
850 {
851  ListCell *lc;
852  List *rfnodes[] = {NIL, NIL, NIL}; /* One per pubaction */
853  bool no_filter[] = {false, false, false}; /* One per pubaction */
854  MemoryContext oldctx;
855  int idx;
856  bool has_filter = true;
857  Oid schemaid = get_rel_namespace(entry->publish_as_relid);
858 
859  /*
860  * Find if there are any row filters for this relation. If there are, then
861  * prepare the necessary ExprState and cache it in entry->exprstate. To
862  * build an expression state, we need to ensure the following:
863  *
864  * All the given publication-table mappings must be checked.
865  *
866  * Multiple publications might have multiple row filters for this
867  * relation. Since row filter usage depends on the DML operation, there
868  * are multiple lists (one for each operation) to which row filters will
869  * be appended.
870  *
871  * FOR ALL TABLES and FOR TABLES IN SCHEMA imply "don't use row
872  * filter expression", so they take precedence.
873  */
874  foreach(lc, publications)
875  {
876  Publication *pub = lfirst(lc);
877  HeapTuple rftuple = NULL;
878  Datum rfdatum = 0;
879  bool pub_no_filter = true;
880 
881  /*
882  * If the publication is FOR ALL TABLES, or the publication includes a
883  * FOR TABLES IN SCHEMA where the table belongs to the referred
884  * schema, then it is treated the same as if there are no row filters
885  * (even if other publications have a row filter).
886  */
887  if (!pub->alltables &&
888  !SearchSysCacheExists2(PUBLICATIONNAMESPACEMAP,
889  ObjectIdGetDatum(schemaid),
890  ObjectIdGetDatum(pub->oid)))
891  {
892  /*
893  * Check for the presence of a row filter in this publication.
894  */
895  rftuple = SearchSysCache2(PUBLICATIONRELMAP,
896  ObjectIdGetDatum(entry->publish_as_relid),
897  ObjectIdGetDatum(pub->oid));
898 
899  if (HeapTupleIsValid(rftuple))
900  {
901  /* Null indicates no filter. */
902  rfdatum = SysCacheGetAttr(PUBLICATIONRELMAP, rftuple,
903  Anum_pg_publication_rel_prqual,
904  &pub_no_filter);
905  }
906  }
907 
908  if (pub_no_filter)
909  {
910  if (rftuple)
911  ReleaseSysCache(rftuple);
912 
913  no_filter[PUBACTION_INSERT] |= pub->pubactions.pubinsert;
914  no_filter[PUBACTION_UPDATE] |= pub->pubactions.pubupdate;
915  no_filter[PUBACTION_DELETE] |= pub->pubactions.pubdelete;
916 
917  /*
918  * Quick exit if all the DML actions are publicized via this
919  * publication.
920  */
921  if (no_filter[PUBACTION_INSERT] &&
922  no_filter[PUBACTION_UPDATE] &&
923  no_filter[PUBACTION_DELETE])
924  {
925  has_filter = false;
926  break;
927  }
928 
929  /* No additional work for this publication. Next one. */
930  continue;
931  }
932 
933  /* Form the per pubaction row filter lists. */
934  if (pub->pubactions.pubinsert && !no_filter[PUBACTION_INSERT])
935  rfnodes[PUBACTION_INSERT] = lappend(rfnodes[PUBACTION_INSERT],
936  TextDatumGetCString(rfdatum));
937  if (pub->pubactions.pubupdate && !no_filter[PUBACTION_UPDATE])
938  rfnodes[PUBACTION_UPDATE] = lappend(rfnodes[PUBACTION_UPDATE],
939  TextDatumGetCString(rfdatum));
940  if (pub->pubactions.pubdelete && !no_filter[PUBACTION_DELETE])
941  rfnodes[PUBACTION_DELETE] = lappend(rfnodes[PUBACTION_DELETE],
942  TextDatumGetCString(rfdatum));
943 
944  ReleaseSysCache(rftuple);
945  } /* loop all subscribed publications */
946 
947  /* Clean the row filter */
948  for (idx = 0; idx < NUM_ROWFILTER_PUBACTIONS; idx++)
949  {
950  if (no_filter[idx])
951  {
952  list_free_deep(rfnodes[idx]);
953  rfnodes[idx] = NIL;
954  }
955  }
956 
957  if (has_filter)
958  {
959  Relation relation = RelationIdGetRelation(entry->publish_as_relid);
960 
961  pgoutput_ensure_entry_cxt(data, entry);
962 
963  /*
964  * Now all the filters for all pubactions are known. Combine them when
965  * their pubactions are the same.
966  */
967  oldctx = MemoryContextSwitchTo(entry->entry_cxt);
968  entry->estate = create_estate_for_relation(relation);
969  for (idx = 0; idx < NUM_ROWFILTER_PUBACTIONS; idx++)
970  {
971  List *filters = NIL;
972  Expr *rfnode;
973 
974  if (rfnodes[idx] == NIL)
975  continue;
976 
977  foreach(lc, rfnodes[idx])
978  filters = lappend(filters, stringToNode((char *) lfirst(lc)));
979 
980  /* combine the row filter and cache the ExprState */
981  rfnode = make_orclause(filters);
982  entry->exprstate[idx] = ExecPrepareExpr(rfnode, entry->estate);
983  } /* for each pubaction */
984  MemoryContextSwitchTo(oldctx);
985 
986  RelationClose(relation);
987  }
988 }
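The precedence rule implemented above can be summarized as: an action keeps a row filter only when every publication that replicates the action attaches a WHERE clause; a single unfiltered publication (including FOR ALL TABLES or FOR TABLES IN SCHEMA) disables filtering for that action, and the surviving clauses are OR-ed together. A standalone sketch of that rule (hypothetical helper, not part of pgoutput.c):

#include <stdbool.h>

struct pub_action_info
{
    bool        publishes_action;   /* publication replicates this action */
    bool        has_row_filter;     /* publication attaches a WHERE clause */
};

/* returns true if a combined ExprState should be built for this action */
static bool
action_keeps_row_filter(const struct pub_action_info *pubs, int npubs)
{
    bool        any_filter = false;

    for (int i = 0; i < npubs; i++)
    {
        if (!pubs[i].publishes_action)
            continue;
        if (!pubs[i].has_row_filter)
            return false;       /* one unfiltered publication wins */
        any_filter = true;
    }
    return any_filter;          /* OR the collected clauses together */
}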
989 
990 /*
991  * Initialize the column list.
992  */
993 static void
994 pgoutput_column_list_init(PGOutputData *data, List *publications,
995  RelationSyncEntry *entry)
996 {
997  ListCell *lc;
998  bool first = true;
999  Relation relation = RelationIdGetRelation(entry->publish_as_relid);
1000 
1001  /*
1002  * Find if there are any column lists for this relation. If there are,
1003  * build a bitmap using the column lists.
1004  *
1005  * Multiple publications might have multiple column lists for this
1006  * relation.
1007  *
1008  * Note that we don't support the case where the column list is different
1009  * for the same table when combining publications. See comments atop
1010  * fetch_table_list. But one can later change the publication so we still
1011  * need to check all the given publication-table mappings and report an
1012  * error if any publications have a different column list.
1013  *
1014  * FOR ALL TABLES and FOR TABLES IN SCHEMA imply "don't use column list".
1015  */
1016  foreach(lc, publications)
1017  {
1018  Publication *pub = lfirst(lc);
1019  HeapTuple cftuple = NULL;
1020  Datum cfdatum = 0;
1021  Bitmapset *cols = NULL;
1022 
1023  /*
1024  * If the publication is FOR ALL TABLES then it is treated the same as
1025  * if there are no column lists (even if other publications have a
1026  * list).
1027  */
1028  if (!pub->alltables)
1029  {
1030  bool pub_no_list = true;
1031 
1032  /*
1033  * Check for the presence of a column list in this publication.
1034  *
1035  * Note: If we find no pg_publication_rel row, it's a publication
1036  * defined for a whole schema, so it can't have a column list,
1037  * just like a FOR ALL TABLES publication.
1038  */
1039  cftuple = SearchSysCache2(PUBLICATIONRELMAP,
1040  ObjectIdGetDatum(entry->publish_as_relid),
1041  ObjectIdGetDatum(pub->oid));
1042 
1043  if (HeapTupleIsValid(cftuple))
1044  {
1045  /* Lookup the column list attribute. */
1046  cfdatum = SysCacheGetAttr(PUBLICATIONRELMAP, cftuple,
1047  Anum_pg_publication_rel_prattrs,
1048  &pub_no_list);
1049 
1050  /* Build the column list bitmap in the per-entry context. */
1051  if (!pub_no_list) /* when not null */
1052  {
1053  pgoutput_ensure_entry_cxt(data, entry);
1054 
1055  cols = pub_collist_to_bitmapset(cols, cfdatum,
1056  entry->entry_cxt);
1057 
1058  /*
1059  * If column list includes all the columns of the table,
1060  * set it to NULL.
1061  */
1062  if (bms_num_members(cols) == RelationGetNumberOfAttributes(relation))
1063  {
1064  bms_free(cols);
1065  cols = NULL;
1066  }
1067  }
1068 
1069  ReleaseSysCache(cftuple);
1070  }
1071  }
1072 
1073  if (first)
1074  {
1075  entry->columns = cols;
1076  first = false;
1077  }
1078  else if (!bms_equal(entry->columns, cols))
1079  ereport(ERROR,
1080  errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1081  errmsg("cannot use different column lists for table \"%s.%s\" in different publications",
1082  get_namespace_name(RelationGetNamespace(relation)),
1083  RelationGetRelationName(relation)));
1084  } /* loop all subscribed publications */
1085 
1086  RelationClose(relation);
1087 }
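As the comment above notes, the attnums stored in entry->columns are plain 1-based attribute numbers, not shifted by FirstLowInvalidHeapAttributeNumber. A hypothetical illustration: for a table t(a int, b int, c int) published with a column list (a, c), the bitmap ends up holding {1, 3}, conceptually built along these lines (pgoutput itself does this via pub_collist_to_bitmapset()):

    /* sketch only, hypothetical attnums for a three-column table */
    Bitmapset  *cols = NULL;

    cols = bms_add_member(cols, 1);     /* column "a" */
    cols = bms_add_member(cols, 3);     /* column "c" */
    /* a list naming every column would instead be folded to NULL ("all") */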
1088 
1089 /*
1090  * Initialize the slot for storing new and old tuples, and build the map that
1091  * will be used to convert the relation's tuples into the ancestor's format.
1092  */
1093 static void
1094 init_tuple_slot(PGOutputData *data, Relation relation,
1095  RelationSyncEntry *entry)
1096 {
1097  MemoryContext oldctx;
1098  TupleDesc oldtupdesc;
1099  TupleDesc newtupdesc;
1100 
1101  oldctx = MemoryContextSwitchTo(data->cachectx);
1102 
1103  /*
1104  * Create tuple table slots. Create a copy of the TupleDesc as it needs to
1105  * live as long as the cache remains.
1106  */
1107  oldtupdesc = CreateTupleDescCopy(RelationGetDescr(relation));
1108  newtupdesc = CreateTupleDescCopy(RelationGetDescr(relation));
1109 
1110  entry->old_slot = MakeSingleTupleTableSlot(oldtupdesc, &TTSOpsHeapTuple);
1111  entry->new_slot = MakeSingleTupleTableSlot(newtupdesc, &TTSOpsHeapTuple);
1112 
1113  MemoryContextSwitchTo(oldctx);
1114 
1115  /*
1116  * Cache the map that will be used to convert the relation's tuples into
1117  * the ancestor's format, if needed.
1118  */
1119  if (entry->publish_as_relid != RelationGetRelid(relation))
1120  {
1121  Relation ancestor = RelationIdGetRelation(entry->publish_as_relid);
1122  TupleDesc indesc = RelationGetDescr(relation);
1123  TupleDesc outdesc = RelationGetDescr(ancestor);
1124 
1125  /* Map must live as long as the session does. */
1126  oldctx = MemoryContextSwitchTo(CacheMemoryContext);
1127 
1128  entry->attrmap = build_attrmap_by_name_if_req(indesc, outdesc, false);
1129 
1130  MemoryContextSwitchTo(oldctx);
1131  RelationClose(ancestor);
1132  }
1133 }
1134 
1135 /*
1136  * Change is checked against the row filter if any.
1137  *
1138  * Returns true if the change is to be replicated, else false.
1139  *
1140  * For inserts, evaluate the row filter for new tuple.
1141  * For deletes, evaluate the row filter for old tuple.
1142  * For updates, evaluate the row filter for old and new tuple.
1143  *
1144  * For updates, if both evaluations are true, we send the UPDATE; if both
1145  * evaluations are false, we don't replicate the UPDATE. Now, if only one
1146  * of the tuples matches the row filter expression, we transform the
1147  * UPDATE to DELETE or INSERT to avoid any data inconsistency based on the
1148  * following rules:
1149  *
1150  * Case 1: old-row (no match) new-row (no match) -> (drop change)
1151  * Case 2: old-row (no match) new row (match) -> INSERT
1152  * Case 3: old-row (match) new-row (no match) -> DELETE
1153  * Case 4: old-row (match) new row (match) -> UPDATE
1154  *
1155  * The new action is updated in the action parameter.
1156  *
1157  * The new slot could be updated when transforming the UPDATE into INSERT,
1158  * because the original new tuple might not have column values from the replica
1159  * identity.
1160  *
1161  * Examples:
1162  * Let's say the old tuple satisfies the row filter but the new tuple doesn't.
1163  * Since the old tuple satisfies, the initial table synchronization copied this
1164  * row (or another method was used to guarantee that there is data
1165  * consistency). However, after the UPDATE the new tuple doesn't satisfy the
1166  * row filter, so from a data consistency perspective, that row should be
1167  * removed on the subscriber. The UPDATE should be transformed into a DELETE
1168  * statement and be sent to the subscriber. Keeping this row on the subscriber
1169  * is undesirable because it doesn't reflect what was defined in the row filter
1170  * expression on the publisher. This row on the subscriber would likely not be
1171  * modified by replication again. If someone inserted a new row with the same
1172  * old identifier, replication could stop due to a constraint violation.
1173  *
1174  * Let's say the old tuple doesn't match the row filter but the new tuple does.
1175  * Since the old tuple doesn't satisfy, the initial table synchronization
1176  * probably didn't copy this row. However, after the UPDATE the new tuple does
1177  * satisfy the row filter, so from a data consistency perspective, that row
1178  * should be inserted on the subscriber. Otherwise, subsequent UPDATE or DELETE
1179  * statements have no effect (it matches no row -- see
1180  * apply_handle_update_internal()). So, the UPDATE should be transformed into a
1181  * INSERT statement and be sent to the subscriber. However, this might surprise
1182  * someone who expects the data set to satisfy the row filter expression on the
1183  * provider.
1184  */
1185 static bool
1186 pgoutput_row_filter(Relation relation, TupleTableSlot *old_slot,
1187  TupleTableSlot **new_slot_ptr, RelationSyncEntry *entry,
1188  ReorderBufferChangeType *action)
1189 {
1190  TupleDesc desc;
1191  int i;
1192  bool old_matched,
1193  new_matched,
1194  result;
1195  TupleTableSlot *tmp_new_slot;
1196  TupleTableSlot *new_slot = *new_slot_ptr;
1197  ExprContext *ecxt;
1198  ExprState *filter_exprstate;
1199 
1200  /*
1201  * We need this map to avoid relying on ReorderBufferChangeType enums
1202  * having specific values.
1203  */
1204  static const int map_changetype_pubaction[] = {
1205  [REORDER_BUFFER_CHANGE_INSERT] = PUBACTION_INSERT,
1206  [REORDER_BUFFER_CHANGE_UPDATE] = PUBACTION_UPDATE,
1207  [REORDER_BUFFER_CHANGE_DELETE] = PUBACTION_DELETE
1208  };
1209 
1210  Assert(*action == REORDER_BUFFER_CHANGE_INSERT ||
1211  *action == REORDER_BUFFER_CHANGE_UPDATE ||
1212  *action == REORDER_BUFFER_CHANGE_DELETE);
1213 
1214  Assert(new_slot || old_slot);
1215 
1216  /* Get the corresponding row filter */
1217  filter_exprstate = entry->exprstate[map_changetype_pubaction[*action]];
1218 
1219  /* Bail out if there is no row filter */
1220  if (!filter_exprstate)
1221  return true;
1222 
1223  elog(DEBUG3, "table \"%s.%s\" has row filter",
1224  get_namespace_name(RelationGetNamespace(relation)),
1225  RelationGetRelationName(relation));
1226 
1227  ResetPerTupleExprContext(entry->estate);
1228 
1229  ecxt = GetPerTupleExprContext(entry->estate);
1230 
1231  /*
1232  * For the following occasions where there is only one tuple, we can
1233  * evaluate the row filter for that tuple and return.
1234  *
1235  * For inserts, we only have the new tuple.
1236  *
1237  * For updates, we can have only a new tuple when none of the replica
1238  * identity columns changed and none of those columns have external data
1239  * but we still need to evaluate the row filter for the new tuple as the
1240  * existing values of those columns might not match the filter. Also,
1241  * users can use constant expressions in the row filter, so we anyway need
1242  * to evaluate it for the new tuple.
1243  *
1244  * For deletes, we only have the old tuple.
1245  */
1246  if (!new_slot || !old_slot)
1247  {
1248  ecxt->ecxt_scantuple = new_slot ? new_slot : old_slot;
1249  result = pgoutput_row_filter_exec_expr(filter_exprstate, ecxt);
1250 
1251  return result;
1252  }
1253 
1254  /*
1255  * Both the old and new tuples must be valid only for updates and need to
1256  * be checked against the row filter.
1257  */
1258  Assert(map_changetype_pubaction[*action] == PUBACTION_UPDATE);
1259 
1260  slot_getallattrs(new_slot);
1261  slot_getallattrs(old_slot);
1262 
1263  tmp_new_slot = NULL;
1264  desc = RelationGetDescr(relation);
1265 
1266  /*
1267  * The new tuple might not have all the replica identity columns, in which
1268  * case it needs to be copied over from the old tuple.
1269  */
1270  for (i = 0; i < desc->natts; i++)
1271  {
1272  Form_pg_attribute att = TupleDescAttr(desc, i);
1273 
1274  /*
1275  * if the column in the new tuple or old tuple is null, nothing to do
1276  */
1277  if (new_slot->tts_isnull[i] || old_slot->tts_isnull[i])
1278  continue;
1279 
1280  /*
1281  * Unchanged toasted replica identity columns are only logged in the
1282  * old tuple. Copy this over to the new tuple. The changed (or WAL
1283  * Logged) toast values are always assembled in memory and set as
1284  * VARTAG_INDIRECT. See ReorderBufferToastReplace.
1285  */
1286  if (att->attlen == -1 &&
1287  VARATT_IS_EXTERNAL_ONDISK(new_slot->tts_values[i]) &&
1288  !VARATT_IS_EXTERNAL_ONDISK(old_slot->tts_values[i]))
1289  {
1290  if (!tmp_new_slot)
1291  {
1292  tmp_new_slot = MakeSingleTupleTableSlot(desc, &TTSOpsVirtual);
1293  ExecClearTuple(tmp_new_slot);
1294 
1295  memcpy(tmp_new_slot->tts_values, new_slot->tts_values,
1296  desc->natts * sizeof(Datum));
1297  memcpy(tmp_new_slot->tts_isnull, new_slot->tts_isnull,
1298  desc->natts * sizeof(bool));
1299  }
1300 
1301  tmp_new_slot->tts_values[i] = old_slot->tts_values[i];
1302  tmp_new_slot->tts_isnull[i] = old_slot->tts_isnull[i];
1303  }
1304  }
1305 
1306  ecxt->ecxt_scantuple = old_slot;
1307  old_matched = pgoutput_row_filter_exec_expr(filter_exprstate, ecxt);
1308 
1309  if (tmp_new_slot)
1310  {
1311  ExecStoreVirtualTuple(tmp_new_slot);
1312  ecxt->ecxt_scantuple = tmp_new_slot;
1313  }
1314  else
1315  ecxt->ecxt_scantuple = new_slot;
1316 
1317  new_matched = pgoutput_row_filter_exec_expr(filter_exprstate, ecxt);
1318 
1319  /*
1320  * Case 1: if both tuples don't match the row filter, bailout. Send
1321  * nothing.
1322  */
1323  if (!old_matched && !new_matched)
1324  return false;
1325 
1326  /*
1327  * Case 2: if the old tuple doesn't satisfy the row filter but the new
1328  * tuple does, transform the UPDATE into INSERT.
1329  *
1330  * Use the newly transformed tuple that must contain the column values for
1331  * all the replica identity columns. This is required to ensure that,
1332  * while inserting the tuple in the downstream node, we have all the
1333  * required column values.
1334  */
1335  if (!old_matched && new_matched)
1336  {
1337  *action = REORDER_BUFFER_CHANGE_INSERT;
1338 
1339  if (tmp_new_slot)
1340  *new_slot_ptr = tmp_new_slot;
1341  }
1342 
1343  /*
1344  * Case 3: if the old tuple satisfies the row filter but the new tuple
1345  * doesn't, transform the UPDATE into DELETE.
1346  *
1347  * This transformation does not require another tuple. The Old tuple will
1348  * be used for DELETE.
1349  */
1350  else if (old_matched && !new_matched)
1351  *action = REORDER_BUFFER_CHANGE_DELETE;
1352 
1353  /*
1354  * Case 4: if both tuples match the row filter, transformation isn't
1355  * required. (*action is default UPDATE).
1356  */
1357 
1358  return true;
1359 }
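The four cases in the comment above pgoutput_row_filter() reduce to a small pure function. This sketch (not part of pgoutput.c) shows the mapping from the two filter results to the action that is actually replicated:

#include <stdbool.h>

enum replicated_action
{
    REP_SKIP,                   /* Case 1: drop the change */
    REP_INSERT,                 /* Case 2 */
    REP_UPDATE,                 /* Case 4 */
    REP_DELETE                  /* Case 3 */
};

static enum replicated_action
transform_update(bool old_matched, bool new_matched)
{
    if (!old_matched && !new_matched)
        return REP_SKIP;
    if (!old_matched && new_matched)
        return REP_INSERT;
    if (old_matched && !new_matched)
        return REP_DELETE;
    return REP_UPDATE;
}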
1360 
1361 /*
1362  * Sends the decoded DML over wire.
1363  *
1364  * This is called both in streaming and non-streaming modes.
1365  */
1366 static void
1367 pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
1368  Relation relation, ReorderBufferChange *change)
1369 {
1370  PGOutputData *data = (PGOutputData *) ctx->output_plugin_private;
1371  PGOutputTxnData *txndata = (PGOutputTxnData *) txn->output_plugin_private;
1372  MemoryContext old;
1373  RelationSyncEntry *relentry;
1374  TransactionId xid = InvalidTransactionId;
1375  Relation ancestor = NULL;
1376  Relation targetrel = relation;
1377  ReorderBufferChangeType action = change->action;
1378  TupleTableSlot *old_slot = NULL;
1379  TupleTableSlot *new_slot = NULL;
1380 
1381  update_replication_progress(ctx, false);
1382 
1383  if (!is_publishable_relation(relation))
1384  return;
1385 
1386  /*
1387  * Remember the xid for the change in streaming mode. We need to send xid
1388  * with each change in the streaming mode so that subscriber can make
1389  * their association and on aborts, it can discard the corresponding
1390  * changes.
1391  */
1392  if (in_streaming)
1393  xid = change->txn->xid;
1394 
1395  relentry = get_rel_sync_entry(data, relation);
1396 
1397  /* First check the table filter */
1398  switch (action)
1399  {
1400  case REORDER_BUFFER_CHANGE_INSERT:
1401  if (!relentry->pubactions.pubinsert)
1402  return;
1403  break;
1404  case REORDER_BUFFER_CHANGE_UPDATE:
1405  if (!relentry->pubactions.pubupdate)
1406  return;
1407  break;
1408  case REORDER_BUFFER_CHANGE_DELETE:
1409  if (!relentry->pubactions.pubdelete)
1410  return;
1411  break;
1412  default:
1413  Assert(false);
1414  }
1415 
1416  /* Avoid leaking memory by using and resetting our own context */
1417  old = MemoryContextSwitchTo(data->context);
1418 
1419  /* Send the data */
1420  switch (action)
1421  {
1422  case REORDER_BUFFER_CHANGE_INSERT:
1423  new_slot = relentry->new_slot;
1424  ExecStoreHeapTuple(&change->data.tp.newtuple->tuple,
1425  new_slot, false);
1426 
1427  /* Switch relation if publishing via root. */
1428  if (relentry->publish_as_relid != RelationGetRelid(relation))
1429  {
1430  Assert(relation->rd_rel->relispartition);
1431  ancestor = RelationIdGetRelation(relentry->publish_as_relid);
1432  targetrel = ancestor;
1433  /* Convert tuple if needed. */
1434  if (relentry->attrmap)
1435  {
1436  TupleDesc tupdesc = RelationGetDescr(targetrel);
1437 
1438  new_slot = execute_attr_map_slot(relentry->attrmap,
1439  new_slot,
1440  MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));
1441  }
1442  }
1443 
1444  /* Check row filter */
1445  if (!pgoutput_row_filter(targetrel, NULL, &new_slot, relentry,
1446  &action))
1447  break;
1448 
1449  /*
1450  * Send BEGIN if we haven't yet.
1451  *
1452  * We send the BEGIN message after ensuring that we will actually
1453  * send the change. This avoids sending a pair of BEGIN/COMMIT
1454  * messages for empty transactions.
1455  */
1456  if (txndata && !txndata->sent_begin_txn)
1457  pgoutput_send_begin(ctx, txn);
1458 
1459  /*
1460  * Schema should be sent using the original relation because it
1461  * also sends the ancestor's relation.
1462  */
1463  maybe_send_schema(ctx, change, relation, relentry);
1464 
1465  OutputPluginPrepareWrite(ctx, true);
1466  logicalrep_write_insert(ctx->out, xid, targetrel, new_slot,
1467  data->binary, relentry->columns);
1468  OutputPluginWrite(ctx, true);
1469  break;
1470  case REORDER_BUFFER_CHANGE_UPDATE:
1471  if (change->data.tp.oldtuple)
1472  {
1473  old_slot = relentry->old_slot;
1474  ExecStoreHeapTuple(&change->data.tp.oldtuple->tuple,
1475  old_slot, false);
1476  }
1477 
1478  new_slot = relentry->new_slot;
1479  ExecStoreHeapTuple(&change->data.tp.newtuple->tuple,
1480  new_slot, false);
1481 
1482  /* Switch relation if publishing via root. */
1483  if (relentry->publish_as_relid != RelationGetRelid(relation))
1484  {
1485  Assert(relation->rd_rel->relispartition);
1486  ancestor = RelationIdGetRelation(relentry->publish_as_relid);
1487  targetrel = ancestor;
1488  /* Convert tuples if needed. */
1489  if (relentry->attrmap)
1490  {
1491  TupleDesc tupdesc = RelationGetDescr(targetrel);
1492 
1493  if (old_slot)
1494  old_slot = execute_attr_map_slot(relentry->attrmap,
1495  old_slot,
1496  MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));
1497 
1498  new_slot = execute_attr_map_slot(relentry->attrmap,
1499  new_slot,
1500  MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));
1501  }
1502  }
1503 
1504  /* Check row filter */
1505  if (!pgoutput_row_filter(targetrel, old_slot, &new_slot,
1506  relentry, &action))
1507  break;
1508 
1509  /* Send BEGIN if we haven't yet */
1510  if (txndata && !txndata->sent_begin_txn)
1511  pgoutput_send_begin(ctx, txn);
1512 
1513  maybe_send_schema(ctx, change, relation, relentry);
1514 
1515  OutputPluginPrepareWrite(ctx, true);
1516 
1517  /*
1518  * Updates could be transformed to inserts or deletes based on the
1519  * results of the row filter for old and new tuple.
1520  */
1521  switch (action)
1522  {
1523  case REORDER_BUFFER_CHANGE_INSERT:
1524  logicalrep_write_insert(ctx->out, xid, targetrel,
1525  new_slot, data->binary,
1526  relentry->columns);
1527  break;
1528  case REORDER_BUFFER_CHANGE_UPDATE:
1529  logicalrep_write_update(ctx->out, xid, targetrel,
1530  old_slot, new_slot, data->binary,
1531  relentry->columns);
1532  break;
1533  case REORDER_BUFFER_CHANGE_DELETE:
1534  logicalrep_write_delete(ctx->out, xid, targetrel,
1535  old_slot, data->binary,
1536  relentry->columns);
1537  break;
1538  default:
1539  Assert(false);
1540  }
1541 
1542  OutputPluginWrite(ctx, true);
1543  break;
1544  case REORDER_BUFFER_CHANGE_DELETE:
1545  if (change->data.tp.oldtuple)
1546  {
1547  old_slot = relentry->old_slot;
1548 
1549  ExecStoreHeapTuple(&change->data.tp.oldtuple->tuple,
1550  old_slot, false);
1551 
1552  /* Switch relation if publishing via root. */
1553  if (relentry->publish_as_relid != RelationGetRelid(relation))
1554  {
1555  Assert(relation->rd_rel->relispartition);
1556  ancestor = RelationIdGetRelation(relentry->publish_as_relid);
1557  targetrel = ancestor;
1558  /* Convert tuple if needed. */
1559  if (relentry->attrmap)
1560  {
1561  TupleDesc tupdesc = RelationGetDescr(targetrel);
1562 
1563  old_slot = execute_attr_map_slot(relentry->attrmap,
1564  old_slot,
1565  MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));
1566  }
1567  }
1568 
1569  /* Check row filter */
1570  if (!pgoutput_row_filter(targetrel, old_slot, &new_slot,
1571  relentry, &action))
1572  break;
1573 
1574  /* Send BEGIN if we haven't yet */
1575  if (txndata && !txndata->sent_begin_txn)
1576  pgoutput_send_begin(ctx, txn);
1577 
1578  maybe_send_schema(ctx, change, relation, relentry);
1579 
1580  OutputPluginPrepareWrite(ctx, true);
1581  logicalrep_write_delete(ctx->out, xid, targetrel,
1582  old_slot, data->binary,
1583  relentry->columns);
1584  OutputPluginWrite(ctx, true);
1585  }
1586  else
1587  elog(DEBUG1, "didn't send DELETE change because of missing oldtuple");
1588  break;
1589  default:
1590  Assert(false);
1591  }
1592 
1593  if (RelationIsValid(ancestor))
1594  {
1595  RelationClose(ancestor);
1596  ancestor = NULL;
1597  }
1598 
1599  /* Cleanup */
1600  MemoryContextSwitchTo(old);
1601  MemoryContextReset(data->context);
1602 }
1603 
1604 static void
1605 pgoutput_truncate(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
1606  int nrelations, Relation relations[], ReorderBufferChange *change)
1607 {
1608  PGOutputData *data = (PGOutputData *) ctx->output_plugin_private;
1609  PGOutputTxnData *txndata = (PGOutputTxnData *) txn->output_plugin_private;
1610  MemoryContext old;
1611  RelationSyncEntry *relentry;
1612  int i;
1613  int nrelids;
1614  Oid *relids;
1615  TransactionId xid = InvalidTransactionId;
1616 
1617  update_replication_progress(ctx, false);
1618 
1619  /* Remember the xid for the change in streaming mode. See pgoutput_change. */
1620  if (in_streaming)
1621  xid = change->txn->xid;
1622 
1623  old = MemoryContextSwitchTo(data->context);
1624 
1625  relids = palloc0(nrelations * sizeof(Oid));
1626  nrelids = 0;
1627 
1628  for (i = 0; i < nrelations; i++)
1629  {
1630  Relation relation = relations[i];
1631  Oid relid = RelationGetRelid(relation);
1632 
1633  if (!is_publishable_relation(relation))
1634  continue;
1635 
1636  relentry = get_rel_sync_entry(data, relation);
1637 
1638  if (!relentry->pubactions.pubtruncate)
1639  continue;
1640 
1641  /*
1642  * Don't send partitions if the publication wants to send only the
1643  * root tables through it.
1644  */
1645  if (relation->rd_rel->relispartition &&
1646  relentry->publish_as_relid != relid)
1647  continue;
1648 
1649  relids[nrelids++] = relid;
1650 
1651  /* Send BEGIN if we haven't yet */
1652  if (txndata && !txndata->sent_begin_txn)
1653  pgoutput_send_begin(ctx, txn);
1654 
1655  maybe_send_schema(ctx, change, relation, relentry);
1656  }
1657 
1658  if (nrelids > 0)
1659  {
1660  OutputPluginPrepareWrite(ctx, true);
1661  logicalrep_write_truncate(ctx->out,
1662  xid,
1663  nrelids,
1664  relids,
1665  change->data.truncate.cascade,
1666  change->data.truncate.restart_seqs);
1667  OutputPluginWrite(ctx, true);
1668  }
1669 
1670  MemoryContextSwitchTo(old);
1671  MemoryContextReset(data->context);
1672 }
1673 
1674 static void
1675 pgoutput_message(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
1676  XLogRecPtr message_lsn, bool transactional, const char *prefix, Size sz,
1677  const char *message)
1678 {
1679  PGOutputData *data = (PGOutputData *) ctx->output_plugin_private;
1680  TransactionId xid = InvalidTransactionId;
1681 
1682  update_replication_progress(ctx, false);
1683 
1684  if (!data->messages)
1685  return;
1686 
1687  /*
1688  * Remember the xid for the message in streaming mode. See
1689  * pgoutput_change.
1690  */
1691  if (in_streaming)
1692  xid = txn->xid;
1693 
1694  /*
1695  * Output BEGIN if we haven't yet. Avoid for non-transactional messages.
1696  */
1697  if (transactional)
1698  {
1699  PGOutputTxnData *txndata = (PGOutputTxnData *) txn->output_plugin_private;
1700 
1701  /* Send BEGIN if we haven't yet */
1702  if (txndata && !txndata->sent_begin_txn)
1703  pgoutput_send_begin(ctx, txn);
1704  }
1705 
1706  OutputPluginPrepareWrite(ctx, true);
1707  logicalrep_write_message(ctx->out,
1708  xid,
1709  message_lsn,
1710  transactional,
1711  prefix,
1712  sz,
1713  message);
1714  OutputPluginWrite(ctx, true);
1715 }
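Logical decoding messages of this kind originate from pg_logical_emit_message() on the publisher. A hedged fragment (reusing the libpq connection "conn" from the earlier example; prefix and payload are arbitrary and arrive here as the 'prefix' and 'message' arguments):

    PGresult   *res = PQexec(conn,
                             "SELECT pg_logical_emit_message(true, "
                             "'my_prefix', 'payload')");

    PQclear(res);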
1716 
1717 /*
1718  * Return true if the data is associated with an origin and the user has
1719  * requested the changes that don't have an origin, false otherwise.
1720  */
1721 static bool
1723  RepOriginId origin_id)
1724 {
1725  if (publish_no_origin && origin_id != InvalidRepOriginId)
1726  return true;
1727 
1728  return false;
1729 }
1730 
1731 /*
1732  * Shutdown the output plugin.
1733  *
1734  * Note, we don't need to clean the data->context and data->cachectx as
1735  * they are child context of the ctx->context so it will be cleaned up by
1736  * logical decoding machinery.
1737  */
1738 static void
1739 pgoutput_shutdown(LogicalDecodingContext *ctx)
1740 {
1741  if (RelationSyncCache)
1742  {
1743  hash_destroy(RelationSyncCache);
1744  RelationSyncCache = NULL;
1745  }
1746 }
1747 
1748 /*
1749  * Load publications from the list of publication names.
1750  */
1751 static List *
1752 LoadPublications(List *pubnames)
1753 {
1754  List *result = NIL;
1755  ListCell *lc;
1756 
1757  foreach(lc, pubnames)
1758  {
1759  char *pubname = (char *) lfirst(lc);
1760  Publication *pub = GetPublicationByName(pubname, false);
1761 
1762  result = lappend(result, pub);
1763  }
1764 
1765  return result;
1766 }
1767 
1768 /*
1769  * Publication syscache invalidation callback.
1770  *
1771  * Called for invalidations on pg_publication.
1772  */
1773 static void
1774 publication_invalidation_cb(Datum arg, int cacheid, uint32 hashvalue)
1775 {
1776  publications_valid = false;
1777 
1778  /*
1779  * Also invalidate per-relation cache so that next time the filtering info
1780  * is checked it will be updated with the new publication settings.
1781  */
1782  rel_sync_cache_publication_cb(arg, cacheid, hashvalue);
1783 }
1784 
1785 /*
1786  * START STREAM callback
1787  */
1788 static void
1790  ReorderBufferTXN *txn)
1791 {
1792  bool send_replication_origin = txn->origin_id != InvalidRepOriginId;
1793 
1794  /* we can't nest streaming of transactions */
1795  Assert(!in_streaming);
1796 
1797  /*
1798  * If we already sent the first stream for this transaction then don't
1799  * send the origin id in the subsequent streams.
1800  */
1801  if (rbtxn_is_streamed(txn))
1802  send_replication_origin = false;
1803 
1804  OutputPluginPrepareWrite(ctx, !send_replication_origin);
1805  logicalrep_write_stream_start(ctx->out, txn->xid, !rbtxn_is_streamed(txn));
1806 
1807  send_repl_origin(ctx, txn->origin_id, txn->origin_lsn,
1808  send_replication_origin);
1809 
1810  OutputPluginWrite(ctx, true);
1811 
1812  /* we're streaming a chunk of transaction now */
1813  in_streaming = true;
1814 }
1815 
1816 /*
1817  * STOP STREAM callback
1818  */
1819 static void
1821  ReorderBufferTXN *txn)
1822 {
1823  /* we should be streaming a transaction */
1824  Assert(in_streaming);
1825 
1826  OutputPluginPrepareWrite(ctx, true);
1827  logicalrep_write_stream_stop(ctx->out);
1828  OutputPluginWrite(ctx, true);
1829 
1830  /* we've stopped streaming a transaction */
1831  in_streaming = false;
1832 }
1833 
1834 /*
1835  * Notify downstream to discard the streamed transaction (along with all
1836  * its subtransactions, if it's a toplevel transaction).
1837  */
1838 static void
1840  ReorderBufferTXN *txn,
1841  XLogRecPtr abort_lsn)
1842 {
1843  ReorderBufferTXN *toptxn;
1844 
1845  /*
1846  * The abort should happen outside streaming block, even for streamed
1847  * transactions. The transaction has to be marked as streamed, though.
1848  */
1849  Assert(!in_streaming);
1850 
1851  /* determine the toplevel transaction */
1852  toptxn = (txn->toptxn) ? txn->toptxn : txn;
1853 
1854  Assert(rbtxn_is_streamed(toptxn));
1855 
1856  OutputPluginPrepareWrite(ctx, true);
1857  logicalrep_write_stream_abort(ctx->out, toptxn->xid, txn->xid);
1858  OutputPluginWrite(ctx, true);
1859 
1860  cleanup_rel_sync_cache(toptxn->xid, false);
1861 }
1862 
1863 /*
1864  * Notify downstream to apply the streamed transaction (along with all
1865  * its subtransactions).
1866  */
1867 static void
1869  ReorderBufferTXN *txn,
1870  XLogRecPtr commit_lsn)
1871 {
1872  /*
1873  * The commit should happen outside streaming block, even for streamed
1874  * transactions. The transaction has to be marked as streamed, though.
1875  */
1876  Assert(!in_streaming);
1877  Assert(rbtxn_is_streamed(txn));
1878 
1879  update_replication_progress(ctx, false);
1880 
1881  OutputPluginPrepareWrite(ctx, true);
1882  logicalrep_write_stream_commit(ctx->out, txn, commit_lsn);
1883  OutputPluginWrite(ctx, true);
1884 
1885  cleanup_rel_sync_cache(txn->xid, true);
1886 }
1887 
1888 /*
1889  * PREPARE callback (for streaming two-phase commit).
1890  *
1891  * Notify the downstream to prepare the transaction.
1892  */
1893 static void
1895  ReorderBufferTXN *txn,
1896  XLogRecPtr prepare_lsn)
1897 {
1898  Assert(rbtxn_is_streamed(txn));
1899 
1900  update_replication_progress(ctx, false);
1901  OutputPluginPrepareWrite(ctx, true);
1902  logicalrep_write_stream_prepare(ctx->out, txn, prepare_lsn);
1903  OutputPluginWrite(ctx, true);
1904 }
1905 
1906 /*
1907  * Initialize the relation schema sync cache for a decoding session.
1908  *
1909  * The hash table is destroyed at the end of a decoding session. While
1910  * relcache invalidations still exist and will still be invoked, they
1911  * will just see the null hash table global and take no action.
1912  */
1913 static void
1914 init_rel_sync_cache(MemoryContext cachectx)
1915 {
1916  HASHCTL ctl;
1917 
1918  if (RelationSyncCache != NULL)
1919  return;
1920 
1921  /* Make a new hash table for the cache */
1922  ctl.keysize = sizeof(Oid);
1923  ctl.entrysize = sizeof(RelationSyncEntry);
1924  ctl.hcxt = cachectx;
1925 
1926  RelationSyncCache = hash_create("logical replication output relation cache",
1927  128, &ctl,
1928  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
1929 
1930  Assert(RelationSyncCache != NULL);
1931 
1932  CacheRegisterRelcacheCallback(rel_sync_cache_relation_cb, (Datum) 0);
1933  CacheRegisterSyscacheCallback(PUBLICATIONRELMAP,
1934  rel_sync_cache_publication_cb,
1935  (Datum) 0);
1936  CacheRegisterSyscacheCallback(PUBLICATIONNAMESPACEMAP,
1937  rel_sync_cache_publication_cb,
1938  (Datum) 0);
1939 }
1940 
1941 /*
1942  * We expect relatively small number of streamed transactions.
1943  */
1944 static bool
1945 get_schema_sent_in_streamed_txn(RelationSyncEntry *entry, TransactionId xid)
1946 {
1947  return list_member_xid(entry->streamed_txns, xid);
1948 }
1949 
1950 /*
1951  * Add the xid in the rel sync entry for which we have already sent the schema
1952  * of the relation.
1953  */
1954 static void
1955 set_schema_sent_in_streamed_txn(RelationSyncEntry *entry, TransactionId xid)
1956 {
1957  MemoryContext oldctx;
1958 
1959  oldctx = MemoryContextSwitchTo(CacheMemoryContext);
1960 
1961  entry->streamed_txns = lappend_xid(entry->streamed_txns, xid);
1962 
1963  MemoryContextSwitchTo(oldctx);
1964 }
1965 
1966 /*
1967  * Find or create entry in the relation schema cache.
1968  *
1969  * This looks up publications that the given relation is directly or
1970  * indirectly part of (the latter if it's really the relation's ancestor that
1971  * is part of a publication) and fills up the found entry with the information
1972  * about which operations to publish and whether to use an ancestor's schema
1973  * when publishing.
1974  */
1975 static RelationSyncEntry *
1976 get_rel_sync_entry(PGOutputData *data, Relation relation)
1977 {
1978  RelationSyncEntry *entry;
1979  bool found;
1980  MemoryContext oldctx;
1981  Oid relid = RelationGetRelid(relation);
1982 
1983  Assert(RelationSyncCache != NULL);
1984 
1985  /* Find cached relation info, creating if not found */
1986  entry = (RelationSyncEntry *) hash_search(RelationSyncCache,
1987  (void *) &relid,
1988  HASH_ENTER, &found);
1989  Assert(entry != NULL);
1990 
1991  /* initialize entry, if it's new */
1992  if (!found)
1993  {
1994  entry->replicate_valid = false;
1995  entry->schema_sent = false;
1996  entry->streamed_txns = NIL;
1997  entry->pubactions.pubinsert = entry->pubactions.pubupdate =
1998  entry->pubactions.pubdelete = entry->pubactions.pubtruncate = false;
1999  entry->new_slot = NULL;
2000  entry->old_slot = NULL;
2001  memset(entry->exprstate, 0, sizeof(entry->exprstate));
2002  entry->entry_cxt = NULL;
2003  entry->publish_as_relid = InvalidOid;
2004  entry->columns = NULL;
2005  entry->attrmap = NULL;
2006  }
2007 
2008  /* Validate the entry */
2009  if (!entry->replicate_valid)
2010  {
2011  Oid schemaId = get_rel_namespace(relid);
2012  List *pubids = GetRelationPublications(relid);
2013 
2014  /*
2015  * We don't acquire a lock on the namespace system table as we build
2016  * the cache entry using a historic snapshot and all the later changes
2017  * are absorbed while decoding WAL.
2018  */
2019  List *schemaPubids = GetSchemaPublications(schemaId);
2020  ListCell *lc;
2021  Oid publish_as_relid = relid;
2022  int publish_ancestor_level = 0;
2023  bool am_partition = get_rel_relispartition(relid);
2024  char relkind = get_rel_relkind(relid);
2025  List *rel_publications = NIL;
2026 
2027  /* Reload publications if needed before use. */
2028  if (!publications_valid)
2029  {
2030  oldctx = MemoryContextSwitchTo(CacheMemoryContext);
2031  if (data->publications)
2032  {
2033  list_free_deep(data->publications);
2034  data->publications = NIL;
2035  }
2036  data->publications = LoadPublications(data->publication_names);
2037  MemoryContextSwitchTo(oldctx);
2038  publications_valid = true;
2039  }
2040 
2041  /*
2042  * Reset the schema_sent status as the relation definition may have
2043  * changed. Also reset pubactions to empty in case the relation was
2044  * dropped from a publication, and free any objects that depended on
2045  * the earlier definition.
2046  */
2047  entry->schema_sent = false;
2048  list_free(entry->streamed_txns);
2049  entry->streamed_txns = NIL;
2050  bms_free(entry->columns);
2051  entry->columns = NULL;
2052  entry->pubactions.pubinsert = false;
2053  entry->pubactions.pubupdate = false;
2054  entry->pubactions.pubdelete = false;
2055  entry->pubactions.pubtruncate = false;
2056 
2057  /*
2058  * Tuple slot cleanup. (Slots will be rebuilt later if needed.)
2059  */
2060  if (entry->old_slot)
2061  ExecDropSingleTupleTableSlot(entry->old_slot);
2062  if (entry->new_slot)
2063  ExecDropSingleTupleTableSlot(entry->new_slot);
2064 
2065  entry->old_slot = NULL;
2066  entry->new_slot = NULL;
2067 
2068  if (entry->attrmap)
2069  free_attrmap(entry->attrmap);
2070  entry->attrmap = NULL;
2071 
2072  /*
2073  * Row filter cache cleanups.
2074  */
2075  if (entry->entry_cxt)
2076  MemoryContextDelete(entry->entry_cxt);
2077 
2078  entry->entry_cxt = NULL;
2079  entry->estate = NULL;
2080  memset(entry->exprstate, 0, sizeof(entry->exprstate));
2081 
2082  /*
2083  * Build the publication cache. We can't use the one provided by relcache
2084  * as relcache considers all publications that the given relation is in,
2085  * but here we only need to consider the ones that the subscriber
2086  * requested.
2087  */
2088  foreach(lc, data->publications)
2089  {
2090  Publication *pub = lfirst(lc);
2091  bool publish = false;
2092 
2093  /*
2094  * Under what relid should we publish changes in this publication?
2095  * We'll use the top-most relid across all publications. Also
2096  * track the ancestor level for this publication.
2097  */
2098  Oid pub_relid = relid;
2099  int ancestor_level = 0;
2100 
2101  /*
2102  * If this is a FOR ALL TABLES publication, pick the partition
2103  * root and set the ancestor level accordingly.
2104  */
2105  if (pub->alltables)
2106  {
2107  publish = true;
2108  if (pub->pubviaroot && am_partition)
2109  {
2110  List *ancestors = get_partition_ancestors(relid);
2111 
2112  pub_relid = llast_oid(ancestors);
2113  ancestor_level = list_length(ancestors);
2114  }
2115  }
2116 
2117  if (!publish)
2118  {
2119  bool ancestor_published = false;
2120 
2121  /*
2122  * For a partition, check if any of the ancestors are
2123  * published. If so, note down the topmost ancestor that is
2124  * published via this publication, which will be used as the
2125  * relation via which to publish the partition's changes.
2126  */
2127  if (am_partition)
2128  {
2129  Oid ancestor;
2130  int level;
2131  List *ancestors = get_partition_ancestors(relid);
2132 
2133  ancestor = GetTopMostAncestorInPublication(pub->oid,
2134  ancestors,
2135  &level);
2136 
2137  if (ancestor != InvalidOid)
2138  {
2139  ancestor_published = true;
2140  if (pub->pubviaroot)
2141  {
2142  pub_relid = ancestor;
2143  ancestor_level = level;
2144  }
2145  }
2146  }
2147 
2148  if (list_member_oid(pubids, pub->oid) ||
2149  list_member_oid(schemaPubids, pub->oid) ||
2150  ancestor_published)
2151  publish = true;
2152  }
2153 
2154  /*
2155  * If the relation is to be published, determine actions to
2156  * publish, and list of columns, if appropriate.
2157  *
2158  * Don't publish changes for partitioned tables, because
2159  * publishing those of their partitions suffices, unless partition
2160  * changes won't be published due to pubviaroot being set.
2161  */
2162  if (publish &&
2163  (relkind != RELKIND_PARTITIONED_TABLE || pub->pubviaroot))
2164  {
2165  entry->pubactions.pubinsert |= pub->pubactions.pubinsert;
2166  entry->pubactions.pubupdate |= pub->pubactions.pubupdate;
2167  entry->pubactions.pubdelete |= pub->pubactions.pubdelete;
2168  entry->pubactions.pubtruncate |= pub->pubactions.pubtruncate;
2169 
2170  /*
2171  * We want to publish the changes as the top-most ancestor
2172  * across all publications. So we need to check if the already
2173  * calculated level is higher than the new one. If yes, we can
2174  * ignore the new value (as it's a child). Otherwise the new
2175  * value is an ancestor, so we keep it.
2176  */
2177  if (publish_ancestor_level > ancestor_level)
2178  continue;
2179 
2180  /*
2181  * If we found an ancestor higher up in the tree, discard the
2182  * list of publications through which we replicate it, and use
2183  * the new ancestor.
2184  */
2185  if (publish_ancestor_level < ancestor_level)
2186  {
2187  publish_as_relid = pub_relid;
2188  publish_ancestor_level = ancestor_level;
2189 
2190  /* reset the publication list for this relation */
2191  rel_publications = NIL;
2192  }
2193  else
2194  {
2195  /* Same ancestor level, has to be the same OID. */
2196  Assert(publish_as_relid == pub_relid);
2197  }
2198 
2199  /* Track publications for this ancestor. */
2200  rel_publications = lappend(rel_publications, pub);
2201  }
2202  }
2203 
2204  entry->publish_as_relid = publish_as_relid;
2205 
2206  /*
2207  * Initialize the tuple slot, map, and row filter. These are only used
2208  * when publishing inserts, updates, or deletes.
2209  */
2210  if (entry->pubactions.pubinsert || entry->pubactions.pubupdate ||
2211  entry->pubactions.pubdelete)
2212  {
2213  /* Initialize the tuple slot and map */
2214  init_tuple_slot(data, relation, entry);
2215 
2216  /* Initialize the row filter */
2217  pgoutput_row_filter_init(data, rel_publications, entry);
2218 
2219  /* Initialize the column list */
2220  pgoutput_column_list_init(data, rel_publications, entry);
2221  }
2222 
2223  list_free(pubids);
2224  list_free(schemaPubids);
2225  list_free(rel_publications);
2226 
2227  entry->replicate_valid = true;
2228  }
2229 
2230  return entry;
2231 }
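
To put the cached entry to use, a change callback typically fetches it once per change and returns early when the operation is not published at all. The fragment below is an abbreviated sketch of that caller-side pattern (pgoutput_change() earlier in this file does this, followed by row filtering and column-list handling); data, relation and change stand in for the callback's arguments.

/* Abbreviated sketch: bail out cheaply when nothing has to be sent. */
RelationSyncEntry *relentry;

if (!is_publishable_relation(relation))
	return;

relentry = get_rel_sync_entry(data, relation);

/* Skip the change if this relation's publications don't cover the action. */
switch (change->action)
{
	case REORDER_BUFFER_CHANGE_INSERT:
		if (!relentry->pubactions.pubinsert)
			return;
		break;
	case REORDER_BUFFER_CHANGE_UPDATE:
		if (!relentry->pubactions.pubupdate)
			return;
		break;
	case REORDER_BUFFER_CHANGE_DELETE:
		if (!relentry->pubactions.pubdelete)
			return;
		break;
	default:
		Assert(false);
}
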
2232 
2233 /*
2234  * Clean up the list of streamed transactions and update the schema_sent flag.
2235  *
2236  * When a streamed transaction commits or aborts, we need to remove the
2237  * toplevel XID from the schema cache. If the transaction aborted, the
2238  * subscriber will simply throw away the schema records we streamed, so
2239  * we don't need to do anything else.
2240  *
2241  * If the transaction is committed, the subscriber will update the relation
2242  * cache - so tweak the schema_sent flag accordingly.
2243  */
2244 static void
2245 cleanup_rel_sync_cache(TransactionId xid, bool is_commit)
2246 {
2247  HASH_SEQ_STATUS hash_seq;
2248  RelationSyncEntry *entry;
2249  ListCell *lc;
2250 
2251  Assert(RelationSyncCache != NULL);
2252 
2253  hash_seq_init(&hash_seq, RelationSyncCache);
2254  while ((entry = hash_seq_search(&hash_seq)) != NULL)
2255  {
2256  /*
2257  * We can set the schema_sent flag for an entry that has the committed
2258  * xid in its list, as that ensures the subscriber will have the
2259  * corresponding schema and we don't need to send it again unless there
2260  * is an invalidation for that relation.
2261  */
2262  foreach(lc, entry->streamed_txns)
2263  {
2264  if (xid == lfirst_xid(lc))
2265  {
2266  if (is_commit)
2267  entry->schema_sent = true;
2268 
2269  entry->streamed_txns =
2270  foreach_delete_current(entry->streamed_txns, lc);
2271  break;
2272  }
2273  }
2274  }
2275 }
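
The foreach_delete_current() call above is the supported idiom for deleting a cell from a List while iterating over it with foreach(): it keeps the loop state valid and returns the updated list. A small, hypothetical sketch of the same idiom in isolation (remove_xid_from_list is an invented name, for illustration only):

#include "postgres.h"
#include "nodes/pg_list.h"

/* Hypothetical helper: remove one xid from a list while walking it. */
static List *
remove_xid_from_list(List *xids, TransactionId xid)
{
	ListCell   *lc;

	foreach(lc, xids)
	{
		if (lfirst_xid(lc) == xid)
		{
			/* deletes the current cell; the foreach state stays valid */
			xids = foreach_delete_current(xids, lc);
			break;
		}
	}
	return xids;
}
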
2276 
2277 /*
2278  * Relcache invalidation callback
2279  */
2280 static void
2281 rel_sync_cache_relation_cb(Datum arg, Oid relid)
2282 {
2283  RelationSyncEntry *entry;
2284 
2285  /*
2286  * We can get here if the plugin was used via the SQL interface: the
2287  * RelationSyncCache is destroyed when the decoding finishes, but there
2288  * is no way to unregister the relcache invalidation callback.
2289  */
2290  if (RelationSyncCache == NULL)
2291  return;
2292 
2293  /*
2294  * Nobody keeps pointers to entries in this hash table around outside
2295  * logical decoding callback calls - but invalidation events can come in
2296  * *during* a callback if we do any syscache access in the callback.
2297  * Because of that we must mark the cache entry as invalid but not damage
2298  * any of its substructure here. The next get_rel_sync_entry() call will
2299  * rebuild it all.
2300  */
2301  if (OidIsValid(relid))
2302  {
2303  /*
2304  * Getting invalidations for relations that aren't in the table is
2305  * entirely normal, so we don't care whether the entry is found or not.
2306  */
2307  entry = (RelationSyncEntry *) hash_search(RelationSyncCache, &relid,
2308  HASH_FIND, NULL);
2309  if (entry != NULL)
2310  entry->replicate_valid = false;
2311  }
2312  else
2313  {
2314  /* Whole cache must be flushed. */
2315  HASH_SEQ_STATUS status;
2316 
2317  hash_seq_init(&status, RelationSyncCache);
2318  while ((entry = (RelationSyncEntry *) hash_seq_search(&status)) != NULL)
2319  {
2320  entry->replicate_valid = false;
2321  }
2322  }
2323 }
2324 
2325 /*
2326  * Publication relation/schema map syscache invalidation callback
2327  *
2328  * Called for invalidations on pg_publication, pg_publication_rel, and
2329  * pg_publication_namespace.
2330  */
2331 static void
2332 rel_sync_cache_publication_cb(Datum arg, int cacheid, uint32 hashvalue)
2333 {
2334  HASH_SEQ_STATUS status;
2335  RelationSyncEntry *entry;
2336 
2337  /*
2338  * We can get here if the plugin was used via the SQL interface: the
2339  * RelationSyncCache is destroyed when the decoding finishes, but there
2340  * is no way to unregister the syscache invalidation callback.
2341  */
2342  if (RelationSyncCache == NULL)
2343  return;
2344 
2345  /*
2346  * There is no way to find which entry in our cache the hash belongs to, so
2347  * mark the whole cache as invalid.
2348  */
2349  hash_seq_init(&status, RelationSyncCache);
2350  while ((entry = (RelationSyncEntry *) hash_seq_search(&status)) != NULL)
2351  {
2352  entry->replicate_valid = false;
2353  }
2354 }
2355 
2356 /* Send the replication origin, if requested */
2357 static void
2358 send_repl_origin(LogicalDecodingContext *ctx, RepOriginId origin_id,
2359  XLogRecPtr origin_lsn, bool send_origin)
2360 {
2361  if (send_origin)
2362  {
2363  char *origin;
2364 
2365  /*----------
2366  * XXX: which behaviour do we want here?
2367  *
2368  * Alternatives:
2369  * - don't send origin message if origin name not found
2370  * (that's what we do now)
2371  * - throw error - that will break replication, not good
2372  * - send some special "unknown" origin
2373  *----------
2374  */
2375  if (replorigin_by_oid(origin_id, true, &origin))
2376  {
2377  /* Message boundary */
2378  OutputPluginWrite(ctx, false);
2379  OutputPluginPrepareWrite(ctx, true);
2380 
2381  logicalrep_write_origin(ctx->out, origin, origin_lsn);
2382  }
2383  }
2384 }
2385 
2386 /*
2387  * Try to update progress and send a keepalive message if too many changes were
2388  * processed.
2389  *
2390  * For a large transaction, if we don't send any change to the downstream for
2391  * a long time (exceeding the wal_receiver_timeout of the standby), it can
2392  * time out. This can happen when all or most of the changes are either not
2393  * published or are filtered out.
2394  */
2395 static void
2396 update_replication_progress(LogicalDecodingContext *ctx, bool skipped_xact)
2397 {
2398  static int changes_count = 0;
2399 
2400  /*
2401  * We don't want to try sending a keepalive message after processing each
2402  * change as that can have overhead. Tests revealed that there is no
2403  * noticeable overhead in doing it after continuously processing 100 or so
2404  * changes.
2405  */
2406 #define CHANGES_THRESHOLD 100
2407 
2408  /*
2409  * If we are at the end-of-transaction LSN, update progress tracking.
2410  * Otherwise, after continuously processing CHANGES_THRESHOLD changes, we
2411  * try to send a keepalive message if required.
2412  */
2413  if (ctx->end_xact || ++changes_count >= CHANGES_THRESHOLD)
2414  {
2415  OutputPluginUpdateProgress(ctx, skipped_xact);
2416  changes_count = 0;
2417  }
2418 }
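
For context on the skipped_xact flag: the non-streamed commit path derives it from whether a BEGIN was ever sent for the transaction, so that empty (fully filtered) transactions still produce keepalives without sending a COMMIT message. The fragment below is an abbreviated paraphrase of pgoutput_commit_txn() earlier in this file, shown only to illustrate how skipped_xact gets its value; commit_txn_sketch is an illustrative name, not the real function.

/* Abbreviated paraphrase, not the full function body. */
static void
commit_txn_sketch(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
				  XLogRecPtr commit_lsn)
{
	PGOutputTxnData *txndata = (PGOutputTxnData *) txn->output_plugin_private;
	bool		sent_begin_txn = txndata->sent_begin_txn;

	/* an empty transaction counts as "skipped" for progress reporting */
	update_replication_progress(ctx, !sent_begin_txn);

	pfree(txndata);
	txn->output_plugin_private = NULL;

	if (!sent_begin_txn)
		return;					/* nothing was sent, so no COMMIT either */

	OutputPluginPrepareWrite(ctx, true);
	logicalrep_write_commit(ctx->out, txn, commit_lsn);
	OutputPluginWrite(ctx, true);
}
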
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:259
void free_attrmap(AttrMap *map)
Definition: attmap.c:57
AttrMap * build_attrmap_by_name_if_req(TupleDesc indesc, TupleDesc outdesc, bool missing_ok)
Definition: attmap.c:264
bool bms_equal(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:94
void bms_free(Bitmapset *a)
Definition: bitmapset.c:209
int bms_num_members(const Bitmapset *a)
Definition: bitmapset.c:649
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:428
#define TextDatumGetCString(d)
Definition: builtins.h:86
unsigned int uint32
Definition: c.h:442
#define PG_UINT32_MAX
Definition: c.h:526
#define AssertVariableIsOfType(varname, typename)
Definition: c.h:914
uint32 TransactionId
Definition: c.h:588
#define OidIsValid(objectId)
Definition: c.h:711
size_t Size
Definition: c.h:541
int64 TimestampTz
Definition: timestamp.h:39
bool defGetBoolean(DefElem *def)
Definition: define.c:108
char * defGetString(DefElem *def)
Definition: define.c:49
void hash_destroy(HTAB *hashp)
Definition: dynahash.c:863
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:953
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:350
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1431
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1421
int errcode(int sqlerrcode)
Definition: elog.c:735
int errmsg(const char *fmt,...)
Definition: elog.c:946
#define DEBUG3
Definition: elog.h:24
#define DEBUG1
Definition: elog.h:26
#define ERROR
Definition: elog.h:35
#define ereport(elevel,...)
Definition: elog.h:145
ExprState * ExecPrepareExpr(Expr *node, EState *estate)
Definition: execExpr.c:747
TupleTableSlot * MakeTupleTableSlot(TupleDesc tupleDesc, const TupleTableSlotOps *tts_ops)
Definition: execTuples.c:1112
const TupleTableSlotOps TTSOpsVirtual
Definition: execTuples.c:83
TupleTableSlot * ExecStoreVirtualTuple(TupleTableSlot *slot)
Definition: execTuples.c:1552
void ExecDropSingleTupleTableSlot(TupleTableSlot *slot)
Definition: execTuples.c:1254
TupleTableSlot * ExecStoreHeapTuple(HeapTuple tuple, TupleTableSlot *slot, bool shouldFree)
Definition: execTuples.c:1352
const TupleTableSlotOps TTSOpsHeapTuple
Definition: execTuples.c:84
TupleTableSlot * MakeSingleTupleTableSlot(TupleDesc tupdesc, const TupleTableSlotOps *tts_ops)
Definition: execTuples.c:1238
void ExecInitRangeTable(EState *estate, List *rangeTable)
Definition: execUtils.c:755
EState * CreateExecutorState(void)
Definition: execUtils.c:92
#define ResetPerTupleExprContext(estate)
Definition: executor.h:547
#define GetPerTupleExprContext(estate)
Definition: executor.h:538
static Datum ExecEvalExprSwitchContext(ExprState *state, ExprContext *econtext, bool *isNull)
Definition: executor.h:336
@ HASH_FIND
Definition: hsearch.h:113
@ HASH_ENTER
Definition: hsearch.h:114
#define HASH_CONTEXT
Definition: hsearch.h:102
#define HASH_ELEM
Definition: hsearch.h:95
#define HASH_BLOBS
Definition: hsearch.h:97
#define HeapTupleIsValid(tuple)
Definition: htup.h:78
void CacheRegisterRelcacheCallback(RelcacheCallbackFunction func, Datum arg)
Definition: inval.c:1561
void CacheRegisterSyscacheCallback(int cacheid, SyscacheCallbackFunction func, Datum arg)
Definition: inval.c:1519
int i
Definition: isn.c:73
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:77
Assert(fmt[strlen(fmt) - 1] !='\n')
List * lappend_xid(List *list, TransactionId datum)
Definition: list.c:392
bool list_member_xid(const List *list, TransactionId datum)
Definition: list.c:741
List * lappend(List *list, void *datum)
Definition: list.c:338
void list_free(List *list)
Definition: list.c:1545
bool list_member_oid(const List *list, Oid datum)
Definition: list.c:721
void list_free_deep(List *list)
Definition: list.c:1559
#define AccessShareLock
Definition: lockdefs.h:36
void OutputPluginWrite(struct LogicalDecodingContext *ctx, bool last_write)
Definition: logical.c:662
void OutputPluginUpdateProgress(struct LogicalDecodingContext *ctx, bool skipped_xact)
Definition: logical.c:675
void OutputPluginPrepareWrite(struct LogicalDecodingContext *ctx, bool last_write)
Definition: logical.c:649
#define LOGICALREP_PROTO_MIN_VERSION_NUM
Definition: logicalproto.h:36
#define LOGICALREP_PROTO_STREAM_VERSION_NUM
Definition: logicalproto.h:38
#define LOGICALREP_PROTO_TWOPHASE_VERSION_NUM
Definition: logicalproto.h:39
#define LOGICALREP_PROTO_MAX_VERSION_NUM
Definition: logicalproto.h:40
bool get_rel_relispartition(Oid relid)
Definition: lsyscache.c:2009
char * get_namespace_name(Oid nspid)
Definition: lsyscache.c:3331
char get_rel_relkind(Oid relid)
Definition: lsyscache.c:1985
Oid get_rel_namespace(Oid relid)
Definition: lsyscache.c:1934
Expr * make_orclause(List *orclauses)
Definition: makefuncs.c:652
void MemoryContextReset(MemoryContext context)
Definition: mcxt.c:303
void pfree(void *pointer)
Definition: mcxt.c:1306
void * palloc0(Size size)
Definition: mcxt.c:1230
void * MemoryContextAllocZero(MemoryContext context, Size size)
Definition: mcxt.c:1037
MemoryContext CacheMemoryContext
Definition: mcxt.c:133
void MemoryContextDelete(MemoryContext context)
Definition: mcxt.c:376
#define AllocSetContextCreate
Definition: memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:153
#define ALLOCSET_SMALL_SIZES
Definition: memutils.h:163
#define MemoryContextCopyAndSetIdentifier(cxt, id)
Definition: memutils.h:101
#define IsA(nodeptr, _type_)
Definition: nodes.h:168
#define makeNode(_type_)
Definition: nodes.h:165
bool replorigin_by_oid(RepOriginId roident, bool missing_ok, char **roname)
Definition: origin.c:461
#define InvalidRepOriginId
Definition: origin.h:33
@ OUTPUT_PLUGIN_BINARY_OUTPUT
Definition: output_plugin.h:19
void(* LogicalOutputPluginInit)(struct OutputPluginCallbacks *cb)
Definition: output_plugin.h:36
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:135
@ RTE_RELATION
Definition: parsenodes.h:982
List * get_partition_ancestors(Oid relid)
Definition: partition.c:133
FormData_pg_attribute * Form_pg_attribute
Definition: pg_attribute.h:207
void * arg
const void * data
#define lfirst(lc)
Definition: pg_list.h:170
static int list_length(const List *l)
Definition: pg_list.h:150
#define NIL
Definition: pg_list.h:66
#define list_make1(x1)
Definition: pg_list.h:210
#define llast_oid(l)
Definition: pg_list.h:198
#define lfirst_xid(lc)
Definition: pg_list.h:173
#define foreach_delete_current(lst, cell)
Definition: pg_list.h:388
Publication * GetPublicationByName(const char *pubname, bool missing_ok)
List * GetSchemaPublications(Oid schemaid)
List * GetRelationPublications(Oid relid)
Oid GetTopMostAncestorInPublication(Oid puboid, List *ancestors, int *ancestor_level)
Bitmapset * pub_collist_to_bitmapset(Bitmapset *columns, Datum pubcols, MemoryContext mcxt)
bool is_publishable_relation(Relation rel)
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:225
#define LOGICALREP_ORIGIN_NONE
#define LOGICALREP_ORIGIN_ANY
static List * LoadPublications(List *pubnames)
Definition: pgoutput.c:1752
static void pgoutput_send_begin(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
Definition: pgoutput.c:546
static bool publish_no_origin
Definition: pgoutput.c:83
static void rel_sync_cache_publication_cb(Datum arg, int cacheid, uint32 hashvalue)
Definition: pgoutput.c:2332
struct RelationSyncEntry RelationSyncEntry
static void pgoutput_ensure_entry_cxt(PGOutputData *data, RelationSyncEntry *entry)
Definition: pgoutput.c:826
static void parse_output_parameters(List *options, PGOutputData *data)
Definition: pgoutput.c:281
static void init_tuple_slot(PGOutputData *data, Relation relation, RelationSyncEntry *entry)
Definition: pgoutput.c:1094
static bool pgoutput_row_filter_exec_expr(ExprState *state, ExprContext *econtext)
Definition: pgoutput.c:803
static void update_replication_progress(LogicalDecodingContext *ctx, bool skipped_xact)
Definition: pgoutput.c:2396
static void pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, Relation relation, ReorderBufferChange *change)
Definition: pgoutput.c:1367
#define NUM_ROWFILTER_PUBACTIONS
Definition: pgoutput.c:108
static void pgoutput_begin_prepare_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
Definition: pgoutput.c:600
struct PGOutputTxnData PGOutputTxnData
static void pgoutput_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt, bool is_init)
Definition: pgoutput.c:411
static void pgoutput_truncate(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, int nrelations, Relation relations[], ReorderBufferChange *change)
Definition: pgoutput.c:1605
static void init_rel_sync_cache(MemoryContext cachectx)
Definition: pgoutput.c:1914
RowFilterPubAction
Definition: pgoutput.c:102
@ PUBACTION_INSERT
Definition: pgoutput.c:103
@ PUBACTION_UPDATE
Definition: pgoutput.c:104
@ PUBACTION_DELETE
Definition: pgoutput.c:105
PG_MODULE_MAGIC
Definition: pgoutput.c:37
static void rel_sync_cache_relation_cb(Datum arg, Oid relid)
Definition: pgoutput.c:2281
static void pgoutput_prepare_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, XLogRecPtr prepare_lsn)
Definition: pgoutput.c:617
static RelationSyncEntry * get_rel_sync_entry(PGOutputData *data, Relation relation)
Definition: pgoutput.c:1976
static bool pgoutput_origin_filter(LogicalDecodingContext *ctx, RepOriginId origin_id)
Definition: pgoutput.c:1722
static void send_repl_origin(LogicalDecodingContext *ctx, RepOriginId origin_id, XLogRecPtr origin_lsn, bool send_origin)
Definition: pgoutput.c:2358
static void pgoutput_rollback_prepared_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, XLogRecPtr prepare_end_lsn, TimestampTz prepare_time)
Definition: pgoutput.c:645
static void pgoutput_shutdown(LogicalDecodingContext *ctx)
Definition: pgoutput.c:1739
static void cleanup_rel_sync_cache(TransactionId xid, bool is_commit)
Definition: pgoutput.c:2245
static void pgoutput_stream_abort(struct LogicalDecodingContext *ctx, ReorderBufferTXN *txn, XLogRecPtr abort_lsn)
Definition: pgoutput.c:1839
static void pgoutput_stream_prepare_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, XLogRecPtr prepare_lsn)
Definition: pgoutput.c:1894
static void maybe_send_schema(LogicalDecodingContext *ctx, ReorderBufferChange *change, Relation relation, RelationSyncEntry *relentry)
Definition: pgoutput.c:663
static void pgoutput_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
Definition: pgoutput.c:532
static void send_relation_and_attrs(Relation relation, TransactionId xid, LogicalDecodingContext *ctx, Bitmapset *columns)
Definition: pgoutput.c:733
static HTAB * RelationSyncCache
Definition: pgoutput.c:213
static void pgoutput_row_filter_init(PGOutputData *data, List *publications, RelationSyncEntry *entry)
Definition: pgoutput.c:848
static void pgoutput_stream_commit(struct LogicalDecodingContext *ctx, ReorderBufferTXN *txn, XLogRecPtr commit_lsn)
Definition: pgoutput.c:1868
#define CHANGES_THRESHOLD
static void pgoutput_commit_prepared_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, XLogRecPtr commit_lsn)
Definition: pgoutput.c:631
static bool pgoutput_row_filter(Relation relation, TupleTableSlot *old_slot, TupleTableSlot **new_slot_ptr, RelationSyncEntry *entry, ReorderBufferChangeType *action)
Definition: pgoutput.c:1186
static void set_schema_sent_in_streamed_txn(RelationSyncEntry *entry, TransactionId xid)
Definition: pgoutput.c:1955
static void pgoutput_column_list_init(PGOutputData *data, List *publications, RelationSyncEntry *entry)
Definition: pgoutput.c:994
static bool in_streaming
Definition: pgoutput.c:82
static void pgoutput_stream_stop(struct LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
Definition: pgoutput.c:1820
static void pgoutput_stream_start(struct LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
Definition: pgoutput.c:1789
static void publication_invalidation_cb(Datum arg, int cacheid, uint32 hashvalue)
Definition: pgoutput.c:1774
void _PG_output_plugin_init(OutputPluginCallbacks *cb)
Definition: pgoutput.c:250
static void pgoutput_message(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, XLogRecPtr message_lsn, bool transactional, const char *prefix, Size sz, const char *message)
Definition: pgoutput.c:1675
static void pgoutput_commit_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, XLogRecPtr commit_lsn)
Definition: pgoutput.c:568
static bool publications_valid
Definition: pgoutput.c:81
static bool get_schema_sent_in_streamed_txn(RelationSyncEntry *entry, TransactionId xid)
Definition: pgoutput.c:1945
static EState * create_estate_for_relation(Relation rel)
Definition: pgoutput.c:777
int pg_strcasecmp(const char *s1, const char *s2)
Definition: pgstrcasecmp.c:36
#define VARATT_IS_EXTERNAL_ONDISK(PTR)
Definition: postgres.h:328
static bool DatumGetBool(Datum X)
Definition: postgres.h:438
uintptr_t Datum
Definition: postgres.h:412
static Datum ObjectIdGetDatum(Oid X)
Definition: postgres.h:600
#define InvalidOid
Definition: postgres_ext.h:36
unsigned int Oid
Definition: postgres_ext.h:31
void logicalrep_write_commit(StringInfo out, ReorderBufferTXN *txn, XLogRecPtr commit_lsn)
Definition: proto.c:89
void logicalrep_write_rollback_prepared(StringInfo out, ReorderBufferTXN *txn, XLogRecPtr prepare_end_lsn, TimestampTz prepare_time)
Definition: proto.c:304
void logicalrep_write_origin(StringInfo out, const char *origin, XLogRecPtr origin_lsn)
Definition: proto.c:385
void logicalrep_write_rel(StringInfo out, TransactionId xid, Relation rel, Bitmapset *columns)
Definition: proto.c:670
void logicalrep_write_message(StringInfo out, TransactionId xid, XLogRecPtr lsn, bool transactional, const char *prefix, Size sz, const char *message)
Definition: proto.c:643
void logicalrep_write_update(StringInfo out, TransactionId xid, Relation rel, TupleTableSlot *oldslot, TupleTableSlot *newslot, bool binary, Bitmapset *columns)
Definition: proto.c:458
void logicalrep_write_prepare(StringInfo out, ReorderBufferTXN *txn, XLogRecPtr prepare_lsn)
Definition: proto.c:198
void logicalrep_write_insert(StringInfo out, TransactionId xid, Relation rel, TupleTableSlot *newslot, bool binary, Bitmapset *columns)
Definition: proto.c:414
void logicalrep_write_typ(StringInfo out, TransactionId xid, Oid typoid)
Definition: proto.c:725
void logicalrep_write_truncate(StringInfo out, TransactionId xid, int nrelids, Oid relids[], bool cascade, bool restart_seqs)
Definition: proto.c:586
void logicalrep_write_begin(StringInfo out, ReorderBufferTXN *txn)
Definition: proto.c:60
void logicalrep_write_delete(StringInfo out, TransactionId xid, Relation rel, TupleTableSlot *oldslot, bool binary, Bitmapset *columns)
Definition: proto.c:533
void logicalrep_write_commit_prepared(StringInfo out, ReorderBufferTXN *txn, XLogRecPtr commit_lsn)
Definition: proto.c:248
void logicalrep_write_stream_commit(StringInfo out, ReorderBufferTXN *txn, XLogRecPtr commit_lsn)
Definition: proto.c:1118
void logicalrep_write_stream_prepare(StringInfo out, ReorderBufferTXN *txn, XLogRecPtr prepare_lsn)
Definition: proto.c:364
void logicalrep_write_stream_abort(StringInfo out, TransactionId xid, TransactionId subxid)
Definition: proto.c:1169
void logicalrep_write_begin_prepare(StringInfo out, ReorderBufferTXN *txn)
Definition: proto.c:127
void logicalrep_write_stream_start(StringInfo out, TransactionId xid, bool first_segment)
Definition: proto.c:1075
void logicalrep_write_stream_stop(StringInfo out)
Definition: proto.c:1109
void * stringToNode(const char *str)
Definition: read.c:90
#define RelationGetRelid(relation)
Definition: rel.h:501
#define RelationGetDescr(relation)
Definition: rel.h:527
#define RelationGetNumberOfAttributes(relation)
Definition: rel.h:507
#define RelationGetRelationName(relation)
Definition: rel.h:535
#define RelationIsValid(relation)
Definition: rel.h:474
#define RelationGetNamespace(relation)
Definition: rel.h:542
Relation RelationIdGetRelation(Oid relationId)
Definition: relcache.c:2054
void RelationClose(Relation relation)
Definition: relcache.c:2160
#define rbtxn_is_streamed(txn)
ReorderBufferChangeType
Definition: reorderbuffer.h:55
@ REORDER_BUFFER_CHANGE_INSERT
Definition: reorderbuffer.h:56
@ REORDER_BUFFER_CHANGE_DELETE
Definition: reorderbuffer.h:58
@ REORDER_BUFFER_CHANGE_UPDATE
Definition: reorderbuffer.h:57
Definition: attmap.h:35
char * defname
Definition: parsenodes.h:779
Node * arg
Definition: parsenodes.h:780
CommandId es_output_cid
Definition: execnodes.h:628
TupleTableSlot * ecxt_scantuple
Definition: execnodes.h:247
Size keysize
Definition: hsearch.h:75
Size entrysize
Definition: hsearch.h:76
MemoryContext hcxt
Definition: hsearch.h:86
Definition: dynahash.c:220
Definition: pg_list.h:52
MemoryContext context
Definition: logical.h:36
StringInfo out
Definition: logical.h:71
void * output_plugin_private
Definition: logical.h:76
List * output_plugin_options
Definition: logical.h:59
LogicalDecodeStreamChangeCB stream_change_cb
LogicalDecodeMessageCB message_cb
LogicalDecodeStreamTruncateCB stream_truncate_cb
LogicalDecodeStreamMessageCB stream_message_cb
LogicalDecodeFilterByOriginCB filter_by_origin_cb
LogicalDecodeTruncateCB truncate_cb
LogicalDecodeStreamStopCB stream_stop_cb
LogicalDecodeStreamCommitCB stream_commit_cb
LogicalDecodeRollbackPreparedCB rollback_prepared_cb
LogicalDecodeStreamPrepareCB stream_prepare_cb
LogicalDecodeCommitPreparedCB commit_prepared_cb
LogicalDecodeStreamStartCB stream_start_cb
LogicalDecodePrepareCB prepare_cb
LogicalDecodeStartupCB startup_cb
LogicalDecodeCommitCB commit_cb
LogicalDecodeBeginCB begin_cb
LogicalDecodeStreamAbortCB stream_abort_cb
LogicalDecodeBeginPrepareCB begin_prepare_cb
LogicalDecodeChangeCB change_cb
LogicalDecodeShutdownCB shutdown_cb
OutputPluginOutputType output_type
Definition: output_plugin.h:28
bool sent_begin_txn
Definition: pgoutput.c:209
PublicationActions pubactions
RTEKind rtekind
Definition: parsenodes.h:1001
Form_pg_class rd_rel
Definition: rel.h:110
ExprState * exprstate[NUM_ROWFILTER_PUBACTIONS]
Definition: pgoutput.c:148
Bitmapset * columns
Definition: pgoutput.c:174
PublicationActions pubactions
Definition: pgoutput.c:139
TupleTableSlot * old_slot
Definition: pgoutput.c:151
bool replicate_valid
Definition: pgoutput.c:132
MemoryContext entry_cxt
Definition: pgoutput.c:180
EState * estate
Definition: pgoutput.c:149
TupleTableSlot * new_slot
Definition: pgoutput.c:150
List * streamed_txns
Definition: pgoutput.c:135
AttrMap * attrmap
Definition: pgoutput.c:167
struct ReorderBufferChange::@97::@99 truncate
ReorderBufferChangeType action
Definition: reorderbuffer.h:85
union ReorderBufferChange::@97 data
struct ReorderBufferTXN * txn
Definition: reorderbuffer.h:88
struct ReorderBufferChange::@97::@98 tp
RepOriginId origin_id
struct ReorderBufferTXN * toptxn
void * output_plugin_private
XLogRecPtr origin_lsn
TransactionId xid
Definition: value.h:64
bool * tts_isnull
Definition: tuptable.h:128
Datum * tts_values
Definition: tuptable.h:126
Definition: regguts.h:318
void ReleaseSysCache(HeapTuple tuple)
Definition: syscache.c:1221
Datum SysCacheGetAttr(int cacheId, HeapTuple tup, AttrNumber attributeNumber, bool *isNull)
Definition: syscache.c:1434
HeapTuple SearchSysCache2(int cacheId, Datum key1, Datum key2)
Definition: syscache.c:1184
@ PUBLICATIONOID
Definition: syscache.h:83
@ PUBLICATIONNAMESPACEMAP
Definition: syscache.h:82
@ PUBLICATIONRELMAP
Definition: syscache.h:85
#define SearchSysCacheExists2(cacheId, key1, key2)
Definition: syscache.h:190
#define InvalidTransactionId
Definition: transam.h:31
#define FirstGenbkiObjectId
Definition: transam.h:195
TupleTableSlot * execute_attr_map_slot(AttrMap *attrMap, TupleTableSlot *in_slot, TupleTableSlot *out_slot)
Definition: tupconvert.c:192
TupleDesc CreateTupleDescCopy(TupleDesc tupdesc)
Definition: tupdesc.c:111
#define TupleDescAttr(tupdesc, i)
Definition: tupdesc.h:92
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition: tuptable.h:433
static void slot_getallattrs(TupleTableSlot *slot)
Definition: tuptable.h:362
#define strVal(v)
Definition: value.h:82
bool SplitIdentifierString(char *rawstring, char separator, List **namelist)
Definition: varlena.c:3712
CommandId GetCurrentCommandId(bool used)
Definition: xact.c:817
uint16 RepOriginId
Definition: xlogdefs.h:65
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28