path: root/contrib/libs/libpq/src/include/access
author     galaxycrab <UgnineSirdis@ydb.tech>  2023-11-23 11:26:33 +0300
committer  galaxycrab <UgnineSirdis@ydb.tech>  2023-11-23 12:01:57 +0300
commit     44354d0fc55926c1d4510d1d2c9c9f6a1a5e9300 (patch)
tree       cb4d75cd1c6dbc3da0ed927337fd8d1b6ed9da84 /contrib/libs/libpq/src/include/access
parent     0e69bf615395fdd48ecee032faaec81bc468b0b8 (diff)
download   ydb-44354d0fc55926c1d4510d1d2c9c9f6a1a5e9300.tar.gz
YQ Connector:test INNER JOIN
Diffstat (limited to 'contrib/libs/libpq/src/include/access')
-rw-r--r--  contrib/libs/libpq/src/include/access/rmgr.h             62
-rw-r--r--  contrib/libs/libpq/src/include/access/rmgrlist.h         49
-rw-r--r--  contrib/libs/libpq/src/include/access/transam.h         375
-rw-r--r--  contrib/libs/libpq/src/include/access/xlog_internal.h   404
-rw-r--r--  contrib/libs/libpq/src/include/access/xlogdefs.h         82
-rw-r--r--  contrib/libs/libpq/src/include/access/xlogreader.h      444
-rw-r--r--  contrib/libs/libpq/src/include/access/xlogrecord.h      248
7 files changed, 1664 insertions(+), 0 deletions(-)
diff --git a/contrib/libs/libpq/src/include/access/rmgr.h b/contrib/libs/libpq/src/include/access/rmgr.h
new file mode 100644
index 0000000000..3b6a497e1b
--- /dev/null
+++ b/contrib/libs/libpq/src/include/access/rmgr.h
@@ -0,0 +1,62 @@
+/*
+ * rmgr.h
+ *
+ * Resource managers definition
+ *
+ * src/include/access/rmgr.h
+ */
+#ifndef RMGR_H
+#define RMGR_H
+
+typedef uint8 RmgrId;
+
+/*
+ * Built-in resource managers
+ *
+ * The actual numerical values for each rmgr ID are defined by the order
+ * of entries in rmgrlist.h.
+ *
+ * Note: RM_MAX_ID must fit in RmgrId; widening that type will affect the XLOG
+ * file format.
+ */
+#define PG_RMGR(symname,name,redo,desc,identify,startup,cleanup,mask,decode) \
+ symname,
+
+typedef enum RmgrIds
+{
+#include "access/rmgrlist.h"
+ RM_NEXT_ID
+} RmgrIds;
+
+#undef PG_RMGR
+
+#define RM_MAX_ID UINT8_MAX
+#define RM_MAX_BUILTIN_ID (RM_NEXT_ID - 1)
+#define RM_MIN_CUSTOM_ID 128
+#define RM_MAX_CUSTOM_ID UINT8_MAX
+#define RM_N_IDS (UINT8_MAX + 1)
+#define RM_N_BUILTIN_IDS (RM_MAX_BUILTIN_ID + 1)
+#define RM_N_CUSTOM_IDS (RM_MAX_CUSTOM_ID - RM_MIN_CUSTOM_ID + 1)
+
+static inline bool
+RmgrIdIsBuiltin(int rmid)
+{
+ return rmid <= RM_MAX_BUILTIN_ID;
+}
+
+static inline bool
+RmgrIdIsCustom(int rmid)
+{
+ return rmid >= RM_MIN_CUSTOM_ID && rmid <= RM_MAX_CUSTOM_ID;
+}
+
+#define RmgrIdIsValid(rmid) (RmgrIdIsBuiltin((rmid)) || RmgrIdIsCustom((rmid)))
+
+/*
+ * RmgrId to use for extensions that require an RmgrId, but are still in
+ * development and have not reserved their own unique RmgrId yet. See:
+ * https://wiki.postgresql.org/wiki/CustomWALResourceManagers
+ */
+#define RM_EXPERIMENTAL_ID 128
+
+#endif /* RMGR_H */
diff --git a/contrib/libs/libpq/src/include/access/rmgrlist.h b/contrib/libs/libpq/src/include/access/rmgrlist.h
new file mode 100644
index 0000000000..463bcb67c5
--- /dev/null
+++ b/contrib/libs/libpq/src/include/access/rmgrlist.h
@@ -0,0 +1,49 @@
+/*---------------------------------------------------------------------------
+ * rmgrlist.h
+ *
+ * The resource manager list is kept in its own source file for possible
+ * use by automatic tools. The exact representation of a rmgr is determined
+ * by the PG_RMGR macro, which is not defined in this file; it can be
+ * defined by the caller for special purposes.
+ *
+ * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/access/rmgrlist.h
+ *---------------------------------------------------------------------------
+ */
+
+/* there is deliberately not an #ifndef RMGRLIST_H here */
+
+/*
+ * List of resource manager entries. Note that order of entries defines the
+ * numerical values of each rmgr's ID, which is stored in WAL records. New
+ * entries should be added at the end, to avoid changing IDs of existing
+ * entries.
+ *
+ * Changes to this list possibly need an XLOG_PAGE_MAGIC bump.
+ */
+
+/* symbol name, textual name, redo, desc, identify, startup, cleanup, mask, decode */
+PG_RMGR(RM_XLOG_ID, "XLOG", xlog_redo, xlog_desc, xlog_identify, NULL, NULL, NULL, xlog_decode)
+PG_RMGR(RM_XACT_ID, "Transaction", xact_redo, xact_desc, xact_identify, NULL, NULL, NULL, xact_decode)
+PG_RMGR(RM_SMGR_ID, "Storage", smgr_redo, smgr_desc, smgr_identify, NULL, NULL, NULL, NULL)
+PG_RMGR(RM_CLOG_ID, "CLOG", clog_redo, clog_desc, clog_identify, NULL, NULL, NULL, NULL)
+PG_RMGR(RM_DBASE_ID, "Database", dbase_redo, dbase_desc, dbase_identify, NULL, NULL, NULL, NULL)
+PG_RMGR(RM_TBLSPC_ID, "Tablespace", tblspc_redo, tblspc_desc, tblspc_identify, NULL, NULL, NULL, NULL)
+PG_RMGR(RM_MULTIXACT_ID, "MultiXact", multixact_redo, multixact_desc, multixact_identify, NULL, NULL, NULL, NULL)
+PG_RMGR(RM_RELMAP_ID, "RelMap", relmap_redo, relmap_desc, relmap_identify, NULL, NULL, NULL, NULL)
+PG_RMGR(RM_STANDBY_ID, "Standby", standby_redo, standby_desc, standby_identify, NULL, NULL, NULL, standby_decode)
+PG_RMGR(RM_HEAP2_ID, "Heap2", heap2_redo, heap2_desc, heap2_identify, NULL, NULL, heap_mask, heap2_decode)
+PG_RMGR(RM_HEAP_ID, "Heap", heap_redo, heap_desc, heap_identify, NULL, NULL, heap_mask, heap_decode)
+PG_RMGR(RM_BTREE_ID, "Btree", btree_redo, btree_desc, btree_identify, btree_xlog_startup, btree_xlog_cleanup, btree_mask, NULL)
+PG_RMGR(RM_HASH_ID, "Hash", hash_redo, hash_desc, hash_identify, NULL, NULL, hash_mask, NULL)
+PG_RMGR(RM_GIN_ID, "Gin", gin_redo, gin_desc, gin_identify, gin_xlog_startup, gin_xlog_cleanup, gin_mask, NULL)
+PG_RMGR(RM_GIST_ID, "Gist", gist_redo, gist_desc, gist_identify, gist_xlog_startup, gist_xlog_cleanup, gist_mask, NULL)
+PG_RMGR(RM_SEQ_ID, "Sequence", seq_redo, seq_desc, seq_identify, NULL, NULL, seq_mask, NULL)
+PG_RMGR(RM_SPGIST_ID, "SPGist", spg_redo, spg_desc, spg_identify, spg_xlog_startup, spg_xlog_cleanup, spg_mask, NULL)
+PG_RMGR(RM_BRIN_ID, "BRIN", brin_redo, brin_desc, brin_identify, NULL, NULL, brin_mask, NULL)
+PG_RMGR(RM_COMMIT_TS_ID, "CommitTs", commit_ts_redo, commit_ts_desc, commit_ts_identify, NULL, NULL, NULL, NULL)
+PG_RMGR(RM_REPLORIGIN_ID, "ReplicationOrigin", replorigin_redo, replorigin_desc, replorigin_identify, NULL, NULL, NULL, NULL)
+PG_RMGR(RM_GENERIC_ID, "Generic", generic_redo, generic_desc, generic_identify, NULL, NULL, generic_mask, NULL)
+PG_RMGR(RM_LOGICALMSG_ID, "LogicalMessage", logicalmsg_redo, logicalmsg_desc, logicalmsg_identify, NULL, NULL, NULL, logicalmsg_decode)
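The file above is an X-macro list: each includer defines PG_RMGR to pick out whichever columns it needs before including rmgrlist.h, as rmgr.h does to generate the RmgrIds enum. A minimal sketch of another expansion, building a table of the textual names (an illustration only, not the backend's actual RmgrTable construction in rmgr.c):

/* Expand each rmgrlist.h entry to its textual name, indexed by RmgrId. */
#define PG_RMGR(symname, name, redo, desc, identify, startup, cleanup, mask, decode) \
	name,

static const char *const BuiltinRmgrNames[] = {
#include "access/rmgrlist.h"
};

#undef PG_RMGR

/* BuiltinRmgrNames[RM_XLOG_ID] is "XLOG", BuiltinRmgrNames[RM_HEAP_ID] is "Heap", ... */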
diff --git a/contrib/libs/libpq/src/include/access/transam.h b/contrib/libs/libpq/src/include/access/transam.h
new file mode 100644
index 0000000000..f5af6d3055
--- /dev/null
+++ b/contrib/libs/libpq/src/include/access/transam.h
@@ -0,0 +1,375 @@
+/*-------------------------------------------------------------------------
+ *
+ * transam.h
+ * postgres transaction access method support code
+ *
+ *
+ * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/access/transam.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef TRANSAM_H
+#define TRANSAM_H
+
+#include "access/xlogdefs.h"
+
+
+/* ----------------
+ * Special transaction ID values
+ *
+ * BootstrapTransactionId is the XID for "bootstrap" operations, and
+ * FrozenTransactionId is used for very old tuples. Both should
+ * always be considered valid.
+ *
+ * FirstNormalTransactionId is the first "normal" transaction id.
+ * Note: if you need to change it, you must change pg_class.h as well.
+ * ----------------
+ */
+#define InvalidTransactionId ((TransactionId) 0)
+#define BootstrapTransactionId ((TransactionId) 1)
+#define FrozenTransactionId ((TransactionId) 2)
+#define FirstNormalTransactionId ((TransactionId) 3)
+#define MaxTransactionId ((TransactionId) 0xFFFFFFFF)
+
+/* ----------------
+ * transaction ID manipulation macros
+ * ----------------
+ */
+#define TransactionIdIsValid(xid) ((xid) != InvalidTransactionId)
+#define TransactionIdIsNormal(xid) ((xid) >= FirstNormalTransactionId)
+#define TransactionIdEquals(id1, id2) ((id1) == (id2))
+#define TransactionIdStore(xid, dest) (*(dest) = (xid))
+#define StoreInvalidTransactionId(dest) (*(dest) = InvalidTransactionId)
+
+#define EpochFromFullTransactionId(x) ((uint32) ((x).value >> 32))
+#define XidFromFullTransactionId(x) ((uint32) (x).value)
+#define U64FromFullTransactionId(x) ((x).value)
+#define FullTransactionIdEquals(a, b) ((a).value == (b).value)
+#define FullTransactionIdPrecedes(a, b) ((a).value < (b).value)
+#define FullTransactionIdPrecedesOrEquals(a, b) ((a).value <= (b).value)
+#define FullTransactionIdFollows(a, b) ((a).value > (b).value)
+#define FullTransactionIdFollowsOrEquals(a, b) ((a).value >= (b).value)
+#define FullTransactionIdIsValid(x) TransactionIdIsValid(XidFromFullTransactionId(x))
+#define InvalidFullTransactionId FullTransactionIdFromEpochAndXid(0, InvalidTransactionId)
+#define FirstNormalFullTransactionId FullTransactionIdFromEpochAndXid(0, FirstNormalTransactionId)
+#define FullTransactionIdIsNormal(x) FullTransactionIdFollowsOrEquals(x, FirstNormalFullTransactionId)
+
+/*
+ * A 64 bit value that contains an epoch and a TransactionId. This is
+ * wrapped in a struct to prevent implicit conversion to/from TransactionId.
+ * Not all values represent valid normal XIDs.
+ */
+typedef struct FullTransactionId
+{
+ uint64 value;
+} FullTransactionId;
+
+static inline FullTransactionId
+FullTransactionIdFromEpochAndXid(uint32 epoch, TransactionId xid)
+{
+ FullTransactionId result;
+
+ result.value = ((uint64) epoch) << 32 | xid;
+
+ return result;
+}
+
+static inline FullTransactionId
+FullTransactionIdFromU64(uint64 value)
+{
+ FullTransactionId result;
+
+ result.value = value;
+
+ return result;
+}
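FullTransactionIdFromEpochAndXid() packs the epoch into the high 32 bits and the 32-bit xid into the low 32 bits; the macros above extract them again. A hedged round-trip sketch (assuming Assert is available, as in backend code):

static void
fxid_packing_example(void)
{
	FullTransactionId fxid = FullTransactionIdFromEpochAndXid(2, 1000);

	Assert(U64FromFullTransactionId(fxid) == ((((uint64) 2) << 32) | 1000));
	Assert(EpochFromFullTransactionId(fxid) == 2);
	Assert(XidFromFullTransactionId(fxid) == (TransactionId) 1000);
	Assert(FullTransactionIdIsNormal(fxid));
}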
+
+/* advance a transaction ID variable, handling wraparound correctly */
+#define TransactionIdAdvance(dest) \
+ do { \
+ (dest)++; \
+ if ((dest) < FirstNormalTransactionId) \
+ (dest) = FirstNormalTransactionId; \
+ } while(0)
+
+/*
+ * Retreat a FullTransactionId variable, stepping over xids that would appear
+ * to be special only when viewed as 32bit XIDs.
+ */
+static inline void
+FullTransactionIdRetreat(FullTransactionId *dest)
+{
+ dest->value--;
+
+ /*
+ * In contrast to 32bit XIDs, don't step over the "actual" special xids.
+ * For 64bit xids these can't be reached as part of a wraparound as they
+ * can in the 32bit case.
+ */
+ if (FullTransactionIdPrecedes(*dest, FirstNormalFullTransactionId))
+ return;
+
+ /*
+ * But we do need to step over XIDs that'd appear special only for 32bit
+ * XIDs.
+ */
+ while (XidFromFullTransactionId(*dest) < FirstNormalTransactionId)
+ dest->value--;
+}
+
+/*
+ * Advance a FullTransactionId variable, stepping over xids that would appear
+ * to be special only when viewed as 32bit XIDs.
+ */
+static inline void
+FullTransactionIdAdvance(FullTransactionId *dest)
+{
+ dest->value++;
+
+ /* see FullTransactionIdRetreat() */
+ if (FullTransactionIdPrecedes(*dest, FirstNormalFullTransactionId))
+ return;
+
+ while (XidFromFullTransactionId(*dest) < FirstNormalTransactionId)
+ dest->value++;
+}
+
+/* back up a transaction ID variable, handling wraparound correctly */
+#define TransactionIdRetreat(dest) \
+ do { \
+ (dest)--; \
+ } while ((dest) < FirstNormalTransactionId)
+
+/* compare two XIDs already known to be normal; this is a macro for speed */
+#define NormalTransactionIdPrecedes(id1, id2) \
+ (AssertMacro(TransactionIdIsNormal(id1) && TransactionIdIsNormal(id2)), \
+ (int32) ((id1) - (id2)) < 0)
+
+/* compare two XIDs already known to be normal; this is a macro for speed */
+#define NormalTransactionIdFollows(id1, id2) \
+ (AssertMacro(TransactionIdIsNormal(id1) && TransactionIdIsNormal(id2)), \
+ (int32) ((id1) - (id2)) > 0)
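These comparisons are circular (modulo 2^32): the difference is cast to a signed 32-bit value, so an xid issued just after wraparound still follows one issued just before it, provided the two are less than 2^31 apart. A hedged illustration:

static void
xid_wraparound_compare_example(void)
{
	TransactionId before_wrap = (TransactionId) 0xFFFFFF00;
	TransactionId after_wrap = (TransactionId) 100;	/* issued after wraparound */

	/* (int32) (100 - 0xFFFFFF00) == 356 > 0, so the "smaller" xid follows */
	Assert(NormalTransactionIdFollows(after_wrap, before_wrap));
	Assert(NormalTransactionIdPrecedes(before_wrap, after_wrap));
}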
+
+/* ----------
+ * Object ID (OID) zero is InvalidOid.
+ *
+ * OIDs 1-9999 are reserved for manual assignment (see .dat files in
+ * src/include/catalog/). Of these, 8000-9999 are reserved for
+ * development purposes (such as in-progress patches and forks);
+ * they should not appear in released versions.
+ *
+ * OIDs 10000-11999 are reserved for assignment by genbki.pl, for use
+ * when the .dat files in src/include/catalog/ do not specify an OID
+ * for a catalog entry that requires one. Note that genbki.pl assigns
+ * these OIDs independently in each catalog, so they're not guaranteed
+ * to be globally unique. Furthermore, the bootstrap backend and
+ * initdb's post-bootstrap processing can also assign OIDs in this range.
+ * The normal OID-generation logic takes care of any OID conflicts that
+ * might arise from that.
+ *
+ * OIDs 12000-16383 are reserved for unpinned objects created by initdb's
+ * post-bootstrap processing. initdb forces the OID generator up to
+ * 12000 as soon as it's made the pinned objects it's responsible for.
+ *
+ * OIDs beginning at 16384 are assigned from the OID generator
+ * during normal multiuser operation. (We force the generator up to
+ * 16384 as soon as we are in normal operation.)
+ *
+ * The choices of 8000, 10000 and 12000 are completely arbitrary, and can be
+ * moved if we run low on OIDs in any category. Changing the macros below,
+ * and updating relevant documentation (see bki.sgml and RELEASE_CHANGES),
+ * should be sufficient to do this. Moving the 16384 boundary between
+ * initdb-assigned OIDs and user-defined objects would be substantially
+ * more painful, however, since some user-defined OIDs will appear in
+ * on-disk data; such a change would probably break pg_upgrade.
+ *
+ * NOTE: if the OID generator wraps around, we skip over OIDs 0-16383
+ * and resume with 16384. This minimizes the odds of OID conflict, by not
+ * reassigning OIDs that might have been assigned during initdb. Critically,
+ * it also ensures that no user-created object will be considered pinned.
+ * ----------
+ */
+#define FirstGenbkiObjectId 10000
+#define FirstUnpinnedObjectId 12000
+#define FirstNormalObjectId 16384
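As a hedged illustration of these boundaries, a hypothetical classifier (the helper name is invented, not part of PostgreSQL) could look like this:

static inline const char *
oid_range_name(Oid oid)
{
	if (oid < FirstGenbkiObjectId)
		return "manually assigned catalog OID";
	if (oid < FirstUnpinnedObjectId)
		return "genbki.pl-assigned OID";
	if (oid < FirstNormalObjectId)
		return "initdb post-bootstrap OID";
	return "user-created object OID";
}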
+
+/*
+ * VariableCache is a data structure in shared memory that is used to track
+ * OID and XID assignment state. For largely historical reasons, there is
+ * just one struct with different fields that are protected by different
+ * LWLocks.
+ *
+ * Note: xidWrapLimit and oldestXidDB are not "active" values, but are
+ * used just to generate useful messages when xidWarnLimit or xidStopLimit
+ * are exceeded.
+ */
+typedef struct VariableCacheData
+{
+ /*
+ * These fields are protected by OidGenLock.
+ */
+ Oid nextOid; /* next OID to assign */
+ uint32 oidCount; /* OIDs available before must do XLOG work */
+
+ /*
+ * These fields are protected by XidGenLock.
+ */
+ FullTransactionId nextXid; /* next XID to assign */
+
+ TransactionId oldestXid; /* cluster-wide minimum datfrozenxid */
+ TransactionId xidVacLimit; /* start forcing autovacuums here */
+ TransactionId xidWarnLimit; /* start complaining here */
+ TransactionId xidStopLimit; /* refuse to advance nextXid beyond here */
+ TransactionId xidWrapLimit; /* where the world ends */
+ Oid oldestXidDB; /* database with minimum datfrozenxid */
+
+ /*
+ * These fields are protected by CommitTsLock
+ */
+ TransactionId oldestCommitTsXid;
+ TransactionId newestCommitTsXid;
+
+ /*
+ * These fields are protected by ProcArrayLock.
+ */
+ FullTransactionId latestCompletedXid; /* newest full XID that has
+ * committed or aborted */
+
+ /*
+ * Number of top-level transactions with xids (i.e. which may have
+ * modified the database) that completed in some form since the start of
+ * the server. This currently is solely used to check whether
+ * GetSnapshotData() needs to recompute the contents of the snapshot, or
+ * not. There are likely other users of this. Always above 1.
+ */
+ uint64 xactCompletionCount;
+
+ /*
+ * These fields are protected by XactTruncationLock
+ */
+ TransactionId oldestClogXid; /* oldest it's safe to look up in clog */
+
+} VariableCacheData;
+
+typedef VariableCacheData *VariableCache;
+
+
+/* ----------------
+ * extern declarations
+ * ----------------
+ */
+
+/* in transam/xact.c */
+extern bool TransactionStartedDuringRecovery(void);
+
+/* in transam/varsup.c */
+extern PGDLLIMPORT VariableCache ShmemVariableCache;
+
+/*
+ * prototypes for functions in transam/transam.c
+ */
+extern bool TransactionIdDidCommit(TransactionId transactionId);
+extern bool TransactionIdDidAbort(TransactionId transactionId);
+extern void TransactionIdCommitTree(TransactionId xid, int nxids, TransactionId *xids);
+extern void TransactionIdAsyncCommitTree(TransactionId xid, int nxids, TransactionId *xids, XLogRecPtr lsn);
+extern void TransactionIdAbortTree(TransactionId xid, int nxids, TransactionId *xids);
+extern bool TransactionIdPrecedes(TransactionId id1, TransactionId id2);
+extern bool TransactionIdPrecedesOrEquals(TransactionId id1, TransactionId id2);
+extern bool TransactionIdFollows(TransactionId id1, TransactionId id2);
+extern bool TransactionIdFollowsOrEquals(TransactionId id1, TransactionId id2);
+extern TransactionId TransactionIdLatest(TransactionId mainxid,
+ int nxids, const TransactionId *xids);
+extern XLogRecPtr TransactionIdGetCommitLSN(TransactionId xid);
+
+/* in transam/varsup.c */
+extern FullTransactionId GetNewTransactionId(bool isSubXact);
+extern void AdvanceNextFullTransactionIdPastXid(TransactionId xid);
+extern FullTransactionId ReadNextFullTransactionId(void);
+extern void SetTransactionIdLimit(TransactionId oldest_datfrozenxid,
+ Oid oldest_datoid);
+extern void AdvanceOldestClogXid(TransactionId oldest_datfrozenxid);
+extern bool ForceTransactionIdLimitUpdate(void);
+extern Oid GetNewObjectId(void);
+extern void StopGeneratingPinnedObjectIds(void);
+
+#ifdef USE_ASSERT_CHECKING
+extern void AssertTransactionIdInAllowableRange(TransactionId xid);
+#else
+#define AssertTransactionIdInAllowableRange(xid) ((void)true)
+#endif
+
+/*
+ * Some frontend programs include this header. For compilers that emit static
+ * inline functions even when they're unused, that leads to unsatisfied
+ * external references; hence hide them with #ifndef FRONTEND.
+ */
+#ifndef FRONTEND
+
+/*
+ * For callers that just need the XID part of the next transaction ID.
+ */
+static inline TransactionId
+ReadNextTransactionId(void)
+{
+ return XidFromFullTransactionId(ReadNextFullTransactionId());
+}
+
+/* return transaction ID backed up by amount, handling wraparound correctly */
+static inline TransactionId
+TransactionIdRetreatedBy(TransactionId xid, uint32 amount)
+{
+ xid -= amount;
+
+ while (xid < FirstNormalTransactionId)
+ xid--;
+
+ return xid;
+}
+
+/* return the older of the two IDs */
+static inline TransactionId
+TransactionIdOlder(TransactionId a, TransactionId b)
+{
+ if (!TransactionIdIsValid(a))
+ return b;
+
+ if (!TransactionIdIsValid(b))
+ return a;
+
+ if (TransactionIdPrecedes(a, b))
+ return a;
+ return b;
+}
+
+/* return the older of the two IDs, assuming they're both normal */
+static inline TransactionId
+NormalTransactionIdOlder(TransactionId a, TransactionId b)
+{
+ Assert(TransactionIdIsNormal(a));
+ Assert(TransactionIdIsNormal(b));
+ if (NormalTransactionIdPrecedes(a, b))
+ return a;
+ return b;
+}
+
+/* return the newer of the two IDs */
+static inline FullTransactionId
+FullTransactionIdNewer(FullTransactionId a, FullTransactionId b)
+{
+ if (!FullTransactionIdIsValid(a))
+ return b;
+
+ if (!FullTransactionIdIsValid(b))
+ return a;
+
+ if (FullTransactionIdFollows(a, b))
+ return a;
+ return b;
+}
+
+#endif /* FRONTEND */
+
+#endif /* TRANSAM_H */
diff --git a/contrib/libs/libpq/src/include/access/xlog_internal.h b/contrib/libs/libpq/src/include/access/xlog_internal.h
new file mode 100644
index 0000000000..b0fd338a00
--- /dev/null
+++ b/contrib/libs/libpq/src/include/access/xlog_internal.h
@@ -0,0 +1,404 @@
+/*
+ * xlog_internal.h
+ *
+ * PostgreSQL write-ahead log internal declarations
+ *
+ * NOTE: this file is intended to contain declarations useful for
+ * manipulating the XLOG files directly, but it is not supposed to be
+ * needed by rmgr routines (redo support for individual record types).
+ * So the XLogRecord typedef and associated stuff appear in xlogrecord.h.
+ *
+ * Note: This file must be includable in both frontend and backend contexts,
+ * to allow stand-alone tools like pg_receivewal to deal with WAL files.
+ *
+ * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/access/xlog_internal.h
+ */
+#ifndef XLOG_INTERNAL_H
+#define XLOG_INTERNAL_H
+
+#include "access/xlogdefs.h"
+#include "access/xlogreader.h"
+#include "datatype/timestamp.h"
+#include "lib/stringinfo.h"
+#include "pgtime.h"
+#include "storage/block.h"
+#include "storage/relfilelocator.h"
+
+
+/*
+ * Each page of XLOG file has a header like this:
+ */
+#define XLOG_PAGE_MAGIC 0xD113 /* can be used as WAL version indicator */
+
+typedef struct XLogPageHeaderData
+{
+ uint16 xlp_magic; /* magic value for correctness checks */
+ uint16 xlp_info; /* flag bits, see below */
+ TimeLineID xlp_tli; /* TimeLineID of first record on page */
+ XLogRecPtr xlp_pageaddr; /* XLOG address of this page */
+
+ /*
+ * When there is not enough space on current page for whole record, we
+ * continue on the next page. xlp_rem_len is the number of bytes
+ * remaining from a previous page; it tracks xl_tot_len in the initial
+ * header. Note that the continuation data isn't necessarily aligned.
+ */
+ uint32 xlp_rem_len; /* total len of remaining data for record */
+} XLogPageHeaderData;
+
+#define SizeOfXLogShortPHD MAXALIGN(sizeof(XLogPageHeaderData))
+
+typedef XLogPageHeaderData *XLogPageHeader;
+
+/*
+ * When the XLP_LONG_HEADER flag is set, we store additional fields in the
+ * page header. (This is ordinarily done just in the first page of an
+ * XLOG file.) The additional fields serve to identify the file accurately.
+ */
+typedef struct XLogLongPageHeaderData
+{
+ XLogPageHeaderData std; /* standard header fields */
+ uint64 xlp_sysid; /* system identifier from pg_control */
+ uint32 xlp_seg_size; /* just as a cross-check */
+ uint32 xlp_xlog_blcksz; /* just as a cross-check */
+} XLogLongPageHeaderData;
+
+#define SizeOfXLogLongPHD MAXALIGN(sizeof(XLogLongPageHeaderData))
+
+typedef XLogLongPageHeaderData *XLogLongPageHeader;
+
+/* When record crosses page boundary, set this flag in new page's header */
+#define XLP_FIRST_IS_CONTRECORD 0x0001
+/* This flag indicates a "long" page header */
+#define XLP_LONG_HEADER 0x0002
+/* This flag indicates backup blocks starting in this page are optional */
+#define XLP_BKP_REMOVABLE 0x0004
+/* Replaces a missing contrecord; see CreateOverwriteContrecordRecord */
+#define XLP_FIRST_IS_OVERWRITE_CONTRECORD 0x0008
+/* All defined flag bits in xlp_info (used for validity checking of header) */
+#define XLP_ALL_FLAGS 0x000F
+
+#define XLogPageHeaderSize(hdr) \
+ (((hdr)->xlp_info & XLP_LONG_HEADER) ? SizeOfXLogLongPHD : SizeOfXLogShortPHD)
+
+/* wal_segment_size can range from 1MB to 1GB */
+#define WalSegMinSize 1024 * 1024
+#define WalSegMaxSize 1024 * 1024 * 1024
+/* default number of min and max wal segments */
+#define DEFAULT_MIN_WAL_SEGS 5
+#define DEFAULT_MAX_WAL_SEGS 64
+
+/* check that the given size is a valid wal_segment_size */
+#define IsPowerOf2(x) (x > 0 && ((x) & ((x)-1)) == 0)
+#define IsValidWalSegSize(size) \
+ (IsPowerOf2(size) && \
+ ((size) >= WalSegMinSize && (size) <= WalSegMaxSize))
+
+#define XLogSegmentsPerXLogId(wal_segsz_bytes) \
+ (UINT64CONST(0x100000000) / (wal_segsz_bytes))
+
+#define XLogSegNoOffsetToRecPtr(segno, offset, wal_segsz_bytes, dest) \
+ (dest) = (segno) * (wal_segsz_bytes) + (offset)
+
+#define XLogSegmentOffset(xlogptr, wal_segsz_bytes) \
+ ((xlogptr) & ((wal_segsz_bytes) - 1))
+
+/*
+ * Compute a segment number from an XLogRecPtr.
+ *
+ * For XLByteToSeg, do the computation at face value. For XLByteToPrevSeg,
+ * a boundary byte is taken to be in the previous segment. This is suitable
+ * for deciding which segment to write given a pointer to a record end,
+ * for example.
+ */
+#define XLByteToSeg(xlrp, logSegNo, wal_segsz_bytes) \
+ logSegNo = (xlrp) / (wal_segsz_bytes)
+
+#define XLByteToPrevSeg(xlrp, logSegNo, wal_segsz_bytes) \
+ logSegNo = ((xlrp) - 1) / (wal_segsz_bytes)
+
+/*
+ * Convert values of GUCs measured in megabytes to equiv. segment count.
+ * Rounds down.
+ */
+#define XLogMBVarToSegs(mbvar, wal_segsz_bytes) \
+ ((mbvar) / ((wal_segsz_bytes) / (1024 * 1024)))
+
+/*
+ * Is an XLogRecPtr within a particular XLOG segment?
+ *
+ * For XLByteInSeg, do the computation at face value. For XLByteInPrevSeg,
+ * a boundary byte is taken to be in the previous segment.
+ */
+#define XLByteInSeg(xlrp, logSegNo, wal_segsz_bytes) \
+ (((xlrp) / (wal_segsz_bytes)) == (logSegNo))
+
+#define XLByteInPrevSeg(xlrp, logSegNo, wal_segsz_bytes) \
+ ((((xlrp) - 1) / (wal_segsz_bytes)) == (logSegNo))
+
+/* Check if an XLogRecPtr value is in a plausible range */
+#define XRecOffIsValid(xlrp) \
+ ((xlrp) % XLOG_BLCKSZ >= SizeOfXLogShortPHD)
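A hedged worked example of the segment arithmetic above, assuming the default 16MB wal_segment_size (all values illustrative):

static void
wal_segment_math_example(void)
{
	int			wal_segsz_bytes = 16 * 1024 * 1024;
	XLogRecPtr	lsn = (XLogRecPtr) 0x12345678;
	XLogSegNo	segno;

	XLByteToSeg(lsn, segno, wal_segsz_bytes);
	Assert(segno == 0x12);		/* 0x12345678 / 0x1000000 */
	Assert(XLogSegmentOffset(lsn, wal_segsz_bytes) == 0x345678);
	Assert(XLByteInSeg(lsn, segno, wal_segsz_bytes));
	Assert(XLogSegmentsPerXLogId(wal_segsz_bytes) == 256);
}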
+
+/*
+ * The XLog directory and control file (relative to $PGDATA)
+ */
+#define XLOGDIR "pg_wal"
+#define XLOG_CONTROL_FILE "global/pg_control"
+
+/*
+ * These macros encapsulate knowledge about the exact layout of XLog file
+ * names, timeline history file names, and archive-status file names.
+ */
+#define MAXFNAMELEN 64
+
+/* Length of XLog file name */
+#define XLOG_FNAME_LEN 24
+
+/*
+ * Generate a WAL segment file name. Do not use this function in a helper
+ * function allocating the result generated.
+ */
+static inline void
+XLogFileName(char *fname, TimeLineID tli, XLogSegNo logSegNo, int wal_segsz_bytes)
+{
+ snprintf(fname, MAXFNAMELEN, "%08X%08X%08X", tli,
+ (uint32) (logSegNo / XLogSegmentsPerXLogId(wal_segsz_bytes)),
+ (uint32) (logSegNo % XLogSegmentsPerXLogId(wal_segsz_bytes)));
+}
+
+static inline void
+XLogFileNameById(char *fname, TimeLineID tli, uint32 log, uint32 seg)
+{
+ snprintf(fname, MAXFNAMELEN, "%08X%08X%08X", tli, log, seg);
+}
+
+static inline bool
+IsXLogFileName(const char *fname)
+{
+ return (strlen(fname) == XLOG_FNAME_LEN && \
+ strspn(fname, "0123456789ABCDEF") == XLOG_FNAME_LEN);
+}
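A hedged usage sketch for the naming helpers above (again assuming 16MB segments): the first segment of timeline 1 yields the familiar name, and XLogFromFileName() inverts it.

static void
wal_file_name_example(void)
{
	char		fname[MAXFNAMELEN];
	TimeLineID	tli;
	XLogSegNo	segno;

	XLogFileName(fname, 1, 1, 16 * 1024 * 1024);
	Assert(strcmp(fname, "000000010000000000000001") == 0);
	Assert(IsXLogFileName(fname));

	XLogFromFileName(fname, &tli, &segno, 16 * 1024 * 1024);
	Assert(tli == 1 && segno == 1);
}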
+
+/*
+ * XLOG segment with .partial suffix. Used by pg_receivewal and at end of
+ * archive recovery, when we want to archive a WAL segment but it might not
+ * be complete yet.
+ */
+static inline bool
+IsPartialXLogFileName(const char *fname)
+{
+ return (strlen(fname) == XLOG_FNAME_LEN + strlen(".partial") &&
+ strspn(fname, "0123456789ABCDEF") == XLOG_FNAME_LEN &&
+ strcmp(fname + XLOG_FNAME_LEN, ".partial") == 0);
+}
+
+static inline void
+XLogFromFileName(const char *fname, TimeLineID *tli, XLogSegNo *logSegNo, int wal_segsz_bytes)
+{
+ uint32 log;
+ uint32 seg;
+
+ sscanf(fname, "%08X%08X%08X", tli, &log, &seg);
+ *logSegNo = (uint64) log * XLogSegmentsPerXLogId(wal_segsz_bytes) + seg;
+}
+
+static inline void
+XLogFilePath(char *path, TimeLineID tli, XLogSegNo logSegNo, int wal_segsz_bytes)
+{
+ snprintf(path, MAXPGPATH, XLOGDIR "/%08X%08X%08X", tli,
+ (uint32) (logSegNo / XLogSegmentsPerXLogId(wal_segsz_bytes)),
+ (uint32) (logSegNo % XLogSegmentsPerXLogId(wal_segsz_bytes)));
+}
+
+static inline void
+TLHistoryFileName(char *fname, TimeLineID tli)
+{
+ snprintf(fname, MAXFNAMELEN, "%08X.history", tli);
+}
+
+static inline bool
+IsTLHistoryFileName(const char *fname)
+{
+ return (strlen(fname) == 8 + strlen(".history") &&
+ strspn(fname, "0123456789ABCDEF") == 8 &&
+ strcmp(fname + 8, ".history") == 0);
+}
+
+static inline void
+TLHistoryFilePath(char *path, TimeLineID tli)
+{
+ snprintf(path, MAXPGPATH, XLOGDIR "/%08X.history", tli);
+}
+
+static inline void
+StatusFilePath(char *path, const char *xlog, const char *suffix)
+{
+ snprintf(path, MAXPGPATH, XLOGDIR "/archive_status/%s%s", xlog, suffix);
+}
+
+static inline void
+BackupHistoryFileName(char *fname, TimeLineID tli, XLogSegNo logSegNo, XLogRecPtr startpoint, int wal_segsz_bytes)
+{
+ snprintf(fname, MAXFNAMELEN, "%08X%08X%08X.%08X.backup", tli,
+ (uint32) (logSegNo / XLogSegmentsPerXLogId(wal_segsz_bytes)),
+ (uint32) (logSegNo % XLogSegmentsPerXLogId(wal_segsz_bytes)),
+ (uint32) (XLogSegmentOffset(startpoint, wal_segsz_bytes)));
+}
+
+static inline bool
+IsBackupHistoryFileName(const char *fname)
+{
+ return (strlen(fname) > XLOG_FNAME_LEN &&
+ strspn(fname, "0123456789ABCDEF") == XLOG_FNAME_LEN &&
+ strcmp(fname + strlen(fname) - strlen(".backup"), ".backup") == 0);
+}
+
+static inline void
+BackupHistoryFilePath(char *path, TimeLineID tli, XLogSegNo logSegNo, XLogRecPtr startpoint, int wal_segsz_bytes)
+{
+ snprintf(path, MAXPGPATH, XLOGDIR "/%08X%08X%08X.%08X.backup", tli,
+ (uint32) (logSegNo / XLogSegmentsPerXLogId(wal_segsz_bytes)),
+ (uint32) (logSegNo % XLogSegmentsPerXLogId(wal_segsz_bytes)),
+ (uint32) (XLogSegmentOffset((startpoint), wal_segsz_bytes)));
+}
+
+/*
+ * Information logged when we detect a change in one of the parameters
+ * important for Hot Standby.
+ */
+typedef struct xl_parameter_change
+{
+ int MaxConnections;
+ int max_worker_processes;
+ int max_wal_senders;
+ int max_prepared_xacts;
+ int max_locks_per_xact;
+ int wal_level;
+ bool wal_log_hints;
+ bool track_commit_timestamp;
+} xl_parameter_change;
+
+/* logs restore point */
+typedef struct xl_restore_point
+{
+ TimestampTz rp_time;
+ char rp_name[MAXFNAMELEN];
+} xl_restore_point;
+
+/* Overwrite of prior contrecord */
+typedef struct xl_overwrite_contrecord
+{
+ XLogRecPtr overwritten_lsn;
+ TimestampTz overwrite_time;
+} xl_overwrite_contrecord;
+
+/* End of recovery mark, when we don't do an END_OF_RECOVERY checkpoint */
+typedef struct xl_end_of_recovery
+{
+ TimestampTz end_time;
+ TimeLineID ThisTimeLineID; /* new TLI */
+ TimeLineID PrevTimeLineID; /* previous TLI we forked off from */
+} xl_end_of_recovery;
+
+/*
+ * The functions in xloginsert.c construct a chain of XLogRecData structs
+ * to represent the final WAL record.
+ */
+typedef struct XLogRecData
+{
+ struct XLogRecData *next; /* next struct in chain, or NULL */
+ char *data; /* start of rmgr data to include */
+ uint32 len; /* length of rmgr data to include */
+} XLogRecData;
+
+/*
+ * Recovery target action.
+ */
+typedef enum
+{
+ RECOVERY_TARGET_ACTION_PAUSE,
+ RECOVERY_TARGET_ACTION_PROMOTE,
+ RECOVERY_TARGET_ACTION_SHUTDOWN
+} RecoveryTargetAction;
+
+struct LogicalDecodingContext;
+struct XLogRecordBuffer;
+
+/*
+ * Method table for resource managers.
+ *
+ * This struct must be kept in sync with the PG_RMGR definition in
+ * rmgr.c.
+ *
+ * rm_identify must return a name for the record based on xl_info (without
+ * reference to the rmid). For example, XLOG_BTREE_VACUUM would be named
+ * "VACUUM". rm_desc can then be called to obtain additional detail for the
+ * record, if available (e.g. the last block).
+ *
+ * rm_mask takes as input a page modified by the resource manager and masks
+ * out bits that shouldn't be flagged by wal_consistency_checking.
+ *
+ * RmgrTable[] is indexed by RmgrId values (see rmgrlist.h). If rm_name is
+ * NULL, the corresponding RmgrTable entry is considered invalid.
+ */
+typedef struct RmgrData
+{
+ const char *rm_name;
+ void (*rm_redo) (XLogReaderState *record);
+ void (*rm_desc) (StringInfo buf, XLogReaderState *record);
+ const char *(*rm_identify) (uint8 info);
+ void (*rm_startup) (void);
+ void (*rm_cleanup) (void);
+ void (*rm_mask) (char *pagedata, BlockNumber blkno);
+ void (*rm_decode) (struct LogicalDecodingContext *ctx,
+ struct XLogRecordBuffer *buf);
+} RmgrData;
+
+extern PGDLLIMPORT RmgrData RmgrTable[];
+extern void RmgrStartup(void);
+extern void RmgrCleanup(void);
+extern void RmgrNotFound(RmgrId rmid);
+extern void RegisterCustomRmgr(RmgrId rmid, const RmgrData *rmgr);
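A hedged sketch of registering a custom resource manager from an extension, using RM_EXPERIMENTAL_ID from rmgr.h. The callback names and bodies are hypothetical; unused callbacks stay NULL.

static void
my_rmgr_redo(XLogReaderState *record)
{
	/* replay one WAL record; left empty in this sketch */
}

static void
my_rmgr_desc(StringInfo buf, XLogReaderState *record)
{
	/* append a human-readable description of the record to buf */
}

static const char *
my_rmgr_identify(uint8 info)
{
	return "MY_RECORD";
}

static const RmgrData my_rmgr = {
	.rm_name = "my_extension",
	.rm_redo = my_rmgr_redo,
	.rm_desc = my_rmgr_desc,
	.rm_identify = my_rmgr_identify,
	/* rm_startup, rm_cleanup, rm_mask and rm_decode left NULL */
};

/* typically called from the extension's _PG_init(): */
/* RegisterCustomRmgr(RM_EXPERIMENTAL_ID, &my_rmgr); */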
+
+#ifndef FRONTEND
+static inline bool
+RmgrIdExists(RmgrId rmid)
+{
+ return RmgrTable[rmid].rm_name != NULL;
+}
+
+static inline RmgrData
+GetRmgr(RmgrId rmid)
+{
+ if (unlikely(!RmgrIdExists(rmid)))
+ RmgrNotFound(rmid);
+ return RmgrTable[rmid];
+}
+#endif
+
+/*
+ * Exported to support xlog switching from checkpointer
+ */
+extern pg_time_t GetLastSegSwitchData(XLogRecPtr *lastSwitchLSN);
+extern XLogRecPtr RequestXLogSwitch(bool mark_unimportant);
+
+extern void GetOldestRestartPoint(XLogRecPtr *oldrecptr, TimeLineID *oldtli);
+
+extern void XLogRecGetBlockRefInfo(XLogReaderState *record, bool pretty,
+ bool detailed_format, StringInfo buf,
+ uint32 *fpi_len);
+
+/*
+ * Exported for the functions in timeline.c and xlogarchive.c. Only valid
+ * in the startup process.
+ */
+extern PGDLLIMPORT bool ArchiveRecoveryRequested;
+extern PGDLLIMPORT bool InArchiveRecovery;
+extern PGDLLIMPORT bool StandbyMode;
+extern PGDLLIMPORT char *recoveryRestoreCommand;
+
+#endif /* XLOG_INTERNAL_H */
diff --git a/contrib/libs/libpq/src/include/access/xlogdefs.h b/contrib/libs/libpq/src/include/access/xlogdefs.h
new file mode 100644
index 0000000000..fe794c7740
--- /dev/null
+++ b/contrib/libs/libpq/src/include/access/xlogdefs.h
@@ -0,0 +1,82 @@
+/*
+ * xlogdefs.h
+ *
+ * Postgres write-ahead log manager record pointer and
+ * timeline number definitions
+ *
+ * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/access/xlogdefs.h
+ */
+#ifndef XLOG_DEFS_H
+#define XLOG_DEFS_H
+
+#include <fcntl.h> /* need open() flags */
+
+/*
+ * Pointer to a location in the XLOG. These pointers are 64 bits wide,
+ * because we don't want them ever to overflow.
+ */
+typedef uint64 XLogRecPtr;
+
+/*
+ * Zero is used to indicate an invalid pointer. Bootstrap skips the first possible
+ * WAL segment, initializing the first WAL page at WAL segment size, so no XLOG
+ * record can begin at zero.
+ */
+#define InvalidXLogRecPtr 0
+#define XLogRecPtrIsInvalid(r) ((r) == InvalidXLogRecPtr)
+
+/*
+ * First LSN to use for "fake" LSNs.
+ *
+ * Values smaller than this can be used for special per-AM purposes.
+ */
+#define FirstNormalUnloggedLSN ((XLogRecPtr) 1000)
+
+/*
+ * Handy macro for printing XLogRecPtr in conventional format, e.g.,
+ *
+ * printf("%X/%X", LSN_FORMAT_ARGS(lsn));
+ */
+#define LSN_FORMAT_ARGS(lsn) (AssertVariableIsOfTypeMacro((lsn), XLogRecPtr), (uint32) ((lsn) >> 32)), ((uint32) (lsn))
+
+/*
+ * XLogSegNo - physical log file sequence number.
+ */
+typedef uint64 XLogSegNo;
+
+/*
+ * TimeLineID (TLI) - identifies different database histories to prevent
+ * confusion after restoring a prior state of a database installation.
+ * TLI does not change in a normal stop/restart of the database (including
+ * crash-and-recover cases); but we must assign a new TLI after doing
+ * a recovery to a prior state, a/k/a point-in-time recovery. This makes
+ * the new WAL logfile sequence we generate distinguishable from the
+ * sequence that was generated in the previous incarnation.
+ */
+typedef uint32 TimeLineID;
+
+/*
+ * Replication origin id - this is located in this file to avoid having to
+ * include origin.h in a bunch of xlog related places.
+ */
+typedef uint16 RepOriginId;
+
+/*
+ * This chunk of hackery attempts to determine which file sync methods
+ * are available on the current platform, and to choose an appropriate
+ * default method.
+ *
+ * Note that we define our own O_DSYNC on Windows, but not O_SYNC.
+ */
+#if defined(PLATFORM_DEFAULT_SYNC_METHOD)
+#define DEFAULT_SYNC_METHOD PLATFORM_DEFAULT_SYNC_METHOD
+#elif defined(O_DSYNC) && (!defined(O_SYNC) || O_DSYNC != O_SYNC)
+#define DEFAULT_SYNC_METHOD SYNC_METHOD_OPEN_DSYNC
+#else
+#define DEFAULT_SYNC_METHOD SYNC_METHOD_FDATASYNC
+#endif
+
+#endif /* XLOG_DEFS_H */
diff --git a/contrib/libs/libpq/src/include/access/xlogreader.h b/contrib/libs/libpq/src/include/access/xlogreader.h
new file mode 100644
index 0000000000..da32c7db77
--- /dev/null
+++ b/contrib/libs/libpq/src/include/access/xlogreader.h
@@ -0,0 +1,444 @@
+/*-------------------------------------------------------------------------
+ *
+ * xlogreader.h
+ * Definitions for the generic XLog reading facility
+ *
+ * Portions Copyright (c) 2013-2023, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/include/access/xlogreader.h
+ *
+ * NOTES
+ * See the definition of the XLogReaderState struct for instructions on
+ * how to use the XLogReader infrastructure.
+ *
+ * The basic idea is to allocate an XLogReaderState via
+ * XLogReaderAllocate(), position the reader to the first record with
+ * XLogBeginRead() or XLogFindNextRecord(), and call XLogReadRecord()
+ * until it returns NULL.
+ *
+ * Callers supply a page_read callback if they want to call
+ * XLogReadRecord or XLogFindNextRecord; it can be passed in as NULL
+ * otherwise. The WALRead function can be used as a helper to write
+ * page_read callbacks, but it is not mandatory; callers that use it,
+ * must supply segment_open callbacks. The segment_close callback
+ * must always be supplied.
+ *
+ * After reading a record with XLogReadRecord(), it's decomposed into
+ * the per-block and main data parts, and the parts can be accessed
+ * with the XLogRec* macros and functions. You can also decode a
+ * record that's already constructed in memory, without reading from
+ * disk, by calling the DecodeXLogRecord() function.
+ *-------------------------------------------------------------------------
+ */
+#ifndef XLOGREADER_H
+#define XLOGREADER_H
+
+#ifndef FRONTEND
+#include "access/transam.h"
+#endif
+
+#include "access/xlogrecord.h"
+#include "storage/buf.h"
+
+/* WALOpenSegment represents a WAL segment being read. */
+typedef struct WALOpenSegment
+{
+ int ws_file; /* segment file descriptor */
+ XLogSegNo ws_segno; /* segment number */
+ TimeLineID ws_tli; /* timeline ID of the currently open file */
+} WALOpenSegment;
+
+/* WALSegmentContext carries context information about WAL segments to read */
+typedef struct WALSegmentContext
+{
+ char ws_dir[MAXPGPATH];
+ int ws_segsize;
+} WALSegmentContext;
+
+typedef struct XLogReaderState XLogReaderState;
+
+/* Function type definitions for various xlogreader interactions */
+typedef int (*XLogPageReadCB) (XLogReaderState *xlogreader,
+ XLogRecPtr targetPagePtr,
+ int reqLen,
+ XLogRecPtr targetRecPtr,
+ char *readBuf);
+typedef void (*WALSegmentOpenCB) (XLogReaderState *xlogreader,
+ XLogSegNo nextSegNo,
+ TimeLineID *tli_p);
+typedef void (*WALSegmentCloseCB) (XLogReaderState *xlogreader);
+
+typedef struct XLogReaderRoutine
+{
+ /*
+ * Data input callback
+ *
+ * This callback shall read at least reqLen valid bytes of the xlog page
+ * starting at targetPagePtr, and store them in readBuf. The callback
+ * shall return the number of bytes read (never more than XLOG_BLCKSZ), or
+ * -1 on failure. The callback shall sleep, if necessary, to wait for the
+ * requested bytes to become available. The callback will not be invoked
+ * again for the same page unless more than the returned number of bytes
+ * are needed.
+ *
+ * targetRecPtr is the position of the WAL record we're reading. Usually
+ * it is equal to targetPagePtr + reqLen, but sometimes xlogreader needs
+ * to read and verify the page or segment header, before it reads the
+ * actual WAL record it's interested in. In that case, targetRecPtr can
+ * be used to determine which timeline to read the page from.
+ *
+ * The callback shall set ->seg.ws_tli to the TLI of the file the page was
+ * read from.
+ */
+ XLogPageReadCB page_read;
+
+ /*
+ * Callback to open the specified WAL segment for reading. ->seg.ws_file
+ * shall be set to the file descriptor of the opened segment. In case of
+ * failure, an error shall be raised by the callback and it shall not
+ * return.
+ *
+ * "nextSegNo" is the number of the segment to be opened.
+ *
+ * "tli_p" is an input/output argument. WALRead() uses it to pass the
+ * timeline in which the new segment should be found, but the callback can
+ * use it to return the TLI that it actually opened.
+ */
+ WALSegmentOpenCB segment_open;
+
+ /*
+ * WAL segment close callback. ->seg.ws_file shall be set to a negative
+ * number.
+ */
+ WALSegmentCloseCB segment_close;
+} XLogReaderRoutine;
+
+#define XL_ROUTINE(...) &(XLogReaderRoutine){__VA_ARGS__}
+
+typedef struct
+{
+ /* Is this block ref in use? */
+ bool in_use;
+
+ /* Identify the block this refers to */
+ RelFileLocator rlocator;
+ ForkNumber forknum;
+ BlockNumber blkno;
+
+ /* Prefetching workspace. */
+ Buffer prefetch_buffer;
+
+ /* copy of the fork_flags field from the XLogRecordBlockHeader */
+ uint8 flags;
+
+ /* Information on full-page image, if any */
+ bool has_image; /* has image, even for consistency checking */
+ bool apply_image; /* has image that should be restored */
+ char *bkp_image;
+ uint16 hole_offset;
+ uint16 hole_length;
+ uint16 bimg_len;
+ uint8 bimg_info;
+
+ /* Buffer holding the rmgr-specific data associated with this block */
+ bool has_data;
+ char *data;
+ uint16 data_len;
+ uint16 data_bufsz;
+} DecodedBkpBlock;
+
+/*
+ * The decoded contents of a record. This occupies a contiguous region of
+ * memory, with main_data and blocks[n].data pointing to memory after the
+ * members declared here.
+ */
+typedef struct DecodedXLogRecord
+{
+ /* Private member used for resource management. */
+ size_t size; /* total size of decoded record */
+ bool oversized; /* outside the regular decode buffer? */
+ struct DecodedXLogRecord *next; /* decoded record queue link */
+
+ /* Public members. */
+ XLogRecPtr lsn; /* location */
+ XLogRecPtr next_lsn; /* location of next record */
+ XLogRecord header; /* header */
+ RepOriginId record_origin;
+ TransactionId toplevel_xid; /* XID of top-level transaction */
+ char *main_data; /* record's main data portion */
+ uint32 main_data_len; /* main data portion's length */
+ int max_block_id; /* highest block_id in use (-1 if none) */
+ DecodedBkpBlock blocks[FLEXIBLE_ARRAY_MEMBER];
+} DecodedXLogRecord;
+
+struct XLogReaderState
+{
+ /*
+ * Operational callbacks
+ */
+ XLogReaderRoutine routine;
+
+ /* ----------------------------------------
+ * Public parameters
+ * ----------------------------------------
+ */
+
+ /*
+ * System identifier of the xlog files we're about to read. Set to zero
+ * (the default value) if unknown or unimportant.
+ */
+ uint64 system_identifier;
+
+ /*
+ * Opaque data for callbacks to use. Not used by XLogReader.
+ */
+ void *private_data;
+
+ /*
+ * Start and end point of last record read. EndRecPtr is also used as the
+ * position to read next. Calling XLogBeginRead() sets EndRecPtr to the
+ * starting position and ReadRecPtr to invalid.
+ *
+ * Start and end point of last record returned by XLogReadRecord(). These
+ * are also available as record->lsn and record->next_lsn.
+ */
+ XLogRecPtr ReadRecPtr; /* start of last record read */
+ XLogRecPtr EndRecPtr; /* end+1 of last record read */
+
+ /*
+ * Set at the end of recovery: the start point of a partial record at the
+ * end of WAL (InvalidXLogRecPtr if there wasn't one), and the start
+ * location of its first contrecord that went missing.
+ */
+ XLogRecPtr abortedRecPtr;
+ XLogRecPtr missingContrecPtr;
+ /* Set when XLP_FIRST_IS_OVERWRITE_CONTRECORD is found */
+ XLogRecPtr overwrittenRecPtr;
+
+
+ /* ----------------------------------------
+ * Decoded representation of current record
+ *
+ * Use XLogRecGet* functions to investigate the record; these fields
+ * should not be accessed directly.
+ * ----------------------------------------
+ * Start and end point of the last record read and decoded by
+ * XLogReadRecordInternal(). NextRecPtr is also used as the position to
+ * decode next. Calling XLogBeginRead() sets NextRecPtr and EndRecPtr to
+ * the requested starting position.
+ */
+ XLogRecPtr DecodeRecPtr; /* start of last record decoded */
+ XLogRecPtr NextRecPtr; /* end+1 of last record decoded */
+ XLogRecPtr PrevRecPtr; /* start of previous record decoded */
+
+ /* Last record returned by XLogReadRecord(). */
+ DecodedXLogRecord *record;
+
+ /* ----------------------------------------
+ * private/internal state
+ * ----------------------------------------
+ */
+
+ /*
+ * Buffer for decoded records. This is a circular buffer, though
+ * individual records can't be split in the middle, so some space is often
+ * wasted at the end. Oversized records that don't fit in this space are
+ * allocated separately.
+ */
+ char *decode_buffer;
+ size_t decode_buffer_size;
+ bool free_decode_buffer; /* need to free? */
+ char *decode_buffer_head; /* data is read from the head */
+ char *decode_buffer_tail; /* new data is written at the tail */
+
+ /*
+ * Queue of records that have been decoded. This is a linked list that
+ * usually consists of consecutive records in decode_buffer, but may also
+ * contain oversized records allocated with palloc().
+ */
+ DecodedXLogRecord *decode_queue_head; /* oldest decoded record */
+ DecodedXLogRecord *decode_queue_tail; /* newest decoded record */
+
+ /*
+ * Buffer for currently read page (XLOG_BLCKSZ bytes, valid up to at least
+ * readLen bytes)
+ */
+ char *readBuf;
+ uint32 readLen;
+
+ /* last read XLOG position for data currently in readBuf */
+ WALSegmentContext segcxt;
+ WALOpenSegment seg;
+ uint32 segoff;
+
+ /*
+ * beginning of prior page read, and its TLI. Doesn't necessarily
+ * correspond to what's in readBuf; used for timeline sanity checks.
+ */
+ XLogRecPtr latestPagePtr;
+ TimeLineID latestPageTLI;
+
+ /* beginning of the WAL record being read. */
+ XLogRecPtr currRecPtr;
+ /* timeline to read it from, 0 if a lookup is required */
+ TimeLineID currTLI;
+
+ /*
+ * Safe point to read to in currTLI if current TLI is historical
+ * (tliSwitchPoint) or InvalidXLogRecPtr if on current timeline.
+ *
+ * Actually set to the start of the segment containing the timeline switch
+ * that ends currTLI's validity, not the LSN of the switch itself, since
+ * we can't assume the old segment will be present.
+ */
+ XLogRecPtr currTLIValidUntil;
+
+ /*
+ * If currTLI is not the most recent known timeline, the next timeline to
+ * read from when currTLIValidUntil is reached.
+ */
+ TimeLineID nextTLI;
+
+ /*
+ * Buffer for current ReadRecord result (expandable), used when a record
+ * crosses a page boundary.
+ */
+ char *readRecordBuf;
+ uint32 readRecordBufSize;
+
+ /* Buffer to hold error message */
+ char *errormsg_buf;
+ bool errormsg_deferred;
+
+ /*
+ * Flag to indicate to XLogPageReadCB that it should not block waiting for
+ * data.
+ */
+ bool nonblocking;
+};
+
+/*
+ * Check if XLogNextRecord() has any more queued records or an error to return.
+ */
+static inline bool
+XLogReaderHasQueuedRecordOrError(XLogReaderState *state)
+{
+ return (state->decode_queue_head != NULL) || state->errormsg_deferred;
+}
+
+/* Get a new XLogReader */
+extern XLogReaderState *XLogReaderAllocate(int wal_segment_size,
+ const char *waldir,
+ XLogReaderRoutine *routine,
+ void *private_data);
+
+/* Free an XLogReader */
+extern void XLogReaderFree(XLogReaderState *state);
+
+/* Optionally provide a circular decoding buffer to allow readahead. */
+extern void XLogReaderSetDecodeBuffer(XLogReaderState *state,
+ void *buffer,
+ size_t size);
+
+/* Position the XLogReader to given record */
+extern void XLogBeginRead(XLogReaderState *state, XLogRecPtr RecPtr);
+extern XLogRecPtr XLogFindNextRecord(XLogReaderState *state, XLogRecPtr RecPtr);
+
+/* Return values from XLogPageReadCB. */
+typedef enum XLogPageReadResult
+{
+ XLREAD_SUCCESS = 0, /* record is successfully read */
+ XLREAD_FAIL = -1, /* failed during reading a record */
+ XLREAD_WOULDBLOCK = -2 /* nonblocking mode only, no data */
+} XLogPageReadResult;
+
+/* Read the next XLog record. Returns NULL on end-of-WAL or failure */
+extern struct XLogRecord *XLogReadRecord(XLogReaderState *state,
+ char **errormsg);
+
+/* Consume the next record or error. */
+extern DecodedXLogRecord *XLogNextRecord(XLogReaderState *state,
+ char **errormsg);
+
+/* Release the previously returned record, if necessary. */
+extern XLogRecPtr XLogReleasePreviousRecord(XLogReaderState *state);
+
+/* Try to read ahead, if there is data and space. */
+extern DecodedXLogRecord *XLogReadAhead(XLogReaderState *state,
+ bool nonblocking);
+
+/* Validate a page */
+extern bool XLogReaderValidatePageHeader(XLogReaderState *state,
+ XLogRecPtr recptr, char *phdr);
+
+/* Forget error produced by XLogReaderValidatePageHeader(). */
+extern void XLogReaderResetError(XLogReaderState *state);
+
+/*
+ * Error information from WALRead that both backend and frontend caller can
+ * process. Currently only errors from pg_pread can be reported.
+ */
+typedef struct WALReadError
+{
+ int wre_errno; /* errno set by the last pg_pread() */
+ int wre_off; /* Offset we tried to read from. */
+ int wre_req; /* Bytes requested to be read. */
+ int wre_read; /* Bytes read by the last read(). */
+ WALOpenSegment wre_seg; /* Segment we tried to read from. */
+} WALReadError;
+
+extern bool WALRead(XLogReaderState *state,
+ char *buf, XLogRecPtr startptr, Size count,
+ TimeLineID tli, WALReadError *errinfo);
+
+/* Functions for decoding an XLogRecord */
+
+extern size_t DecodeXLogRecordRequiredSpace(size_t xl_tot_len);
+extern bool DecodeXLogRecord(XLogReaderState *state,
+ DecodedXLogRecord *decoded,
+ XLogRecord *record,
+ XLogRecPtr lsn,
+ char **errormsg);
+
+/*
+ * Macros that provide access to parts of the record most recently returned by
+ * XLogReadRecord() or XLogNextRecord().
+ */
+#define XLogRecGetTotalLen(decoder) ((decoder)->record->header.xl_tot_len)
+#define XLogRecGetPrev(decoder) ((decoder)->record->header.xl_prev)
+#define XLogRecGetInfo(decoder) ((decoder)->record->header.xl_info)
+#define XLogRecGetRmid(decoder) ((decoder)->record->header.xl_rmid)
+#define XLogRecGetXid(decoder) ((decoder)->record->header.xl_xid)
+#define XLogRecGetOrigin(decoder) ((decoder)->record->record_origin)
+#define XLogRecGetTopXid(decoder) ((decoder)->record->toplevel_xid)
+#define XLogRecGetData(decoder) ((decoder)->record->main_data)
+#define XLogRecGetDataLen(decoder) ((decoder)->record->main_data_len)
+#define XLogRecHasAnyBlockRefs(decoder) ((decoder)->record->max_block_id >= 0)
+#define XLogRecMaxBlockId(decoder) ((decoder)->record->max_block_id)
+#define XLogRecGetBlock(decoder, i) (&(decoder)->record->blocks[(i)])
+#define XLogRecHasBlockRef(decoder, block_id) \
+ (((decoder)->record->max_block_id >= (block_id)) && \
+ ((decoder)->record->blocks[block_id].in_use))
+#define XLogRecHasBlockImage(decoder, block_id) \
+ ((decoder)->record->blocks[block_id].has_image)
+#define XLogRecBlockImageApply(decoder, block_id) \
+ ((decoder)->record->blocks[block_id].apply_image)
+#define XLogRecHasBlockData(decoder, block_id) \
+ ((decoder)->record->blocks[block_id].has_data)
+
+#ifndef FRONTEND
+extern FullTransactionId XLogRecGetFullXid(XLogReaderState *record);
+#endif
+
+extern bool RestoreBlockImage(XLogReaderState *record, uint8 block_id, char *page);
+extern char *XLogRecGetBlockData(XLogReaderState *record, uint8 block_id, Size *len);
+extern void XLogRecGetBlockTag(XLogReaderState *record, uint8 block_id,
+ RelFileLocator *rlocator, ForkNumber *forknum,
+ BlockNumber *blknum);
+extern bool XLogRecGetBlockTagExtended(XLogReaderState *record, uint8 block_id,
+ RelFileLocator *rlocator, ForkNumber *forknum,
+ BlockNumber *blknum,
+ Buffer *prefetch_buffer);
+
+#endif /* XLOGREADER_H */
diff --git a/contrib/libs/libpq/src/include/access/xlogrecord.h b/contrib/libs/libpq/src/include/access/xlogrecord.h
new file mode 100644
index 0000000000..f355e08e1d
--- /dev/null
+++ b/contrib/libs/libpq/src/include/access/xlogrecord.h
@@ -0,0 +1,248 @@
+/*
+ * xlogrecord.h
+ *
+ * Definitions for the WAL record format.
+ *
+ * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/access/xlogrecord.h
+ */
+#ifndef XLOGRECORD_H
+#define XLOGRECORD_H
+
+#include "access/rmgr.h"
+#include "access/xlogdefs.h"
+#include "port/pg_crc32c.h"
+#include "storage/block.h"
+#include "storage/relfilelocator.h"
+
+/*
+ * The overall layout of an XLOG record is:
+ * Fixed-size header (XLogRecord struct)
+ * XLogRecordBlockHeader struct
+ * XLogRecordBlockHeader struct
+ * ...
+ * XLogRecordDataHeader[Short|Long] struct
+ * block data
+ * block data
+ * ...
+ * main data
+ *
+ * There can be zero or more XLogRecordBlockHeaders, and 0 or more bytes of
+ * rmgr-specific data not associated with a block. XLogRecord structs
+ * always start on MAXALIGN boundaries in the WAL files, but the rest of
+ * the fields are not aligned.
+ *
+ * The XLogRecordBlockHeader, XLogRecordDataHeaderShort and
+ * XLogRecordDataHeaderLong structs all begin with a single 'id' byte. It's
+ * used to distinguish between block references, and the main data structs.
+ */
+typedef struct XLogRecord
+{
+ uint32 xl_tot_len; /* total len of entire record */
+ TransactionId xl_xid; /* xact id */
+ XLogRecPtr xl_prev; /* ptr to previous record in log */
+ uint8 xl_info; /* flag bits, see below */
+ RmgrId xl_rmid; /* resource manager for this record */
+ /* 2 bytes of padding here, initialize to zero */
+ pg_crc32c xl_crc; /* CRC for this record */
+
+ /* XLogRecordBlockHeaders and XLogRecordDataHeader follow, no padding */
+
+} XLogRecord;
+
+#define SizeOfXLogRecord (offsetof(XLogRecord, xl_crc) + sizeof(pg_crc32c))
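A hedged sketch of reading the fixed header from the start of a record, per the layout described above (the record start is MAXALIGN'd, the rest is not):

static uint32
record_total_length(const char *rec_start)
{
	XLogRecord	hdr;

	memcpy(&hdr, rec_start, SizeOfXLogRecord);
	/* the first XLogRecordBlockHeader or XLogRecordDataHeader* byte follows,
	 * unaligned, at rec_start + SizeOfXLogRecord */
	return hdr.xl_tot_len;
}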
+
+/*
+ * The high 4 bits in xl_info may be used freely by rmgr. The
+ * XLR_SPECIAL_REL_UPDATE and XLR_CHECK_CONSISTENCY bits can be passed by
+ * XLogInsert caller. The rest are set internally by XLogInsert.
+ */
+#define XLR_INFO_MASK 0x0F
+#define XLR_RMGR_INFO_MASK 0xF0
+
+/*
+ * XLogReader needs to allocate all the data of a WAL record in a single
+ * chunk. This means that a single XLogRecord cannot exceed MaxAllocSize
+ * in length if we ignore any allocation overhead of the XLogReader.
+ *
+ * To accommodate some overhead, this value allows for 4M of allocation
+ * overhead, which should be plenty for what
+ * DecodeXLogRecordRequiredSpace() expects as extra.
+ */
+#define XLogRecordMaxSize (1020 * 1024 * 1024)
+
+/*
+ * If a WAL record modifies any relation files, in ways not covered by the
+ * usual block references, this flag is set. This is not used for anything
+ * by PostgreSQL itself, but it allows external tools that read WAL and keep
+ * track of modified blocks to recognize such special record types.
+ */
+#define XLR_SPECIAL_REL_UPDATE 0x01
+
+/*
+ * Enforces consistency checks of replayed WAL at recovery. If enabled,
+ * each record will log a full-page write for each block modified by the
+ * record and will reuse it afterwards for consistency checks. The caller
+ * of XLogInsert can use this value if necessary, but if
+ * wal_consistency_checking is enabled for a rmgr this is set unconditionally.
+ */
+#define XLR_CHECK_CONSISTENCY 0x02
+
+/*
+ * Header info for block data appended to an XLOG record.
+ *
+ * 'data_length' is the length of the rmgr-specific payload data associated
+ * with this block. It does not include the possible full page image, nor
+ * XLogRecordBlockHeader struct itself.
+ *
+ * Note that we don't attempt to align the XLogRecordBlockHeader struct!
+ * So, the struct must be copied to aligned local storage before use.
+ */
+typedef struct XLogRecordBlockHeader
+{
+ uint8 id; /* block reference ID */
+ uint8 fork_flags; /* fork within the relation, and flags */
+ uint16 data_length; /* number of payload bytes (not including page
+ * image) */
+
+ /* If BKPBLOCK_HAS_IMAGE, an XLogRecordBlockImageHeader struct follows */
+ /* If BKPBLOCK_SAME_REL is not set, a RelFileLocator follows */
+ /* BlockNumber follows */
+} XLogRecordBlockHeader;
+
+#define SizeOfXLogRecordBlockHeader (offsetof(XLogRecordBlockHeader, data_length) + sizeof(uint16))
+
+/*
+ * Additional header information when a full-page image is included
+ * (i.e. when BKPBLOCK_HAS_IMAGE is set).
+ *
+ * The XLOG code is aware that PG data pages usually contain an unused "hole"
+ * in the middle, which contains only zero bytes. Since we know that the
+ * "hole" is all zeros, we remove it from the stored data (and it's not counted
+ * in the XLOG record's CRC, either). Hence, the amount of block data actually
+ * present is (BLCKSZ - <length of "hole" bytes>).
+ *
+ * Additionally, when wal_compression is enabled, we will try to compress full
+ * page images using one of the supported algorithms, after removing the
+ * "hole". This can reduce the WAL volume, but at some extra cost of CPU spent
+ * on the compression during WAL logging. In this case, since the "hole"
+ * length cannot be calculated by subtracting the number of page image bytes
+ * from BLCKSZ, basically it needs to be stored as an extra information.
+ * But when no "hole" exists, we can assume that the "hole" length is zero
+ * and no such an extra information needs to be stored. Note that
+ * the original version of page image is stored in WAL instead of the
+ * compressed one if the number of bytes saved by compression is less than
+ * the length of extra information. Hence, when a page image is successfully
+ * compressed, the amount of block data actually present is less than
+ * BLCKSZ - the length of "hole" bytes - the length of extra information.
+ */
+typedef struct XLogRecordBlockImageHeader
+{
+ uint16 length; /* number of page image bytes */
+ uint16 hole_offset; /* number of bytes before "hole" */
+ uint8 bimg_info; /* flag bits, see below */
+
+ /*
+ * If BKPIMAGE_HAS_HOLE and BKPIMAGE_COMPRESSED(), an
+ * XLogRecordBlockCompressHeader struct follows.
+ */
+} XLogRecordBlockImageHeader;
+
+#define SizeOfXLogRecordBlockImageHeader \
+ (offsetof(XLogRecordBlockImageHeader, bimg_info) + sizeof(uint8))
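A hedged sketch of the "hole" handling described above, for the uncompressed case only; RestoreBlockImage() (declared in xlogreader.h) is the real implementation and also handles compressed images:

static void
restore_uncompressed_image(char *page,				/* output buffer, BLCKSZ bytes */
						   const char *bkp_image,	/* stored image, hole removed */
						   uint16 hole_offset,
						   uint16 hole_length)
{
	/* bytes before the hole are stored first */
	memcpy(page, bkp_image, hole_offset);
	/* the hole contained only zeros, so just zero it again */
	memset(page + hole_offset, 0, hole_length);
	/* bytes after the hole follow immediately in the stored image */
	memcpy(page + hole_offset + hole_length,
		   bkp_image + hole_offset,
		   BLCKSZ - (hole_offset + hole_length));
}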
+
+/* Information stored in bimg_info */
+#define BKPIMAGE_HAS_HOLE 0x01 /* page image has "hole" */
+#define BKPIMAGE_APPLY 0x02 /* page image should be restored
+ * during replay */
+/* compression methods supported */
+#define BKPIMAGE_COMPRESS_PGLZ 0x04
+#define BKPIMAGE_COMPRESS_LZ4 0x08
+#define BKPIMAGE_COMPRESS_ZSTD 0x10
+
+#define BKPIMAGE_COMPRESSED(info) \
+ ((info & (BKPIMAGE_COMPRESS_PGLZ | BKPIMAGE_COMPRESS_LZ4 | \
+ BKPIMAGE_COMPRESS_ZSTD)) != 0)
+
+/*
+ * Extra header information used when page image has "hole" and
+ * is compressed.
+ */
+typedef struct XLogRecordBlockCompressHeader
+{
+ uint16 hole_length; /* number of bytes in "hole" */
+} XLogRecordBlockCompressHeader;
+
+#define SizeOfXLogRecordBlockCompressHeader \
+ sizeof(XLogRecordBlockCompressHeader)
+
+/*
+ * Maximum size of the header for a block reference. This is used to size a
+ * temporary buffer for constructing the header.
+ */
+#define MaxSizeOfXLogRecordBlockHeader \
+ (SizeOfXLogRecordBlockHeader + \
+ SizeOfXLogRecordBlockImageHeader + \
+ SizeOfXLogRecordBlockCompressHeader + \
+ sizeof(RelFileLocator) + \
+ sizeof(BlockNumber))
+
+/*
+ * The fork number fits in the lower 4 bits in the fork_flags field. The upper
+ * bits are used for flags.
+ */
+#define BKPBLOCK_FORK_MASK 0x0F
+#define BKPBLOCK_FLAG_MASK 0xF0
+#define BKPBLOCK_HAS_IMAGE 0x10 /* block data is an XLogRecordBlockImage */
+#define BKPBLOCK_HAS_DATA 0x20
+#define BKPBLOCK_WILL_INIT 0x40 /* redo will re-init the page */
+#define BKPBLOCK_SAME_REL 0x80 /* RelFileLocator omitted, same as
+ * previous */
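A hedged sketch of pulling apart a block header's fork_flags byte with the masks above:

static ForkNumber
block_header_fork(const XLogRecordBlockHeader *bhdr)
{
	/* low 4 bits hold the fork number, high 4 bits the BKPBLOCK_* flags */
	return (ForkNumber) (bhdr->fork_flags & BKPBLOCK_FORK_MASK);
}

static bool
block_header_has_image(const XLogRecordBlockHeader *bhdr)
{
	return (bhdr->fork_flags & BKPBLOCK_HAS_IMAGE) != 0;
}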
+
+/*
+ * XLogRecordDataHeaderShort/Long are used for the "main data" portion of
+ * the record. If the length of the data is less than 256 bytes, the short
+ * form is used, with a single byte to hold the length. Otherwise the long
+ * form is used.
+ *
+ * (These structs are currently not used in the code, they are here just for
+ * documentation purposes).
+ */
+typedef struct XLogRecordDataHeaderShort
+{
+ uint8 id; /* XLR_BLOCK_ID_DATA_SHORT */
+ uint8 data_length; /* number of payload bytes */
+} XLogRecordDataHeaderShort;
+
+#define SizeOfXLogRecordDataHeaderShort (sizeof(uint8) * 2)
+
+typedef struct XLogRecordDataHeaderLong
+{
+ uint8 id; /* XLR_BLOCK_ID_DATA_LONG */
+ /* followed by uint32 data_length, unaligned */
+} XLogRecordDataHeaderLong;
+
+#define SizeOfXLogRecordDataHeaderLong (sizeof(uint8) + sizeof(uint32))
+
+/*
+ * Block IDs used to distinguish different kinds of record fragments. Block
+ * references are numbered from 0 to XLR_MAX_BLOCK_ID. A rmgr is free to use
+ * any ID number in that range (although you should stick to small numbers,
+ * because the WAL machinery is optimized for that case). A few ID
+ * numbers are reserved to denote the "main" data portion of the record,
+ * as well as replication-supporting transaction metadata.
+ *
+ * The maximum is currently set at 32, quite arbitrarily. Most records only
+ * need a handful of block references, but there are a few exceptions that
+ * need more.
+ */
+#define XLR_MAX_BLOCK_ID 32
+
+#define XLR_BLOCK_ID_DATA_SHORT 255
+#define XLR_BLOCK_ID_DATA_LONG 254
+#define XLR_BLOCK_ID_ORIGIN 253
+#define XLR_BLOCK_ID_TOPLEVEL_XID 252
+
+#endif /* XLOGRECORD_H */