From 1cc244cf8e8b6f27f2261f99250b3cb60c9ac371 Mon Sep 17 00:00:00 2001
From: Arpan Agrawal
Date: Fri, 5 May 2023 16:26:21 +0530
Subject: [PATCH] [MERGE PG15] Initdb: Bootstrap step working

Summary: Initdb bootstrap step working

Test Plan: Jenkins: skip

Reviewers: mihnea, neil

Reviewed By: neil

Subscribers: yql

Differential Revision: https://phabricator.dev.yugabyte.com/D25090
---
 src/postgres/src/backend/access/brin/brin.c   |   3 +-
 src/postgres/src/backend/access/heap/heapam.c |   8 +-
 .../src/backend/access/heap/heapam_handler.c  | 542 ++++++++++--------
 .../src/backend/access/yb_access/yb_lsm.c     |   2 +-
 .../src/backend/access/yb_access/yb_scan.c    |  24 +-
 src/postgres/src/backend/catalog/aclchk.c     |   7 +-
 src/postgres/src/backend/catalog/heap.c       |  17 +-
 src/postgres/src/backend/catalog/index.c      |   5 +-
 src/postgres/src/backend/catalog/indexing.c   |  28 +-
 .../src/backend/commands/tablegroup.c         |   8 +-
 .../src/backend/commands/yb_profile.c         |  10 +-
 src/postgres/src/backend/commands/ybccmds.c   |   5 +-
 .../src/backend/executor/ybcModifyTable.c     |   4 +-
 .../src/backend/utils/cache/relcache.c        |  20 +-
 .../src/backend/utils/cache/syscache.c        |  19 +-
 .../src/backend/utils/misc/pg_yb_utils.c      |   8 -
 src/postgres/src/include/access/tableam.h     |  16 +-
 src/yb/common/pg_system_attr.h                |  15 +-
 18 files changed, 402 insertions(+), 339 deletions(-)

diff --git a/src/postgres/src/backend/access/brin/brin.c b/src/postgres/src/backend/access/brin/brin.c
index 69bf0fa1f9d5..7709808b63ff 100644
--- a/src/postgres/src/backend/access/brin/brin.c
+++ b/src/postgres/src/backend/access/brin/brin.c
@@ -1399,7 +1399,8 @@ summarize_range(IndexInfo *indexInfo, BrinBuildState *state, Relation heapRel,
 	state->bs_currRangeStart = heapBlk;
 	table_index_build_range_scan(heapRel, state->bs_irel, indexInfo, false, true, false,
 								 heapBlk, scanNumBlks,
-								 brinbuildCallback, (void *) state, NULL);
+								 brinbuildCallback, (void *) state, NULL,
+								 NULL /* bfinfo */, NULL /* bfresult */);
 
 	/*
 	 * Now we update the values obtained by the scan with the placeholder
diff --git a/src/postgres/src/backend/access/heap/heapam.c b/src/postgres/src/backend/access/heap/heapam.c
index b51c80ddc965..1f11d1574d71 100644
--- a/src/postgres/src/backend/access/heap/heapam.c
+++ b/src/postgres/src/backend/access/heap/heapam.c
@@ -1310,6 +1310,12 @@ heap_endscan(TableScanDesc sscan)
 HeapTuple
 heap_getnext(TableScanDesc sscan, ScanDirection direction)
 {
+
+	if (IsYBRelation(sscan->rs_rd))
+	{
+		return ybc_heap_getnext(sscan);
+	}
+
 	HeapScanDesc scan = (HeapScanDesc) sscan;
 
 	/*
@@ -6067,7 +6073,7 @@ heap_inplace_update(Relation relation, HeapTuple tuple, bool yb_shared_update)
 
 	YB_FOR_EACH_DB(pg_db_tuple)
 	{
-		Oid		dboid = YbHeapTupleGetOid(pg_db_tuple);	/* TODO(Alex) */
+		Oid		dboid = ((Form_pg_database) GETSTRUCT(pg_db_tuple))->oid;
 		/* YB doesn't use PG locks so it's okay not to take them. */
 		YBCUpdateSysCatalogTupleForDb(dboid, relation, NULL /* oldtuple */, tuple);
 	}
diff --git a/src/postgres/src/backend/access/heap/heapam_handler.c b/src/postgres/src/backend/access/heap/heapam_handler.c
index a43b2eefbf3d..ba82ea07433a 100644
--- a/src/postgres/src/backend/access/heap/heapam_handler.c
+++ b/src/postgres/src/backend/access/heap/heapam_handler.c
@@ -45,6 +45,9 @@
 #include "utils/builtins.h"
 #include "utils/rel.h"
 
+/* Yugabyte includes */
+#include "access/yb_scan.h"
+
 static void reform_and_rewrite_tuple(HeapTuple tuple,
 									 Relation OldHeap, Relation NewHeap,
 									 Datum *values, bool *isnull, RewriteState rwstate);
@@ -1165,7 +1168,9 @@ heapam_index_build_range_scan(Relation heapRelation,
 							  BlockNumber numblocks,
 							  IndexBuildCallback callback,
 							  void *callback_state,
-							  TableScanDesc scan)
+							  TableScanDesc scan,
+							  YbBackfillInfo *bfinfo,
+							  YbPgExecOutParam *bfresult)
 {
 	HeapScanDesc hscan;
 	bool		is_system_catalog;
@@ -1184,6 +1189,8 @@ heapam_index_build_range_scan(Relation heapRelation,
 	BlockNumber previous_blkno = InvalidBlockNumber;
 	BlockNumber root_blkno = InvalidBlockNumber;
 	OffsetNumber root_offsets[MaxHeapTuplesPerPage];
+	MemoryContext oldcontext = GetCurrentMemoryContext();
+	int			yb_tuples_done = 0;
 
 	/*
 	 * sanity checks
@@ -1252,6 +1259,22 @@ heapam_index_build_range_scan(Relation heapRelation,
 											NULL,	/* scan key */
 											true,	/* buffer access strategy OK */
 											allow_sync);	/* syncscan OK? */
+		if (IsYBRelation(heapRelation))
+		{
+			YBCPgExecParameters *exec_params = &estate->yb_exec_params;
+			if (bfinfo)
+			{
+				if (bfinfo->bfinstr)
+					exec_params->bfinstr = pstrdup(bfinfo->bfinstr);
+				exec_params->backfill_read_time = bfinfo->read_time;
+				exec_params->partition_key =
+					pstrdup(bfinfo->row_bounds->partition_key);
+				exec_params->out_param = bfresult;
+				exec_params->is_index_backfill = true;
+			}
+
+			((YbScanDesc) scan)->exec_params = exec_params;
+		}
 	}
 	else
 	{
@@ -1267,6 +1290,7 @@ heapam_index_build_range_scan(Relation heapRelation,
 		snapshot = scan->rs_snapshot;
 	}
 
+	/* YB_TODO(arpan): scan can be an instance of YBScanDesc as well. */
 	hscan = (HeapScanDesc) scan;
 
 	/*
@@ -1311,6 +1335,9 @@ heapam_index_build_range_scan(Relation heapRelation,
 
 	reltuples = 0;
 
+	if (IsYBRelation(indexRelation))
+		MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
+
 	/*
 	 * Scan all tuples in the base relation.
 	 */
@@ -1333,287 +1360,297 @@ heapam_index_build_range_scan(Relation heapRelation,
 			}
 		}
 
-		/*
-		 * When dealing with a HOT-chain of updated tuples, we want to index
-		 * the values of the live tuple (if any), but index it under the TID
-		 * of the chain's root tuple. This approach is necessary to preserve
-		 * the HOT-chain structure in the heap. So we need to be able to find
-		 * the root item offset for every tuple that's in a HOT-chain. When
-		 * first reaching a new page of the relation, call
-		 * heap_get_root_tuples() to build a map of root item offsets on the
-		 * page.
-		 *
-		 * It might look unsafe to use this information across buffer
-		 * lock/unlock. However, we hold ShareLock on the table so no
-		 * ordinary insert/update/delete should occur; and we hold pin on the
-		 * buffer continuously while visiting the page, so no pruning
-		 * operation can occur either.
-		 *
-		 * In cases with only ShareUpdateExclusiveLock on the table, it's
-		 * possible for some HOT tuples to appear that we didn't know about
-		 * when we first read the page. To handle that case, we re-obtain the
-		 * list of root offsets when a HOT tuple points to a root item that we
-		 * don't know about.
-		 *
-		 * Also, although our opinions about tuple liveness could change while
-		 * we scan the page (due to concurrent transaction commits/aborts),
-		 * the chain root locations won't, so this info doesn't need to be
-		 * rebuilt after waiting for another transaction.
-		 *
-		 * Note the implied assumption that there is no more than one live
-		 * tuple per HOT-chain --- else we could create more than one index
-		 * entry pointing to the same root tuple.
-		 */
-		if (hscan->rs_cblock != root_blkno)
+		if (!IsYBRelation(heapRelation))
 		{
-			Page		page = BufferGetPage(hscan->rs_cbuf);
-
-			LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
-			heap_get_root_tuples(page, root_offsets);
-			LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
+			/*
+			 * When dealing with a HOT-chain of updated tuples, we want to index
+			 * the values of the live tuple (if any), but index it under the TID
+			 * of the chain's root tuple. This approach is necessary to preserve
+			 * the HOT-chain structure in the heap. So we need to be able to find
+			 * the root item offset for every tuple that's in a HOT-chain. When
+			 * first reaching a new page of the relation, call
+			 * heap_get_root_tuples() to build a map of root item offsets on the
+			 * page.
+			 *
+			 * It might look unsafe to use this information across buffer
+			 * lock/unlock. However, we hold ShareLock on the table so no
+			 * ordinary insert/update/delete should occur; and we hold pin on the
+			 * buffer continuously while visiting the page, so no pruning
+			 * operation can occur either.
+			 *
+			 * In cases with only ShareUpdateExclusiveLock on the table, it's
+			 * possible for some HOT tuples to appear that we didn't know about
+			 * when we first read the page. To handle that case, we re-obtain the
+			 * list of root offsets when a HOT tuple points to a root item that we
+			 * don't know about.
+			 *
+			 * Also, although our opinions about tuple liveness could change while
+			 * we scan the page (due to concurrent transaction commits/aborts),
+			 * the chain root locations won't, so this info doesn't need to be
+			 * rebuilt after waiting for another transaction.
+			 *
+			 * Note the implied assumption that there is no more than one live
+			 * tuple per HOT-chain --- else we could create more than one index
+			 * entry pointing to the same root tuple.
+			 */
+			if (hscan->rs_cblock != root_blkno)
+			{
+				Page		page = BufferGetPage(hscan->rs_cbuf);
 
-			root_blkno = hscan->rs_cblock;
-		}
+				LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
+				heap_get_root_tuples(page, root_offsets);
+				LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
 
-		if (snapshot == SnapshotAny)
-		{
-			/* do our own time qual check */
-			bool		indexIt;
-			TransactionId xwait;
+				root_blkno = hscan->rs_cblock;
+			}
 
-	recheck:
+			if (snapshot == SnapshotAny)
+			{
+				/* do our own time qual check */
+				bool		indexIt;
+				TransactionId xwait;
 
-			/*
-			 * We could possibly get away with not locking the buffer here,
-			 * since caller should hold ShareLock on the relation, but let's
-			 * be conservative about it. (This remark is still correct even
-			 * with HOT-pruning: our pin on the buffer prevents pruning.)
-			 */
-			LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
+		recheck:
 
-			/*
-			 * The criteria for counting a tuple as live in this block need to
-			 * match what analyze.c's heapam_scan_analyze_next_tuple() does,
-			 * otherwise CREATE INDEX and ANALYZE may produce wildly different
-			 * reltuples values, e.g. when there are many recently-dead
-			 * tuples.
- */ - switch (HeapTupleSatisfiesVacuum(heapTuple, OldestXmin, - hscan->rs_cbuf)) - { - case HEAPTUPLE_DEAD: - /* Definitely dead, we can ignore it */ - indexIt = false; - tupleIsAlive = false; - break; - case HEAPTUPLE_LIVE: - /* Normal case, index and unique-check it */ - indexIt = true; - tupleIsAlive = true; - /* Count it as live, too */ - reltuples += 1; - break; - case HEAPTUPLE_RECENTLY_DEAD: + /* + * We could possibly get away with not locking the buffer here, + * since caller should hold ShareLock on the relation, but let's + * be conservative about it. (This remark is still correct even + * with HOT-pruning: our pin on the buffer prevents pruning.) + */ + LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE); - /* - * If tuple is recently deleted then we must index it - * anyway to preserve MVCC semantics. (Pre-existing - * transactions could try to use the index after we finish - * building it, and may need to see such tuples.) - * - * However, if it was HOT-updated then we must only index - * the live tuple at the end of the HOT-chain. Since this - * breaks semantics for pre-existing snapshots, mark the - * index as unusable for them. - * - * We don't count recently-dead tuples in reltuples, even - * if we index them; see heapam_scan_analyze_next_tuple(). - */ - if (HeapTupleIsHotUpdated(heapTuple)) - { + /* + * The criteria for counting a tuple as live in this block need to + * match what analyze.c's heapam_scan_analyze_next_tuple() does, + * otherwise CREATE INDEX and ANALYZE may produce wildly different + * reltuples values, e.g. when there are many recently-dead + * tuples. + */ + switch (HeapTupleSatisfiesVacuum(heapTuple, OldestXmin, + hscan->rs_cbuf)) + { + case HEAPTUPLE_DEAD: + /* Definitely dead, we can ignore it */ indexIt = false; - /* mark the index as unsafe for old snapshots */ - indexInfo->ii_BrokenHotChain = true; - } - else - indexIt = true; - /* In any case, exclude the tuple from unique-checking */ - tupleIsAlive = false; - break; - case HEAPTUPLE_INSERT_IN_PROGRESS: - - /* - * In "anyvisible" mode, this tuple is visible and we - * don't need any further checks. - */ - if (anyvisible) - { + tupleIsAlive = false; + break; + case HEAPTUPLE_LIVE: + /* Normal case, index and unique-check it */ indexIt = true; tupleIsAlive = true; + /* Count it as live, too */ reltuples += 1; break; - } - - /* - * Since caller should hold ShareLock or better, normally - * the only way to see this is if it was inserted earlier - * in our own transaction. However, it can happen in - * system catalogs, since we tend to release write lock - * before commit there. Give a warning if neither case - * applies. - */ - xwait = HeapTupleHeaderGetXmin(heapTuple->t_data); - if (!TransactionIdIsCurrentTransactionId(xwait)) - { - if (!is_system_catalog) - elog(WARNING, "concurrent insert in progress within table \"%s\"", - RelationGetRelationName(heapRelation)); + case HEAPTUPLE_RECENTLY_DEAD: /* - * If we are performing uniqueness checks, indexing - * such a tuple could lead to a bogus uniqueness - * failure. In that case we wait for the inserting - * transaction to finish and check again. - */ - if (checking_uniqueness) + * If tuple is recently deleted then we must index it + * anyway to preserve MVCC semantics. (Pre-existing + * transactions could try to use the index after we finish + * building it, and may need to see such tuples.) + * + * However, if it was HOT-updated then we must only index + * the live tuple at the end of the HOT-chain. 
+						 * breaks semantics for pre-existing snapshots, mark the
+						 * index as unusable for them.
+						 *
+						 * We don't count recently-dead tuples in reltuples, even
+						 * if we index them; see heapam_scan_analyze_next_tuple().
+						 */
+						if (HeapTupleIsHotUpdated(heapTuple))
 						{
-							/*
-							 * Must drop the lock on the buffer before we wait
-							 */
-							LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
-							XactLockTableWait(xwait, heapRelation,
-											  &heapTuple->t_self,
-											  XLTW_InsertIndexUnique);
-							CHECK_FOR_INTERRUPTS();
-							goto recheck;
+							indexIt = false;
+							/* mark the index as unsafe for old snapshots */
+							indexInfo->ii_BrokenHotChain = true;
 						}
-					}
-					else
-					{
-						/*
-						 * For consistency with
-						 * heapam_scan_analyze_next_tuple(), count
-						 * HEAPTUPLE_INSERT_IN_PROGRESS tuples as live only
-						 * when inserted by our own transaction.
-						 */
-						reltuples += 1;
-					}
-
-					/*
-					 * We must index such tuples, since if the index build
-					 * commits then they're good.
-					 */
-					indexIt = true;
-					tupleIsAlive = true;
-					break;
-				case HEAPTUPLE_DELETE_IN_PROGRESS:
-
-					/*
-					 * As with INSERT_IN_PROGRESS case, this is unexpected
-					 * unless it's our own deletion or a system catalog; but
-					 * in anyvisible mode, this tuple is visible.
-					 */
-					if (anyvisible)
-					{
-						indexIt = true;
+						else
+							indexIt = true;
+						/* In any case, exclude the tuple from unique-checking */
 						tupleIsAlive = false;
-						reltuples += 1;
 						break;
-					}
+					case HEAPTUPLE_INSERT_IN_PROGRESS:
 
-					xwait = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
-					if (!TransactionIdIsCurrentTransactionId(xwait))
-					{
-						if (!is_system_catalog)
-							elog(WARNING, "concurrent delete in progress within table \"%s\"",
-								 RelationGetRelationName(heapRelation));
+						/*
+						 * In "anyvisible" mode, this tuple is visible and we
+						 * don't need any further checks.
+						 */
+						if (anyvisible)
+						{
+							indexIt = true;
+							tupleIsAlive = true;
+							reltuples += 1;
+							break;
+						}
 
 						/*
-						 * If we are performing uniqueness checks, assuming
+						 * Since caller should hold ShareLock or better, normally
+						 * the only way to see this is if it was inserted earlier
+						 * in our own transaction. However, it can happen in
+						 * system catalogs, since we tend to release write lock
+						 * before commit there. Give a warning if neither case
+						 * applies.
+						 */
+						xwait = HeapTupleHeaderGetXmin(heapTuple->t_data);
+						if (!TransactionIdIsCurrentTransactionId(xwait))
+						{
+							if (!is_system_catalog)
+								elog(WARNING, "concurrent insert in progress within table \"%s\"",
+									 RelationGetRelationName(heapRelation));
+
+							/*
+							 * If we are performing uniqueness checks, indexing
+							 * such a tuple could lead to a bogus uniqueness
+							 * failure. In that case we wait for the inserting
+							 * transaction to finish and check again.
+ */ + if (checking_uniqueness) + { + /* + * Must drop the lock on the buffer before we wait + */ + LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK); + XactLockTableWait(xwait, heapRelation, + &heapTuple->t_self, + XLTW_InsertIndexUnique); + CHECK_FOR_INTERRUPTS(); + goto recheck; + } + } + else { /* - * Must drop the lock on the buffer before we wait - */ - LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK); - XactLockTableWait(xwait, heapRelation, - &heapTuple->t_self, - XLTW_InsertIndexUnique); - CHECK_FOR_INTERRUPTS(); - goto recheck; + * For consistency with + * heapam_scan_analyze_next_tuple(), count + * HEAPTUPLE_INSERT_IN_PROGRESS tuples as live only + * when inserted by our own transaction. + */ + reltuples += 1; } /* - * Otherwise index it but don't check for uniqueness, - * the same as a RECENTLY_DEAD tuple. - */ + * We must index such tuples, since if the index build + * commits then they're good. + */ indexIt = true; + tupleIsAlive = true; + break; + case HEAPTUPLE_DELETE_IN_PROGRESS: /* - * Count HEAPTUPLE_DELETE_IN_PROGRESS tuples as live, - * if they were not deleted by the current - * transaction. That's what - * heapam_scan_analyze_next_tuple() does, and we want - * the behavior to be consistent. - */ - reltuples += 1; - } - else if (HeapTupleIsHotUpdated(heapTuple)) - { - /* - * It's a HOT-updated tuple deleted by our own xact. - * We can assume the deletion will commit (else the - * index contents don't matter), so treat the same as - * RECENTLY_DEAD HOT-updated tuples. - */ - indexIt = false; - /* mark the index as unsafe for old snapshots */ - indexInfo->ii_BrokenHotChain = true; - } - else - { - /* - * It's a regular tuple deleted by our own xact. Index - * it, but don't check for uniqueness nor count in - * reltuples, the same as a RECENTLY_DEAD tuple. - */ - indexIt = true; - } - /* In any case, exclude the tuple from unique-checking */ - tupleIsAlive = false; - break; - default: - elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result"); - indexIt = tupleIsAlive = false; /* keep compiler quiet */ - break; - } + * As with INSERT_IN_PROGRESS case, this is unexpected + * unless it's our own deletion or a system catalog; but + * in anyvisible mode, this tuple is visible. + */ + if (anyvisible) + { + indexIt = true; + tupleIsAlive = false; + reltuples += 1; + break; + } - LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK); + xwait = HeapTupleHeaderGetUpdateXid(heapTuple->t_data); + if (!TransactionIdIsCurrentTransactionId(xwait)) + { + if (!is_system_catalog) + elog(WARNING, "concurrent delete in progress within table \"%s\"", + RelationGetRelationName(heapRelation)); - if (!indexIt) - continue; + /* + * If we are performing uniqueness checks, assuming + * the tuple is dead could lead to missing a + * uniqueness violation. In that case we wait for the + * deleting transaction to finish and check again. + * + * Also, if it's a HOT-updated tuple, we should not + * index it but rather the live tuple at the end of + * the HOT-chain. However, the deleting transaction + * could abort, possibly leaving this tuple as live + * after all, in which case it has to be indexed. The + * only way to know what to do is to wait for the + * deleting transaction to finish and check again. 
+ */ + if (checking_uniqueness || + HeapTupleIsHotUpdated(heapTuple)) + { + /* + * Must drop the lock on the buffer before we wait + */ + LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK); + XactLockTableWait(xwait, heapRelation, + &heapTuple->t_self, + XLTW_InsertIndexUnique); + CHECK_FOR_INTERRUPTS(); + goto recheck; + } + + /* + * Otherwise index it but don't check for uniqueness, + * the same as a RECENTLY_DEAD tuple. + */ + indexIt = true; + + /* + * Count HEAPTUPLE_DELETE_IN_PROGRESS tuples as live, + * if they were not deleted by the current + * transaction. That's what + * heapam_scan_analyze_next_tuple() does, and we want + * the behavior to be consistent. + */ + reltuples += 1; + } + else if (HeapTupleIsHotUpdated(heapTuple)) + { + /* + * It's a HOT-updated tuple deleted by our own xact. + * We can assume the deletion will commit (else the + * index contents don't matter), so treat the same as + * RECENTLY_DEAD HOT-updated tuples. + */ + indexIt = false; + /* mark the index as unsafe for old snapshots */ + indexInfo->ii_BrokenHotChain = true; + } + else + { + /* + * It's a regular tuple deleted by our own xact. Index + * it, but don't check for uniqueness nor count in + * reltuples, the same as a RECENTLY_DEAD tuple. + */ + indexIt = true; + } + /* In any case, exclude the tuple from unique-checking */ + tupleIsAlive = false; + break; + default: + elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result"); + indexIt = tupleIsAlive = false; /* keep compiler quiet */ + break; + } + + LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK); + + if (!indexIt) + continue; + } + else + { + /* heap_getnext did the time qual check */ + tupleIsAlive = true; + reltuples += 1; + } } else { - /* heap_getnext did the time qual check */ + /* In YugaByte mode DocDB will only send live tuples. */ tupleIsAlive = true; reltuples += 1; } - MemoryContextReset(econtext->ecxt_per_tuple_memory); + if (!IsYBRelation(indexRelation)) + MemoryContextReset(econtext->ecxt_per_tuple_memory); /* Set up for predicate or expression evaluation */ ExecStoreBufferHeapTuple(heapTuple, slot, hscan->rs_cbuf); @@ -1625,7 +1662,12 @@ heapam_index_build_range_scan(Relation heapRelation, if (predicate != NULL) { if (!ExecQual(predicate, econtext)) + { + if (IsYBRelation(indexRelation) && !indexInfo->ii_Concurrent) + pgstat_progress_update_param(PROGRESS_CREATEIDX_TUPLES_DONE, + ++yb_tuples_done); continue; + } } /* @@ -1645,7 +1687,7 @@ heapam_index_build_range_scan(Relation heapRelation, * pass the values[] and isnull[] arrays, instead. */ - if (HeapTupleIsHeapOnly(heapTuple)) + if (!IsYugaByteEnabled() && HeapTupleIsHeapOnly(heapTuple)) { /* * For a heap-only tuple, pretend its TID is that of the root. See @@ -1691,6 +1733,13 @@ heapam_index_build_range_scan(Relation heapRelation, callback(indexRelation, &heapTuple->t_self, values, isnull, tupleIsAlive, callback_state); } + if (IsYBRelation(indexRelation)) + { + MemoryContextReset(econtext->ecxt_per_tuple_memory); + if (!indexInfo->ii_Concurrent) + pgstat_progress_update_param(PROGRESS_CREATEIDX_TUPLES_DONE, + ++yb_tuples_done); + } } /* Report scan progress one last time. 
@@ -1712,6 +1761,9 @@ heapam_index_build_range_scan(Relation heapRelation,
 									 blks_done);
 	}
 
+	if (IsYBRelation(indexRelation))
+		MemoryContextSwitchTo(oldcontext);
+
 	table_endscan(scan);
 
 	/* we can now forget our snapshot, if set and registered by us */
diff --git a/src/postgres/src/backend/access/yb_access/yb_lsm.c b/src/postgres/src/backend/access/yb_access/yb_lsm.c
index ece55c95a781..65fa66121357 100644
--- a/src/postgres/src/backend/access/yb_access/yb_lsm.c
+++ b/src/postgres/src/backend/access/yb_access/yb_lsm.c
@@ -290,7 +290,7 @@ ybcininsert(Relation index, Datum *values, bool *isnull, ItemPointer tid, Relati
 
 	YB_FOR_EACH_DB(pg_db_tuple)
 	{
-		Oid		dboid = YbHeapTupleGetOid(pg_db_tuple);	/* TODO(Alex) */
+		Oid		dboid = ((Form_pg_database) GETSTRUCT(pg_db_tuple))->oid;
 		/*
 		 * Since this is a catalog index, we assume it exists in all databases.
 		 * YB doesn't use PG locks so it's okay not to take them.
diff --git a/src/postgres/src/backend/access/yb_access/yb_scan.c b/src/postgres/src/backend/access/yb_access/yb_scan.c
index 23a33d149e92..b2094279827d 100644
--- a/src/postgres/src/backend/access/yb_access/yb_scan.c
+++ b/src/postgres/src/backend/access/yb_access/yb_scan.c
@@ -795,23 +795,23 @@ YbShouldPushdownScanPrimaryKey(Relation relation, YbScanPlan scan_plan,
 /* int comparator for qsort() */
 static int int_compar_cb(const void *v1, const void *v2)
 {
-  const int *k1 = v1;
-  const int *k2 = v2;
+	const int  *k1 = v1;
+	const int  *k2 = v2;
 
-  if (*k1 < *k2)
-    return -1;
+	if (*k1 < *k2)
+		return -1;
 
-  if (*k1 > *k2)
-    return 1;
+	if (*k1 > *k2)
+		return 1;
 
-  return 0;
+	return 0;
 }
 
 /* Use the scan-descriptor and scan-plan to setup scan key for filtering */
 static void
 ybcSetupScanKeys(YbScanDesc ybScan, YbScanPlan scan_plan)
 {
-	TableScanDesc tsdesc = (TableScanDesc)ybScan;
+	TableScanDesc tsdesc = (TableScanDesc) ybScan;
 
 	/*
 	 * Find the scan keys that are the primary key.
@@ -830,8 +830,8 @@ ybcSetupScanKeys(YbScanDesc ybScan, YbScanPlan scan_plan)
 		bool		is_primary_key = bms_is_member(idx, scan_plan->primary_key);
 
 		if (is_primary_key &&
-			YbShouldPushdownScanPrimaryKey(
-				tsdesc->rs_rd, scan_plan, attnum, ybScan->keys[i]))
+			YbShouldPushdownScanPrimaryKey(tsdesc->rs_rd, scan_plan, attnum,
+										   ybScan->keys[i]))
 		{
 			scan_plan->sk_cols = bms_add_member(scan_plan->sk_cols, idx);
 		}
@@ -2535,7 +2535,7 @@ TableScanDesc ybc_heap_beginscan(Relation relation,
 HeapTuple ybc_heap_getnext(TableScanDesc tsdesc)
 {
 	bool		recheck = false;
-	YbScanDesc	ybdesc = (YbScanDesc)tsdesc;
+	YbScanDesc	ybdesc = (YbScanDesc) tsdesc;
 	HeapTuple	tuple;
 
 	Assert(PointerIsValid(tsdesc));
@@ -2547,7 +2547,7 @@ HeapTuple ybc_heap_getnext(TableScanDesc tsdesc)
 
 void ybc_heap_endscan(TableScanDesc tsdesc)
 {
-	YbScanDesc	ybdesc = (YbScanDesc)tsdesc;
+	YbScanDesc	ybdesc = (YbScanDesc) tsdesc;
 
 	if (tsdesc->rs_flags & SO_TEMP_SNAPSHOT)
 		UnregisterSnapshot(tsdesc->rs_snapshot);
diff --git a/src/postgres/src/backend/catalog/aclchk.c b/src/postgres/src/backend/catalog/aclchk.c
index 58694c9e2086..a721cac96407 100644
--- a/src/postgres/src/backend/catalog/aclchk.c
+++ b/src/postgres/src/backend/catalog/aclchk.c
@@ -3323,9 +3323,10 @@ ExecGrant_Tablegroup(InternalGrant *istmt)
 	 * We need to redesign "TableGroup" feature and not using table with oids.
 	 */
 
 	/* Update the shared dependency ACL info */
-	updateAclDependencies(YbTablegroupRelationId, YbHeapTupleGetOid(tuple), 0,
-						  ownerId, noldmembers, oldmembers,
-						  nnewmembers, newmembers);
+	updateAclDependencies(YbTablegroupRelationId,
+						  ((Form_pg_yb_tablegroup) GETSTRUCT(tuple))->oid,
+						  0, ownerId, noldmembers, oldmembers, nnewmembers,
+						  newmembers);
 
 	ReleaseSysCache(tuple);
diff --git a/src/postgres/src/backend/catalog/heap.c b/src/postgres/src/backend/catalog/heap.c
index bff8a9c3a5c1..819bbba74f91 100644
--- a/src/postgres/src/backend/catalog/heap.c
+++ b/src/postgres/src/backend/catalog/heap.c
@@ -1469,8 +1469,14 @@ heap_create_with_catalog(const char *relname,
 	if (!(relkind == RELKIND_SEQUENCE ||
 		  relkind == RELKIND_TOASTVALUE ||
 		  relkind == RELKIND_INDEX ||
-		  relkind == RELKIND_PARTITIONED_INDEX) &&
-		!IsCatalogRelation(new_rel_desc))
+		  relkind == RELKIND_PARTITIONED_INDEX))
+		/* && !IsCatalogRelation(new_rel_desc)
+		 * YB_TODO(arpan):
+		 * The above (now commented) condition was preventing creation of the
+		 * _pg_statistic row in the pg_type relation. As a result,
+		 * pg_statistic_ext_data relation creation was failing. This
+		 * condition is also not present in PG15.
+		 */
 	{
 		Oid			new_array_oid;
 		ObjectAddress new_type_addr;
@@ -1506,7 +1512,12 @@ heap_create_with_catalog(const char *relname,
 		/* Now create the array type. */
 		relarrayname = makeArrayTypeName(relname, relnamespace);
 
-		Assert(!shared_relation);
+		/*
+		 * YB_TODO(arpan):
+		 * Due to the below assertion pg_database relation creation fails. It is
+		 * not present in PG11/PG15. Why have we added it in YB?
+		 * Assert(!shared_relation);
+		 */
 		TypeCreate(new_array_oid,	/* force the type's OID to this */
 				   relarrayname,	/* Array type name */
 				   relnamespace,	/* Same namespace as parent */
diff --git a/src/postgres/src/backend/catalog/index.c b/src/postgres/src/backend/catalog/index.c
index 65393406a79c..27bceea85e39 100644
--- a/src/postgres/src/backend/catalog/index.c
+++ b/src/postgres/src/backend/catalog/index.c
@@ -3340,7 +3340,6 @@ IndexBackfillHeapRangeScan(Relation table_rel,
 						   YbPgExecOutParam *bfresult)
 {
 	/* YB_TODO(neil@yugabyte)
-	 * - Need to pass bfinfo and bfresult to table.h
 	 * - Check for the value of the new flag "progress".
 	 */
 	Assert(0);
@@ -3354,7 +3353,9 @@ IndexBackfillHeapRangeScan(Relation table_rel,
 										  InvalidBlockNumber /* num_blocks */,
 										  callback,
 										  callback_state,
-										  NULL /* scan */);
+										  NULL, /* scan */
+										  bfinfo,
+										  bfresult);
 }
 
 /*
diff --git a/src/postgres/src/backend/catalog/indexing.c b/src/postgres/src/backend/catalog/indexing.c
index 5064915228c0..6a1f11c9e5dc 100644
--- a/src/postgres/src/backend/catalog/indexing.c
+++ b/src/postgres/src/backend/catalog/indexing.c
@@ -370,7 +370,7 @@ YBCatalogTupleInsert(Relation heapRel, HeapTuple tup, bool yb_shared_insert)
 
 	YB_FOR_EACH_DB(pg_db_tuple)
 	{
-		Oid		dboid = YbHeapTupleGetOid(pg_db_tuple);	/* TODO(Alex) */
+		Oid		dboid = ((Form_pg_database) GETSTRUCT(pg_db_tuple))->oid;
 		/*
 		 * Since this is a catalog table, we assume it exists in all databases.
 		 * YB doesn't use PG locks so it's okay not to take them.
@@ -421,8 +421,6 @@ CatalogTupleInsertWithInfo(Relation heapRel, HeapTuple tup,
 {
 	CatalogTupleCheckConstraints(heapRel, tup);
 
-	CatalogTupleCheckConstraints(heapRel, tup);
-
 	if (IsYugaByteEnabled())
 	{
 		/* Keep ybctid consistent across all databases. */
@@ -435,7 +433,7 @@ CatalogTupleInsertWithInfo(Relation heapRel, HeapTuple tup,
 
 		YB_FOR_EACH_DB(pg_db_tuple)
 		{
-			Oid		dboid = YbHeapTupleGetOid(pg_db_tuple);	/* TODO(Alex) */
+			Oid		dboid = ((Form_pg_database) GETSTRUCT(pg_db_tuple))->oid;
 			/*
 			 * Since this is a catalog table, we assume it exists in all databases.
 			 * YB doesn't use PG locks so it's okay not to take them.
@@ -484,6 +482,28 @@ CatalogTuplesMultiInsertWithInfo(Relation heapRel, TupleTableSlot **slot,
 	if (ntuples <= 0)
 		return;
 
+	if (IsYugaByteEnabled())
+	{
+		/*
+		 * YB_TODO(arpan): Is there a multi tuple equivalent of
+		 * YBCExecuteInsertForDb?
+		 */
+		for (int i = 0; i < ntuples; i++)
+		{
+			bool		should_free;
+			HeapTuple	tuple;
+
+			tuple = ExecFetchSlotHeapTuple(slot[i], true, &should_free);
+			tuple->t_tableOid = slot[i]->tts_tableOid;
+			CatalogTupleInsertWithInfo(heapRel, tuple, indstate,
+									   yb_shared_insert);
+
+			if (should_free)
+				heap_freetuple(tuple);
+		}
+		return;
+	}
+
 	heap_multi_insert(heapRel, slot, ntuples, GetCurrentCommandId(true),
 					  0, NULL);
diff --git a/src/postgres/src/backend/commands/tablegroup.c b/src/postgres/src/backend/commands/tablegroup.c
index 50ace6f4782f..94e2b3ea0c40 100644
--- a/src/postgres/src/backend/commands/tablegroup.c
+++ b/src/postgres/src/backend/commands/tablegroup.c
@@ -294,7 +294,7 @@ get_tablegroup_oid(const char *tablegroupname, bool missing_ok)
 
 	/* We assume that there can be at most one matching tuple */
 	if (HeapTupleIsValid(tuple))
-		result = YbHeapTupleGetOid(tuple);
+		result = ((Form_pg_yb_tablegroup) GETSTRUCT(tuple))->oid;
 	else
 		result = InvalidOid;
 
@@ -372,7 +372,7 @@ RemoveTablegroupById(Oid grp_oid)
 	 * Find the tablegroup to delete.
 	 */
 	ScanKeyInit(&skey[0],
-				ObjectIdAttributeNumber,
+				Anum_pg_yb_tablegroup_oid,
 				BTEqualStrategyNumber, F_OIDEQ,
 				ObjectIdGetDatum(grp_oid));
 	scandesc = table_beginscan_catalog(pg_tblgrp_rel, 1, skey);
@@ -455,7 +455,7 @@ RenameTablegroup(const char *oldname, const char *newname)
 						oldname)));
 	}
 
-	tablegroupoid = YbHeapTupleGetOid(tuple);
+	tablegroupoid = ((Form_pg_yb_tablegroup) GETSTRUCT(tuple))->oid;
 
 	/* must be owner or superuser */
 	if (!superuser() && !pg_tablegroup_ownercheck(tablegroupoid, GetUserId()))
@@ -523,7 +523,7 @@ AlterTablegroupOwner(const char *grpname, Oid newOwnerId)
 				(errcode(ERRCODE_UNDEFINED_OBJECT),
 				 errmsg("tablegroup \"%s\" does not exist", grpname)));
 
-	tablegroupoid = YbHeapTupleGetOid(tuple);
+	tablegroupoid = ((Form_pg_yb_tablegroup) GETSTRUCT(tuple))->oid;
 	datForm = (Form_pg_yb_tablegroup) GETSTRUCT(tuple);
 
 	/*
diff --git a/src/postgres/src/backend/commands/yb_profile.c b/src/postgres/src/backend/commands/yb_profile.c
index 2a30d1cb0bdd..a1446871f14e 100644
--- a/src/postgres/src/backend/commands/yb_profile.c
+++ b/src/postgres/src/backend/commands/yb_profile.c
@@ -182,7 +182,7 @@ yb_get_profile_oid(const char *prfname, bool missing_ok)
 
 	/* We assume that there can be at most one matching tuple */
 	if (HeapTupleIsValid(tuple))
-		result = YbHeapTupleGetOid(tuple);
+		result = ((Form_pg_yb_profile) GETSTRUCT(tuple))->oid;
 	else
 		result = InvalidOid;
 
@@ -212,7 +212,7 @@ yb_get_profile_tuple(Oid prfid)
 	 */
 	rel = table_open(YbProfileRelationId, AccessShareLock);
 
-	ScanKeyInit(&entry[0], ObjectIdAttributeNumber,
+	ScanKeyInit(&entry[0], Anum_pg_yb_profile_oid,
 				BTEqualStrategyNumber, F_OIDEQ, prfid);
 	scandesc = table_beginscan_catalog(rel, 1, entry);
 	tuple = heap_getnext(scandesc, ForwardScanDirection);
@@ -308,7 +308,7 @@ YbDropProfile(YbDropProfileStmt *stmt)
 		return;
 	}
 
-	prfid = YbHeapTupleGetOid(tuple);
+	prfid = ((Form_pg_yb_profile) GETSTRUCT(tuple))->oid;
 
 	/*
 	 * TODO(profile): disallow drop of the default profile once we introduce a
@@ -456,7 +456,7 @@ yb_get_role_profile_tuple_by_oid(Oid rolprfoid)
 	 */
 	rel = table_open(YbRoleProfileRelationId, AccessShareLock);
 
-	ScanKeyInit(&entry[0], ObjectIdAttributeNumber,
+	ScanKeyInit(&entry[0], Anum_pg_yb_role_profile_oid,
 				BTEqualStrategyNumber, F_OIDEQ, rolprfoid);
 	scandesc = table_beginscan_catalog(rel, 1, entry);
 	tuple = heap_getnext(scandesc, ForwardScanDirection);
@@ -501,7 +501,7 @@ yb_update_role_profile(Oid roleid, const char *rolename, Datum *new_record,
 	/* We assume that there can be at most one matching tuple */
 	if (HeapTupleIsValid(tuple))
 	{
-		roleprfid = YbHeapTupleGetOid(tuple);
+		roleprfid = ((Form_pg_yb_role_profile) GETSTRUCT(tuple))->oid;
 		new_tuple = heap_modify_tuple(tuple, pg_yb_role_profile_dsc, new_record,
 									  new_record_nulls, new_record_repl);
 		CatalogTupleUpdate(pg_yb_role_profile_rel, &tuple->t_self, new_tuple);
diff --git a/src/postgres/src/backend/commands/ybccmds.c b/src/postgres/src/backend/commands/ybccmds.c
index 0cece02aa1dc..25c0dd4f1835 100644
--- a/src/postgres/src/backend/commands/ybccmds.c
+++ b/src/postgres/src/backend/commands/ybccmds.c
@@ -1064,11 +1064,8 @@ YBCPrepareAlterTableCmd(AlterTableCmd* cmd, Relation rel, List *handles,
 		}
 	}
 
-	/* YB_TODO(neil@yugabyte)
-	 * Read typeOid from "values" and "nulls" instead of tuple header.
-	 */
 	typeTuple = typenameType(NULL, colDef->typeName, &typmod);
-	typeOid = YbHeapTupleGetOid(typeTuple);
+	typeOid = ((Form_pg_type) GETSTRUCT(typeTuple))->oid;
 	ReleaseSysCache(typeTuple);
 
 	order = RelationGetNumberOfAttributes(rel) + *col;
diff --git a/src/postgres/src/backend/executor/ybcModifyTable.c b/src/postgres/src/backend/executor/ybcModifyTable.c
index d0843b79da1d..4c9932e235e9 100644
--- a/src/postgres/src/backend/executor/ybcModifyTable.c
+++ b/src/postgres/src/backend/executor/ybcModifyTable.c
@@ -323,9 +323,7 @@ static Oid YBCExecuteInsertInternal(Oid dboid,
 		YBCPgAddIntoForeignKeyReferenceCache(relid, HEAPTUPLE_YBCTID(tuple));
 
 	bms_free(pkey);
-#ifdef NEIL_NEED_WORK
-	/* Read typeOid from "values" and "nulls" instead of tuple header */
-#endif
+	/* YB_TODO(arpan): return value is unused, return void instead. */
 	return YbHeapTupleGetOid(tuple);
 }
 
diff --git a/src/postgres/src/backend/utils/cache/relcache.c b/src/postgres/src/backend/utils/cache/relcache.c
index 5d48747d3c39..c329c73eebae 100644
--- a/src/postgres/src/backend/utils/cache/relcache.c
+++ b/src/postgres/src/backend/utils/cache/relcache.c
@@ -927,10 +927,7 @@ YBRelationBuildRuleLock(Relation relation)
 		rule = (RewriteRule *) MemoryContextAlloc(rulescxt,
 												  sizeof(RewriteRule));
 
-#ifdef NEIL_NEED_WORK
-		/* Read typeOid from "values" and "nulls" instead of tuple header */
-#endif
-		rule->ruleId = YbHeapTupleGetOid(rewrite_tuple);
+		rule->ruleId = rewrite_form->oid;
 
 		rule->event = rewrite_form->ev_type - '0';
 		rule->enabled = rewrite_form->ev_enabled;
@@ -1368,12 +1365,10 @@ YBLoadRelations()
 	HeapTuple	pg_class_tuple;
 	while (HeapTupleIsValid(pg_class_tuple = systable_getnext(scandesc)))
 	{
-		/* YB_TODO(neil@yugabyte)
-		 * Read Oid from "values" and "nulls" instead of tuple header.
-		 */
-		Oid			relid = YbHeapTupleGetOid(pg_class_tuple);
+		/* get information from the pg_class_tuple */
+		Form_pg_class relp = (Form_pg_class) GETSTRUCT(pg_class_tuple);
+		Oid			relid = relp->oid;
 
-		/* YB_TODO(neil@yugabyte) Read Oid from "values" and "nulls" instead of tuple header */
 		/*
 		 * Insert newly created relation into relcache hash table if needed:
 		 * a. If it's not already there (e.g. new table or initialization).
@@ -1392,9 +1387,6 @@ YBLoadRelations()
 			continue;
 		}
 
-		/* get information from the pg_class_tuple */
-		Form_pg_class relp = (Form_pg_class) GETSTRUCT(pg_class_tuple);
-
 		/*
 		 * allocate storage for the relation descriptor, and copy pg_class_tuple
 		 * to relation->rd_rel.
@@ -1454,8 +1446,8 @@ YBLoadRelations()
 		relation->rd_partdesc = NULL;
 		relation->rd_pdcxt = NULL;
 
-		/* if it's an index, initialize index-related information */
-		if (OidIsValid(relation->rd_rel->relam))
+		if (relation->rd_rel->relkind == RELKIND_INDEX ||
+			relation->rd_rel->relkind == RELKIND_PARTITIONED_INDEX)
 			RelationInitIndexAccessInfo(relation);
 
 		/* extract reloptions if any */
diff --git a/src/postgres/src/backend/utils/cache/syscache.c b/src/postgres/src/backend/utils/cache/syscache.c
index 445e8f855497..2968e44cdce6 100644
--- a/src/postgres/src/backend/utils/cache/syscache.c
+++ b/src/postgres/src/backend/utils/cache/syscache.c
@@ -888,23 +888,6 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		64
 	},
-#ifdef YB_TODO
-	/* YB_TODO(alex@yugabyte)
-	 * - Do we need an OID column?
-	 * - Shouldn't we need to define system table for the tablegroup here?
-	 */
-	{YbTablegroupRelationId,	/* TABLEGROUPOID */
-		YbTablegroupOidIndexId,
-		1,
-		{
-			Anum_pg_yb_tablegroup_oid,
-			0,
-			0,
-			0,
-		},
-		4
-	},
-#endif
 	{TableSpaceRelationId,		/* TABLESPACEOID */
 		TablespaceOidIndexId,
 		1,
@@ -1085,7 +1068,7 @@ static const struct cachedesc cacheinfo[] = {
 		YbTablegroupOidIndexId,
 		1,
 		{
-			ObjectIdAttributeNumber,
+			Anum_pg_yb_tablegroup_oid,
 			0,
 			0,
 			0,
diff --git a/src/postgres/src/backend/utils/misc/pg_yb_utils.c b/src/postgres/src/backend/utils/misc/pg_yb_utils.c
index 5e4a4c334550..e038f3f62e79 100644
--- a/src/postgres/src/backend/utils/misc/pg_yb_utils.c
+++ b/src/postgres/src/backend/utils/misc/pg_yb_utils.c
@@ -2946,14 +2946,6 @@ void YBGetCollationInfo(
 	}
 	switch (type_entity->type_oid) {
 		case NAMEOID:
-			/*
-			 * In bootstrap code, postgres 11.2 hard coded to InvalidOid but
-			 * postgres 13.2 hard coded to C_COLLATION_OID. Adjust the assertion
-			 * when we upgrade to postgres 13.2.
- */ - Assert(collation_id == InvalidOid); - collation_id = C_COLLATION_OID; - break; case TEXTOID: case BPCHAROID: case VARCHAROID: diff --git a/src/postgres/src/include/access/tableam.h b/src/postgres/src/include/access/tableam.h index fe869c6c1841..2c316e9ab2d6 100644 --- a/src/postgres/src/include/access/tableam.h +++ b/src/postgres/src/include/access/tableam.h @@ -677,7 +677,9 @@ typedef struct TableAmRoutine BlockNumber numblocks, IndexBuildCallback callback, void *callback_state, - TableScanDesc scan); + TableScanDesc scan, + YbBackfillInfo *bfinfo, + YbPgExecOutParam *bfresult); /* see table_index_validate_scan for reference about parameters */ void (*index_validate_scan) (Relation table_rel, @@ -1763,7 +1765,9 @@ table_index_build_scan(Relation table_rel, InvalidBlockNumber, callback, callback_state, - scan); + scan, + NULL, /* bfinfo */ + NULL /* bfresult */); } /* @@ -1787,7 +1791,9 @@ table_index_build_range_scan(Relation table_rel, BlockNumber numblocks, IndexBuildCallback callback, void *callback_state, - TableScanDesc scan) + TableScanDesc scan, + YbBackfillInfo *bfinfo, + YbPgExecOutParam *bfresult) { return table_rel->rd_tableam->index_build_range_scan(table_rel, index_rel, @@ -1799,7 +1805,9 @@ table_index_build_range_scan(Relation table_rel, numblocks, callback, callback_state, - scan); + scan, + bfinfo, + bfresult); } /* diff --git a/src/yb/common/pg_system_attr.h b/src/yb/common/pg_system_attr.h index d26c50adc8cc..04b0b4cc8a0d 100644 --- a/src/yb/common/pg_system_attr.h +++ b/src/yb/common/pg_system_attr.h @@ -20,15 +20,14 @@ namespace yb { enum class PgSystemAttrNum : int { // Postgres system columns. kSelfItemPointer = -1, // ctid. - kObjectId = -2, // oid. - kMinTransactionId = -3, // xmin - kMinCommandId = -4, // cmin - kMaxTransactionId = -5, // xmax - kMaxCommandId = -6, // cmax - kTableOid = -7, // tableoid + kMinTransactionId = -2, // xmin + kMinCommandId = -3, // cmin + kMaxTransactionId = -4, // xmax + kMaxCommandId = -5, // cmax + kTableOid = -6, // tableoid // YugaByte system columns. - kYBTupleId = -8, // ybctid: virtual column representing DocDB-encoded key. + kYBTupleId = -7, // ybctid: virtual column representing DocDB-encoded key. // YB analogue of Postgres's SelfItemPointer/ctid column. // The following attribute numbers are stored persistently in the table schema. For this reason, @@ -40,6 +39,8 @@ enum class PgSystemAttrNum : int { // (where null == null). For each index row will be set to: // - the base table ctid when one or more indexed cols are null // - to null otherwise (all indexed cols are non-null). + kObjectId = -103, // YB_TODO(arpan): remove it, added it temporarily for consistency + // with sysattr.h. }; } // namespace yb