diff --git a/migtests/tests/oracle/assessment-report-test/expectedAssessmentReport.json b/migtests/tests/oracle/assessment-report-test/expectedAssessmentReport.json index a238ee8513..5d857da7d9 100644 --- a/migtests/tests/oracle/assessment-report-test/expectedAssessmentReport.json +++ b/migtests/tests/oracle/assessment-report-test/expectedAssessmentReport.json @@ -140,8 +140,7 @@ "MemoryPerInstance": 16, "OptimalSelectConnectionsPerNode": 8, "OptimalInsertConnectionsPerNode": 12, - "EstimatedTimeInMinForImport": 1, - "ParallelVoyagerJobs": 1 + "EstimatedTimeInMinForImport": 1 }, "FailureReasoning": "" }, diff --git a/migtests/tests/oracle/bulk-assessment-test/expected_reports/expectedChild1AssessmentReport.json b/migtests/tests/oracle/bulk-assessment-test/expected_reports/expectedChild1AssessmentReport.json index 91100541e7..6ac557ab8c 100644 --- a/migtests/tests/oracle/bulk-assessment-test/expected_reports/expectedChild1AssessmentReport.json +++ b/migtests/tests/oracle/bulk-assessment-test/expected_reports/expectedChild1AssessmentReport.json @@ -69,8 +69,7 @@ "MemoryPerInstance": 16, "OptimalSelectConnectionsPerNode": 8, "OptimalInsertConnectionsPerNode": 12, - "EstimatedTimeInMinForImport": 0, - "ParallelVoyagerJobs": 1 + "EstimatedTimeInMinForImport": 0 }, "FailureReasoning": "" }, diff --git a/migtests/tests/oracle/bulk-assessment-test/expected_reports/expectedChild2AssessmentReport.json b/migtests/tests/oracle/bulk-assessment-test/expected_reports/expectedChild2AssessmentReport.json index d67d01860e..6e4f094fa1 100644 --- a/migtests/tests/oracle/bulk-assessment-test/expected_reports/expectedChild2AssessmentReport.json +++ b/migtests/tests/oracle/bulk-assessment-test/expected_reports/expectedChild2AssessmentReport.json @@ -99,8 +99,7 @@ "MemoryPerInstance": 16, "OptimalSelectConnectionsPerNode": 8, "OptimalInsertConnectionsPerNode": 12, - "EstimatedTimeInMinForImport": 1, - "ParallelVoyagerJobs": 1 + "EstimatedTimeInMinForImport": 1 }, "FailureReasoning": "" }, 
diff --git a/migtests/tests/pg/adventureworks/expected_files/expectedAssessmentReport.json b/migtests/tests/pg/adventureworks/expected_files/expectedAssessmentReport.json index 12884e9d65..3c39646247 100755 --- a/migtests/tests/pg/adventureworks/expected_files/expectedAssessmentReport.json +++ b/migtests/tests/pg/adventureworks/expected_files/expectedAssessmentReport.json @@ -150,8 +150,7 @@ "MemoryPerInstance": 16, "OptimalSelectConnectionsPerNode": 8, "OptimalInsertConnectionsPerNode": 12, - "EstimatedTimeInMinForImport": 0, - "ParallelVoyagerJobs": 1 + "EstimatedTimeInMinForImport": 0 }, "FailureReasoning": "" }, diff --git a/migtests/tests/pg/assessment-report-test/expectedAssessmentReport.json b/migtests/tests/pg/assessment-report-test/expectedAssessmentReport.json index e3aa333e28..35ef6b7ea2 100644 --- a/migtests/tests/pg/assessment-report-test/expectedAssessmentReport.json +++ b/migtests/tests/pg/assessment-report-test/expectedAssessmentReport.json @@ -184,8 +184,7 @@ "MemoryPerInstance": 16, "OptimalSelectConnectionsPerNode": 8, "OptimalInsertConnectionsPerNode": 12, - "EstimatedTimeInMinForImport": 1, - "ParallelVoyagerJobs": 1 + "EstimatedTimeInMinForImport": 1 }, "FailureReasoning": "" }, diff --git a/migtests/tests/pg/mgi/expected_files/expectedAssessmentReport.json b/migtests/tests/pg/mgi/expected_files/expectedAssessmentReport.json index e977afb880..1a6a0c74b8 100644 --- a/migtests/tests/pg/mgi/expected_files/expectedAssessmentReport.json +++ b/migtests/tests/pg/mgi/expected_files/expectedAssessmentReport.json @@ -241,7 +241,6 @@ "OptimalSelectConnectionsPerNode": 8, "OptimalInsertConnectionsPerNode": 12, - "EstimatedTimeInMinForImport": 0, - "ParallelVoyagerJobs": 1 + "EstimatedTimeInMinForImport": 0 }, "FailureReasoning": "" }, diff --git a/migtests/tests/pg/omnibus/expected_files/expectedAssessmentReport.json b/migtests/tests/pg/omnibus/expected_files/expectedAssessmentReport.json index a6acd0153c..71f179e0e1 100755 --- 
a/migtests/tests/pg/omnibus/expected_files/expectedAssessmentReport.json +++ b/migtests/tests/pg/omnibus/expected_files/expectedAssessmentReport.json @@ -485,8 +485,7 @@ "MemoryPerInstance": 16, "OptimalSelectConnectionsPerNode": 8, "OptimalInsertConnectionsPerNode": 12, - "EstimatedTimeInMinForImport": 1, - "ParallelVoyagerJobs": 1 + "EstimatedTimeInMinForImport": 1 }, "FailureReasoning": "" }, diff --git a/migtests/tests/pg/osm/expected_files/expectedAssessmentReport.json b/migtests/tests/pg/osm/expected_files/expectedAssessmentReport.json index 83933b92ad..131035d7a6 100755 --- a/migtests/tests/pg/osm/expected_files/expectedAssessmentReport.json +++ b/migtests/tests/pg/osm/expected_files/expectedAssessmentReport.json @@ -58,8 +58,7 @@ "MemoryPerInstance": 16, "OptimalSelectConnectionsPerNode": 8, "OptimalInsertConnectionsPerNode": 12, - "EstimatedTimeInMinForImport": 1, - "ParallelVoyagerJobs": 1 + "EstimatedTimeInMinForImport": 1 }, "FailureReasoning": "" }, diff --git a/migtests/tests/pg/pgtbrus/expected_files/expectedAssessmentReport.json b/migtests/tests/pg/pgtbrus/expected_files/expectedAssessmentReport.json index 53c130bfd1..461a5fe96d 100755 --- a/migtests/tests/pg/pgtbrus/expected_files/expectedAssessmentReport.json +++ b/migtests/tests/pg/pgtbrus/expected_files/expectedAssessmentReport.json @@ -72,8 +72,7 @@ "MemoryPerInstance": 16, "OptimalSelectConnectionsPerNode": 8, "OptimalInsertConnectionsPerNode": 12, - "EstimatedTimeInMinForImport": 1, - "ParallelVoyagerJobs": 1 + "EstimatedTimeInMinForImport": 1 }, "FailureReasoning": "" }, diff --git a/migtests/tests/pg/rna/expected_files/expectedAssessmentReport.json b/migtests/tests/pg/rna/expected_files/expectedAssessmentReport.json index 5b83660cca..77032e3050 100644 --- a/migtests/tests/pg/rna/expected_files/expectedAssessmentReport.json +++ b/migtests/tests/pg/rna/expected_files/expectedAssessmentReport.json @@ -369,8 +369,7 @@ "MemoryPerInstance": 32, "OptimalSelectConnectionsPerNode": 16, 
"OptimalInsertConnectionsPerNode": 24, - "EstimatedTimeInMinForImport": 0, - "ParallelVoyagerJobs": 3 + "EstimatedTimeInMinForImport": 0 }, "FailureReasoning": "" }, diff --git a/migtests/tests/pg/sakila/expected_files/expectedAssessmentReport.json b/migtests/tests/pg/sakila/expected_files/expectedAssessmentReport.json index cb4ae18aee..849de98eed 100755 --- a/migtests/tests/pg/sakila/expected_files/expectedAssessmentReport.json +++ b/migtests/tests/pg/sakila/expected_files/expectedAssessmentReport.json @@ -110,8 +110,7 @@ "MemoryPerInstance": 16, "OptimalSelectConnectionsPerNode": 8, "OptimalInsertConnectionsPerNode": 12, - "EstimatedTimeInMinForImport": 0, - "ParallelVoyagerJobs": 1 + "EstimatedTimeInMinForImport": 0 }, "FailureReasoning": "" }, diff --git a/migtests/tests/pg/sample-is/expected_files/expectedAssessmentReport.json b/migtests/tests/pg/sample-is/expected_files/expectedAssessmentReport.json index 48942b5a41..96d65788ba 100755 --- a/migtests/tests/pg/sample-is/expected_files/expectedAssessmentReport.json +++ b/migtests/tests/pg/sample-is/expected_files/expectedAssessmentReport.json @@ -68,8 +68,7 @@ "MemoryPerInstance": 16, "OptimalSelectConnectionsPerNode": 8, "OptimalInsertConnectionsPerNode": 12, - "EstimatedTimeInMinForImport": 0, - "ParallelVoyagerJobs": 1 + "EstimatedTimeInMinForImport": 0 }, "FailureReasoning": "" }, diff --git a/migtests/tests/pg/stackexchange/expected_files/expectedAssessmentReport.json b/migtests/tests/pg/stackexchange/expected_files/expectedAssessmentReport.json index c15829b277..9285da53f2 100644 --- a/migtests/tests/pg/stackexchange/expected_files/expectedAssessmentReport.json +++ b/migtests/tests/pg/stackexchange/expected_files/expectedAssessmentReport.json @@ -74,8 +74,7 @@ "MemoryPerInstance": 16, "OptimalSelectConnectionsPerNode": 8, "OptimalInsertConnectionsPerNode": 12, - "EstimatedTimeInMinForImport": 0, - "ParallelVoyagerJobs": 1 + "EstimatedTimeInMinForImport": 0 }, "FailureReasoning": "" }, diff --git 
a/yb-voyager/cmd/templates/migration_assessment_report.template b/yb-voyager/cmd/templates/migration_assessment_report.template index c172979362..3a4efcf474 100644 --- a/yb-voyager/cmd/templates/migration_assessment_report.template +++ b/yb-voyager/cmd/templates/migration_assessment_report.template @@ -154,7 +154,6 @@ Memory per instance(GiB){{ .MemoryPerInstance }} Optimal select connections per node{{ if eq .OptimalSelectConnectionsPerNode 0 }}--{{else}}{{.OptimalSelectConnectionsPerNode }}{{end}} Optimal insert connections per node{{ if eq .OptimalInsertConnectionsPerNode 0 }}--{{else}}{{.OptimalInsertConnectionsPerNode}}{{end}} - Parallel Voyager Jobs{{ .ParallelVoyagerJobs }} Estimated time taken for data import {{ .EstimatedTimeInMinForImport }} min

Reasoning:

diff --git a/yb-voyager/src/migassessment/common.go b/yb-voyager/src/migassessment/common.go index ca752bc916..1046e612a7 100644 --- a/yb-voyager/src/migassessment/common.go +++ b/yb-voyager/src/migassessment/common.go @@ -36,7 +36,6 @@ type SizingRecommendation struct { OptimalSelectConnectionsPerNode int64 OptimalInsertConnectionsPerNode int64 EstimatedTimeInMinForImport float64 - ParallelVoyagerJobs float64 } type SizingAssessmentReport struct { diff --git a/yb-voyager/src/migassessment/sizing.go b/yb-voyager/src/migassessment/sizing.go index 1d45a1167b..edaa54e7c7 100644 --- a/yb-voyager/src/migassessment/sizing.go +++ b/yb-voyager/src/migassessment/sizing.go @@ -73,7 +73,6 @@ type ExpDataThroughput struct { type ExpDataLoadTime struct { csvSizeGB sql.NullFloat64 `db:"csv_size_gb,string"` migrationTimeSecs sql.NullFloat64 `db:"migration_time_secs,string"` - parallelThreads sql.NullInt64 `db:"parallel_threads,string"` rowCount sql.NullFloat64 `db:"row_count,string"` } @@ -100,7 +99,6 @@ type IntermediateRecommendation struct { OptimalSelectConnectionsPerNode int64 OptimalInsertConnectionsPerNode int64 EstimatedTimeInMinForImport float64 - ParallelVoyagerJobs float64 FailureReasoning string } @@ -231,7 +229,7 @@ func SizingAssessment() error { } // calculate time taken for colocated import - importTimeForColocatedObjects, parallelVoyagerJobsColocated, err := calculateTimeTakenAndParallelJobsForImport( + importTimeForColocatedObjects, err := calculateTimeTakenForImport( finalSizingRecommendation.ColocatedTables, sourceIndexMetadata, colocatedLoadTimes, indexImpactOnLoadTimeCommon, columnsImpactOnLoadTimeCommon, COLOCATED) if err != nil { @@ -240,7 +238,7 @@ func SizingAssessment() error { } // calculate time taken for sharded import - importTimeForShardedObjects, parallelVoyagerJobsSharded, err := calculateTimeTakenAndParallelJobsForImport( + importTimeForShardedObjects, err := calculateTimeTakenForImport( finalSizingRecommendation.ShardedTables, 
sourceIndexMetadata, shardedLoadTimes, indexImpactOnLoadTimeCommon, columnsImpactOnLoadTimeCommon, SHARDED) if err != nil { @@ -258,7 +256,6 @@ func SizingAssessment() error { NumNodes: finalSizingRecommendation.NumNodes, OptimalSelectConnectionsPerNode: finalSizingRecommendation.OptimalSelectConnectionsPerNode, OptimalInsertConnectionsPerNode: finalSizingRecommendation.OptimalInsertConnectionsPerNode, - ParallelVoyagerJobs: math.Min(float64(parallelVoyagerJobsColocated), float64(parallelVoyagerJobsSharded)), ColocatedReasoning: reasoning, EstimatedTimeInMinForImport: importTimeForColocatedObjects + importTimeForShardedObjects, } @@ -368,7 +365,6 @@ func findNumNodesNeededBasedOnThroughputRequirement(sourceIndexMetadata []Source NumNodes: nodesNeeded, OptimalSelectConnectionsPerNode: int64(math.Min(float64(previousRecommendation.OptimalSelectConnectionsPerNode), float64(shardedThroughput.selectConnPerNode.Int64))), OptimalInsertConnectionsPerNode: int64(math.Min(float64(previousRecommendation.OptimalInsertConnectionsPerNode), float64(shardedThroughput.insertConnPerNode.Int64))), - ParallelVoyagerJobs: previousRecommendation.ParallelVoyagerJobs, ColocatedSize: previousRecommendation.ColocatedSize, ShardedSize: previousRecommendation.ShardedSize, EstimatedTimeInMinForImport: previousRecommendation.EstimatedTimeInMinForImport, @@ -539,7 +535,6 @@ func checkShardedTableLimit(sourceIndexMetadata []SourceDBMetadata, shardedLimit NumNodes: previousRecommendation.NumNodes, OptimalSelectConnectionsPerNode: previousRecommendation.OptimalSelectConnectionsPerNode, OptimalInsertConnectionsPerNode: previousRecommendation.OptimalInsertConnectionsPerNode, - ParallelVoyagerJobs: previousRecommendation.ParallelVoyagerJobs, ColocatedSize: 0, ShardedSize: 0, EstimatedTimeInMinForImport: previousRecommendation.EstimatedTimeInMinForImport, @@ -622,7 +617,6 @@ func shardingBasedOnOperations(sourceIndexMetadata []SourceDBMetadata, NumNodes: previousRecommendation.NumNodes, 
OptimalSelectConnectionsPerNode: colocatedThroughput.selectConnPerNode.Int64, OptimalInsertConnectionsPerNode: colocatedThroughput.insertConnPerNode.Int64, - ParallelVoyagerJobs: previousRecommendation.ParallelVoyagerJobs, ColocatedSize: cumulativeColocatedSizeSum, ShardedSize: cumulativeSizeSharded, EstimatedTimeInMinForImport: previousRecommendation.EstimatedTimeInMinForImport, @@ -702,7 +696,6 @@ func shardingBasedOnTableSizeAndCount(sourceTableMetadata []SourceDBMetadata, NumNodes: previousRecommendation.NumNodes, OptimalSelectConnectionsPerNode: previousRecommendation.OptimalSelectConnectionsPerNode, OptimalInsertConnectionsPerNode: previousRecommendation.OptimalInsertConnectionsPerNode, - ParallelVoyagerJobs: previousRecommendation.ParallelVoyagerJobs, ColocatedSize: cumulativeColocatedSizeSum, ShardedSize: cumulativeSizeSharded, EstimatedTimeInMinForImport: previousRecommendation.EstimatedTimeInMinForImport, @@ -881,7 +874,7 @@ func createSizingRecommendationStructure(colocatedLimits []ExpDataColocatedLimit } /* -calculateTimeTakenAndParallelJobsForImport estimates the time taken for import of tables. +calculateTimeTakenForImport estimates the time taken for import of tables. It queries experimental data to find import time estimates for similar object sizes and configurations. For every table , it tries to find out how much time it would table for importing that table. The function adjusts the import time on that table by multiplying it by factor based on the indexes. The import time is also converted to @@ -897,13 +890,12 @@ Parameters: Returns: float64: The estimated time taken for import in minutes. - int64: Total parallel jobs used for import. 
error: Error if any */ -func calculateTimeTakenAndParallelJobsForImport(tables []SourceDBMetadata, +func calculateTimeTakenForImport(tables []SourceDBMetadata, sourceIndexMetadata []SourceDBMetadata, loadTimes []ExpDataLoadTime, indexImpactData []ExpDataLoadTimeIndexImpact, numColumnImpactData []ExpDataLoadTimeColumnsImpact, - objectType string) (float64, int64, error) { + objectType string) (float64, error) { var importTime float64 // we need to calculate the time taken for import for every table. @@ -926,7 +918,7 @@ func calculateTimeTakenAndParallelJobsForImport(tables []SourceDBMetadata, importTime += (loadTimeMultiplicationFactorWrtIndexes * loadTimeMultiplicationFactorWrtNumColumns * tableImportTimeSec) / 60 } - return math.Ceil(importTime), loadTimes[0].parallelThreads.Int64, nil + return math.Ceil(importTime), nil } /* @@ -945,8 +937,7 @@ Returns: func getExpDataLoadTime(experimentDB *sql.DB, vCPUPerInstance int, memPerCore int, tableType string) ([]ExpDataLoadTime, error) { selectQuery := fmt.Sprintf(` SELECT csv_size_gb, - migration_time_secs, - parallel_threads, + migration_time_secs, row_count FROM %v WHERE num_cores = ? 
@@ -967,7 +958,7 @@ func getExpDataLoadTime(experimentDB *sql.DB, vCPUPerInstance int, memPerCore in var loadTimes []ExpDataLoadTime for rows.Next() { var loadTime ExpDataLoadTime - if err = rows.Scan(&loadTime.csvSizeGB, &loadTime.migrationTimeSecs, &loadTime.parallelThreads, &loadTime.rowCount); err != nil { + if err = rows.Scan(&loadTime.csvSizeGB, &loadTime.migrationTimeSecs, &loadTime.rowCount); err != nil { return nil, fmt.Errorf("cannot fetch data from experiment data table with query [%s]: %w", selectQuery, err) } diff --git a/yb-voyager/src/migassessment/sizing_test.go b/yb-voyager/src/migassessment/sizing_test.go index bdf28050fc..6852b0857f 100644 --- a/yb-voyager/src/migassessment/sizing_test.go +++ b/yb-voyager/src/migassessment/sizing_test.go @@ -896,10 +896,10 @@ func TestPickBestRecommendation_PickLastMaxCoreRecommendationWhenNoneCanSupport( } /* -===== Test functions to test calculateTimeTakenAndParallelJobsForImport function ===== +===== Test functions to test calculateTimeTakenForImport function ===== */ // validate the formula to calculate the import time for colocated table without index -func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWithoutIndex_Colocated(t *testing.T) { +func TestCalculateTimeTakenForImport_ValidateImportTimeTableWithoutIndex_Colocated(t *testing.T) { // Define test data colocatedTables := []SourceDBMetadata{ { @@ -913,11 +913,11 @@ func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWitho colocatedLoadTimes := []ExpDataLoadTime{ { csvSizeGB: sql.NullFloat64{Float64: 19}, migrationTimeSecs: sql.NullFloat64{Float64: 1134}, - parallelThreads: sql.NullInt64{Int64: 1}, rowCount: sql.NullFloat64{Float64: 100000}, + rowCount: sql.NullFloat64{Float64: 100000}, }, { csvSizeGB: sql.NullFloat64{Float64: 29}, migrationTimeSecs: sql.NullFloat64{Float64: 1657}, - parallelThreads: sql.NullInt64{Int64: 1}, rowCount: sql.NullFloat64{Float64: 100000}, + rowCount: sql.NullFloat64{Float64: 
100000}, }, } var indexImpacts []ExpDataLoadTimeIndexImpact @@ -930,7 +930,7 @@ func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWitho } // Call the function - estimatedTime, parallelJobs, err := calculateTimeTakenAndParallelJobsForImport(colocatedTables, + estimatedTime, err := calculateTimeTakenForImport(colocatedTables, sourceIndexMetadata, colocatedLoadTimes, indexImpacts, columnImpacts, COLOCATED) if err != nil { t.Errorf("Unexpected error: %v", err) @@ -939,16 +939,14 @@ func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWitho // Define expected results // Calculated as table0: 1 * ((1134 * 23) / 19) / 60 expectedTime := 23.0 - expectedJobs := int64(1) - if estimatedTime != expectedTime || parallelJobs != expectedJobs { - t.Errorf("calculateTimeTakenAndParallelJobsForImport() = (%v, %v), want (%v, %v)", - estimatedTime, parallelJobs, expectedTime, expectedJobs) + if estimatedTime != expectedTime { + t.Errorf("calculateTimeTakenForImport() = (%v), want (%v)", estimatedTime, expectedTime) } } // validate the formula to calculate the import time for colocated table with one index -func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWithOneIndex_Colocated(t *testing.T) { +func TestCalculateTimeTakenForImport_ValidateImportTimeTableWithOneIndex_Colocated(t *testing.T) { // Define test data colocatedTables := []SourceDBMetadata{ { @@ -963,11 +961,11 @@ func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWithO colocatedLoadTimes := []ExpDataLoadTime{ { csvSizeGB: sql.NullFloat64{Float64: 19}, migrationTimeSecs: sql.NullFloat64{Float64: 1461}, - parallelThreads: sql.NullInt64{Int64: 1}, rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, + rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, }, { csvSizeGB: sql.NullFloat64{Float64: 29}, migrationTimeSecs: sql.NullFloat64{Float64: 2009}, - parallelThreads: sql.NullInt64{Int64: 1}, rowCount: sql.NullFloat64{Float64: 
100000, Valid: true}, + rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, }, } @@ -986,7 +984,7 @@ func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWithO } // Call the function - estimatedTime, parallelJobs, err := calculateTimeTakenAndParallelJobsForImport(colocatedTables, + estimatedTime, err := calculateTimeTakenForImport(colocatedTables, sourceIndexMetadata, colocatedLoadTimes, indexImpacts, columnImpacts, COLOCATED) if err != nil { t.Errorf("Unexpected error: %v", err) @@ -995,16 +993,14 @@ func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWithO // Define expected results // Calculated as table0: 1.77777 * ((1461 * 23) / 19) / 60 expectedTime := 53.0 // double the time required when there are no indexes. - expectedJobs := int64(1) - if estimatedTime != expectedTime || parallelJobs != expectedJobs { - t.Errorf("calculateTimeTakenAndParallelJobsForImport() = (%v, %v), want (%v, %v)", - estimatedTime, parallelJobs, expectedTime, expectedJobs) + if estimatedTime != expectedTime { + t.Errorf("calculateTimeTakenForImport() = (%v), want (%v)", estimatedTime, expectedTime) } } // validate the formula to calculate the import time for colocated table with 5 indexes -func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWithFiveIndexes_Colocated(t *testing.T) { +func TestCalculateTimeTakenForImport_ValidateImportTimeTableWithFiveIndexes_Colocated(t *testing.T) { // Define test data colocatedTables := []SourceDBMetadata{ { @@ -1022,11 +1018,11 @@ func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWithF colocatedLoadTimes := []ExpDataLoadTime{ { csvSizeGB: sql.NullFloat64{Float64: 19}, migrationTimeSecs: sql.NullFloat64{Float64: 1461}, - parallelThreads: sql.NullInt64{Int64: 1}, rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, + rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, }, { csvSizeGB: sql.NullFloat64{Float64: 29}, migrationTimeSecs: 
sql.NullFloat64{Float64: 2009}, - parallelThreads: sql.NullInt64{Int64: 1}, rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, + rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, }, } //TODO: modify index impact with actual colocated data when it is available and adjust the calculations @@ -1042,7 +1038,7 @@ func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWithF }, } // Call the function - estimatedTime, parallelJobs, err := calculateTimeTakenAndParallelJobsForImport(colocatedTables, + estimatedTime, err := calculateTimeTakenForImport(colocatedTables, sourceIndexMetadata, colocatedLoadTimes, indexImpacts, columnsImpact, COLOCATED) if err != nil { t.Errorf("Unexpected error: %v", err) @@ -1051,16 +1047,14 @@ func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWithF // Define expected results // Calculated as table0: 4.66666 * ((1461 * 23) / 19) / 60 expectedTime := 138.0 - expectedJobs := int64(1) - if estimatedTime != expectedTime || parallelJobs != expectedJobs { - t.Errorf("calculateTimeTakenAndParallelJobsForImport() = (%v, %v), want (%v, %v)", - estimatedTime, parallelJobs, expectedTime, expectedJobs) + if estimatedTime != expectedTime { + t.Errorf("calculateTimeTakenForImport() = (%v), want (%v)", estimatedTime, expectedTime) } } // validate the formula to calculate the import time for sharded table without index -func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWithoutIndex_Sharded(t *testing.T) { +func TestCalculateTimeTakenForImport_ValidateImportTimeTableWithoutIndex_Sharded(t *testing.T) { // Define test data shardedTables := []SourceDBMetadata{ { @@ -1072,11 +1066,11 @@ func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWitho shardedLoadTimes := []ExpDataLoadTime{ { csvSizeGB: sql.NullFloat64{Float64: 19}, migrationTimeSecs: sql.NullFloat64{Float64: 1134}, - parallelThreads: sql.NullInt64{Int64: 1}, rowCount: sql.NullFloat64{Float64: 100000, 
Valid: true}, + rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, }, { csvSizeGB: sql.NullFloat64{Float64: 29}, migrationTimeSecs: sql.NullFloat64{Float64: 1657}, - parallelThreads: sql.NullInt64{Int64: 1}, rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, + rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, }, } var indexImpacts []ExpDataLoadTimeIndexImpact @@ -1088,7 +1082,7 @@ func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWitho }, } // Call the function - estimatedTime, parallelJobs, err := calculateTimeTakenAndParallelJobsForImport(shardedTables, sourceIndexMetadata, + estimatedTime, err := calculateTimeTakenForImport(shardedTables, sourceIndexMetadata, shardedLoadTimes, indexImpacts, columnsImpact, SHARDED) if err != nil { t.Errorf("Unexpected error: %v", err) @@ -1097,16 +1091,14 @@ func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWitho // Define expected results // Calculated as table0: 1 * ((1134 * 23) / 19) / 60 expectedTime := 23.0 - expectedJobs := int64(1) - if estimatedTime != expectedTime || parallelJobs != expectedJobs { - t.Errorf("calculateTimeTakenAndParallelJobsForImport() = (%v, %v), want (%v, %v)", - estimatedTime, parallelJobs, expectedTime, expectedJobs) + if estimatedTime != expectedTime { + t.Errorf("calculateTimeTakenForImport() = (%v), want (%v)", estimatedTime, expectedTime) } } // validate the formula to calculate the import time for sharded table with one index -func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWithOneIndex_Sharded(t *testing.T) { +func TestCalculateTimeTakenForImport_ValidateImportTimeTableWithOneIndex_Sharded(t *testing.T) { // Define test data shardedTables := []SourceDBMetadata{ { @@ -1120,11 +1112,11 @@ func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWithO shardedLoadTimes := []ExpDataLoadTime{ { csvSizeGB: sql.NullFloat64{Float64: 19}, migrationTimeSecs: sql.NullFloat64{Float64: 1134}, - 
parallelThreads: sql.NullInt64{Int64: 1}, rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, + rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, }, { csvSizeGB: sql.NullFloat64{Float64: 29}, migrationTimeSecs: sql.NullFloat64{Float64: 1657}, - parallelThreads: sql.NullInt64{Int64: 1}, rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, + rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, }, } indexImpacts := []ExpDataLoadTimeIndexImpact{ @@ -1138,8 +1130,8 @@ func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWithO }, } // Call the function - estimatedTime, parallelJobs, err := - calculateTimeTakenAndParallelJobsForImport(shardedTables, sourceIndexMetadata, shardedLoadTimes, + estimatedTime, err := + calculateTimeTakenForImport(shardedTables, sourceIndexMetadata, shardedLoadTimes, indexImpacts, columnsImpact, SHARDED) if err != nil { t.Errorf("Unexpected error: %v", err) @@ -1148,16 +1140,15 @@ func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWithO // Define expected results // Calculated as table0: 1.76 * ((1134 * 23) / 19) / 60 expectedTime := 41.0 // double the time required when there are no indexes. 
- expectedJobs := int64(1) - if estimatedTime != expectedTime || parallelJobs != expectedJobs { - t.Errorf("calculateTimeTakenAndParallelJobsForImport() = (%v, %v), want (%v, %v)", - estimatedTime, parallelJobs, expectedTime, expectedJobs) + + if estimatedTime != expectedTime { + t.Errorf("calculateTimeTakenForImport() = (%v), want (%v)", estimatedTime, expectedTime) } } // validate the formula to calculate the import time for sharded table with 5 indexes -func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWithFiveIndexes_Sharded(t *testing.T) { +func TestCalculateTimeTakenForImport_ValidateImportTimeTableWithFiveIndexes_Sharded(t *testing.T) { // Define test data shardedTables := []SourceDBMetadata{ { @@ -1175,11 +1166,11 @@ func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWithF shardedLoadTimes := []ExpDataLoadTime{ { csvSizeGB: sql.NullFloat64{Float64: 19}, migrationTimeSecs: sql.NullFloat64{Float64: 1134}, - parallelThreads: sql.NullInt64{Int64: 1}, rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, + rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, }, { csvSizeGB: sql.NullFloat64{Float64: 29}, migrationTimeSecs: sql.NullFloat64{Float64: 1657}, - parallelThreads: sql.NullInt64{Int64: 1}, rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, + rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, }, } @@ -1195,8 +1186,8 @@ func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWithF }, } // Call the function - estimatedTime, parallelJobs, err := - calculateTimeTakenAndParallelJobsForImport(shardedTables, sourceIndexMetadata, shardedLoadTimes, + estimatedTime, err := + calculateTimeTakenForImport(shardedTables, sourceIndexMetadata, shardedLoadTimes, indexImpacts, columnsImpact, SHARDED) if err != nil { t.Errorf("Unexpected error: %v", err) @@ -1205,16 +1196,15 @@ func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWithF // Define expected results // 
Calculated as table0: 4.6 * ((1134 * 23) / 19) / 60 expectedTime := 106.0 - expectedJobs := int64(1) - if estimatedTime != expectedTime || parallelJobs != expectedJobs { - t.Errorf("calculateTimeTakenAndParallelJobsForImport() = (%v, %v), want (%v, %v)", - estimatedTime, parallelJobs, expectedTime, expectedJobs) + + if estimatedTime != expectedTime { + t.Errorf("calculateTimeTakenForImport() = (%v), want (%v)", estimatedTime, expectedTime) } } // validate the formula to calculate the import time for colocated table without index having 5 columns -func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWithoutIndex5Columns_Colocated(t *testing.T) { +func TestCalculateTimeTakenForImport_ValidateImportTimeTableWithoutIndex5Columns_Colocated(t *testing.T) { // Define test data colocatedTables := []SourceDBMetadata{ { @@ -1226,11 +1216,11 @@ func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWitho colocatedLoadTimes := []ExpDataLoadTime{ { csvSizeGB: sql.NullFloat64{Float64: 19}, migrationTimeSecs: sql.NullFloat64{Float64: 1134}, - parallelThreads: sql.NullInt64{Int64: 1}, rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, + rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, }, { csvSizeGB: sql.NullFloat64{Float64: 29}, migrationTimeSecs: sql.NullFloat64{Float64: 1657}, - parallelThreads: sql.NullInt64{Int64: 1}, rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, + rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, }, } var indexImpacts []ExpDataLoadTimeIndexImpact // doesn't have any impact as there are no indexes @@ -1243,7 +1233,7 @@ func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWitho } // Call the function - estimatedTime, parallelJobs, err := calculateTimeTakenAndParallelJobsForImport(colocatedTables, + estimatedTime, err := calculateTimeTakenForImport(colocatedTables, sourceIndexMetadata, colocatedLoadTimes, indexImpacts, columnImpacts, COLOCATED) if err != nil { 
t.Errorf("Unexpected error: %v", err) @@ -1252,16 +1242,15 @@ func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWitho // Define expected results // Calculated as table0: 1 * ((1134 * 23) / 19) / 60 expectedTime := 23.0 - expectedJobs := int64(1) - if estimatedTime != expectedTime || parallelJobs != expectedJobs { - t.Errorf("calculateTimeTakenAndParallelJobsForImport() = (%v, %v), want (%v, %v)", - estimatedTime, parallelJobs, expectedTime, expectedJobs) + + if estimatedTime != expectedTime { + t.Errorf("calculateTimeTakenForImport() = (%v), want (%v)", estimatedTime, expectedTime) } } // validate the formula to calculate the import time for colocated table without index having 40 columns -func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWithoutIndex40Columns_Colocated(t *testing.T) { +func TestCalculateTimeTakenForImport_ValidateImportTimeTableWithoutIndex40Columns_Colocated(t *testing.T) { // Define test data colocatedTables := []SourceDBMetadata{ { @@ -1273,11 +1262,11 @@ func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWitho colocatedLoadTimes := []ExpDataLoadTime{ { csvSizeGB: sql.NullFloat64{Float64: 19}, migrationTimeSecs: sql.NullFloat64{Float64: 1134}, - parallelThreads: sql.NullInt64{Int64: 1}, rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, + rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, }, { csvSizeGB: sql.NullFloat64{Float64: 29}, migrationTimeSecs: sql.NullFloat64{Float64: 1657}, - parallelThreads: sql.NullInt64{Int64: 1}, rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, + rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, }, } var indexImpacts []ExpDataLoadTimeIndexImpact // doesn't have any impact as there are no indexes @@ -1295,7 +1284,7 @@ func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWitho } // Call the function - estimatedTime, parallelJobs, err := calculateTimeTakenAndParallelJobsForImport(colocatedTables, 
+ estimatedTime, err := calculateTimeTakenForImport(colocatedTables, sourceIndexMetadata, colocatedLoadTimes, indexImpacts, columnImpacts, COLOCATED) if err != nil { t.Errorf("Unexpected error: %v", err) @@ -1304,16 +1293,15 @@ func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWitho // Define expected results // Calculated as table0: 1.57 * ((1134 * 23) / 19) / 60 expectedTime := 36.0 - expectedJobs := int64(1) - if estimatedTime != expectedTime || parallelJobs != expectedJobs { - t.Errorf("calculateTimeTakenAndParallelJobsForImport() = (%v, %v), want (%v, %v)", - estimatedTime, parallelJobs, expectedTime, expectedJobs) + + if estimatedTime != expectedTime { + t.Errorf("calculateTimeTakenForImport() = (%v), want (%v)", estimatedTime, expectedTime) } } // validate the formula to calculate the import time for colocated table without index having 100 columns -func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWithoutIndex100Columns_Colocated(t *testing.T) { +func TestCalculateTimeTakenForImport_ValidateImportTimeTableWithoutIndex100Columns_Colocated(t *testing.T) { // Define test data colocatedTables := []SourceDBMetadata{ { @@ -1325,11 +1313,11 @@ func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWitho colocatedLoadTimes := []ExpDataLoadTime{ { csvSizeGB: sql.NullFloat64{Float64: 19}, migrationTimeSecs: sql.NullFloat64{Float64: 1134}, - parallelThreads: sql.NullInt64{Int64: 1}, rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, + rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, }, { csvSizeGB: sql.NullFloat64{Float64: 29}, migrationTimeSecs: sql.NullFloat64{Float64: 1657}, - parallelThreads: sql.NullInt64{Int64: 1}, rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, + rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, }, } var indexImpacts []ExpDataLoadTimeIndexImpact // doesn't have any impact as there are no indexes @@ -1347,7 +1335,7 @@ func 
TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWitho } // Call the function - estimatedTime, parallelJobs, err := calculateTimeTakenAndParallelJobsForImport(colocatedTables, + estimatedTime, err := calculateTimeTakenForImport(colocatedTables, sourceIndexMetadata, colocatedLoadTimes, indexImpacts, columnImpacts, COLOCATED) if err != nil { t.Errorf("Unexpected error: %v", err) @@ -1357,16 +1345,15 @@ func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWitho // multiplication factor: high MF of 160 columns ==> 4.13 // Calculated as table0: 4.13 * ((1134 * 23) / 19) / 60 expectedTime := 95.0 - expectedJobs := int64(1) - if estimatedTime != expectedTime || parallelJobs != expectedJobs { - t.Errorf("calculateTimeTakenAndParallelJobsForImport() = (%v, %v), want (%v, %v)", - estimatedTime, parallelJobs, expectedTime, expectedJobs) + + if estimatedTime != expectedTime { + t.Errorf("calculateTimeTakenForImport() = (%v), want (%v)", estimatedTime, expectedTime) } } // validate the formula to calculate the import time for colocated table without index having 250 columns -func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWithoutIndex250Columns_Colocated(t *testing.T) { +func TestCalculateTimeTakenForImport_ValidateImportTimeTableWithoutIndex250Columns_Colocated(t *testing.T) { // Define test data colocatedTables := []SourceDBMetadata{ { @@ -1378,11 +1365,11 @@ func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWitho colocatedLoadTimes := []ExpDataLoadTime{ { csvSizeGB: sql.NullFloat64{Float64: 19}, migrationTimeSecs: sql.NullFloat64{Float64: 1134}, - parallelThreads: sql.NullInt64{Int64: 1}, rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, + rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, }, { csvSizeGB: sql.NullFloat64{Float64: 29}, migrationTimeSecs: sql.NullFloat64{Float64: 1657}, - parallelThreads: sql.NullInt64{Int64: 1}, rowCount: sql.NullFloat64{Float64: 
100000, Valid: true}, + rowCount: sql.NullFloat64{Float64: 100000, Valid: true}, }, } var indexImpacts []ExpDataLoadTimeIndexImpact // doesn't have any impact as there are no indexes @@ -1400,7 +1387,7 @@ func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWitho } // Call the function - estimatedTime, parallelJobs, err := calculateTimeTakenAndParallelJobsForImport(colocatedTables, + estimatedTime, err := calculateTimeTakenForImport(colocatedTables, sourceIndexMetadata, colocatedLoadTimes, indexImpacts, columnImpacts, COLOCATED) if err != nil { t.Errorf("Unexpected error: %v", err) @@ -1410,10 +1397,9 @@ func TestCalculateTimeTakenAndParallelJobsForImport_ValidateImportTimeTableWitho // multiplication factor: closest MF of 160 columns ==> (4.13/160 : x/250) ==> x = 6.45 // Calculated as table0: 6.45 * ((1134 * 23) / 19) / 60 expectedTime := 148.0 - expectedJobs := int64(1) - if estimatedTime != expectedTime || parallelJobs != expectedJobs { - t.Errorf("calculateTimeTakenAndParallelJobsForImport() = (%v, %v), want (%v, %v)", - estimatedTime, parallelJobs, expectedTime, expectedJobs) + + if estimatedTime != expectedTime { + t.Errorf("calculateTimeTakenForImport() = (%v), want (%v)", estimatedTime, expectedTime) } }