v13.0.1 - Updated distribution algorithm (#107)
* Updated distribution algorithm for `--concurrencytype` of `Server` and `MaxPerServer` when the number of Batch nodes is very close to the number of SQL Server targets. The previous algorithm could yield fewer buckets than the number of nodes (see the sketch below)
* NuGet updates
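
The change in `Concurrency.RecombineServersToFixedBucketCount` (full diff below) adds a "minimum consolidation" path for this near-miss case. A self-contained sketch of the idea, using toy data and illustrative names rather than the library's types:

```csharp
using System;
using System.Collections.Generic;
using System.Linq;

class MinimalConsolidationSketch
{
    static void Main()
    {
        // Toy model: one bucket per server, each bucket is a list of database names.
        var buckets = new List<List<string>>
        {
            new() { "db1", "db2" }, new() { "db3" }, new() { "db4", "db5", "db6" },
            new() { "db7" }, new() { "db8", "db9" }
        };
        int fixedBucketCount = 4; // e.g., the number of Batch nodes

        var consolidated = new List<List<string>>();

        // Merge the two smallest buckets until the overall count matches the target,
        // so the result lands on exactly fixedBucketCount instead of dropping below it.
        while (buckets.Count > 0 && consolidated.Count + buckets.Count > fixedBucketCount)
        {
            var smallestTwo = buckets.OrderBy(b => b.Count).Take(2).ToList();
            consolidated.Add(smallestTwo.SelectMany(b => b).ToList());
            smallestTwo.ForEach(b => buckets.Remove(b));
        }
        consolidated.AddRange(buckets); // remaining buckets pass through unchanged

        Console.WriteLine(consolidated.Count); // 4
    }
}
```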
mmckechney authored Apr 27, 2021
1 parent 6712b32 commit 31fa8c6
Showing 9 changed files with 190 additions and 71 deletions.
4 changes: 4 additions & 0 deletions docs/change_notes.md
@@ -2,6 +2,10 @@
# SQL Build Manager Change Notes


### Version 13.0.1

- *FIXED:* Updated distribution algorithm for `--concurrencytype` of `Server` and `MaxPerServer` when the number of Batch nodes is very close to the number of SQL Server targets. The previous algorithm could yield fewer buckets than the number of nodes.

### Version 13.0.0

- *ADDED:* New option to leverage Azure Service Bus Topic as a database target source. See the [Azure Batch](azure_batch.md) docs for more detail
4 changes: 2 additions & 2 deletions src/AssemblyVersioning.cs
@@ -25,7 +25,7 @@
// These can be found in SqlBuildManager.Setup -> Organize Your Setup -> General Information
// ** Also, don't forget to update the change_notes.xml and .html files!

[assembly: AssemblyVersion("13.0.0")]
[assembly: AssemblyFileVersion("13.0.0")]
[assembly: AssemblyVersion("13.0.1")]
[assembly: AssemblyFileVersion("13.0.1")]


142 changes: 113 additions & 29 deletions src/SqlBuildManager.Console.UnitTest/ConcurrencyTest.cs
@@ -38,8 +38,41 @@ internal static (string, MultiDbData) GetMultiDbData()

return (tmpCfg, multiData);
}
internal static (string, MultiDbData) CreateDefinedMultiDbData(int serverCount, int[] dbCount)
{
if(serverCount != dbCount.Length)
{
return ("", null);
}
var tmpCfg = Path.GetTempPath() + Guid.NewGuid().ToString() + ".cfg";
StringBuilder sb = new StringBuilder();
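//Write one line per database in the "server:default,database" format consumed by
//Validation.ValidateAndLoadMultiDbData below, e.g. "server0:default,database1"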
for (int s = 0; s < serverCount; s++)
{
int tmpDbCount = dbCount[s];
for (int d = 0; d < tmpDbCount; d++)
{
sb.AppendLine($"server{s}:default,database{d}");
}
}
File.WriteAllText(tmpCfg, sb.ToString());
MultiDbData multiData;
string[] errorMessages;
CommandLineArgs cmdLine = new CommandLineArgs()
{
MultiDbRunConfigFileName = tmpCfg
};
string message = string.Empty;

int tmpValReturn = Validation.ValidateAndLoadMultiDbData(cmdLine.MultiDbRunConfigFileName, cmdLine, out multiData, out errorMessages);
if (tmpValReturn != 0)
{
var msg = new LogMsg() { Message = String.Join(";", errorMessages), LogType = LogType.Error };
throw new Exception(String.Join(";", errorMessages));
}

internal static (string, MultiDbData) CreateMultiDbData(int serverCount, int minDbCount, int maxDbCount, out int[] matrix)
return (tmpCfg, multiData);
}
internal static (string, MultiDbData) CreateRandomizedMultiDbData(int serverCount, int minDbCount, int maxDbCount, out int[] matrix)
{
var tmpCfg = Path.GetTempPath() + Guid.NewGuid().ToString() + ".cfg";
Random rnd = new Random();
@@ -75,42 +75,108 @@ internal static (string, MultiDbData) CreateMultiDbData(int serverCount, int min
[TestMethod]
public void MatchServersToFixedBucket()
{
int targetBuckets = 3;
int serverCount = 40;
int minDbCount = 10;
int maxDbCount = 500;
Random rnd = new Random();

string tmpFile = string.Empty;
MultiDbData multiData;
try
{
for (int i = 0; i < 100; i++)
int targetBuckets = 3;
int serverCount = 40;
int minDbCount = 10;
int maxDbCount = 500;
Random rnd = new Random();

string tmpFile = string.Empty;
MultiDbData multiData;
try
{
targetBuckets = rnd.Next(2, 51);
serverCount = rnd.Next(targetBuckets + 1, 400);
minDbCount = rnd.Next(10, 201);
maxDbCount = rnd.Next(minDbCount+1 , 600);
int[] matrix;
(tmpFile, multiData) = CreateMultiDbData(serverCount, minDbCount, maxDbCount,out matrix);

var buckets = Concurrency.RecombineServersToFixedBucketCount(multiData, targetBuckets);
var flattened = Concurrency.ConcurrencyByServer(multiData);
int maxBucket = flattened.Max(c => c.Count());
int medianBucket = flattened.OrderBy(c => c.Count()).ToList()[(flattened.Count() / 2)+1].Count();
var idealBucket = Math.Ceiling((double)flattened.Sum(c => c.Count()) / (double)targetBuckets);
string message = $"Buckets: {targetBuckets}; Servers: {serverCount}; Matrix: {string.Join(",", matrix)}";
Assert.AreEqual(targetBuckets, buckets.Count(), message);
Assert.IsTrue(buckets.Max(c => c.Count()) <= idealBucket + maxBucket, message);

var str = Concurrency.ConvertBucketsToConfigLines(buckets);
for (int i = 0; i < 100; i++)
{
targetBuckets = rnd.Next(2, 51);
serverCount = rnd.Next(targetBuckets + 1, 200);
minDbCount = rnd.Next(10, 201);
maxDbCount = rnd.Next(minDbCount + 1, 300);
int[] matrix;
(tmpFile, multiData) = CreateRandomizedMultiDbData(serverCount, minDbCount, maxDbCount, out matrix);

var buckets = Concurrency.RecombineServersToFixedBucketCount(multiData, targetBuckets);
var flattened = Concurrency.ConcurrencyByServer(multiData);
int maxBucket = flattened.Max(c => c.Count());
int medianBucket = flattened.OrderBy(c => c.Count()).ToList()[(flattened.Count() / 2) + 1].Count();
var idealBucket = Math.Ceiling((double)flattened.Sum(c => c.Count()) / (double)targetBuckets);
string message = $"Buckets: {targetBuckets}; Servers: {serverCount}; Matrix: {string.Join(",", matrix)}";
Assert.AreEqual(targetBuckets, buckets.Count(), message);
Assert.IsTrue(buckets.Max(c => c.Count()) <= idealBucket + maxBucket, message);

var str = Concurrency.ConvertBucketsToConfigLines(buckets);

if (File.Exists(tmpFile))
{
File.Delete(tmpFile);
}
}

}
finally
{
if (File.Exists(tmpFile))
{
File.Delete(tmpFile);
}
}
}
catch(OutOfMemoryException)
{
//GitHub actions sometimes will run out of memory running this test!
}

}

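//Regression cases; the trailing "Actual:<n>" notes appear to record the undersized
//bucket counts the previous algorithm produced for these inputs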
[DataRow(3, 8, new int[] { 92, 225, 126, 135, 266, 186, 280, 115 })]
[DataRow(26, 27, new int[] { 554, 436, 194, 441, 382, 440, 337, 242, 85, 449, 513, 426, 475, 151, 507, 460, 138, 425, 529, 120, 262, 117, 123, 391, 344, 260, 119 })] //Actual:<23>
[DataRow(32, 38, new int[] { 218, 532, 396, 63, 227, 207, 185, 106, 556, 453, 528, 476, 512, 395, 73, 487, 121, 75, 450, 560, 456, 199, 488, 413, 311, 439, 132, 405, 448, 238, 266, 101, 368, 84, 133, 171, 31, 276 })] //Actual:<30>
[DataRow(48, 52, new int[] { 155, 365, 406, 341, 92, 116, 294, 268, 495, 239, 260, 250, 214, 101, 190, 212, 319, 277, 137, 316, 199, 428, 198, 353, 166, 408, 239, 45, 71, 458, 231, 140, 129, 117, 451, 211, 168, 320, 378, 448, 337, 161, 149, 99, 178, 198, 43, 151, 131, 211, 407, 361 })] // Actual:<46>.
[DataRow(39, 40, new int[] { 475, 159, 167, 155, 263, 279, 342, 258, 255, 303, 433, 473, 356, 352, 188, 405, 395, 467, 431, 474, 162, 411, 427, 208, 458, 370, 295, 419, 135, 130, 455, 273, 440, 247, 233, 252, 406, 346, 445, 417 })] //Actual:<37>
[DataRow(34, 37, new int[] { 512, 68, 299, 503, 442, 170, 200, 336, 435, 507, 124, 264, 509, 449, 18, 406, 238, 491, 42, 485, 240, 152, 388, 468, 510, 536, 380, 336, 371, 404, 334, 365, 161, 274, 135, 19, 153 })] //Actual:<31>
[DataRow(32, 36, new int[] { 429, 295, 251, 206, 436, 155, 285, 203, 214, 89, 53, 70, 232, 194, 298, 87, 315, 298, 377, 412, 231, 270, 392, 286, 354, 299, 320, 235, 98, 87, 130, 75, 247, 56, 141, 441 })] //Actual:<30>
[DataRow(21, 22, new int[] { 259, 68, 318, 114, 406, 462, 159, 322, 233, 288, 382, 151, 397, 294, 76, 347, 337, 282, 398, 444, 207, 128 })] //Actual:<19>
[DataTestMethod]
public void MatchDefinedServersToFixedBucket(int targetBuckets, int serverCount, int[] dbsPerServer)
{
string tmpFile = string.Empty;
MultiDbData multiData;
try
{
(tmpFile, multiData) = CreateDefinedMultiDbData(serverCount, dbsPerServer);

var buckets = Concurrency.RecombineServersToFixedBucketCount(multiData, targetBuckets);
var flattened = Concurrency.ConcurrencyByServer(multiData);
int maxBucket = flattened.Max(c => c.Count());
int medianBucket = flattened.OrderBy(c => c.Count()).ToList()[(flattened.Count() / 2) + 1].Count();
var idealBucket = Math.Ceiling((double)flattened.Sum(c => c.Count()) / (double)targetBuckets);
string message = $"Buckets: {targetBuckets}; Servers: {serverCount}; Matrix: {string.Join(",", dbsPerServer)}";
Assert.AreEqual(targetBuckets, buckets.Count(), message);
Assert.IsTrue(buckets.Max(c => c.Count()) <= idealBucket + maxBucket, message);

var str = Concurrency.ConvertBucketsToConfigLines(buckets);

if (File.Exists(tmpFile))
{
File.Delete(tmpFile);
}
}
finally
{
@@ -119,7 +202,6 @@ public void MatchServersToFixedBucket()
File.Delete(tmpFile);
}
}

}

[TestMethod]
@@ -142,7 +224,7 @@ public void MatchServersToFixedBucket_ToConfigLines()
minDbCount = rnd.Next(10, 201);
maxDbCount = rnd.Next(minDbCount + 1, 600);
int[] matrix;
(tmpFile, multiData) = CreateMultiDbData(serverCount, minDbCount, maxDbCount, out matrix);
(tmpFile, multiData) = CreateRandomizedMultiDbData(serverCount, minDbCount, maxDbCount, out matrix);

var buckets = Concurrency.RecombineServersToFixedBucketCount(multiData, targetBuckets);
var str = Concurrency.ConvertBucketsToConfigLines(buckets);
@@ -364,5 +446,7 @@ public void ChunkAlgo_RandomTest()


}


}
}
2 changes: 1 addition & 1 deletion src/SqlBuildManager.Console.UnitTest/SynchronizeTest.cs
@@ -96,7 +96,7 @@ public void GetDatabaseRunHistoryDifferenceTest1()
///A test for ParseAndValidateFlags
///</summary>
[TestMethod(), Ignore("not complete")]
[DeploymentItem("SqlBuildManager.Console.exe")]
[DeploymentItem("sbm.exe")]
public void ParseAndValidateFlagsTest()
{
string[] args = null;
92 changes: 58 additions & 34 deletions src/SqlBuildManager.Console/Threaded/Concurrency.cs
@@ -93,7 +93,7 @@ public class Concurrency

public static List<IEnumerable<(string, List<DatabaseOverride>)>> RecombineServersToFixedBucketCount(MultiDbData multiData, int fixedBucketCount)
{
List<IEnumerable<(string, List<DatabaseOverride>)>> consolidated = new List<IEnumerable<(string, List<DatabaseOverride>)>>();
List<IEnumerable<(string, List<DatabaseOverride>)>> consolidated = new List<IEnumerable<(string, List<DatabaseOverride>)>>();
//Get a bucket per server
var buckets = ConcurrencyByServer(multiData);
int itemCheckSum = buckets.Sum(b => b.Count());
@@ -109,53 +109,77 @@ public class Concurrency
over.ToList().ForEach(o => buckets.Remove(o));
}

//Start creating -- fill each bucket as full as possible without exceeding the ideal size, until we reach the target bucket count
while (buckets.Count() > 0)
//Special case: is the requested (fixed) bucket count very close to the number of server buckets? If so, do minimum consolidation
var gap = Math.Abs((consolidated.Count() + buckets.Count()) - fixedBucketCount);
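//A gap of at most 6, where the target bucket count is more than double that gap,
//triggers a pairwise merge of the smallest buckets rather than rebuilding from scratch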
if (gap <= 6 && fixedBucketCount / gap > 2)
{
int bucketSum = 0;
var nextSet = buckets.OrderByDescending(b => b.Count()).TakeWhile(p =>
//Combine the smallest buckets until we hit the fixed bucket count
while(buckets.Count() > 0 && consolidated.Count() + buckets.Count() > fixedBucketCount)
{
bool exceeded = bucketSum > idealBucketSize;
bucketSum += p.Count();
return !exceeded;
});
var nextTmp = nextSet.ToList();
if (nextTmp.Count() > 0)
var nextTwo = buckets.OrderBy(b => b.Count()).Take(2).ToList();
var tmp = new List<(string, List<DatabaseOverride>)>();
nextTwo.ForEach(n => tmp.AddRange(n));
consolidated.Add(tmp);
nextTwo.ForEach(o => buckets.Remove(o));
}

//now just add the remaining buckets to the consolidated collection
buckets.ForEach(b => consolidated.Add(b));
buckets.Clear();
}
else
{
//Start creating -- fill each bucket as full as possible without exceeding the ideal size, until we reach the target bucket count
while (buckets.Count() > 0)
{
while(nextTmp.Sum(n => n.Count()) > idealBucketSize)
int bucketSum = 0;
var nextSet = buckets.OrderByDescending(b => b.Count()).TakeWhile(p =>
{
nextTmp.RemoveAt(nextTmp.Count() - 1);
bool exceeded = bucketSum > idealBucketSize;
bucketSum += p.Count();
return !exceeded;
});
var nextTmp = nextSet.ToList();
if (nextTmp.Count() > 0)
{
while (nextTmp.Sum(n => n.Count()) > idealBucketSize)
{
nextTmp.RemoveAt(nextTmp.Count() - 1);
}
var tmp = new List<(string, List<DatabaseOverride>)>();
foreach (var n in nextTmp)
{
tmp.AddRange(n);
}
nextTmp.ForEach(o => buckets.Remove(o));
consolidated.Add(tmp);
}
var tmp = new List<(string, List<DatabaseOverride>)>();
foreach (var n in nextTmp)
if (consolidated.Count() == fixedBucketCount)
{
tmp.AddRange(n);
break;
}
nextTmp.ForEach(o => buckets.Remove(o));
consolidated.Add(tmp);
}
if(consolidated.Count() == fixedBucketCount)
{
break;
}
}

if(buckets.Count() > 0)
{
consolidated = consolidated.OrderBy(c => c.Count()).ToList();
while(buckets.Count() > 0)
//Capture any left over buckets
if (buckets.Count() > 0)
{
for(int i=0;i<consolidated.Count();i++)

consolidated = consolidated.OrderBy(c => c.Count()).ToList();
while (buckets.Count() > 0)
{
if (buckets.Count() == 0) break;
var t = consolidated[i].ToList();
t.AddRange(buckets.First());
consolidated[i] = t;
buckets.RemoveAt(0);
for (int i = 0; i < consolidated.Count(); i++)

{
if (buckets.Count() == 0) break;
var t = consolidated[i].ToList();
t.AddRange(buckets.First());
consolidated[i] = t;
buckets.RemoveAt(0);
}
}
}
}


if(itemCheckSum != consolidated.Sum(c => c.Count()))
{
throw new Exception($"While filling concurrency buckets, the end count of {consolidated.Sum(c => c.Count())} does not equal the start count of {itemCheckSum}");
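
For reference, a minimal usage sketch mirroring the unit tests in this commit (`CreateRandomizedMultiDbData` is the test helper above, not a library API; the counts are arbitrary):

```csharp
// Split 40 servers' worth of randomized targets into exactly 5 buckets
// and emit per-bucket config lines.
var (cfgPath, multiData) = ConcurrencyTest.CreateRandomizedMultiDbData(40, 10, 500, out int[] matrix);

var buckets = Concurrency.RecombineServersToFixedBucketCount(multiData, fixedBucketCount: 5);
var configLines = Concurrency.ConvertBucketsToConfigLines(buckets); // one "server:default,database" set per bucket
```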
8 changes: 4 additions & 4 deletions src/SqlBuildManager.Console/sbm.csproj
@@ -55,13 +55,13 @@
<PackageReference Include="Microsoft.Azure.KeyVault.Core" Version="3.0.5" />
<PackageReference Include="Microsoft.Azure.Services.AppAuthentication" Version="1.6.1" />
<PackageReference Include="Microsoft.IdentityModel.Clients.ActiveDirectory" Version="5.2.9" />
<PackageReference Include="Microsoft.IdentityModel.JsonWebTokens" Version="6.10.2" />
<PackageReference Include="Microsoft.IdentityModel.Logging" Version="6.10.2" />
<PackageReference Include="Microsoft.IdentityModel.Tokens" Version="6.10.2" />
<PackageReference Include="Microsoft.IdentityModel.JsonWebTokens" Version="6.11.0" />
<PackageReference Include="Microsoft.IdentityModel.Logging" Version="6.11.0" />
<PackageReference Include="Microsoft.IdentityModel.Tokens" Version="6.11.0" />
<PackageReference Include="Microsoft.Rest.ClientRuntime" Version="3.0.3" />
<PackageReference Include="Microsoft.Rest.ClientRuntime.Azure" Version="4.0.3" />
<PackageReference Include="Newtonsoft.Json" Version="13.0.1" />
<PackageReference Include="Azure.Storage.Blobs" Version="12.8.1" />
<PackageReference Include="Azure.Storage.Blobs" Version="12.8.2" />
</ItemGroup>
<PropertyGroup>
<GenerateAssemblyInfo>false</GenerateAssemblyInfo>
2 changes: 1 addition & 1 deletion src/SqlSync/SqlBuild/CommandLineBuilderForm.cs
@@ -98,7 +98,7 @@ private void ddAuthentication_SelectionChangeCommitted(object sender, EventArgs
}
private void btnConstructCommand_Click(object sender, EventArgs e)
{
string exePath = Path.Combine(Path.GetDirectoryName(System.Reflection.Assembly.GetExecutingAssembly().Location), "SqlBuildManager.Console.exe");
string exePath = Path.Combine(Path.GetDirectoryName(System.Reflection.Assembly.GetExecutingAssembly().Location), "sbm.exe");
StringBuilder sb = new StringBuilder();

sb.Append("\"" + exePath + "\" ");
4 changes: 4 additions & 0 deletions src/SqlSync/change_notes.html
@@ -39,6 +39,10 @@
</head>
<body>
<h1>SQL Build Manager Change Notes</h1>
<div class="version">Version 13.0.1</div>
<div>
<span class="fixed">FIXED: </span>Updated distribution algorithm for `--concurrencytype` of `Server` and `MaxPerServer` when the number of Batch nodes is very close to the number of SQL Server targets. The previous algorithm could yield fewer buckets than the number of nodes. </div>
<br />
<div class="version">Version 13.0.0</div>
<div>
<span class="added">ADDED: </span>New option to leverage Azure Service Bus Topic as a database target source. See the [Azure Batch](azure_batch.md) docs for more detail</div>
