diff --git a/changelog.txt b/changelog.txt index 609ae9760a..767bc5dae4 100644 --- a/changelog.txt +++ b/changelog.txt @@ -1,3 +1,65 @@ +v0.17.0-rc1 (2018-08-19) +~~~~~~~~~~~ + +New visible features: +* Implemented ponder support. +* Tablebases are supported now (only WDL probe for now). + Command line parameter is + --syzygy-paths=/path/to/syzygy/ +* Old smart pruning flag is gone. Instead there is + --futile-search-aversion flag. + --futile-search-aversion=0 is equivalent to old --no-smart-pruning. + --futile-search-aversion=1 is equivalent to old --smart-pruning. + Now default is 1.47, which means that engine will sometimes decide to + stop search earlier even when there is theoretical chance (but not very + probable) that best move decision could be changed if allowed to think more. +* Lc0 now supports configuration files. Options can be listed there instead of + command line flags / uci params. + Config should be named lc0.config and located in the same directory as lc0. + Should list one command line option per line, with '--' in the beginning + being optional, for example: + + syzygy-paths=/path/to/syzygy/ + +* In uci info, "depth" is now average depth rather than full depth + (which was 4 all the time). + Also, depth values do not include reused tree, only nodes visited during the + current search session. +* --sticky-checkmates experimental flag (default off), supposed to find shorter + checkmate sequences. +* More features in backend "check". + + +Performance optimizations: +* Release windows executables are built with "whole program optimization". +* Added --out-of-order-eval flag (default is off). + Switching it on makes cached/terminal nodes higher priority, which increases + nps. +* OpenCL backend now supports batches (up to 5x speedup!) +* Performance optimizations for BLAS backend. +* Total visited policy (for FPU reduction) is now cached. 
+* Values of priors (P) are stored now as 16-bit float rather than 32-bit float, + that saves considerable amount of RAM. + + +Bugfixes: +* Fixed en passant detection bug which caused the position after pawn moving by + two squares not counted towards threefold repetition even if en passant was + not possible. +* Fixed the bug which caused --cache-history-length for values 2..7 work the + same as --cache-history-length=1. + This is fixed, but default is temporarily changed to --cache-history-length=1 + during play. (For training games, it's 7) + + +Removed features: +* Backpropagation beta / backpropagation gamma parameters have been removed. + + +Other changes: +* Release lc0-windows-cuda.zip package now contains NVIDIA CUDA and cuDNN .dlls. + + v0.16.0 (2018-07-20) ~~~~~~~ diff --git a/src/engine.h b/src/engine.h index 40685b42b5..7131a295b7 100644 --- a/src/engine.h +++ b/src/engine.h @@ -33,8 +33,8 @@ #include "neural/network.h" #include "syzygy/syzygy.h" #include "utils/mutex.h" -#include "utils/optionsparser.h" +#include "utils/optional.h" +#include "utils/optionsparser.h" // CUDNN eval // comment/disable this to enable tensor flow path diff --git a/src/main.cc b/src/main.cc index 017f7f23d0..367d891f13 100644 --- a/src/main.cc +++ b/src/main.cc @@ -34,7 +34,8 @@ int main(int argc, const char** argv) { std::cerr << " _" << std::endl; std::cerr << "| _ | |" << std::endl; - std::cerr << "|_ |_ |_| v" << GetVersionStr() << " built " << __DATE__ << std::endl; + std::cerr << "|_ |_ |_| v" << GetVersionStr() << " built " << __DATE__ + << std::endl; using namespace lczero; CommandLine::Init(argc, argv); CommandLine::RegisterMode("uci", "(default) Act as UCI engine"); diff --git a/src/mcts/node.cc b/src/mcts/node.cc index ebd907a8e0..bcde029ba5 100644 --- a/src/mcts/node.cc +++ b/src/mcts/node.cc @@ -197,8 +197,7 @@ void Node::CreateEdges(const MoveList& moves) { Node::ConstIterator Node::Edges() const { return {edges_, &child_}; } Node::Iterator Node::Edges() { return 
{edges_, &child_}; } -float Node::GetVisitedPolicy() const { return visited_policy_; } - +float Node::GetVisitedPolicy() const { return visited_policy_; } Edge* Node::GetEdgeToNode(const Node* node) const { assert(node->parent_ == this); @@ -247,7 +246,6 @@ void Node::FinalizeScoreUpdate(float v) { --n_in_flight_; } - Node::NodeRange Node::ChildNodes() const { return child_.get(); } void Node::ReleaseChildren() { gNodeGc.AddToGcQueue(std::move(child_)); } diff --git a/src/mcts/search.cc b/src/mcts/search.cc index 54736bdd42..2e01dd211a 100644 --- a/src/mcts/search.cc +++ b/src/mcts/search.cc @@ -390,8 +390,8 @@ std::pair Search::GetBestMoveInternal() const Move ponder_move; // Default is "null move" which means "don't display // anything". if (best_node.HasNode() && best_node.node()->HasChildren()) { - ponder_move = - GetBestChildNoTemperature(best_node.node()).GetMove(!played_history_.IsBlackToMove()); + ponder_move = GetBestChildNoTemperature(best_node.node()) + .GetMove(!played_history_.IsBlackToMove()); } return {best_node.GetMove(played_history_.IsBlackToMove()), ponder_move}; } @@ -604,6 +604,7 @@ void SearchWorker::GatherMinibatch() { if (picked_node.nn_queried) computation_->PopCacheHit(); minibatch_.pop_back(); --minibatch_size; + ++number_out_of_order; } } } @@ -735,12 +736,12 @@ void SearchWorker::ExtendNode(Node* node) { node->MakeTerminal(GameResult::DRAW); return; } - + // Neither by-position or by-rule termination, but maybe it's a TB position. 
if (search_->syzygy_tb_ && board.castlings().no_legal_castle() && history_.Last().GetNoCaptureNoPawnPly() == 0 && (board.ours() + board.theirs()).count() <= - search_->syzygy_tb_->max_cardinality()) { + search_->syzygy_tb_->max_cardinality()) { ProbeState state; WDLScore wdl = search_->syzygy_tb_->probe_wdl(history_.Last(), &state); // Only fail state means the WDL is wrong, probe_wdl may produce correct @@ -751,8 +752,8 @@ void SearchWorker::ExtendNode(Node* node) { node->MakeTerminal(GameResult::BLACK_WON); } else if (wdl == WDL_LOSS) { node->MakeTerminal(GameResult::WHITE_WON); - } else { // Cursed wins and blessed losses count as draws. - node->MakeTerminal(GameResult::DRAW); + } else { // Cursed wins and blessed losses count as draws. + node->MakeTerminal(GameResult::DRAW); } search_->tb_hits_.fetch_add(1, std::memory_order_acq_rel); return; diff --git a/src/neural/blas/convolution1.cc b/src/neural/blas/convolution1.cc index 08e7f2a430..4e65f8f6cd 100644 --- a/src/neural/blas/convolution1.cc +++ b/src/neural/blas/convolution1.cc @@ -59,7 +59,6 @@ void Convolution1::Forward(const size_t batch_size, const size_t input_channels, 0.0f, // beta batch_output, // C kSquares); // ldc, leading rank of B - } } diff --git a/src/neural/blas/convolution1.h b/src/neural/blas/convolution1.h index 80b567a5cc..8f160acfad 100644 --- a/src/neural/blas/convolution1.h +++ b/src/neural/blas/convolution1.h @@ -38,4 +38,4 @@ class Convolution1 { static constexpr auto kHeight = 8; static constexpr auto kSquares = kWidth * kHeight; }; -} +} // namespace lczero diff --git a/src/neural/blas/fully_connected_layer.h b/src/neural/blas/fully_connected_layer.h index 60b7d12c9b..c49c2e2896 100644 --- a/src/neural/blas/fully_connected_layer.h +++ b/src/neural/blas/fully_connected_layer.h @@ -46,4 +46,4 @@ class FullyConnectedLayer { static constexpr auto kSquares = kWidth * kHeight; }; -} // lczero +} // namespace lczero diff --git a/src/neural/blas/network_blas.cc 
b/src/neural/blas/network_blas.cc index 2fd9f7a3c6..1a1b1e3668 100644 --- a/src/neural/blas/network_blas.cc +++ b/src/neural/blas/network_blas.cc @@ -16,13 +16,13 @@ along with Leela Chess. If not, see . */ -#include "neural/network.h" #include "neural/blas/batchnorm.h" #include "neural/blas/blas.h" #include "neural/blas/convolution1.h" #include "neural/blas/fully_connected_layer.h" #include "neural/blas/winograd_convolution3.h" #include "neural/factory.h" +#include "neural/network.h" #include #include diff --git a/src/neural/blas/winograd_convolution3.h b/src/neural/blas/winograd_convolution3.h index a72b0fb42a..76569c4c90 100644 --- a/src/neural/blas/winograd_convolution3.h +++ b/src/neural/blas/winograd_convolution3.h @@ -83,4 +83,4 @@ class WinogradConvolution3 { std::vector V_; std::vector M_; }; -} +} // namespace lczero diff --git a/src/neural/blas/winograd_transform.ispc b/src/neural/blas/winograd_transform.ispc index 69ffa0c526..75e6b61413 100644 --- a/src/neural/blas/winograd_transform.ispc +++ b/src/neural/blas/winograd_transform.ispc @@ -22,8 +22,8 @@ uniform const size_t kWidth = 8; uniform const size_t kHeight = 8; uniform const size_t kSquares = kWidth * kHeight; -uniform const size_t kWtiles = 4; //(kWidth + 1) / 2; -uniform const size_t kTiles = kWtiles * kWtiles; // 16 +uniform const size_t kWtiles = 4; //(kWidth + 1) / 2; +uniform const size_t kTiles = kWtiles * kWtiles; // 16 uniform const size_t kWinogradAlpha = 4; uniform const size_t kWinogradTile = kWinogradAlpha * kWinogradAlpha; @@ -31,9 +31,7 @@ uniform const size_t kWinogradTile = kWinogradAlpha * kWinogradAlpha; export void winograd_TransformIn_ispc(uniform size_t batch_size, const uniform float input[], uniform size_t channels, - uniform float output[]) -{ - + uniform float output[]) { float x[kWinogradAlpha][kWinogradAlpha]; float T1[kWinogradAlpha][kWinogradAlpha]; @@ -47,18 +45,16 @@ export void winograd_TransformIn_ispc(uniform size_t batch_size, const uniform int yin = 2 * block_y 
- 1; const uniform int xin = 2 * block_x - 1; - foreach(channel = 0 ... channels) { + foreach (channel = 0 ... channels) { size_t V_channel = V_batch + channel; size_t input_channel = input_batch + channel * (kWidth * kHeight); for (uniform int i = 0; i < kWinogradAlpha; i++) { for (uniform int j = 0; j < kWinogradAlpha; j++) { - if ((yin + i) >= 0 && (xin + j) >= 0 && - (yin + i) < kHeight && (xin + j) < kWidth) { - x[i][j] = input[input_channel + - (yin + i) * kWidth + (xin + j)]; - } - else { + if ((yin + i) >= 0 && (xin + j) >= 0 && (yin + i) < kHeight && + (xin + j) < kWidth) { + x[i][j] = input[input_channel + (yin + i) * kWidth + (xin + j)]; + } else { x[i][j] = 0.0f; } } @@ -82,8 +78,8 @@ export void winograd_TransformIn_ispc(uniform size_t batch_size, T1[3][3] = x[1][3] - x[3][3]; const size_t V_incr = channels * kTiles * batch_size; - const size_t wTile_V = V_channel + - channels * (block_y * kWtiles + block_x); + const size_t wTile_V = + V_channel + channels * (block_y * kWtiles + block_x); output[wTile_V + V_incr * 0] = T1[0][0] - T1[0][2]; output[wTile_V + V_incr * 1] = T1[0][1] + T1[0][2]; @@ -107,11 +103,10 @@ export void winograd_TransformIn_ispc(uniform size_t batch_size, } } - export void winograd_TransformOut_ispc(uniform size_t batch_size, - const uniform float input[], uniform size_t channels, - uniform float output[]) -{ + const uniform float input[], + uniform size_t channels, + uniform float output[]) { float m[kWinogradTile]; for (uniform size_t batch_index = 0; batch_index < batch_size; @@ -132,7 +127,7 @@ export void winograd_TransformOut_ispc(uniform size_t batch_size, const uniform int M_incr = channels * kTiles * batch_size; for (uniform int wTile = 0; wTile < kWinogradTile; wTile++) { - m[wTile] = input[M_wtile + wTile*M_incr]; + m[wTile] = input[M_wtile + wTile * M_incr]; } float o11 = m[0 * 4 + 0] + m[0 * 4 + 1] + m[0 * 4 + 2] + @@ -160,4 +155,3 @@ export void winograd_TransformOut_ispc(uniform size_t batch_size, } } } - diff --git 
a/src/neural/loader.cc b/src/neural/loader.cc index 68ee509f2c..25b11d3a0f 100644 --- a/src/neural/loader.cc +++ b/src/neural/loader.cc @@ -42,7 +42,6 @@ namespace lczero { - namespace { const std::uint32_t kWeightMagic = 0x1c0; @@ -125,7 +124,8 @@ FloatVectors LoadFloatsFromPbFile(const std::string& buffer) { net.min_version().patch()); if (net_ver > lc0_ver) - throw Exception("Invalid weight file: lc0 version >= " + min_version + " required."); + throw Exception("Invalid weight file: lc0 version >= " + min_version + + " required."); if (net.format().weights_encoding() != pblczero::Format::LINEAR16) throw Exception("Invalid weight file: wrong encoding."); @@ -258,7 +258,7 @@ std::string DiscoverWeightsFile() { // First byte of the protobuf stream is 0x0d for fixed32, so we ignore it as // our own magic should suffice. - auto magic = reinterpret_cast(buf+1); + auto magic = reinterpret_cast(buf + 1); if (*magic == kWeightMagic) { std::cerr << "Found pb network file: " << candidate.second << std::endl; return candidate.second; diff --git a/src/neural/network_check.cc b/src/neural/network_check.cc index 519d75574a..89be0c5fdf 100644 --- a/src/neural/network_check.cc +++ b/src/neural/network_check.cc @@ -25,8 +25,8 @@ Program grant you additional permission to convey the resulting work. 
*/ -#include "neural/network.h" #include "neural/factory.h" +#include "neural/network.h" #include "utils/histogram.h" #include "utils/random.h" diff --git a/src/neural/network_mux.cc b/src/neural/network_mux.cc index b8a2759e87..3f071b13d3 100644 --- a/src/neural/network_mux.cc +++ b/src/neural/network_mux.cc @@ -208,5 +208,5 @@ void MuxingComputation::ComputeBlocking() { } // namespace REGISTER_NETWORK("multiplexing", MuxingNetwork, -1000) - + } // namespace lczero diff --git a/src/neural/network_random.cc b/src/neural/network_random.cc index 6f45773d91..0a35856d08 100644 --- a/src/neural/network_random.cc +++ b/src/neural/network_random.cc @@ -35,12 +35,13 @@ namespace lczero { class RandomNetworkComputation : public NetworkComputation { public: - RandomNetworkComputation(int delay, int seed) : delay_ms_(delay), seed_(seed) {} + RandomNetworkComputation(int delay, int seed) + : delay_ms_(delay), seed_(seed) {} void AddInput(InputPlanes&& input) override { std::uint64_t hash = seed_; for (const auto& plane : input) { hash = HashCat({hash, plane.mask}); - std::uint64_t value_hash = + std::uint64_t value_hash = *reinterpret_cast(&plane.value); hash = HashCat({hash, value_hash}); } @@ -71,7 +72,7 @@ class RandomNetworkComputation : public NetworkComputation { class RandomNetwork : public Network { public: RandomNetwork(const Weights& /*weights*/, const OptionsDict& options) - : delay_ms_(options.GetOrDefault("delay", 0)), + : delay_ms_(options.GetOrDefault("delay", 0)), seed_(options.GetOrDefault("seed", 0)) {} std::unique_ptr NewComputation() override { return std::make_unique(delay_ms_, seed_); diff --git a/src/neural/network_st_batch.h b/src/neural/network_st_batch.h index 4b672d8c01..0ac95b3eae 100644 --- a/src/neural/network_st_batch.h +++ b/src/neural/network_st_batch.h @@ -69,8 +69,8 @@ class SingleThreadBatchingNetworkComputation : public NetworkComputation { // Adds a sample to the parent batch. 
void AddInput(InputPlanes&& input) override; - // May not actually compute immediately. Instead computes when all computations - // of the network called this. + // May not actually compute immediately. Instead computes when all + // computations of the network called this. void ComputeBlocking() override; // Returns how many times AddInput() was called. int GetBatchSize() const override { return batch_size_; } diff --git a/src/neural/opencl/OpenCLParams.h b/src/neural/opencl/OpenCLParams.h index 03b9ebd04e..9653d5eddb 100644 --- a/src/neural/opencl/OpenCLParams.h +++ b/src/neural/opencl/OpenCLParams.h @@ -26,5 +26,4 @@ struct OpenCLParams { bool force_tune = false; bool tune_exhaustive = false; int tune_batch_size = 1; - }; diff --git a/src/neural/opencl/OpenCLTuner.cc b/src/neural/opencl/OpenCLTuner.cc index 950e30141e..3285c6c6db 100644 --- a/src/neural/opencl/OpenCLTuner.cc +++ b/src/neural/opencl/OpenCLTuner.cc @@ -350,8 +350,8 @@ std::string Tuner::tune_sgemm(const int m, const int n, const int k, } } if (best_time == 0) { - std::cerr << "Failed to find a working configuration." << std::endl << - "Check your OpenCL drivers." << std::endl; + std::cerr << "Failed to find a working configuration." << std::endl + << "Check your OpenCL drivers." << std::endl; throw std::runtime_error("Tuner failed to find working configuration."); } return best_params; diff --git a/src/neural/opencl/network_opencl.cc b/src/neural/opencl/network_opencl.cc index 0fe7dd32b4..3c101275a5 100644 --- a/src/neural/opencl/network_opencl.cc +++ b/src/neural/opencl/network_opencl.cc @@ -16,12 +16,12 @@ along with Leela Chess. If not, see . 
*/ -#include "neural/network.h" #include "neural/blas/batchnorm.h" #include "neural/blas/blas.h" #include "neural/blas/fully_connected_layer.h" #include "neural/blas/winograd_convolution3.h" #include "neural/factory.h" +#include "neural/network.h" #include "neural/opencl/OpenCL.h" #include "neural/opencl/OpenCLParams.h" @@ -105,9 +105,9 @@ class OpenCLComputation : public NetworkComputation { // Now get the score. auto winrate = FullyConnectedLayer::Forward0D( - num_value_channels, weights_.ip2_val_w.data(), - &output_val[j * num_value_channels]) + - weights_.ip2_val_b[0]; + num_value_channels, weights_.ip2_val_w.data(), + &output_val[j * num_value_channels]) + + weights_.ip2_val_b[0]; q_values_.emplace_back(std::tanh(winrate)); } @@ -162,7 +162,7 @@ class OpenCLNetwork : public Network { params_.tune_only = options.GetOrDefault("tune_only", false); params_.tune_exhaustive = options.GetOrDefault("tune_exhaustive", false); - + // By default batch size is 1, as many old cards may not support more. auto max_batch_size_ = static_cast(options.GetOrDefault("batch_size", 1)); @@ -178,7 +178,6 @@ class OpenCLNetwork : public Network { // tune. 
params_.tune_batch_size = options.GetOrDefault("tune_batch_size", max_batch_size_); - const auto inputChannels = static_cast(kInputPlanes); const auto channels = weights.input.biases.size(); @@ -196,7 +195,7 @@ class OpenCLNetwork : public Network { // num_policy_input_planes = 32 // num_value_channels = 128 // num_output_policy = 1858 - + static constexpr auto kWinogradAlpha = 4; opencl_.initialize(channels, params_); diff --git a/src/selfplay/game.cc b/src/selfplay/game.cc index 2ddf424d00..feabf906f1 100644 --- a/src/selfplay/game.cc +++ b/src/selfplay/game.cc @@ -80,7 +80,7 @@ void SelfPlayGame::Play(int white_threads, int black_threads, *tree_[idx], options_[idx].network, options_[idx].best_move_callback, options_[idx].info_callback, options_[idx].search_limits, *options_[idx].uci_options, options_[idx].cache, nullptr); - // TODO: add Syzygy option for selfplay. + // TODO: add Syzygy option for selfplay. } // Do search. diff --git a/src/selfplay/game.h b/src/selfplay/game.h index 8e7c9d86d6..ae989b5d89 100644 --- a/src/selfplay/game.h +++ b/src/selfplay/game.h @@ -64,7 +64,7 @@ class SelfPlayGame { static void PopulateUciParams(OptionsParser* options); // Starts the game and blocks until the game is finished. - void Play(int white_threads, int black_threads, bool enable_resign=true); + void Play(int white_threads, int black_threads, bool enable_resign = true); // Aborts the game currently played, doesn't matter if it's synchronous or // not. void Abort(); diff --git a/src/selfplay/loop.cc b/src/selfplay/loop.cc index 74e785b03b..87d2b08e63 100644 --- a/src/selfplay/loop.cc +++ b/src/selfplay/loop.cc @@ -91,7 +91,7 @@ void SelfPlayLoop::SendGameInfo(const GameInfo& info) { // and move list potentially contain spaces. 
if (info.min_false_positive_threshold) { std::string resign_res = "resign_report"; - resign_res += + resign_res += " fp_threshold " + std::to_string(*info.min_false_positive_threshold); responses.push_back(resign_res); } diff --git a/src/selfplay/tournament.cc b/src/selfplay/tournament.cc index 279ed673fc..4860ae47cf 100644 --- a/src/selfplay/tournament.cc +++ b/src/selfplay/tournament.cc @@ -50,7 +50,7 @@ const char* kNnBackendStr = "NN backend to use"; const char* kNnBackendOptionsStr = "NN backend parameters"; const char* kVerboseThinkingStr = "Show verbose thinking messages"; const char* kResignPlaythroughStr = - "The percentage of games which ignore resign"; + "The percentage of games which ignore resign"; // Value for network autodiscover. const char* kAutoDiscover = ""; @@ -82,11 +82,12 @@ void SelfPlayTournament::PopulateOptions(OptionsParser* options) { Search::PopulateUciParams(options); SelfPlayGame::PopulateUciParams(options); auto defaults = options->GetMutableDefaultsOptions(); - defaults->Set(Search::kMiniBatchSizeStr, 32); // Minibatch size - defaults->Set(Search::kAggressiveTimePruningStr, 0.0f); // No smart pruning - defaults->Set(Search::kTemperatureStr, 1.0f); // Temperature = 1.0 + defaults->Set(Search::kMiniBatchSizeStr, 32); // Minibatch size + defaults->Set(Search::kAggressiveTimePruningStr, + 0.0f); // No smart pruning + defaults->Set(Search::kTemperatureStr, 1.0f); // Temperature = 1.0 defaults->Set(Search::kNoiseStr, true); // Dirichlet noise - defaults->Set(Search::kFpuReductionStr, 0.0f); // No FPU reduction. + defaults->Set(Search::kFpuReductionStr, 0.0f); // No FPU reduction. 
} SelfPlayTournament::SelfPlayTournament(const OptionsDict& options, diff --git a/src/syzygy/syzygy.cc b/src/syzygy/syzygy.cc index b6011f9fbc..02eda80aeb 100644 --- a/src/syzygy/syzygy.cc +++ b/src/syzygy/syzygy.cc @@ -1314,8 +1314,8 @@ class SyzygyTablebaseImpl { Key key = calc_key_from_position(pos); // Test for KvK - if (type == WDL && pos.ours() == pos.our_king() - && pos.theirs() == pos.their_king()) { + if (type == WDL && pos.ours() == pos.our_king() && + pos.theirs() == pos.their_king()) { return 0; } diff --git a/src/utils/configfile.cc b/src/utils/configfile.cc index 88bb054a01..432eb64757 100644 --- a/src/utils/configfile.cc +++ b/src/utils/configfile.cc @@ -35,15 +35,16 @@ #include "utils/string.h" namespace lczero { - namespace { - const char* kConfigFileStr = "Configuration file path"; - const char* kDefaultConfigFile = "lc0.config"; - } +namespace { +const char* kConfigFileStr = "Configuration file path"; +const char* kDefaultConfigFile = "lc0.config"; +} // namespace std::vector ConfigFile::arguments_; void ConfigFile::PopulateOptions(OptionsParser* options) { - options->Add(kConfigFileStr, "config", 'c') = kDefaultConfigFile; + options->Add(kConfigFileStr, "config", 'c') = + kDefaultConfigFile; } bool ConfigFile::Init(OptionsParser* options) { @@ -51,7 +52,7 @@ bool ConfigFile::Init(OptionsParser* options) { // Process flags to get the config file parameter. if (!options->ProcessAllFlags()) return false; - + // Calculate the relative path of the config file. OptionsDict dict = options->GetOptionsDict(); std::string filename = dict.Get(kConfigFileStr); @@ -81,7 +82,7 @@ bool ConfigFile::ParseFile(const std::string filename, OptionsParser* options) { std::cerr << "Found configuration file: " << filename << std::endl; - for(std::string line; getline( input, line );) { + for (std::string line; getline(input, line);) { // Remove all leading and trailing whitespace. line = Trim(line); // Ignore comments. 
@@ -89,8 +90,10 @@ bool ConfigFile::ParseFile(const std::string filename, OptionsParser* options) { // Skip blank lines. if (line.length() == 0) continue; // Allow long form arugments that omit '--'. If omitted, add here. - if (line.substr(0, 1) != "-" && line.substr(0, 2) != "--") line = "--" + line; - // Fail now if the argument does begin with '--'. + if (line.substr(0, 1) != "-" && line.substr(0, 2) != "--") { + line = "--" + line; + } + // Fail now if the argument does not begin with '--'. if (line.substr(0, 2) != "--") { std::cerr << "Only '--' arguments are supported in the " << "configuration file: '" << line << "'." << std::endl; diff --git a/src/utils/filesystem.h b/src/utils/filesystem.h index 1a2936b04c..f0708ec0c0 100644 --- a/src/utils/filesystem.h +++ b/src/utils/filesystem.h @@ -27,10 +27,9 @@ #pragma once +#include #include #include -#include - namespace lczero { diff --git a/src/utils/filesystem.posix.cc b/src/utils/filesystem.posix.cc index e85227eb07..f100766f6c 100644 --- a/src/utils/filesystem.posix.cc +++ b/src/utils/filesystem.posix.cc @@ -45,20 +45,20 @@ std::vector GetFileList(const std::string& directory) { DIR* dir = opendir(directory.c_str()); if (!dir) return result; while (auto* entry = readdir(dir)) { - bool exists=false; + bool exists = false; switch (entry->d_type) { case DT_REG: - exists=true; + exists = true; break; case DT_LNK: // check that the soft link actually points to a regular file. 
const std::string filename = directory + "/" + entry->d_name; struct stat s; - exists=stat(filename.c_str(), &s)==0 && (s.st_mode&S_IFMT)==S_IFREG; + exists = + stat(filename.c_str(), &s) == 0 && (s.st_mode & S_IFMT) == S_IFREG; break; } - if (exists) - result.push_back(entry->d_name); + if (exists) result.push_back(entry->d_name); } closedir(dir); return result; diff --git a/src/utils/filesystem.win32.cc b/src/utils/filesystem.win32.cc index 1a96911a9d..0c6be35982 100644 --- a/src/utils/filesystem.win32.cc +++ b/src/utils/filesystem.win32.cc @@ -67,8 +67,8 @@ time_t GetFileTime(const std::string& filename) { if (!GetFileAttributesExA(filename.c_str(), GetFileExInfoStandard, &s)) { throw Exception("Cannot stat file: " + filename); } - return (static_cast(s.ftLastWriteTime.dwHighDateTime) - << 32) + s.ftLastWriteTime.dwLowDateTime; + return (static_cast(s.ftLastWriteTime.dwHighDateTime) << 32) + + s.ftLastWriteTime.dwLowDateTime; } } // namespace lczero diff --git a/src/utils/histogram.cc b/src/utils/histogram.cc index 839b8472f9..1d2ef9a153 100644 --- a/src/utils/histogram.cc +++ b/src/utils/histogram.cc @@ -25,13 +25,13 @@ Program grant you additional permission to convey the resulting work. */ +#include "utils/histogram.h" +#include +#include #include #include #include #include -#include -#include -#include "utils/histogram.h" namespace lczero { @@ -48,7 +48,7 @@ std::string Format(const std::string& format, double value) { int len = snprintf(buffer, kMaxBufferSize, format.c_str(), value); return std::string(buffer, buffer + len); } -} // namespace +} // namespace Histogram::Histogram() : Histogram(kDefaultMinExp, kDefaultMaxExp, kDefaultMinorScales) {} @@ -128,7 +128,8 @@ int Histogram::GetIndex(double val) const { // 2: -15 : -15.1 ... -14.9 2 ... 3 // 1: -15.3 ... -15.1 // 0: -15.5 ... -15.3 0 ... 
1 - int index = static_cast(std::floor(2.5 + minor_scales_ * (log10 - min_exp_))); + int index = + static_cast(std::floor(2.5 + minor_scales_ * (log10 - min_exp_))); if (index < 0) return 0; if (index >= total_scales_) return total_scales_ + 3; return index + 2; diff --git a/src/utils/optionsparser.cc b/src/utils/optionsparser.cc index 71ea58dd91..a0bf24ae0f 100644 --- a/src/utils/optionsparser.cc +++ b/src/utils/optionsparser.cc @@ -92,7 +92,7 @@ const OptionsDict& OptionsParser::GetOptionsDict(const std::string& context) { bool OptionsParser::ProcessAllFlags() { return ProcessFlags(ConfigFile::Arguments()) && ProcessFlags(CommandLine::Arguments()); -} +} bool OptionsParser::ProcessFlags(const std::vector& args) { std::string context; diff --git a/src/utils/string.cc b/src/utils/string.cc index fb2f73e59a..a0ecb34403 100644 --- a/src/utils/string.cc +++ b/src/utils/string.cc @@ -73,15 +73,15 @@ std::vector ParseIntList(const std::string& str) { } std::string LeftTrim(std::string str) { - auto it = std::find_if(str.begin(), str.end(), [](int ch) - {return !std::isspace(ch);}); + auto it = std::find_if(str.begin(), str.end(), + [](int ch) { return !std::isspace(ch); }); str.erase(str.begin(), it); return str; } std::string RightTrim(std::string str) { - auto it = std::find_if(str.rbegin(), str.rend(), [](int ch) - {return !std::isspace(ch);}); + auto it = std::find_if(str.rbegin(), str.rend(), + [](int ch) { return !std::isspace(ch); }); str.erase(it.base(), str.end()); return str; } diff --git a/src/version.cc b/src/version.cc index e0cc2602bc..9c4e2d0138 100644 --- a/src/version.cc +++ b/src/version.cc @@ -30,7 +30,8 @@ std::uint32_t GetVersionInt(int major, int minor, int patch) { return major * 1000000 + minor * 1000 + patch; } -std::string GetVersionStr(int major, int minor, int patch, const std::string& postfix) { +std::string GetVersionStr(int major, int minor, int patch, + const std::string& postfix) { auto v = std::to_string(major) + "." 
+ std::to_string(minor) + "." + std::to_string(patch); if (postfix.empty()) return v;